From c6d181f4611a6c23c0f263d81a6b58d78ba73590 Mon Sep 17 00:00:00 2001 From: Hakeem Kazeem Date: Wed, 17 Jan 2024 19:35:33 +0100 Subject: [PATCH 01/46] scatter nd operator --- nodegen/node/scatter_nd.py | 328 +++ src/operators/tensor/core.cairo | 89 + .../tensor/implementations/tensor_bool.cairo | 9 + .../implementations/tensor_complex64.cairo | 9 + .../implementations/tensor_fp16x16.cairo | 9 + .../implementations/tensor_fp16x16wide.cairo | 9 + .../implementations/tensor_fp32x32.cairo | 9 + .../implementations/tensor_fp64x64.cairo | 9 + .../implementations/tensor_fp8x23.cairo | 9 + .../implementations/tensor_fp8x23wide.cairo | 9 + .../tensor/implementations/tensor_i32.cairo | 9 + .../tensor/implementations/tensor_i8.cairo | 9 + .../tensor/implementations/tensor_u32.cairo | 9 + src/operators/tensor/math.cairo | 1 + src/operators/tensor/math/scatter_nd.cairo | 450 ++++ tests/nodes.cairo | 1891 +++++++++-------- tests/nodes/gather_nd_u32_default.cairo | 4 +- .../nodes/gather_nd_u32_default/input_1.cairo | 8 +- .../nodes/gather_nd_u32_default/input_2.cairo | 14 + .../gather_nd_u32_default/output_0.cairo | 96 +- tests/nodes/scatter_nd_fp16x16_3d_add.cairo | 26 + .../scatter_nd_fp16x16_3d_add/input_0.cairo | 78 + .../scatter_nd_fp16x16_3d_add/input_1.cairo | 46 + .../scatter_nd_fp16x16_3d_add/input_2.cairo | 14 + .../scatter_nd_fp16x16_3d_add/output_0.cairo | 78 + .../nodes/scatter_nd_fp16x16_3d_default.cairo | 26 + .../input_0.cairo | 78 + .../input_1.cairo | 46 + .../input_2.cairo | 14 + .../output_0.cairo | 78 + tests/nodes/scatter_nd_fp16x16_3d_max.cairo | 26 + .../scatter_nd_fp16x16_3d_max/input_0.cairo | 78 + .../scatter_nd_fp16x16_3d_max/input_1.cairo | 46 + .../scatter_nd_fp16x16_3d_max/input_2.cairo | 14 + .../scatter_nd_fp16x16_3d_max/output_0.cairo | 78 + tests/nodes/scatter_nd_fp16x16_3d_min.cairo | 26 + .../scatter_nd_fp16x16_3d_min/input_0.cairo | 78 + .../scatter_nd_fp16x16_3d_min/input_1.cairo | 46 + .../scatter_nd_fp16x16_3d_min/input_2.cairo | 14 + 
.../scatter_nd_fp16x16_3d_min/output_0.cairo | 78 + tests/nodes/scatter_nd_fp16x16_3d_mul.cairo | 26 + .../scatter_nd_fp16x16_3d_mul/input_0.cairo | 78 + .../scatter_nd_fp16x16_3d_mul/input_1.cairo | 46 + .../scatter_nd_fp16x16_3d_mul/input_2.cairo | 14 + .../scatter_nd_fp16x16_3d_mul/output_0.cairo | 78 + tests/nodes/scatter_nd_fp8x23_3d_add.cairo | 26 + .../scatter_nd_fp8x23_3d_add/input_0.cairo | 78 + .../scatter_nd_fp8x23_3d_add/input_1.cairo | 46 + .../scatter_nd_fp8x23_3d_add/input_2.cairo | 14 + .../scatter_nd_fp8x23_3d_add/output_0.cairo | 78 + .../nodes/scatter_nd_fp8x23_3d_default.cairo | 26 + .../input_0.cairo | 78 + .../input_1.cairo | 46 + .../input_2.cairo | 14 + .../output_0.cairo | 78 + tests/nodes/scatter_nd_fp8x23_3d_max.cairo | 26 + .../scatter_nd_fp8x23_3d_max/input_0.cairo | 78 + .../scatter_nd_fp8x23_3d_max/input_1.cairo | 46 + .../scatter_nd_fp8x23_3d_max/input_2.cairo | 14 + .../scatter_nd_fp8x23_3d_max/output_0.cairo | 78 + tests/nodes/scatter_nd_fp8x23_3d_min.cairo | 26 + .../scatter_nd_fp8x23_3d_min/input_0.cairo | 78 + .../scatter_nd_fp8x23_3d_min/input_1.cairo | 46 + .../scatter_nd_fp8x23_3d_min/input_2.cairo | 14 + .../scatter_nd_fp8x23_3d_min/output_0.cairo | 78 + tests/nodes/scatter_nd_fp8x23_3d_mul.cairo | 26 + .../scatter_nd_fp8x23_3d_mul/input_0.cairo | 78 + .../scatter_nd_fp8x23_3d_mul/input_1.cairo | 46 + .../scatter_nd_fp8x23_3d_mul/input_2.cairo | 14 + .../scatter_nd_fp8x23_3d_mul/output_0.cairo | 78 + tests/nodes/scatter_nd_u32_add.cairo | 24 + tests/nodes/scatter_nd_u32_add/input_0.cairo | 24 + tests/nodes/scatter_nd_u32_add/input_1.cairo | 18 + tests/nodes/scatter_nd_u32_add/input_2.cairo | 14 + tests/nodes/scatter_nd_u32_add/output_0.cairo | 24 + tests/nodes/scatter_nd_u32_default.cairo | 24 + .../scatter_nd_u32_default/input_0.cairo | 24 + .../scatter_nd_u32_default/input_1.cairo | 18 + .../scatter_nd_u32_default/input_2.cairo | 14 + .../scatter_nd_u32_default/output_0.cairo | 24 + tests/nodes/scatter_nd_u32_max.cairo | 
24 + tests/nodes/scatter_nd_u32_max/input_0.cairo | 24 + tests/nodes/scatter_nd_u32_max/input_1.cairo | 18 + tests/nodes/scatter_nd_u32_max/input_2.cairo | 14 + tests/nodes/scatter_nd_u32_max/output_0.cairo | 24 + tests/nodes/scatter_nd_u32_min.cairo | 24 + tests/nodes/scatter_nd_u32_min/input_0.cairo | 24 + tests/nodes/scatter_nd_u32_min/input_1.cairo | 18 + tests/nodes/scatter_nd_u32_min/input_2.cairo | 14 + tests/nodes/scatter_nd_u32_min/output_0.cairo | 24 + tests/nodes/scatter_nd_u32_mul.cairo | 24 + tests/nodes/scatter_nd_u32_mul/input_0.cairo | 24 + tests/nodes/scatter_nd_u32_mul/input_1.cairo | 18 + tests/nodes/scatter_nd_u32_mul/input_2.cairo | 14 + tests/nodes/scatter_nd_u32_mul/output_0.cairo | 24 + 95 files changed, 4929 insertions(+), 991 deletions(-) create mode 100644 nodegen/node/scatter_nd.py create mode 100644 src/operators/tensor/math/scatter_nd.cairo create mode 100644 tests/nodes/gather_nd_u32_default/input_2.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_add.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_add/input_0.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_add/input_1.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_add/input_2.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_add/output_0.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_default.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_default/input_0.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_default/input_1.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_default/input_2.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_default/output_0.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_max.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_max/input_0.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_max/input_1.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_max/input_2.cairo create mode 100644 
tests/nodes/scatter_nd_fp16x16_3d_max/output_0.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_min.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_min/input_0.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_min/input_1.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_min/input_2.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_min/output_0.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_mul.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_mul/input_0.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_mul/input_1.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_mul/input_2.cairo create mode 100644 tests/nodes/scatter_nd_fp16x16_3d_mul/output_0.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_add.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_add/input_0.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_add/input_1.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_add/input_2.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_add/output_0.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_default.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_default/input_0.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_default/input_1.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_default/input_2.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_default/output_0.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_max.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_max/input_0.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_max/input_1.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_max/input_2.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_max/output_0.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_min.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_min/input_0.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_min/input_1.cairo 
create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_min/input_2.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_min/output_0.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_mul.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_mul/input_0.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_mul/input_1.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_mul/input_2.cairo create mode 100644 tests/nodes/scatter_nd_fp8x23_3d_mul/output_0.cairo create mode 100644 tests/nodes/scatter_nd_u32_add.cairo create mode 100644 tests/nodes/scatter_nd_u32_add/input_0.cairo create mode 100644 tests/nodes/scatter_nd_u32_add/input_1.cairo create mode 100644 tests/nodes/scatter_nd_u32_add/input_2.cairo create mode 100644 tests/nodes/scatter_nd_u32_add/output_0.cairo create mode 100644 tests/nodes/scatter_nd_u32_default.cairo create mode 100644 tests/nodes/scatter_nd_u32_default/input_0.cairo create mode 100644 tests/nodes/scatter_nd_u32_default/input_1.cairo create mode 100644 tests/nodes/scatter_nd_u32_default/input_2.cairo create mode 100644 tests/nodes/scatter_nd_u32_default/output_0.cairo create mode 100644 tests/nodes/scatter_nd_u32_max.cairo create mode 100644 tests/nodes/scatter_nd_u32_max/input_0.cairo create mode 100644 tests/nodes/scatter_nd_u32_max/input_1.cairo create mode 100644 tests/nodes/scatter_nd_u32_max/input_2.cairo create mode 100644 tests/nodes/scatter_nd_u32_max/output_0.cairo create mode 100644 tests/nodes/scatter_nd_u32_min.cairo create mode 100644 tests/nodes/scatter_nd_u32_min/input_0.cairo create mode 100644 tests/nodes/scatter_nd_u32_min/input_1.cairo create mode 100644 tests/nodes/scatter_nd_u32_min/input_2.cairo create mode 100644 tests/nodes/scatter_nd_u32_min/output_0.cairo create mode 100644 tests/nodes/scatter_nd_u32_mul.cairo create mode 100644 tests/nodes/scatter_nd_u32_mul/input_0.cairo create mode 100644 tests/nodes/scatter_nd_u32_mul/input_1.cairo create mode 100644 
tests/nodes/scatter_nd_u32_mul/input_2.cairo create mode 100644 tests/nodes/scatter_nd_u32_mul/output_0.cairo diff --git a/nodegen/node/scatter_nd.py b/nodegen/node/scatter_nd.py new file mode 100644 index 000000000..84d543d40 --- /dev/null +++ b/nodegen/node/scatter_nd.py @@ -0,0 +1,328 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl + +def scatter_nd_impl(data, indices, updates, reduction="none"): # type: ignore + # Check tensor shapes + assert indices.shape[-1] <= len(data.shape) + assert updates.shape == indices.shape[:-1] + data.shape[indices.shape[-1] :] + + # Compute output + output = np.copy(data) + for i in np.ndindex(indices.shape[:-1]): + # NOTE: The order of iteration in this loop is not specified. + if reduction == "add": + output[tuple(indices[i])] += updates[i] + elif reduction == "mul": + output[tuple(indices[i])] *= updates[i] + elif reduction == "max": + output[tuple(indices[i])] = np.maximum(output[indices[i]], updates[i]) + elif reduction == "min": + output[tuple(indices[i])] = np.minimum(output[indices[i]], updates[i]) + else: + output[tuple(indices[i])] = updates[i] + return output + +data = np.array( + [ + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], + ], + dtype=np.float32, + ) +indices = np.array([[0], [2]], dtype=np.int64) +updates = np.array( + [ + [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], + ], + dtype=np.float32, + ) + +class Scatter_nd(RunAll): + + @staticmethod + def scatter_nd_fp16x16(): + def scatter_nd_3D(): + def default(): + x1 = data.astype(np.int64) + x2 = indices.astype(np.int64) + x3 = updates.astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='none') + + x1 = 
Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "scatter_nd_fp16x16_3d_default" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(()))", + name= name) + + def add(): + x1 = data.astype(np.int64) + x2 = indices.astype(np.int64) + x3 = updates.astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='add') + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "scatter_nd_fp16x16_3d_add" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add'))", + name= name) + + def mul(): + x1 = data.astype(np.int64) + x2 = indices.astype(np.int64) + x3 = updates.astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='mul') + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "scatter_nd_fp16x16_3d_mul" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul'))", + name= name) + + def max(): + x1 = data.astype(np.int64) + x2 = indices.astype(np.int64) + x3 = updates.astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='max') + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), 
FixedImpl.FP16x16)) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "scatter_nd_fp16x16_3d_max" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max'))", + name= name) + + def min(): + x1 = data.astype(np.int64) + x2 = indices.astype(np.int64) + x3 = updates.astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='min') + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "scatter_nd_fp16x16_3d_min" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min'))", + name= name) + + default() + add() + mul() + max() + min() + scatter_nd_3D() + + + @staticmethod + def scatter_nd_fp8x23(): + def scatter_nd_3D(): + def default(): + x1 = data.astype(np.int64) + x2 = indices.astype(np.int64) + x3 = updates.astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='none') + + x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "scatter_nd_fp8x23_3d_default" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(()))", + name= name) + + def add(): + x1 = data.astype(np.int64) + x2 = indices.astype(np.int64) + x3 = updates.astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, 
reduction='add') + + x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "scatter_nd_fp8x23_3d_add" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add'))", + name= name) + + def mul(): + x1 = data.astype(np.int64) + x2 = indices.astype(np.int64) + x3 = updates.astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='mul') + + x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "scatter_nd_fp8x23_3d_mul" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul'))", + name= name) + + def max(): + x1 = data.astype(np.int64) + x2 = indices.astype(np.int64) + x3 = updates.astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='max') + + x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "scatter_nd_fp8x23_3d_max" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max'))", + name= name) + + def min(): + x1 = data.astype(np.int64) + x2 = indices.astype(np.int64) + x3 = updates.astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='min') + + x1 = Tensor(Dtype.FP8x23, x1.shape, 
to_fp(x1.flatten(), FixedImpl.FP8x23)) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "scatter_nd_fp8x23_3d_min" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min'))", + name= name) + + default() + add() + mul() + max() + min() + scatter_nd_3D() + + @staticmethod + def scatter_nd_u32(): + def scatter_nd_3D(): + def default(): + x1 = np.arange(0,12).reshape((4,3)).astype(np.int32) + x2 = np.array([[0],[1]]).astype(np.uint32) + x3 = np.random.randint(low = 0,high=100, size=(2,3)).astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='none') + + x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "scatter_nd_u32_default" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(()))", + name= name) + + def add(): + x1 = np.arange(0,12).reshape((4,3)).astype(np.int32) + x2 = np.array([[1],[0]]).astype(np.uint32) + x3 = np.random.randint(low = 0,high=100, size=(2,3)).astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='add') + + x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "scatter_nd_u32_add" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add'))", + name= name) + + def mul(): + x1 = np.arange(0,12).reshape((4,3)).astype(np.int32) + x2 =np.array([[0],[1]]).astype(np.uint32) + x3 = np.random.randint(low = 0,high=100, 
size=(2,3)).astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='mul') + + x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "scatter_nd_u32_mul" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul'))", + name= name) + + def max(): + x1 = np.arange(0,12).reshape((4,3)).astype(np.int32) + x2 =np.array([[0],[1]]).astype(np.uint32) + x3 = np.random.randint(low = 0,high=100, size=(2,3)).astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='max') + + x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "scatter_nd_u32_max" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max'))", + name= name) + + def min(): + x1 = np.arange(0,12).reshape((4,3)).astype(np.int32) + x2 = np.array([[0],[1]]).astype(np.uint32) + x3 = np.random.randint(low = 0,high=100, size=(2,3)).astype(np.uint32) + y = scatter_nd_impl(x1, x2, x3, reduction='min') + + x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) + x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) + x3 = Tensor(Dtype.U32, x3.shape, x3.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "scatter_nd_u32_min" + make_test( + inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min'))", + name= name) + + default() + add() + mul() + max() + min() + scatter_nd_3D() + + + \ No newline at end of file diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 796224fb1..4279870f5 100644 --- a/src/operators/tensor/core.cairo +++ 
b/src/operators/tensor/core.cairo @@ -118,6 +118,8 @@ impl TensorSerde, impl TDrop: Drop> of Serde { /// # tensor.new /// @@ -4966,6 +4968,8 @@ trait TensorTrait { /// ## Returns /// /// A new `Tensor` . + /// + /// ## Example /// fn gather_nd_example() -> Tensor { /// let tensor = TensorTrait::::new( /// shape: array![2, 2].span(), @@ -5009,6 +5013,8 @@ trait TensorTrait { /// ## Returns /// /// A new `Tensor` . + /// + /// ## Example /// fn compress_example() -> Tensor { /// let tensor = TensorTrait::::new( /// shape: array![3, 2].span(), @@ -5178,6 +5184,89 @@ trait TensorTrait { fn split( self: @Tensor, axis: usize, num_outputs: Option, spl: Option> ) -> Array>; + /// # tensor.scatter_nd + /// + /// ```rust + /// fn scatter_nd(self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option) -> Tensor; + /// ``` + /// + /// Produces a copy of input data, and updates value to values specified by updates at specific index positions specified by indices. + /// + /// ## Args + /// + /// * `self`(`@Tensor`) - The input tensor. + /// * `updates`(`Tensor`) - The updates tensor. + /// * `indices`(`Tensor`) - Tensor of indices. + /// * `reduction`(`Option`) - Reduction operation. Default: reduction='none'. + /// + /// ## Panics + /// + /// * Panics if index values are not within bounds [-s, s-1] along axis of size s. + /// * Panics if indices last axis is greater than data rank. + /// + /// ## Returns + /// + /// A new `Tensor` . 
+ /// + /// ## Example + /// + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// + /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; + /// + /// fn scatter_nd_example() -> Tensor { + /// let tensor = TensorTrait::::new( + /// shape: array![4, 4, 4].span(), + /// data: array![1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, + /// 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, + /// 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8].span() + /// ); + /// + /// let updates = TensorTrait::::new( + /// shape: array![2, 4, 4].span(), + /// data: array![5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 1, 1, 1, 1, 2, 2, + /// 2, 2, 3, 3, 3, 3, 4, 4, 4, 4].span(), + /// ); + /// + /// let indices = TensorTrait::::new( + /// shape: array![2, 1].span(), + /// data: array![0, 2].span(), + /// ); + /// + /// return tensor.scatter_nd( + /// updates: updates + /// indices: indices, + /// reduction: Option::Some('add'), + /// ); + /// } + /// >>> [[[ 6., 7., 8., 9.], + /// [11., 12., 13., 14.], + /// [15., 14., 13., 12.], + /// [12., 11., 10., 9.]], + /// + /// [[ 1., 2., 3., 4.], + /// [ 5., 6., 7., 8.], + /// [ 8., 7., 6., 5.], + /// [ 4., 3., 2., 1.]], + /// + /// [[ 9., 8., 7., 6.], + /// [ 6., 5., 4., 3.], + /// [ 4., 5., 6., 7.], + /// [ 9., 10., 11., 12.]], + /// + /// [[ 8., 7., 6., 5.], + /// [ 4., 3., 2., 1.], + /// [ 1., 2., 3., 4.], + /// [ 5., 6., 7., 8.]]] + /// ``` + /// + fn scatter_nd( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: Option + ) -> Tensor; } /// Cf: TensorTrait::new docstring diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index eaeda00d5..9564e46ef 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -484,6 +484,15 @@ impl BoolTensor of TensorTrait { ) -> Array> { panic(array!['not supported!']) } + 
+ fn scatter_nd( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: Option + ) -> Tensor { + panic(array!['not supported!']) + } } /// Implements partial equal for two `Tensor` using the `PartialEq` trait. diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 52d916b14..44faf7645 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -515,6 +515,15 @@ impl Complex64Tensor of TensorTrait { ) -> Tensor { panic(array!['not supported!']) } + + fn scatter_nd( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: Option + ) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index cbe7fdfcc..47e92b8d0 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -560,6 +560,15 @@ impl FP16x16Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn scatter_nd( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: Option + ) -> Tensor { + math::scatter_nd::scatter_nd(self, updates, indices, reduction) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 584abf5c1..a359daf99 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -512,6 +512,15 @@ impl FP16x16WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn scatter_nd( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: Option + ) -> Tensor { + math::scatter_nd::scatter_nd(self, updates, indices, reduction) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index e7b517eb4..60ece9d56 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -561,6 +561,15 @@ impl FP32x32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn scatter_nd( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: Option + ) -> Tensor { + math::scatter_nd::scatter_nd(self, updates, indices, reduction) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index a8121fc31..508440e5d 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -561,6 +561,15 @@ impl FP64x64Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn scatter_nd( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: Option + ) -> Tensor { + math::scatter_nd::scatter_nd(self, updates, indices, reduction) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 9f3e78573..7d0ebbb0a 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -559,6 +559,15 @@ impl FP8x23Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn scatter_nd( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: Option + ) -> Tensor { + math::scatter_nd::scatter_nd(self, updates, indices, reduction) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 6a7ca5ba4..5daae7e8b 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -498,6 +498,15 @@ impl FP8x23WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn scatter_nd( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: Option + ) -> Tensor { + math::scatter_nd::scatter_nd(self, updates, indices, reduction) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 2e2ae267d..4f9567ac6 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -540,6 +540,15 @@ impl I32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn scatter_nd( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: Option + ) -> Tensor { + math::scatter_nd::scatter_nd(self, updates, indices, reduction) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 769368166..bd954e2f1 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -538,6 +538,15 @@ impl I8Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn scatter_nd( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: Option + ) -> Tensor { + math::scatter_nd::scatter_nd(self, updates, indices, reduction) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index d8e02d490..e6021afd7 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -482,6 +482,15 @@ impl U32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn scatter_nd( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: Option + ) -> Tensor { + math::scatter_nd::scatter_nd(self, updates, indices, reduction) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/math.cairo b/src/operators/tensor/math.cairo index 13c2ca49a..72cf1ec86 100644 --- a/src/operators/tensor/math.cairo +++ b/src/operators/tensor/math.cairo @@ -61,3 +61,4 @@ mod erf; mod layer_normalization; mod resize; mod compress; +mod scatter_nd; \ No newline at end of file diff --git a/src/operators/tensor/math/scatter_nd.cairo b/src/operators/tensor/math/scatter_nd.cairo new file mode 100644 index 000000000..f406c9ac7 --- /dev/null +++ b/src/operators/tensor/math/scatter_nd.cairo @@ -0,0 +1,450 @@ +use alexandria_data_structures::array_ext::SpanTraitExt; +use core::array::ArrayTrait; +use core::array::SpanTrait; + +use core::traits::Into; +use core::debug::PrintTrait; +use core::traits::TryInto; +use core::serde::Serde; +use core::traits::Destruct; +use core::option::OptionTrait; + +use orion::numbers::NumberTrait; +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; +use core::dict::Felt252DictTrait; +use core::nullable::{nullable_from_box, match_nullable, FromNullableResult}; +/// Cf: TensorTrait::scatter_nd docstring +fn scatter_nd< + T, + impl TTensorTrait: TensorTrait, + impl TCopy: Copy, + impl TDrop: Drop, + impl TAdd: Add, + impl TMul: Mul, + impl TPartialOrd: PartialOrd, + impl TPartialEq: PartialEq, +>( + self: @Tensor, + updates: Tensor, + indices: Tensor, + reduction: 
Option +) -> Tensor { + + let reduction = match reduction { + Option::Some(val) => val, + Option::None(_) => 'none' + }; + + let data_rank = (*self.shape).len(); + let indices_rank = (indices.shape).len(); + let updates_rank = (updates.shape).len(); + let mut data_shape = *self.shape; + let mut indices_shape = indices.shape; + let updates_shape = updates.shape; + + let indices_last_axis = indices_shape.pop_back().unwrap(); + assert(*indices_last_axis <= data_rank, 'must be <= data rank'); + + let ind_max = indices.data.max().unwrap(); + if (data_rank > 1){ + assert(ind_max < data_rank, 'index is out of bound'); + } + + let mut batch_dims_shape = ArrayTrait::new(); + let mut ind: usize = 0; + + loop { + match indices_shape.pop_front() { + Option::Some(val) => { batch_dims_shape.append(*val);}, + Option::None(_) => { break; } + }; + }; + + let mut data_shape_clone = data_shape.clone(); + loop { + match data_shape_clone.pop_front() { + Option::Some(val) => { + if (ind >= *indices_last_axis) { + batch_dims_shape.append(*val); + } + }, + Option::None(_) => { break; } + }; + }; + + let mut ind: usize = 0; + loop { + match batch_dims_shape.pop_front() { + Option::Some(val) => { + assert(val == *updates_shape[ind], 'must be same'); + }, + Option::None(_) => { break; } + }; + }; + + let mut data_indices = indices.data; + let mut data_updates = updates.data; + + let mut data_shape_clone = data_shape.clone(); + let mut indexer = 1; + let data_shape_first = data_shape_clone.pop_front(); + if data_rank >= 1 { + loop { + match data_shape_clone.pop_front() { + Option::Some(val) => { indexer *= *val;}, + Option::None(_) => { break; } + }; + } + } + + let mut updates_index_dict: Felt252Dict = Default::default(); + let mut dict_ind: usize = 1; + loop { + match data_indices.pop_front() { + Option::Some(val) => { + updates_index_dict.insert((*val).into(), dict_ind); + dict_ind += 1; + }, + Option::None(_) => { break; } + }; + }; + + + let mut output_data = ArrayTrait::::new(); + let 
mut data = *self.data; + let mut index: usize = 0; + let mut inner_index: usize = 0; + + let num = *data_shape_first.unwrap(); + loop { + if (index == num){ + break; + } + let updates_index = (index/indexer); + let comp_index = updates_index_dict.get(index.into()); + + if (comp_index == 0) { + loop { + if (inner_index == indexer) { + inner_index = 0; + break; + } + let val = *data.at((index * indexer) + inner_index); + output_data.append(val); + inner_index += 1; + }; + } + + else { + loop { + if (inner_index == indexer) { + inner_index = 0; + break; + } + if (reduction == 'none'){ + let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + output_data.append(*val); + } + if (reduction == 'add') { + let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + let data_val = *data.at((index * indexer) + inner_index); + output_data.append(*val + data_val); + } + + if (reduction == 'mul') { + let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + let data_val = *data.at((index * indexer) + inner_index); + output_data.append((*val) * data_val); + } + + if (reduction == 'max') { + let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + let data_val = *data.at((index * indexer) + inner_index); + if (*val > data_val) { + output_data.append(*val); + } + else { + output_data.append(data_val); + } + } + + if (reduction == 'min') { + let val = data_updates.at(((comp_index-1) * indexer) + inner_index); + let data_val = *data.at((index * indexer) + inner_index); + if (*val > data_val) { + output_data.append(data_val); + } + else { + output_data.append(*val); + } + } + + + inner_index += 1; + } + } + index += 1; + + }; + + + let mut output_tensor = TensorTrait::::new(*self.shape, output_data.span()); + // let mut output_tensor = TensorTrait::::new(*self.shape, *self.data); + return output_tensor; + +} + + + + + + + + + + +// 
Tests-------------------------------------------------------------------------------------------------------------- + +use orion::utils::assert_eq; + +fn indices() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(2); + + TensorTrait::new(shape.span(), data.span()) +} + +fn data() -> Tensor { + let mut sizes = ArrayTrait::new(); + sizes.append(4); + sizes.append(4); + sizes.append(4); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + + data.append(8); + data.append(7); + data.append(6); + data.append(5); + data.append(4); + data.append(3); + data.append(2); + data.append(1); + + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + + data.append(8); + data.append(7); + data.append(6); + data.append(5); + data.append(4); + data.append(3); + data.append(2); + data.append(1); + + data.append(8); + data.append(7); + data.append(6); + data.append(5); + data.append(4); + data.append(3); + data.append(2); + data.append(1); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + + data.append(8); + data.append(7); + data.append(6); + data.append(5); + data.append(4); + data.append(3); + data.append(2); + data.append(1); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + + + let tensor = TensorTrait::::new(sizes.span(), data.span()); + + return tensor; +} + +fn updates() -> Tensor { + let mut sizes = ArrayTrait::new(); + sizes.append(2); + sizes.append(4); + sizes.append(4); + + let mut data = ArrayTrait::new(); + data.append(5); + data.append(5); + data.append(5); 
+ data.append(5); + + data.append(6); + data.append(6); + data.append(6); + data.append(6); + data.append(7); + data.append(7); + data.append(7); + data.append(7); + data.append(8); + data.append(8); + data.append(8); + data.append(8); + + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(2); + data.append(2); + data.append(2); + data.append(2); + data.append(3); + data.append(3); + data.append(3); + data.append(3); + data.append(4); + data.append(4); + data.append(4); + data.append(4); + + let tensor = TensorTrait::::new(sizes.span(), data.span()); + + return tensor; +} + +fn data2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + + TensorTrait::new(shape.span(), data.span()) +} + +fn indices2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(4); + data.append(3); + data.append(1); + data.append(7); + + TensorTrait::new(shape.span(), data.span()) +} + +fn updates2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + + TensorTrait::new(shape.span(), data.span()) +} + +#[test] +#[available_gas(20000000000)] +fn test_scatter_default() { + let data = data(); + let indices = indices(); + let updates = updates(); + + // let y = data.scatter_nd(updates:updates, indices: indices, reduction:Option::None(())); + let y = data.scatter_nd(updates:updates, indices: indices, reduction:Option::Some('add')); + let mut output = y.data; + + // loop { + // match output.pop_front() { + // Option::Some(val) => { + // (*val).print(); + // }, + // Option::None(_) => { break; } + // }; + // }; + +} + +// #[test] +// 
#[available_gas(20000000000)] +// fn test_scatter_nd_example() { +// let tensor = TensorTrait::::new( +// shape: array![4, 4, 4].span(), +// data: array![1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, +// 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, +// 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8].span() +// ); + +// let updates = TensorTrait::::new( +// shape: array![2, 4, 4].span(), +// data: array![5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 1, 1, 1, 1, 2, 2, +// 2, 2, 3, 3, 3, 3, 4, 4, 4, 4].span(), +// ); + +// let indices = TensorTrait::::new( +// shape: array![2, 1].span(), +// data: array![0, 2].span(), +// ); + +// let y = tensor.scatter_nd(updates:updates, indices: indices, reduction:Option::Some('add')); +// let mut output = y.data; + +// loop { +// match output.pop_front() { +// Option::Some(val) => { +// (*val).print(); +// }, +// Option::None(_) => { break; } +// }; +// }; +// } \ No newline at end of file diff --git a/tests/nodes.cairo b/tests/nodes.cairo index e06f9efde..0fe87669b 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,938 +1,955 @@ -mod abs_fp16x16; -mod abs_fp8x23; -mod abs_i32; -mod abs_i8; -mod acos_fp16x16; -mod acos_fp8x23; -mod acosh_fp16x16; -mod acosh_fp8x23; -mod add_fp16x16; -mod add_fp16x16_broadcast; -mod add_fp8x23; -mod add_fp8x23_broadcast; -mod add_i32; -mod add_i32_broadcast; -mod add_i8; -mod add_i8_broadcast; -mod add_u32; -mod add_u32_broadcast; -mod argmax_fp16x16_1D_default; -mod argmax_fp16x16_1D_keepdims_false; -mod argmax_fp16x16_1D_last_index; -mod argmax_fp16x16_2D_default; -mod argmax_fp16x16_2D_keepdims_false; -mod argmax_fp16x16_2D_last_index; -mod argmax_fp16x16_3D_default; -mod argmax_fp16x16_3D_keepdims_false; -mod argmax_fp16x16_3D_last_index; -mod argmax_fp8x23_1D_default; -mod argmax_fp8x23_1D_keepdims_false; -mod argmax_fp8x23_1D_last_index; -mod argmax_fp8x23_2D_default; -mod argmax_fp8x23_2D_keepdims_false; -mod 
argmax_fp8x23_2D_last_index; -mod argmax_fp8x23_3D_default; -mod argmax_fp8x23_3D_keepdims_false; -mod argmax_fp8x23_3D_last_index; -mod argmax_i32_1D_default; -mod argmax_i32_1D_keepdims_false; -mod argmax_i32_1D_last_index; -mod argmax_i32_2D_default; -mod argmax_i32_2D_keepdims_false; -mod argmax_i32_2D_last_index; -mod argmax_i32_3D_default; -mod argmax_i32_3D_keepdims_false; -mod argmax_i32_3D_last_index; -mod argmax_i8_1D_default; -mod argmax_i8_1D_keepdims_false; -mod argmax_i8_1D_last_index; -mod argmax_i8_2D_default; -mod argmax_i8_2D_keepdims_false; -mod argmax_i8_2D_last_index; -mod argmax_i8_3D_default; -mod argmax_i8_3D_keepdims_false; -mod argmax_i8_3D_last_index; -mod argmax_u32_1D_default; -mod argmax_u32_1D_keepdims_false; -mod argmax_u32_1D_last_index; -mod argmax_u32_2D_default; -mod argmax_u32_2D_keepdims_false; -mod argmax_u32_2D_last_index; -mod argmax_u32_3D_default; -mod argmax_u32_3D_keepdims_false; -mod argmax_u32_3D_last_index; -mod argmin_fp16x16_1D_default; -mod argmin_fp16x16_1D_keepdims_false; -mod argmin_fp16x16_1D_last_index; -mod argmin_fp16x16_2D_default; -mod argmin_fp16x16_2D_keepdims_false; -mod argmin_fp16x16_2D_last_index; -mod argmin_fp16x16_3D_default; -mod argmin_fp16x16_3D_keepdims_false; -mod argmin_fp16x16_3D_last_index; -mod argmin_fp8x23_1D_default; -mod argmin_fp8x23_1D_keepdims_false; -mod argmin_fp8x23_1D_last_index; -mod argmin_fp8x23_2D_default; -mod argmin_fp8x23_2D_keepdims_false; -mod argmin_fp8x23_2D_last_index; -mod argmin_fp8x23_3D_default; -mod argmin_fp8x23_3D_keepdims_false; -mod argmin_fp8x23_3D_last_index; -mod argmin_i32_1D_default; -mod argmin_i32_1D_keepdims_false; -mod argmin_i32_1D_last_index; -mod argmin_i32_2D_default; -mod argmin_i32_2D_keepdims_false; -mod argmin_i32_2D_last_index; -mod argmin_i32_3D_default; -mod argmin_i32_3D_keepdims_false; -mod argmin_i32_3D_last_index; -mod argmin_i8_1D_default; -mod argmin_i8_1D_keepdims_false; -mod argmin_i8_1D_last_index; -mod argmin_i8_2D_default; 
-mod argmin_i8_2D_keepdims_false; -mod argmin_i8_2D_last_index; -mod argmin_i8_3D_default; -mod argmin_i8_3D_keepdims_false; -mod argmin_i8_3D_last_index; -mod argmin_u32_1D_default; -mod argmin_u32_1D_keepdims_false; -mod argmin_u32_1D_last_index; -mod argmin_u32_2D_default; -mod argmin_u32_2D_keepdims_false; -mod argmin_u32_2D_last_index; -mod argmin_u32_3D_default; -mod argmin_u32_3D_keepdims_false; -mod argmin_u32_3D_last_index; -mod asin_fp16x16; -mod asin_fp8x23; -mod asinh_fp16x16; -mod asinh_fp8x23; -mod atan_fp16x16; -mod atan_fp8x23; -mod ceil_fp16x16; -mod ceil_fp8x23; -mod concat_fp16x16_1d; -mod concat_fp16x16_2d; -mod concat_fp16x16_3d_default; -mod concat_fp16x16_3d_axis_1; -mod concat_fp16x16_3d_axis_2; -mod concat_fp16x16_3d_three_tensors_axis_1; -mod concat_fp16x16_3d_three_tensors_axis_2; -mod concat_fp8x23_1d; -mod concat_fp8x23_2d; -mod concat_fp8x23_3d_default; -mod concat_fp8x23_3d_axis_1; -mod concat_fp8x23_3d_axis_2; -mod concat_fp8x23_3d_three_tensors_axis_1; -mod concat_fp8x23_3d_three_tensors_axis_2; -mod concat_i32_1d; -mod concat_i32_2d; -mod concat_i32_3d_default; -mod concat_i32_3d_axis_1; -mod concat_i32_3d_axis_2; -mod concat_i32_3d_three_tensors_axis_1; -mod concat_i32_3d_three_tensors_axis_2; -mod concat_i8_1d; -mod concat_i8_2d; -mod concat_i8_3d_default; -mod concat_i8_3d_axis_1; -mod concat_i8_3d_axis_2; -mod concat_i8_3d_three_tensors_axis_1; -mod concat_i8_3d_three_tensors_axis_2; -mod concat_u32_1d; -mod concat_u32_2d; -mod concat_u32_3d_default; -mod concat_u32_3d_axis_1; -mod concat_u32_3d_axis_2; -mod concat_u32_3d_three_tensors_axis_1; -mod concat_u32_3d_three_tensors_axis_2; -mod cos_fp16x16; -mod cos_fp8x23; -mod cosh_fp16x16; -mod cosh_fp8x23; -mod cumsum_fp16x16_1d_default; -mod cumsum_fp16x16_1d_exclusive; -mod cumsum_fp16x16_1d_reverse; -mod cumsum_fp16x16_1d_reverse_exclusive; -mod cumsum_fp16x16_2d_axis_0; -mod cumsum_fp16x16_2d_axis_1; -mod cumsum_fp8x23_1d_default; -mod cumsum_fp8x23_1d_exclusive; -mod 
cumsum_fp8x23_1d_reverse; -mod cumsum_fp8x23_1d_reverse_exclusive; -mod cumsum_fp8x23_2d_axis_0; -mod cumsum_fp8x23_2d_axis_1; -mod cumsum_i32_1d_default; -mod cumsum_i32_1d_exclusive; -mod cumsum_i32_1d_reverse; -mod cumsum_i32_1d_reverse_exclusive; -mod cumsum_i32_2d_axis_0; -mod cumsum_i32_2d_axis_1; -mod cumsum_i8_1d_default; -mod cumsum_i8_1d_exclusive; -mod cumsum_i8_1d_reverse; -mod cumsum_i8_1d_reverse_exclusive; -mod cumsum_i8_2d_axis_0; -mod cumsum_i8_2d_axis_1; -mod cumsum_u32_1d_default; -mod cumsum_u32_1d_exclusive; -mod cumsum_u32_1d_reverse; -mod cumsum_u32_1d_reverse_exclusive; -mod cumsum_u32_2d_axis_0; -mod cumsum_u32_2d_axis_1; -mod div_fp16x16; -mod div_fp16x16_broadcast; -mod div_fp8x23; -mod div_fp8x23_broadcast; -mod div_i32; -mod div_i32_broadcast; -mod div_i8; -mod div_i8_broadcast; -mod div_u32; -mod div_u32_broadcast; -mod equal_fp16x16; -mod equal_fp16x16_broadcast; -mod equal_fp8x23; -mod equal_fp8x23_broadcast; -mod equal_i32; -mod equal_i32_broadcast; -mod equal_i8; -mod equal_i8_broadcast; -mod equal_u32; -mod equal_u32_broadcast; -mod exp_fp16x16; -mod exp_fp8x23; -mod less_equal_fp16x16; -mod less_equal_fp16x16_broadcast; -mod less_equal_fp8x23; -mod less_equal_fp8x23_broadcast; -mod less_equal_i32; -mod less_equal_i32_broadcast; -mod less_equal_i8; -mod less_equal_i8_broadcast; -mod less_equal_u32; -mod less_equal_u32_broadcast; -mod greater_fp16x16; -mod greater_fp16x16_broadcast; -mod greater_fp8x23; -mod greater_fp8x23_broadcast; -mod greater_i32; -mod greater_i32_broadcast; -mod greater_i8; -mod greater_i8_broadcast; -mod greater_u32; -mod greater_u32_broadcast; -mod leaky_relu_fp16x16; -mod leaky_relu_fp8x23; -mod linear_fp16x16; -mod linear_fp8x23; -mod linear_i32; -mod linear_i8; -mod linear_u32; -mod log_fp16x16; -mod log_fp8x23; -mod logsoftmax_fp16x16_axis_0; -mod logsoftmax_fp16x16_axis_1; -mod logsoftmax_fp8x23_axis_0; -mod logsoftmax_fp8x23_axis_1; -mod matmul_fp16x16_1d; -mod matmul_fp16x16_2x2; -mod 
matmul_fp16x16_2x1; -mod matmul_fp16x16_1x2; -mod matmul_fp8x23_1d; -mod matmul_fp8x23_2x2; -mod matmul_fp8x23_2x1; -mod matmul_fp8x23_1x2; -mod matmul_i32_1d; -mod matmul_i32_2x2; -mod matmul_i32_2x1; -mod matmul_i32_1x2; -mod matmul_i8_1d; -mod matmul_i8_2x2; -mod matmul_i8_2x1; -mod matmul_i8_1x2; -mod matmul_u32_1d; -mod matmul_u32_2x2; -mod matmul_u32_2x1; -mod matmul_u32_1x2; -mod mul_fp16x16; -mod mul_fp16x16_broadcast; -mod mul_fp8x23; -mod mul_fp8x23_broadcast; -mod mul_i32; -mod mul_i32_broadcast; -mod mul_i8; -mod mul_i8_broadcast; -mod mul_u32; -mod mul_u32_broadcast; -mod or_fp16x16; -mod or_fp16x16_broadcast; -mod or_fp8x23; -mod or_fp8x23_broadcast; -mod or_i32; -mod or_i32_broadcast; -mod or_i8; -mod or_i8_broadcast; -mod or_u32; -mod or_u32_broadcast; -mod reduce_sum_fp16x16_1D; -mod reduce_sum_fp16x16_2D_default; -mod reduce_sum_fp16x16_2D_keepdims; -mod reduce_sum_fp16x16_2D_axis_1; -mod reduce_sum_fp8x23_1D; -mod reduce_sum_fp8x23_2D_default; -mod reduce_sum_fp8x23_2D_keepdims; -mod reduce_sum_fp8x23_2D_axis_1; -mod reduce_sum_i32_1D; -mod reduce_sum_i32_2D_default; -mod reduce_sum_i32_2D_keepdims; -mod reduce_sum_i32_2D_axis_1; -mod reduce_sum_i8_1D; -mod reduce_sum_i8_2D_default; -mod reduce_sum_i8_2D_keepdims; -mod reduce_sum_i8_2D_axis_1; -mod reduce_sum_u32_1D; -mod reduce_sum_u32_2D_default; -mod reduce_sum_u32_2D_keepdims; -mod reduce_sum_u32_2D_axis_1; -mod relu_fp16x16; -mod relu_fp8x23; -mod relu_i32; -mod relu_i8; -mod sigmoid_fp16x16; -mod sigmoid_fp8x23; -mod sin_fp16x16; -mod sin_fp8x23; -mod sinh_fp16x16; -mod sinh_fp8x23; -mod softmax_fp16x16; -mod softmax_fp8x23; -mod softplus_fp8x23; -mod softplus_fp16x16; -mod softsign_fp8x23; -mod softsign_fp16x16; -mod sqrt_fp16x16; -mod sqrt_fp8x23; -mod sub_fp16x16; -mod sub_fp16x16_broadcast; -mod sub_fp8x23; -mod sub_fp8x23_broadcast; -mod sub_i32; -mod sub_i32_broadcast; -mod sub_i8; -mod sub_i8_broadcast; -mod sub_u32; -mod sub_u32_broadcast; -mod tanh_fp16x16; -mod tanh_fp8x23; -mod 
transpose_fp16x16_2d; -mod transpose_fp16x16_3d; -mod transpose_fp8x23_2d; -mod transpose_fp8x23_3d; -mod transpose_i32_2d; -mod transpose_i32_3d; -mod transpose_i8_2d; -mod transpose_i8_3d; -mod transpose_u32_2d; -mod transpose_u32_3d; -mod xor_fp16x16; -mod xor_fp16x16_broadcast; -mod xor_fp8x23; -mod xor_fp8x23_broadcast; -mod xor_i32; -mod xor_i32_broadcast; -mod xor_i8; -mod xor_i8_broadcast; -mod xor_u32; -mod xor_u32_broadcast; -mod less_fp16x16; -mod less_fp16x16_broadcast; -mod less_fp8x23; -mod less_fp8x23_broadcast; -mod less_i32; -mod less_i32_broadcast; -mod less_i8; -mod less_i8_broadcast; -mod less_u32; -mod less_u32_broadcast; -mod greater_equal_fp16x16; -mod greater_equal_fp16x16_broadcast; -mod greater_equal_fp8x23; -mod greater_equal_fp8x23_broadcast; -mod greater_equal_i32; -mod greater_equal_i32_broadcast; -mod greater_equal_i8; -mod greater_equal_i8_broadcast; -mod greater_equal_u32; -mod greater_equal_u32_broadcast; -mod slice_fp16x16_2d; -mod slice_fp16x16_3d; -mod slice_fp8x23_2d; -mod slice_fp8x23_3d; -mod slice_i32_2d; -mod slice_i32_3d; -mod slice_i8_2d; -mod slice_i8_3d; -mod slice_u32_2d; -mod slice_u32_3d; -mod gather_fp8x23_3d_default; -mod gather_fp8x23_3d_axis1; -mod gather_fp8x23_3d_axis2; -mod gather_fp16x16_3d_default; -mod gather_fp16x16_3d_axis1; -mod gather_fp16x16_3d_axis2; -mod gather_i8_3d_default; -mod gather_i8_3d_axis1; -mod gather_i8_3d_axis2; -mod gather_i32_3d_default; -mod gather_i32_3d_axis1; -mod gather_i32_3d_axis2; -mod gather_u32_3d_default; -mod gather_u32_3d_axis1; -mod gather_u32_3d_axis2; -mod nonzero_fp16x16_2d; -mod nonzero_fp16x16_3d; -mod nonzero_fp8x23_2d; -mod nonzero_fp8x23_3d; -mod nonzero_i32_2d; -mod nonzero_i32_3d; -mod nonzero_i8_2d; -mod nonzero_i8_3d; -mod nonzero_u32_2d; -mod nonzero_u32_3d; -mod squeeze_fP16x16; -mod squeeze_fP8x23; -mod squeeze_i32; -mod squeeze_i8; -mod squeeze_u32; -mod unsqueeze_fp16x16_2d; -mod unsqueeze_fp16x16_3d; -mod unsqueeze_fp8x23_2d; -mod unsqueeze_fp8x23_3d; 
-mod unsqueeze_i32_2d; -mod unsqueeze_i32_3d; -mod unsqueeze_i8_2d; -mod unsqueeze_i8_3d; -mod unsqueeze_u32_2d; -mod unsqueeze_u32_3d; -mod sign_fP16x16; -mod sign_fP8x23; -mod sign_fail; -mod sign_i32; -mod sign_i8; -mod clip_fp16x16_2d; -mod clip_fp16x16_3d; -mod clip_fp8x23_2d; -mod clip_fp8x23_3d; -mod clip_i32_2d; -mod clip_i32_3d; -mod clip_i8_2d; -mod clip_i8_3d; -mod clip_u32_2d; -mod clip_u32_3d; -mod identity_fP16x16; -mod identity_fP8x23; -mod identity_i32; -mod identity_i8; -mod identity_u32; -mod thresholded_relu_fp16x16; -mod thresholded_relu_fp8x23; -mod hard_sigmoid_fp8x23; -mod hard_sigmoid_fp16x16; -mod neg_fp16x16; -mod neg_fp8x23; -mod neg_i32; -mod neg_i8; -mod gemm_all_attributes; -mod gemm_alpha; -mod gemm_beta; -mod gemm_default_matrix_bias; -mod gemm_default_vector_bias; -mod gemm_default_no_bias; -mod gemm_transposeA; -mod gemm_transposeB; -mod min_fp16x16_three_tensors; -mod min_fp16x16_broadcast_three_tensors; -mod min_fp16x16_two_tensors; -mod min_fp16x16_broadcast_two_tensors; -mod min_fp8x23_three_tensors; -mod min_fp8x23_broadcast_three_tensors; -mod min_fp8x23_two_tensors; -mod min_fp8x23_broadcast_two_tensors; -mod min_i32_three_tensors; -mod min_i32_broadcast_three_tensors; -mod min_i32_two_tensors; -mod min_i32_broadcast_two_tensors; -mod min_i8_three_tensors; -mod min_i8_broadcast_three_tensors; -mod min_i8_two_tensors; -mod min_i8_broadcast_two_tensors; -mod min_u32_three_tensors; -mod min_u32_broadcast_three_tensors; -mod min_u32_two_tensors; -mod min_u32_broadcast_two_tensors; -mod where_fp16x16; -mod where_fp16x16_broadcast; -mod where_fp8x23; -mod where_fp8x23_broadcast; -mod where_i32; -mod where_i32_broadcast; -mod where_i8; -mod where_i8_broadcast; -mod where_u32; -mod where_u32_broadcast; -mod not_bool; -mod round_fp16x16; -mod round_fp8x23; -mod max_fp16x16_three_tensors; -mod max_fp16x16_broadcast_three_tensors; -mod max_fp16x16_two_tensors; -mod max_fp16x16_broadcast_two_tensors; -mod max_fp8x23_three_tensors; -mod 
max_fp8x23_broadcast_three_tensors; -mod max_fp8x23_two_tensors; -mod max_fp8x23_broadcast_two_tensors; -mod max_i32_three_tensors; -mod max_i32_broadcast_three_tensors; -mod max_i32_two_tensors; -mod max_i32_broadcast_two_tensors; -mod max_i8_three_tensors; -mod max_i8_broadcast_three_tensors; -mod max_i8_two_tensors; -mod max_i8_broadcast_two_tensors; -mod max_u32_three_tensors; -mod max_u32_broadcast_three_tensors; -mod max_u32_two_tensors; -mod max_u32_broadcast_two_tensors; -mod scatter_fp16x16_3d_default; -mod scatter_fp16x16_3d_axis1; -mod scatter_fp16x16_3d_axis1_add; -mod scatter_fp8x23_default; -mod scatter_fp8x23_axis1; -mod scatter_fp8x23_mul; -mod scatter_i8_default; -mod scatter_i8_axis1; -mod scatter_i8_axis1_max; -mod scatter_u32_default; -mod scatter_u32_axis1; -mod scatter_u32_add; -mod array_feature_extractor_1D_i32; -mod array_feature_extractor_1D_fp8x23; -mod array_feature_extractor_1D_fp16x16; -mod array_feature_extractor_2D_i32; -mod array_feature_extractor_2D_fp8x23; -mod array_feature_extractor_2D_fp16x16; -mod array_feature_extractor_3D_i32; -mod array_feature_extractor_3D_fp8x23; -mod array_feature_extractor_3D_fp16x16; -mod binarizer_fp16x16; -mod binarizer_fp8x23; -mod tril_fp16x16; -mod tril_fp16x16_neg; -mod tril_fp16x16_one_row; -mod tril_fp16x16_out_neg; -mod tril_fp16x16_out_pos; -mod tril_fp16x16_pos; -mod tril_fp16x16_square; -mod tril_fp16x16_square_neg; -mod tril_fp16x16_zero; -mod triu_fp16x16; -mod triu_fp16x16_neg; -mod triu_fp16x16_one_row; -mod triu_fp16x16_out_neg; -mod triu_fp16x16_out_pos; -mod triu_fp16x16_pos; -mod triu_fp16x16_square; -mod triu_fp16x16_square_neg; -mod triu_fp16x16_zero; -mod tril_fp8x23; -mod tril_fp8x23_neg; -mod tril_fp8x23_one_row; -mod tril_fp8x23_out_neg; -mod tril_fp8x23_out_pos; -mod tril_fp8x23_pos; -mod tril_fp8x23_square; -mod tril_fp8x23_square_neg; -mod tril_fp8x23_zero; -mod triu_fp8x23; -mod triu_fp8x23_neg; -mod triu_fp8x23_one_row; -mod triu_fp8x23_out_neg; -mod triu_fp8x23_out_pos; 
-mod triu_fp8x23_pos; -mod triu_fp8x23_square; -mod triu_fp8x23_square_neg; -mod triu_fp8x23_zero; -mod tril_i32; -mod tril_neg_i32; -mod tril_i32_one_row; -mod tril_i32_out_neg; -mod tril_i32_out_pos; -mod tril_i32_pos; -mod tril_i32_square; -mod tril_i32_square_neg; -mod tril_i32_zero; -mod triu_i32; -mod triu_i32_neg; -mod triu_i32_one_row; -mod triu_i32_out_neg; -mod triu_i32_out_pos; -mod triu_i32_pos; -mod triu_i32_square; -mod triu_i32_square_neg; -mod triu_i32_zero; -mod tril_i8; -mod tril_i8_neg; -mod tril_i8_one_row; -mod tril_i8_out_neg; -mod tril_i8_out_pos; -mod tril_i8_pos; -mod tril_i8_square; -mod tril_i8_square_neg; -mod tril_i8_zero; -mod triu_i8; -mod triu_i8_neg; -mod triu_i8_one_row; -mod triu_i8_out_neg; -mod triu_i8_out_pos; -mod triu_i8_pos; -mod triu_i8_square; -mod triu_i8_square_neg; -mod triu_i8_zero; -mod tril_u32; -mod tril_u32_neg; -mod tril_u32_one_row; -mod tril_u32_out_neg; -mod tril_u32_out_pos; -mod tril_u32_pos; -mod tril_u32_square; -mod tril_u32_square_neg; -mod tril_u32_zero; -mod triu_u32; -mod triu_u32_neg; -mod triu_u32_one_row; -mod triu_u32_out_neg; -mod triu_u32_out_pos; -mod triu_u32_pos; -mod triu_u32_square; -mod triu_u32_square_neg; -mod triu_u32_zero; -mod reduce_sum_square_fp16x16_export_do_not_keepdims; -mod reduce_sum_square_fp16x16_export_keepdims; -mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -mod reduce_sum_square_fp8x23_export_do_not_keepdims; -mod reduce_sum_square_fp8x23_export_keepdims; -mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; -mod reduce_sum_square_i32_export_do_not_keepdims; -mod reduce_sum_square_i32_export_keepdims; -mod reduce_sum_square_i32_export_negative_axes_keepdims; -mod reduce_sum_square_i8_export_do_not_keepdims; -mod reduce_sum_square_i8_export_keepdims; -mod reduce_sum_square_i8_export_negative_axes_keepdims; -mod reduce_sum_square_u32_export_do_not_keepdims; -mod reduce_sum_square_u32_export_keepdims; -mod 
reduce_sum_square_u32_export_negative_axes_keepdims; -mod reduce_l2_fp16x16_export_do_not_keepdims; -mod reduce_l2_fp16x16_export_keepdims; -mod reduce_l2_fp16x16_export_negative_axes_keepdims; -mod reduce_l2_fp8x23_export_do_not_keepdims; -mod reduce_l2_fp8x23_export_keepdims; -mod reduce_l2_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_fp16x16_export_do_not_keepdims; -mod reduce_l1_fp16x16_export_keepdims; -mod reduce_l1_fp16x16_export_negative_axes_keepdims; -mod reduce_l1_fp8x23_export_do_not_keepdims; -mod reduce_l1_fp8x23_export_keepdims; -mod reduce_l1_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_i32_export_do_not_keepdims; -mod reduce_l1_i32_export_keepdims; -mod reduce_l1_i32_export_negative_axes_keepdims; -mod reduce_l1_i8_export_do_not_keepdims; -mod reduce_l1_i8_export_keepdims; -mod reduce_l1_i8_export_negative_axes_keepdims; -mod reduce_l1_u32_export_do_not_keepdims; -mod reduce_l1_u32_export_keepdims; -mod reduce_l1_u32_export_negative_axes_keepdims; -mod reduce_prod_fp16x16_1D; -mod reduce_prod_fp16x16_2D_default; -mod reduce_prod_fp16x16_2D_keepdims; -mod reduce_prod_fp16x16_2D_axis_1; -mod reduce_prod_fp8x23_1D; -mod reduce_prod_fp8x23_2D_default; -mod reduce_prod_fp8x23_2D_keepdims; -mod reduce_prod_fp8x23_2D_axis_1; -mod reduce_prod_i32_1D; -mod reduce_prod_i32_2D_default; -mod reduce_prod_i32_2D_keepdims; -mod reduce_prod_i32_2D_axis_1; -mod reduce_prod_i8_1D; -mod reduce_prod_i8_2D_default; -mod reduce_prod_i8_2D_keepdims; -mod reduce_prod_i8_2D_axis_1; -mod reduce_prod_u32_1D; -mod reduce_prod_u32_2D_default; -mod reduce_prod_u32_2D_keepdims; -mod reduce_prod_u32_2D_axis_1; -mod gather_elements_fp16x16_3d_default; -mod gather_elements_fp16x16_3d_axis1; -mod gather_elements_fp16x16_3d_axis2; -mod gather_elements_fp8x23_3d_default; -mod gather_elements_fp8x23_3d_axis1; -mod gather_elements_fp8x23_3d_axis2; -mod gather_elements_i8_3d_default; -mod gather_elements_i8_3d_axis1; -mod gather_elements_i32_3d_default; -mod 
gather_elements_i32_3d_axis1; -mod gather_elements_i32_3d_axis2; -mod gather_elements_u32_default; -mod gather_elements_u32_axis1; -mod gather_elements_u32_axis2; -mod gather_elements_u32_axis3; -mod sequence_length_fp16x16; -mod sequence_length_fp16x16_broadcast; -mod sequence_length_fp8x23; -mod sequence_length_fp8x23_broadcast; -mod sequence_length_i32; -mod sequence_length_i32_broadcast; -mod sequence_length_i8; -mod sequence_length_i8_broadcast; -mod sequence_length_u32; -mod sequence_length_u32_broadcast; -mod sequence_at_u32_positive; -mod sequence_at_u32_negative; -mod sequence_at_fp16x16_positive; -mod sequence_at_fp16x16_negative; -mod sequence_at_fp8x23_positive; -mod sequence_at_fp8x23_negative; -mod sequence_at_i32_positive; -mod sequence_at_i32_negative; -mod sequence_at_i8_positive; -mod sequence_at_i8_negative; -mod reduce_min_fp16x16_1D; -mod reduce_min_fp16x16_2D_default; -mod reduce_min_fp16x16_2D_keepdims; -mod reduce_min_fp16x16_2D_axis_1; -mod reduce_min_fp8x23_1D; -mod reduce_min_fp8x23_2D_default; -mod reduce_min_fp8x23_2D_keepdims; -mod reduce_min_fp8x23_2D_axis_1; -mod reduce_min_i32_1D; -mod reduce_min_i32_2D_default; -mod reduce_min_i32_2D_keepdims; -mod reduce_min_i32_2D_axis_1; -mod reduce_min_i8_1D; -mod reduce_min_i8_2D_default; -mod reduce_min_i8_2D_keepdims; -mod reduce_min_i8_2D_axis_1; -mod reduce_min_u32_1D; -mod reduce_min_u32_2D_default; -mod reduce_min_u32_2D_keepdims; -mod reduce_min_u32_2D_axis_1; -mod sequence_construct_fp16x16; -mod sequence_construct_fp8x23; -mod sequence_construct_i32; -mod sequence_construct_i8; -mod sequence_construct_u32; -mod shrink_hard_fp16x16; -mod shrink_soft_fp16x16; -mod shrink_hard_fp8x23; -mod shrink_soft_fp8x23; -mod sequence_empty_fp16x16; -mod sequence_empty_fp8x23; -mod sequence_empty_i32; -mod sequence_empty_i8; -mod sequence_empty_u32; -mod reduce_mean_fp16x16_1D; -mod reduce_mean_fp16x16_2D_default; -mod reduce_mean_fp16x16_2D_keepdims; -mod reduce_mean_fp16x16_2D_axis_1; -mod 
reduce_mean_fp8x23_1D; -mod reduce_mean_fp8x23_2D_default; -mod reduce_mean_fp8x23_2D_keepdims; -mod reduce_mean_fp8x23_2D_axis_1; -mod reduce_mean_i32_1D; -mod reduce_mean_i32_2D_default; -mod reduce_mean_i32_2D_keepdims; -mod reduce_mean_i32_2D_axis_1; -mod reduce_mean_i8_1D; -mod reduce_mean_i8_2D_default; -mod reduce_mean_i8_2D_keepdims; -mod reduce_mean_i8_2D_axis_1; -mod reduce_mean_u32_1D; -mod reduce_mean_u32_2D_default; -mod reduce_mean_u32_2D_keepdims; -mod reduce_mean_u32_2D_axis_1; -mod pow_fp16x16; -mod pow_fp16x16_broadcast; -mod pow_fp8x23; -mod pow_fp8x23_broadcast; -mod sequence_erase_u32_positive; -mod sequence_erase_u32_negative; -mod sequence_erase_u32_empty; -mod sequence_erase_fp16x16_positive; -mod sequence_erase_fp16x16_negative; -mod sequence_erase_fp16x16_empty; -mod sequence_erase_fp8x23_positive; -mod sequence_erase_fp8x23_negative; -mod sequence_erase_fp8x23_empty; -mod sequence_erase_i32_positive; -mod sequence_erase_i32_negative; -mod sequence_erase_i32_empty; -mod sequence_erase_i8_positive; -mod sequence_erase_i8_negative; -mod sequence_erase_i8_empty; -mod sequence_insert_fp16x16; -mod sequence_insert_fp8x23; -mod sequence_insert_i32; -mod sequence_insert_i8; -mod sequence_insert_u32; -mod concat_from_sequence_fp8x23_new_axis_zero; -mod concat_from_sequence_fp8x23_new_axis_one; -mod concat_from_sequence_fp8x23_new_axis_default; -mod concat_from_sequence_fp16x16_new_axis_zero; -mod concat_from_sequence_fp16x16_new_axis_one; -mod concat_from_sequence_fp16x16_new_axis_default; -mod concat_from_sequence_i32_new_axis_zero; -mod concat_from_sequence_i32_new_axis_one; -mod concat_from_sequence_i32_new_axis_default; -mod concat_from_sequence_i8_new_axis_zero; -mod concat_from_sequence_i8_new_axis_one; -mod concat_from_sequence_i8_new_axis_default; -mod concat_from_sequence_u32_new_axis_zero; -mod concat_from_sequence_u32_new_axis_one; -mod concat_from_sequence_u32_new_axis_default; -mod is_nan_fp16x16; -mod is_nan_fp8x23; -mod 
is_inf_fp16x16; -mod is_inf_fp8x23; -mod is_inf_i32; -mod is_inf_i8; -mod is_inf_u32; -mod is_pos_inf_fp16x16; -mod is_neg_inf_fp16x16; -mod is_pos_inf_fp8x23; -mod is_neg_inf_fp8x23; -mod is_pos_inf_i32; -mod is_neg_inf_i32; -mod is_pos_inf_i8; -mod is_neg_inf_i8; -mod reduce_log_sum_fp8x23_export_do_not_keepdims; -mod reduce_log_sum_fp8x23_export_keepdims; -mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -mod reduce_log_sum_fp16x16_export_do_not_keepdims; -mod reduce_log_sum_fp16x16_export_keepdims; -mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -mod and_bool; -mod erf_fp16x16; -mod erf_fp8x23; -mod unique_fp16x16_without_axis_sorted; -mod unique_fp16x16_with_axis_zero_sorted; -mod unique_u32_without_axis_sorted; -mod unique_u32_without_axis_not_sorted; -mod unique_u32_with_axis_zero_sorted; -mod unique_u32_with_axis_zero_not_sorted; -mod unique_u32_with_axis_one_sorted; -mod unique_u32_with_axis_one_not_sorted; -mod gather_nd_fp16x16_3d_default; -mod gather_nd_fp16x16_3d_batch_dims1; -mod gather_nd_fp16x16_3d_batch_dims2; -mod gather_nd_fp8x23_3d_default; -mod gather_nd_fp8x23_3d_batch_dims1; -mod gather_nd_fp8x23_3d_batch_dims2; -mod gather_nd_i32_3d_default; -mod gather_nd_i32_3d_batch_dims1; -mod gather_nd_i32_3d_batch_dims2; -mod gather_nd_i8_3d_default; -mod gather_nd_i8_3d_batch_dims1; +// mod abs_fp16x16; +// mod abs_fp8x23; +// mod abs_i32; +// mod abs_i8; +// mod acos_fp16x16; +// mod acos_fp8x23; +// mod acosh_fp16x16; +// mod acosh_fp8x23; +// mod add_fp16x16; +// mod add_fp16x16_broadcast; +// mod add_fp8x23; +// mod add_fp8x23_broadcast; +// mod add_i32; +// mod add_i32_broadcast; +// mod add_i8; +// mod add_i8_broadcast; +// mod add_u32; +// mod add_u32_broadcast; +// mod argmax_fp16x16_1D_default; +// mod argmax_fp16x16_1D_keepdims_false; +// mod argmax_fp16x16_1D_last_index; +// mod argmax_fp16x16_2D_default; +// mod argmax_fp16x16_2D_keepdims_false; +// mod argmax_fp16x16_2D_last_index; +// mod argmax_fp16x16_3D_default; +// 
mod argmax_fp16x16_3D_keepdims_false; +// mod argmax_fp16x16_3D_last_index; +// mod argmax_fp8x23_1D_default; +// mod argmax_fp8x23_1D_keepdims_false; +// mod argmax_fp8x23_1D_last_index; +// mod argmax_fp8x23_2D_default; +// mod argmax_fp8x23_2D_keepdims_false; +// mod argmax_fp8x23_2D_last_index; +// mod argmax_fp8x23_3D_default; +// mod argmax_fp8x23_3D_keepdims_false; +// mod argmax_fp8x23_3D_last_index; +// mod argmax_i32_1D_default; +// mod argmax_i32_1D_keepdims_false; +// mod argmax_i32_1D_last_index; +// mod argmax_i32_2D_default; +// mod argmax_i32_2D_keepdims_false; +// mod argmax_i32_2D_last_index; +// mod argmax_i32_3D_default; +// mod argmax_i32_3D_keepdims_false; +// mod argmax_i32_3D_last_index; +// mod argmax_i8_1D_default; +// mod argmax_i8_1D_keepdims_false; +// mod argmax_i8_1D_last_index; +// mod argmax_i8_2D_default; +// mod argmax_i8_2D_keepdims_false; +// mod argmax_i8_2D_last_index; +// mod argmax_i8_3D_default; +// mod argmax_i8_3D_keepdims_false; +// mod argmax_i8_3D_last_index; +// mod argmax_u32_1D_default; +// mod argmax_u32_1D_keepdims_false; +// mod argmax_u32_1D_last_index; +// mod argmax_u32_2D_default; +// mod argmax_u32_2D_keepdims_false; +// mod argmax_u32_2D_last_index; +// mod argmax_u32_3D_default; +// mod argmax_u32_3D_keepdims_false; +// mod argmax_u32_3D_last_index; +// mod argmin_fp16x16_1D_default; +// mod argmin_fp16x16_1D_keepdims_false; +// mod argmin_fp16x16_1D_last_index; +// mod argmin_fp16x16_2D_default; +// mod argmin_fp16x16_2D_keepdims_false; +// mod argmin_fp16x16_2D_last_index; +// mod argmin_fp16x16_3D_default; +// mod argmin_fp16x16_3D_keepdims_false; +// mod argmin_fp16x16_3D_last_index; +// mod argmin_fp8x23_1D_default; +// mod argmin_fp8x23_1D_keepdims_false; +// mod argmin_fp8x23_1D_last_index; +// mod argmin_fp8x23_2D_default; +// mod argmin_fp8x23_2D_keepdims_false; +// mod argmin_fp8x23_2D_last_index; +// mod argmin_fp8x23_3D_default; +// mod argmin_fp8x23_3D_keepdims_false; +// mod 
argmin_fp8x23_3D_last_index; +// mod argmin_i32_1D_default; +// mod argmin_i32_1D_keepdims_false; +// mod argmin_i32_1D_last_index; +// mod argmin_i32_2D_default; +// mod argmin_i32_2D_keepdims_false; +// mod argmin_i32_2D_last_index; +// mod argmin_i32_3D_default; +// mod argmin_i32_3D_keepdims_false; +// mod argmin_i32_3D_last_index; +// mod argmin_i8_1D_default; +// mod argmin_i8_1D_keepdims_false; +// mod argmin_i8_1D_last_index; +// mod argmin_i8_2D_default; +// mod argmin_i8_2D_keepdims_false; +// mod argmin_i8_2D_last_index; +// mod argmin_i8_3D_default; +// mod argmin_i8_3D_keepdims_false; +// mod argmin_i8_3D_last_index; +// mod argmin_u32_1D_default; +// mod argmin_u32_1D_keepdims_false; +// mod argmin_u32_1D_last_index; +// mod argmin_u32_2D_default; +// mod argmin_u32_2D_keepdims_false; +// mod argmin_u32_2D_last_index; +// mod argmin_u32_3D_default; +// mod argmin_u32_3D_keepdims_false; +// mod argmin_u32_3D_last_index; +// mod asin_fp16x16; +// mod asin_fp8x23; +// mod asinh_fp16x16; +// mod asinh_fp8x23; +// mod atan_fp16x16; +// mod atan_fp8x23; +// mod ceil_fp16x16; +// mod ceil_fp8x23; +// mod concat_fp16x16_1d; +// mod concat_fp16x16_2d; +// mod concat_fp16x16_3d_default; +// mod concat_fp16x16_3d_axis_1; +// mod concat_fp16x16_3d_axis_2; +// mod concat_fp16x16_3d_three_tensors_axis_1; +// mod concat_fp16x16_3d_three_tensors_axis_2; +// mod concat_fp8x23_1d; +// mod concat_fp8x23_2d; +// mod concat_fp8x23_3d_default; +// mod concat_fp8x23_3d_axis_1; +// mod concat_fp8x23_3d_axis_2; +// mod concat_fp8x23_3d_three_tensors_axis_1; +// mod concat_fp8x23_3d_three_tensors_axis_2; +// mod concat_i32_1d; +// mod concat_i32_2d; +// mod concat_i32_3d_default; +// mod concat_i32_3d_axis_1; +// mod concat_i32_3d_axis_2; +// mod concat_i32_3d_three_tensors_axis_1; +// mod concat_i32_3d_three_tensors_axis_2; +// mod concat_i8_1d; +// mod concat_i8_2d; +// mod concat_i8_3d_default; +// mod concat_i8_3d_axis_1; +// mod concat_i8_3d_axis_2; +// mod 
concat_i8_3d_three_tensors_axis_1; +// mod concat_i8_3d_three_tensors_axis_2; +// mod concat_u32_1d; +// mod concat_u32_2d; +// mod concat_u32_3d_default; +// mod concat_u32_3d_axis_1; +// mod concat_u32_3d_axis_2; +// mod concat_u32_3d_three_tensors_axis_1; +// mod concat_u32_3d_three_tensors_axis_2; +// mod cos_fp16x16; +// mod cos_fp8x23; +// mod cosh_fp16x16; +// mod cosh_fp8x23; +// mod cumsum_fp16x16_1d_default; +// mod cumsum_fp16x16_1d_exclusive; +// mod cumsum_fp16x16_1d_reverse; +// mod cumsum_fp16x16_1d_reverse_exclusive; +// mod cumsum_fp16x16_2d_axis_0; +// mod cumsum_fp16x16_2d_axis_1; +// mod cumsum_fp8x23_1d_default; +// mod cumsum_fp8x23_1d_exclusive; +// mod cumsum_fp8x23_1d_reverse; +// mod cumsum_fp8x23_1d_reverse_exclusive; +// mod cumsum_fp8x23_2d_axis_0; +// mod cumsum_fp8x23_2d_axis_1; +// mod cumsum_i32_1d_default; +// mod cumsum_i32_1d_exclusive; +// mod cumsum_i32_1d_reverse; +// mod cumsum_i32_1d_reverse_exclusive; +// mod cumsum_i32_2d_axis_0; +// mod cumsum_i32_2d_axis_1; +// mod cumsum_i8_1d_default; +// mod cumsum_i8_1d_exclusive; +// mod cumsum_i8_1d_reverse; +// mod cumsum_i8_1d_reverse_exclusive; +// mod cumsum_i8_2d_axis_0; +// mod cumsum_i8_2d_axis_1; +// mod cumsum_u32_1d_default; +// mod cumsum_u32_1d_exclusive; +// mod cumsum_u32_1d_reverse; +// mod cumsum_u32_1d_reverse_exclusive; +// mod cumsum_u32_2d_axis_0; +// mod cumsum_u32_2d_axis_1; +// mod div_fp16x16; +// mod div_fp16x16_broadcast; +// mod div_fp8x23; +// mod div_fp8x23_broadcast; +// mod div_i32; +// mod div_i32_broadcast; +// mod div_i8; +// mod div_i8_broadcast; +// mod div_u32; +// mod div_u32_broadcast; +// mod equal_fp16x16; +// mod equal_fp16x16_broadcast; +// mod equal_fp8x23; +// mod equal_fp8x23_broadcast; +// mod equal_i32; +// mod equal_i32_broadcast; +// mod equal_i8; +// mod equal_i8_broadcast; +// mod equal_u32; +// mod equal_u32_broadcast; +// mod exp_fp16x16; +// mod exp_fp8x23; +// mod less_equal_fp16x16; +// mod less_equal_fp16x16_broadcast; +// 
mod less_equal_fp8x23; +// mod less_equal_fp8x23_broadcast; +// mod less_equal_i32; +// mod less_equal_i32_broadcast; +// mod less_equal_i8; +// mod less_equal_i8_broadcast; +// mod less_equal_u32; +// mod less_equal_u32_broadcast; +// mod greater_fp16x16; +// mod greater_fp16x16_broadcast; +// mod greater_fp8x23; +// mod greater_fp8x23_broadcast; +// mod greater_i32; +// mod greater_i32_broadcast; +// mod greater_i8; +// mod greater_i8_broadcast; +// mod greater_u32; +// mod greater_u32_broadcast; +// mod leaky_relu_fp16x16; +// mod leaky_relu_fp8x23; +// mod linear_fp16x16; +// mod linear_fp8x23; +// mod linear_i32; +// mod linear_i8; +// mod linear_u32; +// mod log_fp16x16; +// mod log_fp8x23; +// mod logsoftmax_fp16x16_axis_0; +// mod logsoftmax_fp16x16_axis_1; +// mod logsoftmax_fp8x23_axis_0; +// mod logsoftmax_fp8x23_axis_1; +// mod matmul_fp16x16_1d; +// mod matmul_fp16x16_2x2; +// mod matmul_fp16x16_2x1; +// mod matmul_fp16x16_1x2; +// mod matmul_fp8x23_1d; +// mod matmul_fp8x23_2x2; +// mod matmul_fp8x23_2x1; +// mod matmul_fp8x23_1x2; +// mod matmul_i32_1d; +// mod matmul_i32_2x2; +// mod matmul_i32_2x1; +// mod matmul_i32_1x2; +// mod matmul_i8_1d; +// mod matmul_i8_2x2; +// mod matmul_i8_2x1; +// mod matmul_i8_1x2; +// mod matmul_u32_1d; +// mod matmul_u32_2x2; +// mod matmul_u32_2x1; +// mod matmul_u32_1x2; +// mod mul_fp16x16; +// mod mul_fp16x16_broadcast; +// mod mul_fp8x23; +// mod mul_fp8x23_broadcast; +// mod mul_i32; +// mod mul_i32_broadcast; +// mod mul_i8; +// mod mul_i8_broadcast; +// mod mul_u32; +// mod mul_u32_broadcast; +// mod or_fp16x16; +// mod or_fp16x16_broadcast; +// mod or_fp8x23; +// mod or_fp8x23_broadcast; +// mod or_i32; +// mod or_i32_broadcast; +// mod or_i8; +// mod or_i8_broadcast; +// mod or_u32; +// mod or_u32_broadcast; +// mod reduce_sum_fp16x16_1D; +// mod reduce_sum_fp16x16_2D_default; +// mod reduce_sum_fp16x16_2D_keepdims; +// mod reduce_sum_fp16x16_2D_axis_1; +// mod reduce_sum_fp8x23_1D; +// mod 
reduce_sum_fp8x23_2D_default; +// mod reduce_sum_fp8x23_2D_keepdims; +// mod reduce_sum_fp8x23_2D_axis_1; +// mod reduce_sum_i32_1D; +// mod reduce_sum_i32_2D_default; +// mod reduce_sum_i32_2D_keepdims; +// mod reduce_sum_i32_2D_axis_1; +// mod reduce_sum_i8_1D; +// mod reduce_sum_i8_2D_default; +// mod reduce_sum_i8_2D_keepdims; +// mod reduce_sum_i8_2D_axis_1; +// mod reduce_sum_u32_1D; +// mod reduce_sum_u32_2D_default; +// mod reduce_sum_u32_2D_keepdims; +// mod reduce_sum_u32_2D_axis_1; +// mod relu_fp16x16; +// mod relu_fp8x23; +// mod relu_i32; +// mod relu_i8; +// mod sigmoid_fp16x16; +// mod sigmoid_fp8x23; +// mod sin_fp16x16; +// mod sin_fp8x23; +// mod sinh_fp16x16; +// mod sinh_fp8x23; +// mod softmax_fp16x16; +// mod softmax_fp8x23; +// mod softplus_fp8x23; +// mod softplus_fp16x16; +// mod softsign_fp8x23; +// mod softsign_fp16x16; +// mod sqrt_fp16x16; +// mod sqrt_fp8x23; +// mod sub_fp16x16; +// mod sub_fp16x16_broadcast; +// mod sub_fp8x23; +// mod sub_fp8x23_broadcast; +// mod sub_i32; +// mod sub_i32_broadcast; +// mod sub_i8; +// mod sub_i8_broadcast; +// mod sub_u32; +// mod sub_u32_broadcast; +// mod tanh_fp16x16; +// mod tanh_fp8x23; +// mod transpose_fp16x16_2d; +// mod transpose_fp16x16_3d; +// mod transpose_fp8x23_2d; +// mod transpose_fp8x23_3d; +// mod transpose_i32_2d; +// mod transpose_i32_3d; +// mod transpose_i8_2d; +// mod transpose_i8_3d; +// mod transpose_u32_2d; +// mod transpose_u32_3d; +// mod xor_fp16x16; +// mod xor_fp16x16_broadcast; +// mod xor_fp8x23; +// mod xor_fp8x23_broadcast; +// mod xor_i32; +// mod xor_i32_broadcast; +// mod xor_i8; +// mod xor_i8_broadcast; +// mod xor_u32; +// mod xor_u32_broadcast; +// mod less_fp16x16; +// mod less_fp16x16_broadcast; +// mod less_fp8x23; +// mod less_fp8x23_broadcast; +// mod less_i32; +// mod less_i32_broadcast; +// mod less_i8; +// mod less_i8_broadcast; +// mod less_u32; +// mod less_u32_broadcast; +// mod greater_equal_fp16x16; +// mod greater_equal_fp16x16_broadcast; +// 
mod greater_equal_fp8x23; +// mod greater_equal_fp8x23_broadcast; +// mod greater_equal_i32; +// mod greater_equal_i32_broadcast; +// mod greater_equal_i8; +// mod greater_equal_i8_broadcast; +// mod greater_equal_u32; +// mod greater_equal_u32_broadcast; +// mod slice_fp16x16_2d; +// mod slice_fp16x16_3d; +// mod slice_fp8x23_2d; +// mod slice_fp8x23_3d; +// mod slice_i32_2d; +// mod slice_i32_3d; +// mod slice_i8_2d; +// mod slice_i8_3d; +// mod slice_u32_2d; +// mod slice_u32_3d; +// mod gather_fp8x23_3d_default; +// mod gather_fp8x23_3d_axis1; +// mod gather_fp8x23_3d_axis2; +// mod gather_fp16x16_3d_default; +// mod gather_fp16x16_3d_axis1; +// mod gather_fp16x16_3d_axis2; +// mod gather_i8_3d_default; +// mod gather_i8_3d_axis1; +// mod gather_i8_3d_axis2; +// mod gather_i32_3d_default; +// mod gather_i32_3d_axis1; +// mod gather_i32_3d_axis2; +// mod gather_u32_3d_default; +// mod gather_u32_3d_axis1; +// mod gather_u32_3d_axis2; +// mod nonzero_fp16x16_2d; +// mod nonzero_fp16x16_3d; +// mod nonzero_fp8x23_2d; +// mod nonzero_fp8x23_3d; +// mod nonzero_i32_2d; +// mod nonzero_i32_3d; +// mod nonzero_i8_2d; +// mod nonzero_i8_3d; +// mod nonzero_u32_2d; +// mod nonzero_u32_3d; +// mod squeeze_fP16x16; +// mod squeeze_fP8x23; +// mod squeeze_i32; +// mod squeeze_i8; +// mod squeeze_u32; +// mod unsqueeze_fp16x16_2d; +// mod unsqueeze_fp16x16_3d; +// mod unsqueeze_fp8x23_2d; +// mod unsqueeze_fp8x23_3d; +// mod unsqueeze_i32_2d; +// mod unsqueeze_i32_3d; +// mod unsqueeze_i8_2d; +// mod unsqueeze_i8_3d; +// mod unsqueeze_u32_2d; +// mod unsqueeze_u32_3d; +// mod sign_fP16x16; +// mod sign_fP8x23; +// mod sign_fail; +// mod sign_i32; +// mod sign_i8; +// mod clip_fp16x16_2d; +// mod clip_fp16x16_3d; +// mod clip_fp8x23_2d; +// mod clip_fp8x23_3d; +// mod clip_i32_2d; +// mod clip_i32_3d; +// mod clip_i8_2d; +// mod clip_i8_3d; +// mod clip_u32_2d; +// mod clip_u32_3d; +// mod identity_fP16x16; +// mod identity_fP8x23; +// mod identity_i32; +// mod identity_i8; 
+// mod identity_u32; +// mod thresholded_relu_fp16x16; +// mod thresholded_relu_fp8x23; +// mod hard_sigmoid_fp8x23; +// mod hard_sigmoid_fp16x16; +// mod neg_fp16x16; +// mod neg_fp8x23; +// mod neg_i32; +// mod neg_i8; +// mod gemm_all_attributes; +// mod gemm_alpha; +// mod gemm_beta; +// mod gemm_default_matrix_bias; +// mod gemm_default_vector_bias; +// mod gemm_default_no_bias; +// mod gemm_transposeA; +// mod gemm_transposeB; +// mod min_fp16x16_three_tensors; +// mod min_fp16x16_broadcast_three_tensors; +// mod min_fp16x16_two_tensors; +// mod min_fp16x16_broadcast_two_tensors; +// mod min_fp8x23_three_tensors; +// mod min_fp8x23_broadcast_three_tensors; +// mod min_fp8x23_two_tensors; +// mod min_fp8x23_broadcast_two_tensors; +// mod min_i32_three_tensors; +// mod min_i32_broadcast_three_tensors; +// mod min_i32_two_tensors; +// mod min_i32_broadcast_two_tensors; +// mod min_i8_three_tensors; +// mod min_i8_broadcast_three_tensors; +// mod min_i8_two_tensors; +// mod min_i8_broadcast_two_tensors; +// mod min_u32_three_tensors; +// mod min_u32_broadcast_three_tensors; +// mod min_u32_two_tensors; +// mod min_u32_broadcast_two_tensors; +// mod where_fp16x16; +// mod where_fp16x16_broadcast; +// mod where_fp8x23; +// mod where_fp8x23_broadcast; +// mod where_i32; +// mod where_i32_broadcast; +// mod where_i8; +// mod where_i8_broadcast; +// mod where_u32; +// mod where_u32_broadcast; +// mod not_bool; +// mod round_fp16x16; +// mod round_fp8x23; +// mod max_fp16x16_three_tensors; +// mod max_fp16x16_broadcast_three_tensors; +// mod max_fp16x16_two_tensors; +// mod max_fp16x16_broadcast_two_tensors; +// mod max_fp8x23_three_tensors; +// mod max_fp8x23_broadcast_three_tensors; +// mod max_fp8x23_two_tensors; +// mod max_fp8x23_broadcast_two_tensors; +// mod max_i32_three_tensors; +// mod max_i32_broadcast_three_tensors; +// mod max_i32_two_tensors; +// mod max_i32_broadcast_two_tensors; +// mod max_i8_three_tensors; +// mod max_i8_broadcast_three_tensors; +// 
mod max_i8_two_tensors; +// mod max_i8_broadcast_two_tensors; +// mod max_u32_three_tensors; +// mod max_u32_broadcast_three_tensors; +// mod max_u32_two_tensors; +// mod max_u32_broadcast_two_tensors; +// mod scatter_fp16x16_3d_default; +// mod scatter_fp16x16_3d_axis1; +// mod scatter_fp16x16_3d_axis1_add; +// mod scatter_fp8x23_default; +// mod scatter_fp8x23_axis1; +// mod scatter_fp8x23_mul; +// mod scatter_i8_default; +// mod scatter_i8_axis1; +// mod scatter_i8_axis1_max; +// mod scatter_u32_default; +// mod scatter_u32_axis1; +// mod scatter_u32_add; +// mod array_feature_extractor_1D_i32; +// mod array_feature_extractor_1D_fp8x23; +// mod array_feature_extractor_1D_fp16x16; +// mod array_feature_extractor_2D_i32; +// mod array_feature_extractor_2D_fp8x23; +// mod array_feature_extractor_2D_fp16x16; +// mod array_feature_extractor_3D_i32; +// mod array_feature_extractor_3D_fp8x23; +// mod array_feature_extractor_3D_fp16x16; +// mod binarizer_fp16x16; +// mod binarizer_fp8x23; +// mod tril_fp16x16; +// mod tril_fp16x16_neg; +// mod tril_fp16x16_one_row; +// mod tril_fp16x16_out_neg; +// mod tril_fp16x16_out_pos; +// mod tril_fp16x16_pos; +// mod tril_fp16x16_square; +// mod tril_fp16x16_square_neg; +// mod tril_fp16x16_zero; +// mod triu_fp16x16; +// mod triu_fp16x16_neg; +// mod triu_fp16x16_one_row; +// mod triu_fp16x16_out_neg; +// mod triu_fp16x16_out_pos; +// mod triu_fp16x16_pos; +// mod triu_fp16x16_square; +// mod triu_fp16x16_square_neg; +// mod triu_fp16x16_zero; +// mod tril_fp8x23; +// mod tril_fp8x23_neg; +// mod tril_fp8x23_one_row; +// mod tril_fp8x23_out_neg; +// mod tril_fp8x23_out_pos; +// mod tril_fp8x23_pos; +// mod tril_fp8x23_square; +// mod tril_fp8x23_square_neg; +// mod tril_fp8x23_zero; +// mod triu_fp8x23; +// mod triu_fp8x23_neg; +// mod triu_fp8x23_one_row; +// mod triu_fp8x23_out_neg; +// mod triu_fp8x23_out_pos; +// mod triu_fp8x23_pos; +// mod triu_fp8x23_square; +// mod triu_fp8x23_square_neg; +// mod triu_fp8x23_zero; +// 
mod tril_i32; +// mod tril_neg_i32; +// mod tril_i32_one_row; +// mod tril_i32_out_neg; +// mod tril_i32_out_pos; +// mod tril_i32_pos; +// mod tril_i32_square; +// mod tril_i32_square_neg; +// mod tril_i32_zero; +// mod triu_i32; +// mod triu_i32_neg; +// mod triu_i32_one_row; +// mod triu_i32_out_neg; +// mod triu_i32_out_pos; +// mod triu_i32_pos; +// mod triu_i32_square; +// mod triu_i32_square_neg; +// mod triu_i32_zero; +// mod tril_i8; +// mod tril_i8_neg; +// mod tril_i8_one_row; +// mod tril_i8_out_neg; +// mod tril_i8_out_pos; +// mod tril_i8_pos; +// mod tril_i8_square; +// mod tril_i8_square_neg; +// mod tril_i8_zero; +// mod triu_i8; +// mod triu_i8_neg; +// mod triu_i8_one_row; +// mod triu_i8_out_neg; +// mod triu_i8_out_pos; +// mod triu_i8_pos; +// mod triu_i8_square; +// mod triu_i8_square_neg; +// mod triu_i8_zero; +// mod tril_u32; +// mod tril_u32_neg; +// mod tril_u32_one_row; +// mod tril_u32_out_neg; +// mod tril_u32_out_pos; +// mod tril_u32_pos; +// mod tril_u32_square; +// mod tril_u32_square_neg; +// mod tril_u32_zero; +// mod triu_u32; +// mod triu_u32_neg; +// mod triu_u32_one_row; +// mod triu_u32_out_neg; +// mod triu_u32_out_pos; +// mod triu_u32_pos; +// mod triu_u32_square; +// mod triu_u32_square_neg; +// mod triu_u32_zero; +// mod reduce_sum_square_fp16x16_export_do_not_keepdims; +// mod reduce_sum_square_fp16x16_export_keepdims; +// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +// mod reduce_sum_square_fp8x23_export_do_not_keepdims; +// mod reduce_sum_square_fp8x23_export_keepdims; +// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +// mod reduce_sum_square_i32_export_do_not_keepdims; +// mod reduce_sum_square_i32_export_keepdims; +// mod reduce_sum_square_i32_export_negative_axes_keepdims; +// mod reduce_sum_square_i8_export_do_not_keepdims; +// mod reduce_sum_square_i8_export_keepdims; +// mod reduce_sum_square_i8_export_negative_axes_keepdims; +// mod reduce_sum_square_u32_export_do_not_keepdims; 
+// mod reduce_sum_square_u32_export_keepdims; +// mod reduce_sum_square_u32_export_negative_axes_keepdims; +// mod reduce_l2_fp16x16_export_do_not_keepdims; +// mod reduce_l2_fp16x16_export_keepdims; +// mod reduce_l2_fp16x16_export_negative_axes_keepdims; +// mod reduce_l2_fp8x23_export_do_not_keepdims; +// mod reduce_l2_fp8x23_export_keepdims; +// mod reduce_l2_fp8x23_export_negative_axes_keepdims; +// mod reduce_l1_fp16x16_export_do_not_keepdims; +// mod reduce_l1_fp16x16_export_keepdims; +// mod reduce_l1_fp16x16_export_negative_axes_keepdims; +// mod reduce_l1_fp8x23_export_do_not_keepdims; +// mod reduce_l1_fp8x23_export_keepdims; +// mod reduce_l1_fp8x23_export_negative_axes_keepdims; +// mod reduce_l1_i32_export_do_not_keepdims; +// mod reduce_l1_i32_export_keepdims; +// mod reduce_l1_i32_export_negative_axes_keepdims; +// mod reduce_l1_i8_export_do_not_keepdims; +// mod reduce_l1_i8_export_keepdims; +// mod reduce_l1_i8_export_negative_axes_keepdims; +// mod reduce_l1_u32_export_do_not_keepdims; +// mod reduce_l1_u32_export_keepdims; +// mod reduce_l1_u32_export_negative_axes_keepdims; +// mod reduce_prod_fp16x16_1D; +// mod reduce_prod_fp16x16_2D_default; +// mod reduce_prod_fp16x16_2D_keepdims; +// mod reduce_prod_fp16x16_2D_axis_1; +// mod reduce_prod_fp8x23_1D; +// mod reduce_prod_fp8x23_2D_default; +// mod reduce_prod_fp8x23_2D_keepdims; +// mod reduce_prod_fp8x23_2D_axis_1; +// mod reduce_prod_i32_1D; +// mod reduce_prod_i32_2D_default; +// mod reduce_prod_i32_2D_keepdims; +// mod reduce_prod_i32_2D_axis_1; +// mod reduce_prod_i8_1D; +// mod reduce_prod_i8_2D_default; +// mod reduce_prod_i8_2D_keepdims; +// mod reduce_prod_i8_2D_axis_1; +// mod reduce_prod_u32_1D; +// mod reduce_prod_u32_2D_default; +// mod reduce_prod_u32_2D_keepdims; +// mod reduce_prod_u32_2D_axis_1; +// mod gather_elements_fp16x16_3d_default; +// mod gather_elements_fp16x16_3d_axis1; +// mod gather_elements_fp16x16_3d_axis2; +// mod gather_elements_fp8x23_3d_default; +// mod 
gather_elements_fp8x23_3d_axis1; +// mod gather_elements_fp8x23_3d_axis2; +// mod gather_elements_i8_3d_default; +// mod gather_elements_i8_3d_axis1; +// mod gather_elements_i32_3d_default; +// mod gather_elements_i32_3d_axis1; +// mod gather_elements_i32_3d_axis2; +// mod gather_elements_u32_default; +// mod gather_elements_u32_axis1; +// mod gather_elements_u32_axis2; +// mod gather_elements_u32_axis3; +// mod sequence_length_fp16x16; +// mod sequence_length_fp16x16_broadcast; +// mod sequence_length_fp8x23; +// mod sequence_length_fp8x23_broadcast; +// mod sequence_length_i32; +// mod sequence_length_i32_broadcast; +// mod sequence_length_i8; +// mod sequence_length_i8_broadcast; +// mod sequence_length_u32; +// mod sequence_length_u32_broadcast; +// mod sequence_at_u32_positive; +// mod sequence_at_u32_negative; +// mod sequence_at_fp16x16_positive; +// mod sequence_at_fp16x16_negative; +// mod sequence_at_fp8x23_positive; +// mod sequence_at_fp8x23_negative; +// mod sequence_at_i32_positive; +// mod sequence_at_i32_negative; +// mod sequence_at_i8_positive; +// mod sequence_at_i8_negative; +// mod reduce_min_fp16x16_1D; +// mod reduce_min_fp16x16_2D_default; +// mod reduce_min_fp16x16_2D_keepdims; +// mod reduce_min_fp16x16_2D_axis_1; +// mod reduce_min_fp8x23_1D; +// mod reduce_min_fp8x23_2D_default; +// mod reduce_min_fp8x23_2D_keepdims; +// mod reduce_min_fp8x23_2D_axis_1; +// mod reduce_min_i32_1D; +// mod reduce_min_i32_2D_default; +// mod reduce_min_i32_2D_keepdims; +// mod reduce_min_i32_2D_axis_1; +// mod reduce_min_i8_1D; +// mod reduce_min_i8_2D_default; +// mod reduce_min_i8_2D_keepdims; +// mod reduce_min_i8_2D_axis_1; +// mod reduce_min_u32_1D; +// mod reduce_min_u32_2D_default; +// mod reduce_min_u32_2D_keepdims; +// mod reduce_min_u32_2D_axis_1; +// mod sequence_construct_fp16x16; +// mod sequence_construct_fp8x23; +// mod sequence_construct_i32; +// mod sequence_construct_i8; +// mod sequence_construct_u32; +// mod shrink_hard_fp16x16; +// mod 
shrink_soft_fp16x16; +// mod shrink_hard_fp8x23; +// mod shrink_soft_fp8x23; +// mod sequence_empty_fp16x16; +// mod sequence_empty_fp8x23; +// mod sequence_empty_i32; +// mod sequence_empty_i8; +// mod sequence_empty_u32; +// mod reduce_mean_fp16x16_1D; +// mod reduce_mean_fp16x16_2D_default; +// mod reduce_mean_fp16x16_2D_keepdims; +// mod reduce_mean_fp16x16_2D_axis_1; +// mod reduce_mean_fp8x23_1D; +// mod reduce_mean_fp8x23_2D_default; +// mod reduce_mean_fp8x23_2D_keepdims; +// mod reduce_mean_fp8x23_2D_axis_1; +// mod reduce_mean_i32_1D; +// mod reduce_mean_i32_2D_default; +// mod reduce_mean_i32_2D_keepdims; +// mod reduce_mean_i32_2D_axis_1; +// mod reduce_mean_i8_1D; +// mod reduce_mean_i8_2D_default; +// mod reduce_mean_i8_2D_keepdims; +// mod reduce_mean_i8_2D_axis_1; +// mod reduce_mean_u32_1D; +// mod reduce_mean_u32_2D_default; +// mod reduce_mean_u32_2D_keepdims; +// mod reduce_mean_u32_2D_axis_1; +// mod pow_fp16x16; +// mod pow_fp16x16_broadcast; +// mod pow_fp8x23; +// mod pow_fp8x23_broadcast; +// mod sequence_erase_u32_positive; +// mod sequence_erase_u32_negative; +// mod sequence_erase_u32_empty; +// mod sequence_erase_fp16x16_positive; +// mod sequence_erase_fp16x16_negative; +// mod sequence_erase_fp16x16_empty; +// mod sequence_erase_fp8x23_positive; +// mod sequence_erase_fp8x23_negative; +// mod sequence_erase_fp8x23_empty; +// mod sequence_erase_i32_positive; +// mod sequence_erase_i32_negative; +// mod sequence_erase_i32_empty; +// mod sequence_erase_i8_positive; +// mod sequence_erase_i8_negative; +// mod sequence_erase_i8_empty; +// mod sequence_insert_fp16x16; +// mod sequence_insert_fp8x23; +// mod sequence_insert_i32; +// mod sequence_insert_i8; +// mod sequence_insert_u32; +// mod concat_from_sequence_fp8x23_new_axis_zero; +// mod concat_from_sequence_fp8x23_new_axis_one; +// mod concat_from_sequence_fp8x23_new_axis_default; +// mod concat_from_sequence_fp16x16_new_axis_zero; +// mod concat_from_sequence_fp16x16_new_axis_one; +// 
mod concat_from_sequence_fp16x16_new_axis_default; +// mod concat_from_sequence_i32_new_axis_zero; +// mod concat_from_sequence_i32_new_axis_one; +// mod concat_from_sequence_i32_new_axis_default; +// mod concat_from_sequence_i8_new_axis_zero; +// mod concat_from_sequence_i8_new_axis_one; +// mod concat_from_sequence_i8_new_axis_default; +// mod concat_from_sequence_u32_new_axis_zero; +// mod concat_from_sequence_u32_new_axis_one; +// mod concat_from_sequence_u32_new_axis_default; +// mod is_nan_fp16x16; +// mod is_nan_fp8x23; +// mod is_inf_fp16x16; +// mod is_inf_fp8x23; +// mod is_inf_i32; +// mod is_inf_i8; +// mod is_inf_u32; +// mod is_pos_inf_fp16x16; +// mod is_neg_inf_fp16x16; +// mod is_pos_inf_fp8x23; +// mod is_neg_inf_fp8x23; +// mod is_pos_inf_i32; +// mod is_neg_inf_i32; +// mod is_pos_inf_i8; +// mod is_neg_inf_i8; +// mod reduce_log_sum_fp8x23_export_do_not_keepdims; +// mod reduce_log_sum_fp8x23_export_keepdims; +// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +// mod reduce_log_sum_fp16x16_export_do_not_keepdims; +// mod reduce_log_sum_fp16x16_export_keepdims; +// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +// mod and_bool; +// mod erf_fp16x16; +// mod erf_fp8x23; +// mod unique_fp16x16_without_axis_sorted; +// mod unique_fp16x16_with_axis_zero_sorted; +// mod unique_u32_without_axis_sorted; +// mod unique_u32_without_axis_not_sorted; +// mod unique_u32_with_axis_zero_sorted; +// mod unique_u32_with_axis_zero_not_sorted; +// mod unique_u32_with_axis_one_sorted; +// mod unique_u32_with_axis_one_not_sorted; +// mod gather_nd_fp16x16_3d_default; +// mod gather_nd_fp16x16_3d_batch_dims1; +// mod gather_nd_fp16x16_3d_batch_dims2; +// mod gather_nd_fp8x23_3d_default; +// mod gather_nd_fp8x23_3d_batch_dims1; +// mod gather_nd_fp8x23_3d_batch_dims2; +// mod gather_nd_i32_3d_default; +// mod gather_nd_i32_3d_batch_dims1; +// mod gather_nd_i32_3d_batch_dims2; +// mod gather_nd_i8_3d_default; +// mod gather_nd_i8_3d_batch_dims1; 
+// mod gather_nd_u32_default; +// mod gather_nd_u32_batch_dims1; +// mod gather_nd_u32_batch_dims2; +// mod resize_upsample_scales_nearest; +// mod resize_downsample_scales_cubic; +// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +// mod resize_downsample_scales_cubic_align_corners; +// mod resize_upsample_scales_linear; +// mod resize_downsample_scales_linear_align_corners; +// mod resize_downsample_scales_nearest; +// mod resize_upsample_scales_cubic; +// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +// mod resize_upsample_scales_cubic_align_corners; +// mod resize_upsample_scales_cubic_asymmetric; +// mod resize_upsample_scales_linear_align_corners; +// mod resize_upsample_sizes_nearest; +// mod resize_upsample_sizes_cubic; +// mod resize_downsample_sizes_cubic; +// mod resize_downsample_sizes_nearest; +// mod resize_upsample_scales_linear_half_pixel_symmetric; +// mod resize_downsample_scales_cubic_antialias; +// mod resize_downsample_scales_linear_antialias; +// mod resize_downsample_sizes_cubic_antialias; +// mod resize_downsample_sizes_linear_pytorch_half_pixel; +// mod resize_tf_crop_and_resize; +// mod resize_tf_crop_and_resize_extrapolation_value; +// mod resize_upsample_scales_nearest_axes_2_3; +// mod resize_upsample_scales_nearest_axes_3_2; +// mod resize_upsample_sizes_nearest_axes_2_3; +// mod resize_upsample_sizes_nearest_ceil_half_pixel; +// mod resize_upsample_sizes_nearest_floor_align_corners; +// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +// mod resize_downsample_scales_linear_half_pixel_symmetric; +// mod resize_downsample_sizes_nearest_not_larger; +// mod resize_downsample_sizes_nearest_not_smaller; +// mod resize_tf_crop_and_resize_axes_2_3; +// mod resize_tf_crop_and_resize_axes_3_2; +// mod resize_upsample_sizes_nearest_axes_3_2; +// mod resize_upsample_sizes_nearest_not_larger; +// mod resize_upsample_sizes_nearest_not_smaller; +// mod compress_fp16x16_3d_default; +// mod 
compress_fp16x16_3d_axis1; +// mod compress_fp16x16_3d_axis2; +// mod compress_fp16x16_3d_axis3; +// mod compress_fp16x16_3d_noaxis; +// mod compress_fp8x23_3d_default; +// mod compress_fp8x23_3d_axis1; +// mod compress_fp8x23_3d_axis2; +// mod compress_i32_3d_default; +// mod compress_i32_3d_axis1; +// mod compress_i32_3d_axis2; +// mod compress_i8_3d_default; +// mod compress_i8_3d_axis1; +// mod compress_i8_3d_axis2; +// mod compress_u32_3d_default; +// mod compress_u32_3d_axis1; +// mod compress_u32_3d_axis2; +// mod compress_u32_3d_axis2_2; +// mod compress_u32_3d_axis3; +// mod layer_normalization_default_axis; +// mod layer_normalization_4d_axis0; +// mod layer_normalization_4d_axis1; +// mod layer_normalization_4d_axis2; +// mod layer_normalization_4d_axis3; +// mod layer_normalization_3d_axis0_epsilon; +// mod layer_normalization_3d_axis1_epsilon; +// mod layer_normalization_3d_axis2_epsilon; +// mod layer_normalization_4d_axis_negative_4; +// mod layer_normalization_4d_axis_negative_3; +// mod layer_normalization_4d_axis_negative_2; +// mod layer_normalization_4d_axis_negative_1; +// mod layer_normalization_3d_axis_negative_3_epsilon; +// mod layer_normalization_3d_axis_negative_2_epsilon; +// mod layer_normalization_3d_axis_negative_1_epsilon; +// mod layer_normalization_test; +// mod split_u32_1d_equal_parts; +// mod split_u32_2d_equal_parts; +// mod split_u32_zero_size; +// mod split_u32_1d_variable_parts; +// mod split_u32_2d_variable_parts; +// mod split_u32_1d_uneven; +// mod split_u32_2d_uneven; +// mod split_fp16x16_1d_equal_parts; +// mod split_fp16x16_1d_variable_parts; +// mod split_fp16x16_2d_equal_parts; +// mod split_fp16x16_2d_variable_parts; +// mod split_fp16x16_zero_size; +// mod split_fp16x16_1d_uneven; +// mod split_fp16x16_2d_uneven; + +mod scatter_nd_fp16x16_3d_default; +mod scatter_nd_fp16x16_3d_add; +mod scatter_nd_fp16x16_3d_mul; +mod scatter_nd_fp16x16_3d_max; +mod scatter_nd_fp16x16_3d_min; +mod scatter_nd_fp8x23_3d_default; 
+mod scatter_nd_fp8x23_3d_add; +mod scatter_nd_fp8x23_3d_mul; +mod scatter_nd_fp8x23_3d_max; +mod scatter_nd_fp8x23_3d_min; mod gather_nd_u32_default; -mod gather_nd_u32_batch_dims1; -mod gather_nd_u32_batch_dims2; -mod resize_upsample_scales_nearest; -mod resize_downsample_scales_cubic; -mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_downsample_scales_cubic_align_corners; -mod resize_upsample_scales_linear; -mod resize_downsample_scales_linear_align_corners; -mod resize_downsample_scales_nearest; -mod resize_upsample_scales_cubic; -mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_upsample_scales_cubic_align_corners; -mod resize_upsample_scales_cubic_asymmetric; -mod resize_upsample_scales_linear_align_corners; -mod resize_upsample_sizes_nearest; -mod resize_upsample_sizes_cubic; -mod resize_downsample_sizes_cubic; -mod resize_downsample_sizes_nearest; -mod resize_upsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_scales_cubic_antialias; -mod resize_downsample_scales_linear_antialias; -mod resize_downsample_sizes_cubic_antialias; -mod resize_downsample_sizes_linear_pytorch_half_pixel; -mod resize_tf_crop_and_resize; -mod resize_tf_crop_and_resize_extrapolation_value; -mod resize_upsample_scales_nearest_axes_2_3; -mod resize_upsample_scales_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_axes_2_3; -mod resize_upsample_sizes_nearest_ceil_half_pixel; -mod resize_upsample_sizes_nearest_floor_align_corners; -mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -mod resize_downsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_sizes_nearest_not_larger; -mod resize_downsample_sizes_nearest_not_smaller; -mod resize_tf_crop_and_resize_axes_2_3; -mod resize_tf_crop_and_resize_axes_3_2; -mod resize_upsample_sizes_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_not_larger; -mod resize_upsample_sizes_nearest_not_smaller; -mod compress_fp16x16_3d_default; -mod 
compress_fp16x16_3d_axis1; -mod compress_fp16x16_3d_axis2; -mod compress_fp16x16_3d_axis3; -mod compress_fp16x16_3d_noaxis; -mod compress_fp8x23_3d_default; -mod compress_fp8x23_3d_axis1; -mod compress_fp8x23_3d_axis2; -mod compress_i32_3d_default; -mod compress_i32_3d_axis1; -mod compress_i32_3d_axis2; -mod compress_i8_3d_default; -mod compress_i8_3d_axis1; -mod compress_i8_3d_axis2; -mod compress_u32_3d_default; -mod compress_u32_3d_axis1; -mod compress_u32_3d_axis2; -mod compress_u32_3d_axis2_2; -mod compress_u32_3d_axis3; -mod layer_normalization_default_axis; -mod layer_normalization_4d_axis0; -mod layer_normalization_4d_axis1; -mod layer_normalization_4d_axis2; -mod layer_normalization_4d_axis3; -mod layer_normalization_3d_axis0_epsilon; -mod layer_normalization_3d_axis1_epsilon; -mod layer_normalization_3d_axis2_epsilon; -mod layer_normalization_4d_axis_negative_4; -mod layer_normalization_4d_axis_negative_3; -mod layer_normalization_4d_axis_negative_2; -mod layer_normalization_4d_axis_negative_1; -mod layer_normalization_3d_axis_negative_3_epsilon; -mod layer_normalization_3d_axis_negative_2_epsilon; -mod layer_normalization_3d_axis_negative_1_epsilon; -mod layer_normalization_test; -mod split_u32_1d_equal_parts; -mod split_u32_2d_equal_parts; -mod split_u32_zero_size; -mod split_u32_1d_variable_parts; -mod split_u32_2d_variable_parts; -mod split_u32_1d_uneven; -mod split_u32_2d_uneven; -mod split_fp16x16_1d_equal_parts; -mod split_fp16x16_1d_variable_parts; -mod split_fp16x16_2d_equal_parts; -mod split_fp16x16_2d_variable_parts; -mod split_fp16x16_zero_size; -mod split_fp16x16_1d_uneven; -mod split_fp16x16_2d_uneven; +mod scatter_nd_u32_default; +mod scatter_nd_u32_add; +mod scatter_nd_u32_mul; +mod scatter_nd_u32_max; +mod scatter_nd_u32_min; diff --git a/tests/nodes/gather_nd_u32_default.cairo b/tests/nodes/gather_nd_u32_default.cairo index be6edd699..222bb0b9a 100644 --- a/tests/nodes/gather_nd_u32_default.cairo +++ 
b/tests/nodes/gather_nd_u32_default.cairo @@ -6,8 +6,8 @@ mod output_0; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32Tensor; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32Tensor; #[test] #[available_gas(2000000000)] @@ -16,7 +16,7 @@ fn test_gather_nd_u32_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); + let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_default/input_1.cairo b/tests/nodes/gather_nd_u32_default/input_1.cairo index bb54a8189..50e1c78e3 100644 --- a/tests/nodes/gather_nd_u32_default/input_1.cairo +++ b/tests/nodes/gather_nd_u32_default/input_1.cairo @@ -10,22 +10,22 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(1); - data.append(1); + data.append(0); data.append(0); data.append(0); data.append(1); data.append(1); data.append(0); data.append(0); + data.append(0); data.append(1); + data.append(0); data.append(1); + data.append(0); data.append(1); data.append(0); data.append(1); data.append(1); data.append(1); - data.append(0); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/gather_nd_u32_default/input_2.cairo b/tests/nodes/gather_nd_u32_default/input_2.cairo new file mode 100644 index 000000000..75a33100a --- /dev/null +++ b/tests/nodes/gather_nd_u32_default/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(1); + 
TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/gather_nd_u32_default/output_0.cairo b/tests/nodes/gather_nd_u32_default/output_0.cairo index f54097ea6..ee76830c2 100644 --- a/tests/nodes/gather_nd_u32_default/output_0.cairo +++ b/tests/nodes/gather_nd_u32_default/output_0.cairo @@ -10,6 +10,54 @@ fn output_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(48); + data.append(49); + data.append(50); + data.append(51); + data.append(52); + data.append(53); + data.append(54); + data.append(55); + data.append(56); + data.append(57); + data.append(58); + data.append(59); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); data.append(12); data.append(13); data.append(14); @@ -22,18 +70,6 @@ fn output_0() -> Tensor { data.append(21); data.append(22); data.append(23); - data.append(36); - data.append(37); - data.append(38); - data.append(39); - data.append(40); - data.append(41); - data.append(42); - data.append(43); - data.append(44); - data.append(45); - data.append(46); - data.append(47); data.append(12); data.append(13); data.append(14); @@ -46,18 +82,6 @@ fn output_0() -> Tensor { data.append(21); data.append(22); data.append(23); - data.append(36); - data.append(37); - data.append(38); - data.append(39); - data.append(40); - data.append(41); - data.append(42); - data.append(43); - data.append(44); - data.append(45); - 
data.append(46); - data.append(47); data.append(12); data.append(13); data.append(14); @@ -70,18 +94,6 @@ fn output_0() -> Tensor { data.append(21); data.append(22); data.append(23); - data.append(48); - data.append(49); - data.append(50); - data.append(51); - data.append(52); - data.append(53); - data.append(54); - data.append(55); - data.append(56); - data.append(57); - data.append(58); - data.append(59); data.append(12); data.append(13); data.append(14); @@ -106,17 +118,5 @@ fn output_0() -> Tensor { data.append(57); data.append(58); data.append(59); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/scatter_nd_fp16x16_3d_add.cairo b/tests/nodes/scatter_nd_fp16x16_3d_add.cairo new file mode 100644 index 000000000..95b09a56d --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_add.cairo @@ -0,0 +1,26 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16Tensor; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_fp16x16_3d_add() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add')); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_add/input_0.cairo b/tests/nodes/scatter_nd_fp16x16_3d_add/input_0.cairo new file mode 100644 index 000000000..a89cf9ea6 --- /dev/null +++ 
b/tests/nodes/scatter_nd_fp16x16_3d_add/input_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: 
false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_add/input_1.cairo b/tests/nodes/scatter_nd_fp16x16_3d_add/input_1.cairo new file mode 100644 
index 000000000..2a367e447 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_add/input_1.cairo @@ -0,0 +1,46 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + 
data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_add/input_2.cairo b/tests/nodes/scatter_nd_fp16x16_3d_add/input_2.cairo new file mode 100644 index 000000000..deae3cbd1 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_add/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_add/output_0.cairo b/tests/nodes/scatter_nd_fp16x16_3d_add/output_0.cairo new file mode 100644 index 000000000..96d47e439 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_add/output_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 
851968, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + 
data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_default.cairo b/tests/nodes/scatter_nd_fp16x16_3d_default.cairo new file mode 100644 index 000000000..80bb892de --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_default.cairo @@ -0,0 +1,26 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16Tensor; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_fp16x16_3d_default() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(())); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_default/input_0.cairo 
b/tests/nodes/scatter_nd_fp16x16_3d_default/input_0.cairo new file mode 100644 index 000000000..a89cf9ea6 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_default/input_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { 
mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git 
a/tests/nodes/scatter_nd_fp16x16_3d_default/input_1.cairo b/tests/nodes/scatter_nd_fp16x16_3d_default/input_1.cairo new file mode 100644 index 000000000..2a367e447 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_default/input_1.cairo @@ -0,0 +1,46 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + 
data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_default/input_2.cairo b/tests/nodes/scatter_nd_fp16x16_3d_default/input_2.cairo new file mode 100644 index 000000000..deae3cbd1 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_default/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_default/output_0.cairo b/tests/nodes/scatter_nd_fp16x16_3d_default/output_0.cairo new file mode 100644 index 000000000..270454c2f --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_default/output_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + 
data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: 
false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_max.cairo b/tests/nodes/scatter_nd_fp16x16_3d_max.cairo new file mode 100644 index 000000000..84e99545c --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_max.cairo @@ -0,0 +1,26 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16Tensor; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_fp16x16_3d_max() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, 
reduction:Option::Some('max')); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_max/input_0.cairo b/tests/nodes/scatter_nd_fp16x16_3d_max/input_0.cairo new file mode 100644 index 000000000..a89cf9ea6 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_max/input_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + 
data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: 
false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_max/input_1.cairo b/tests/nodes/scatter_nd_fp16x16_3d_max/input_1.cairo new file mode 100644 index 000000000..2a367e447 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_max/input_1.cairo @@ -0,0 +1,46 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + 
data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_max/input_2.cairo b/tests/nodes/scatter_nd_fp16x16_3d_max/input_2.cairo new file mode 100644 index 000000000..deae3cbd1 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_max/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_max/output_0.cairo b/tests/nodes/scatter_nd_fp16x16_3d_max/output_0.cairo new file mode 100644 index 000000000..4cd31a723 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_max/output_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 
393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + 
data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_min.cairo b/tests/nodes/scatter_nd_fp16x16_3d_min.cairo new file mode 100644 index 000000000..9ee1c89b6 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_min.cairo @@ -0,0 +1,26 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16Tensor; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_fp16x16_3d_min() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = 
output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min')); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_min/input_0.cairo b/tests/nodes/scatter_nd_fp16x16_3d_min/input_0.cairo new file mode 100644 index 000000000..a89cf9ea6 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_min/input_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 
524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + 
data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_min/input_1.cairo b/tests/nodes/scatter_nd_fp16x16_3d_min/input_1.cairo new file mode 100644 index 000000000..2a367e447 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_min/input_1.cairo @@ -0,0 +1,46 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { 
mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_min/input_2.cairo b/tests/nodes/scatter_nd_fp16x16_3d_min/input_2.cairo new file mode 100644 index 000000000..deae3cbd1 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_min/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_min/output_0.cairo b/tests/nodes/scatter_nd_fp16x16_3d_min/output_0.cairo new file mode 100644 index 000000000..6b5a8a426 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_min/output_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + 
data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false 
}); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_mul.cairo b/tests/nodes/scatter_nd_fp16x16_3d_mul.cairo new file mode 100644 index 000000000..2d5716d98 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_mul.cairo @@ -0,0 +1,26 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16Tensor; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_fp16x16_3d_mul() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); 
+ let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul')); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_mul/input_0.cairo b/tests/nodes/scatter_nd_fp16x16_3d_mul/input_0.cairo new file mode 100644 index 000000000..a89cf9ea6 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_mul/input_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 
458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + 
data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_mul/input_1.cairo b/tests/nodes/scatter_nd_fp16x16_3d_mul/input_1.cairo new file mode 100644 index 000000000..2a367e447 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_mul/input_1.cairo @@ -0,0 +1,46 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { 
mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_mul/input_2.cairo b/tests/nodes/scatter_nd_fp16x16_3d_mul/input_2.cairo new file mode 100644 index 000000000..deae3cbd1 --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_mul/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp16x16_3d_mul/output_0.cairo b/tests/nodes/scatter_nd_fp16x16_3d_mul/output_0.cairo new file mode 100644 index 000000000..9de67158d --- /dev/null +++ b/tests/nodes/scatter_nd_fp16x16_3d_mul/output_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); 
+ data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 2293760, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 
196608, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_add.cairo b/tests/nodes/scatter_nd_fp8x23_3d_add.cairo new file mode 100644 index 000000000..3b748994a --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_add.cairo @@ -0,0 +1,26 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::operators::tensor::FP8x23Tensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::U32Tensor; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_fp8x23_3d_add() { 
+ let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add')); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_add/input_0.cairo b/tests/nodes/scatter_nd_fp8x23_3d_add/input_0.cairo new file mode 100644 index 000000000..ca4d75fc8 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_add/input_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); 
+ data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + 
data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_add/input_1.cairo b/tests/nodes/scatter_nd_fp8x23_3d_add/input_1.cairo new file mode 100644 index 000000000..c9780806b --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_add/input_1.cairo @@ -0,0 +1,46 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + 
data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_add/input_2.cairo b/tests/nodes/scatter_nd_fp8x23_3d_add/input_2.cairo new file mode 100644 index 000000000..deae3cbd1 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_add/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_add/output_0.cairo b/tests/nodes/scatter_nd_fp8x23_3d_add/output_0.cairo new file mode 100644 index 000000000..cc27c0368 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_add/output_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 
50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 75497472, sign: false }); + data.append(FP8x23 { mag: 92274688, sign: false }); + data.append(FP8x23 { mag: 100663296, sign: false }); + data.append(FP8x23 { mag: 109051904, sign: false }); + data.append(FP8x23 { mag: 117440512, sign: false }); + data.append(FP8x23 { mag: 125829120, sign: false }); + data.append(FP8x23 { mag: 117440512, sign: false }); + data.append(FP8x23 { mag: 109051904, sign: false }); + data.append(FP8x23 { mag: 100663296, sign: false }); + data.append(FP8x23 { mag: 100663296, sign: false }); + data.append(FP8x23 { mag: 92274688, sign: false }); + data.append(FP8x23 { mag: 83886080, sign: false }); + data.append(FP8x23 { mag: 75497472, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 75497472, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { 
mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 75497472, sign: false }); + data.append(FP8x23 { mag: 83886080, sign: false }); + data.append(FP8x23 { mag: 92274688, sign: false }); + data.append(FP8x23 { mag: 100663296, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_default.cairo b/tests/nodes/scatter_nd_fp8x23_3d_default.cairo new file mode 100644 index 000000000..75dc57f69 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_default.cairo @@ -0,0 +1,26 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::operators::tensor::FP8x23Tensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use 
orion::operators::tensor::U32Tensor; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_fp8x23_3d_default() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(())); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_default/input_0.cairo b/tests/nodes/scatter_nd_fp8x23_3d_default/input_0.cairo new file mode 100644 index 000000000..ca4d75fc8 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_default/input_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, 
sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, 
sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_default/input_1.cairo b/tests/nodes/scatter_nd_fp8x23_3d_default/input_1.cairo new file mode 100644 index 000000000..c9780806b --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_default/input_1.cairo @@ -0,0 +1,46 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 
67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_default/input_2.cairo b/tests/nodes/scatter_nd_fp8x23_3d_default/input_2.cairo new file mode 100644 index 000000000..deae3cbd1 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_default/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_default/output_0.cairo b/tests/nodes/scatter_nd_fp8x23_3d_default/output_0.cairo new file mode 100644 index 000000000..0837adfb7 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_default/output_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 
8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_max.cairo b/tests/nodes/scatter_nd_fp8x23_3d_max.cairo new file mode 100644 index 000000000..d09351807 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_max.cairo @@ -0,0 +1,26 @@ +mod input_0; +mod input_1; 
+mod input_2; +mod output_0; + + +use orion::operators::tensor::FP8x23Tensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::U32Tensor; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_fp8x23_3d_max() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max')); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_max/input_0.cairo b/tests/nodes/scatter_nd_fp8x23_3d_max/input_0.cairo new file mode 100644 index 000000000..ca4d75fc8 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_max/input_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 
33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 
58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_max/input_1.cairo b/tests/nodes/scatter_nd_fp8x23_3d_max/input_1.cairo new file mode 100644 index 000000000..c9780806b --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_max/input_1.cairo @@ -0,0 +1,46 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 
58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_max/input_2.cairo b/tests/nodes/scatter_nd_fp8x23_3d_max/input_2.cairo new file mode 100644 index 000000000..deae3cbd1 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_max/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_max/output_0.cairo b/tests/nodes/scatter_nd_fp8x23_3d_max/output_0.cairo new file mode 100644 index 000000000..fe30740b5 --- 
/dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_max/output_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + 
data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_min.cairo 
b/tests/nodes/scatter_nd_fp8x23_3d_min.cairo new file mode 100644 index 000000000..dadc8d27d --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_min.cairo @@ -0,0 +1,26 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::operators::tensor::FP8x23Tensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::U32Tensor; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_fp8x23_3d_min() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min')); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_min/input_0.cairo b/tests/nodes/scatter_nd_fp8x23_3d_min/input_0.cairo new file mode 100644 index 000000000..ca4d75fc8 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_min/input_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false 
}); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + 
data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_min/input_1.cairo b/tests/nodes/scatter_nd_fp8x23_3d_min/input_1.cairo new file mode 100644 index 000000000..c9780806b --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_min/input_1.cairo @@ -0,0 +1,46 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + 
data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_min/input_2.cairo b/tests/nodes/scatter_nd_fp8x23_3d_min/input_2.cairo new file mode 100644 index 000000000..deae3cbd1 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_min/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(2); + TensorTrait::new(shape.span(), 
data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_min/output_0.cairo b/tests/nodes/scatter_nd_fp8x23_3d_min/output_0.cairo new file mode 100644 index 000000000..f556a5e17 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_min/output_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 
58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, 
sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_mul.cairo b/tests/nodes/scatter_nd_fp8x23_3d_mul.cairo new file mode 100644 index 000000000..853780f6c --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_mul.cairo @@ -0,0 +1,26 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::operators::tensor::FP8x23Tensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::U32Tensor; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_fp8x23_3d_mul() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul')); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_mul/input_0.cairo b/tests/nodes/scatter_nd_fp8x23_3d_mul/input_0.cairo new file mode 100644 index 000000000..ca4d75fc8 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_mul/input_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, 
sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: 
false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_mul/input_1.cairo b/tests/nodes/scatter_nd_fp8x23_3d_mul/input_1.cairo new file mode 100644 index 000000000..c9780806b --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_mul/input_1.cairo @@ -0,0 +1,46 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: 
false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_mul/input_2.cairo b/tests/nodes/scatter_nd_fp8x23_3d_mul/input_2.cairo new file mode 100644 index 000000000..deae3cbd1 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_mul/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut 
shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_fp8x23_3d_mul/output_0.cairo b/tests/nodes/scatter_nd_fp8x23_3d_mul/output_0.cairo new file mode 100644 index 000000000..574c93683 --- /dev/null +++ b/tests/nodes/scatter_nd_fp8x23_3d_mul/output_0.cairo @@ -0,0 +1,78 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23Tensor; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 83886080, sign: false }); + data.append(FP8x23 { mag: 125829120, sign: false }); + data.append(FP8x23 { mag: 167772160, sign: false }); + data.append(FP8x23 { mag: 251658240, sign: false }); + data.append(FP8x23 { mag: 301989888, sign: false }); + data.append(FP8x23 { mag: 352321536, sign: false }); + data.append(FP8x23 { mag: 402653184, sign: false }); + data.append(FP8x23 { mag: 469762048, sign: false }); + data.append(FP8x23 { mag: 411041792, sign: false }); + data.append(FP8x23 { mag: 352321536, sign: false }); + data.append(FP8x23 { mag: 293601280, sign: false }); + data.append(FP8x23 { mag: 268435456, sign: false }); + data.append(FP8x23 { mag: 201326592, sign: false }); + data.append(FP8x23 { mag: 134217728, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + 
data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 75497472, sign: false }); + data.append(FP8x23 { mag: 100663296, sign: false }); + data.append(FP8x23 { mag: 167772160, sign: false }); + data.append(FP8x23 { mag: 201326592, sign: false }); + data.append(FP8x23 { mag: 234881024, sign: false }); + data.append(FP8x23 { mag: 268435456, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + 
data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + data.append(FP8x23 { mag: 50331648, sign: false }); + data.append(FP8x23 { mag: 58720256, sign: false }); + data.append(FP8x23 { mag: 67108864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_add.cairo b/tests/nodes/scatter_nd_u32_add.cairo new file mode 100644 index 000000000..cf3c4018b --- /dev/null +++ b/tests/nodes/scatter_nd_u32_add.cairo @@ -0,0 +1,24 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_u32_add() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add')); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_u32_add/input_0.cairo b/tests/nodes/scatter_nd_u32_add/input_0.cairo new file mode 100644 index 000000000..e2ac66c04 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_add/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + TensorTrait::new(shape.span(), data.span()) +} diff --git 
a/tests/nodes/scatter_nd_u32_add/input_1.cairo b/tests/nodes/scatter_nd_u32_add/input_1.cairo new file mode 100644 index 000000000..09f6288e7 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_add/input_1.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(78); + data.append(37); + data.append(45); + data.append(25); + data.append(64); + data.append(10); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_add/input_2.cairo b/tests/nodes/scatter_nd_u32_add/input_2.cairo new file mode 100644 index 000000000..48f489b9d --- /dev/null +++ b/tests/nodes/scatter_nd_u32_add/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_add/output_0.cairo b/tests/nodes/scatter_nd_u32_add/output_0.cairo new file mode 100644 index 000000000..55bf41a6c --- /dev/null +++ b/tests/nodes/scatter_nd_u32_add/output_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(25); + data.append(65); + data.append(12); + data.append(81); + data.append(41); + data.append(50); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + 
data.append(11); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_default.cairo b/tests/nodes/scatter_nd_u32_default.cairo new file mode 100644 index 000000000..076d44277 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_default.cairo @@ -0,0 +1,24 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_u32_default() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(())); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_u32_default/input_0.cairo b/tests/nodes/scatter_nd_u32_default/input_0.cairo new file mode 100644 index 000000000..e2ac66c04 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_default/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_default/input_1.cairo b/tests/nodes/scatter_nd_u32_default/input_1.cairo new file mode 100644 index 000000000..f17fbb35c --- /dev/null +++ b/tests/nodes/scatter_nd_u32_default/input_1.cairo @@ -0,0 +1,18 @@ +use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(41); + data.append(6); + data.append(63); + data.append(34); + data.append(67); + data.append(10); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_default/input_2.cairo b/tests/nodes/scatter_nd_u32_default/input_2.cairo new file mode 100644 index 000000000..c1d740472 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_default/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_default/output_0.cairo b/tests/nodes/scatter_nd_u32_default/output_0.cairo new file mode 100644 index 000000000..ce19be59a --- /dev/null +++ b/tests/nodes/scatter_nd_u32_default/output_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(41); + data.append(6); + data.append(63); + data.append(34); + data.append(67); + data.append(10); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_max.cairo b/tests/nodes/scatter_nd_u32_max.cairo new file mode 100644 index 000000000..5d3a4940d --- 
/dev/null +++ b/tests/nodes/scatter_nd_u32_max.cairo @@ -0,0 +1,24 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_u32_max() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max')); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_u32_max/input_0.cairo b/tests/nodes/scatter_nd_u32_max/input_0.cairo new file mode 100644 index 000000000..e2ac66c04 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_max/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_max/input_1.cairo b/tests/nodes/scatter_nd_u32_max/input_1.cairo new file mode 100644 index 000000000..f765ef002 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_max/input_1.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = 
ArrayTrait::new(); + data.append(23); + data.append(0); + data.append(0); + data.append(16); + data.append(12); + data.append(35); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_max/input_2.cairo b/tests/nodes/scatter_nd_u32_max/input_2.cairo new file mode 100644 index 000000000..c1d740472 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_max/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_max/output_0.cairo b/tests/nodes/scatter_nd_u32_max/output_0.cairo new file mode 100644 index 000000000..7fac393e5 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_max/output_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(23); + data.append(1); + data.append(2); + data.append(16); + data.append(12); + data.append(35); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_min.cairo b/tests/nodes/scatter_nd_u32_min.cairo new file mode 100644 index 000000000..63033d6b7 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_min.cairo @@ -0,0 +1,24 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use 
orion::operators::tensor::U32Tensor; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_u32_min() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min')); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_u32_min/input_0.cairo b/tests/nodes/scatter_nd_u32_min/input_0.cairo new file mode 100644 index 000000000..e2ac66c04 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_min/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_min/input_1.cairo b/tests/nodes/scatter_nd_u32_min/input_1.cairo new file mode 100644 index 000000000..885baf736 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_min/input_1.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(37); + data.append(23); + data.append(29); + data.append(38); + data.append(74); + data.append(43); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_min/input_2.cairo 
b/tests/nodes/scatter_nd_u32_min/input_2.cairo new file mode 100644 index 000000000..c1d740472 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_min/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_min/output_0.cairo b/tests/nodes/scatter_nd_u32_min/output_0.cairo new file mode 100644 index 000000000..e597b512b --- /dev/null +++ b/tests/nodes/scatter_nd_u32_min/output_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_mul.cairo b/tests/nodes/scatter_nd_u32_mul.cairo new file mode 100644 index 000000000..b5367e914 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_mul.cairo @@ -0,0 +1,24 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_scatter_nd_u32_mul() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 
= input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul')); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/scatter_nd_u32_mul/input_0.cairo b/tests/nodes/scatter_nd_u32_mul/input_0.cairo new file mode 100644 index 000000000..e2ac66c04 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_mul/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_mul/input_1.cairo b/tests/nodes/scatter_nd_u32_mul/input_1.cairo new file mode 100644 index 000000000..06d68c0fc --- /dev/null +++ b/tests/nodes/scatter_nd_u32_mul/input_1.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(46); + data.append(42); + data.append(72); + data.append(75); + data.append(4); + data.append(14); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_mul/input_2.cairo b/tests/nodes/scatter_nd_u32_mul/input_2.cairo new file mode 100644 index 000000000..c1d740472 --- /dev/null +++ b/tests/nodes/scatter_nd_u32_mul/input_2.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::U32Tensor; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/scatter_nd_u32_mul/output_0.cairo b/tests/nodes/scatter_nd_u32_mul/output_0.cairo new file mode 100644 index 000000000..8d7addb9b --- /dev/null +++ b/tests/nodes/scatter_nd_u32_mul/output_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(42); + data.append(144); + data.append(225); + data.append(16); + data.append(70); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + TensorTrait::new(shape.span(), data.span()) +} From 8db8f22dcc4b158b9cd97392ea2d56c149ec3972 Mon Sep 17 00:00:00 2001 From: Hakeem Kazeem Date: Wed, 17 Jan 2024 19:40:24 +0100 Subject: [PATCH 02/46] changelog, summary and readme --- docs/CHANGELOG.md | 4 + docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 3 +- docs/framework/operators/tensor/README.md | 1 + .../operators/tensor/tensor.compress.md | 2 + .../operators/tensor/tensor.gather_nd.md | 2 + .../operators/tensor/tensor.scatter_nd.md | 76 +++++++++++++++++++ 7 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 docs/framework/operators/tensor/tensor.scatter_nd.md diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index a7ec58d5f..16a00107b 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -3,7 +3,11 @@ All notable changes to this project will be documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [Unreleased] - 2024-01-17 +## Added +- Scatter Nd Operator. +- ## [Unreleased] - 2023-12-25 ## Added diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index f956159ec..10a886009 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -123,6 +123,7 @@ * [tensor.unique](framework/operators/tensor/tensor.unique.md) * [tensor.compress](framework/operators/tensor/tensor.compress.md) * [tensor.layer_normalization](framework/operators/tensor/tensor.layer_normalization.md) + * [tensor.scatter\_nd](framework/operators/tensor/tensor.scatter\_nd.md) * [Neural Network](framework/operators/neural-network/README.md) * [nn.relu](framework/operators/neural-network/nn.relu.md) * [nn.leaky\_relu](framework/operators/neural-network/nn.leaky\_relu.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index 0e0e5be17..b8cd4ca02 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -108,5 +108,6 @@ You can see below the list of current supported ONNX Operators: | [Erf](operators/tensor/tensor.erf.md) | :white\_check\_mark: | | [Compress](operators/tensor/tensor.compress.md) | :white\_check\_mark: | | [Layer_normalization](operators/tensor/tensor.layer_normalization.md) | :white\_check\_mark: | +| [ScatterND](operators/tensor/tensor.scatter_nd.md) | :white\_check\_mark: | -Current Operators support: **97/156 (62%)** +Current Operators support: **98/156 (63%)** diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md index 281135f63..4dd0a5608 100644 --- a/docs/framework/operators/tensor/README.md +++ b/docs/framework/operators/tensor/README.md @@ -120,6 +120,7 @@ use orion::operators::tensor::TensorTrait; | [`tensor.erf`](tensor.erf.md) | Computes the error function of the given input tensor element-wise. 
| | [`tensor.layer_normalization`](tensor.layer\_normalization.md) | computes the layer normalization of the input tensor. | | [`tensor.split`](tensor.split.md) | Split a tensor into a list of tensors, along the specified ‘axis’. | +| [`tensor.scatter_nd`](tensor.scatter\_nd.md) | The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data | ## Arithmetic Operations diff --git a/docs/framework/operators/tensor/tensor.compress.md b/docs/framework/operators/tensor/tensor.compress.md index 59cb043b3..c385b9a5b 100644 --- a/docs/framework/operators/tensor/tensor.compress.md +++ b/docs/framework/operators/tensor/tensor.compress.md @@ -19,6 +19,8 @@ Selects slices from an input tensor along a given axis where condition evaluates ## Returns A new `Tensor` . + +## Example fn compress_example() -> Tensor { let tensor = TensorTrait::::new( shape: array![3, 2].span(), diff --git a/docs/framework/operators/tensor/tensor.gather_nd.md b/docs/framework/operators/tensor/tensor.gather_nd.md index a922b41ad..07ea27858 100644 --- a/docs/framework/operators/tensor/tensor.gather_nd.md +++ b/docs/framework/operators/tensor/tensor.gather_nd.md @@ -21,6 +21,8 @@ Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims ## Returns A new `Tensor` . 
+ +## Example fn gather_nd_example() -> Tensor { let tensor = TensorTrait::::new( shape: array![2, 2].span(), diff --git a/docs/framework/operators/tensor/tensor.scatter_nd.md b/docs/framework/operators/tensor/tensor.scatter_nd.md new file mode 100644 index 000000000..f68c5121b --- /dev/null +++ b/docs/framework/operators/tensor/tensor.scatter_nd.md @@ -0,0 +1,76 @@ +# tensor.scatter_nd + +```rust + fn scatter_nd(self: @Tensor, updates: Tensor, indices: Tensor, reduction: Option) -> Tensor; +``` + +Produces a copy of input data, and updates value to values specified by updates at specific index positions specified by indices. + +## Args + +* `self`(`@Tensor`) - The input tensor. +* `updates`(`Tensor`) - The updates tensor. +* `indices`(`Tensor`) - Tensor of indices. +* `reduction`(`Option`) - Reduction operation. Default: reduction='none'. + +## Panics + +* Panics if index values are not within bounds [-s, s-1] along axis of size s. +* Panics if indices last axis is greater than data rank. + +## Returns + +A new `Tensor` . 
+ +## Example + +```rust +use core::array::{ArrayTrait, SpanTrait}; + +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; + +fn scatter_nd_example() -> Tensor { + let tensor = TensorTrait::::new( + shape: array![4, 4, 4].span(), + data: array![1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, + 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, + 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8].span() + ); + + let updates = TensorTrait::::new( + shape: array![2, 4, 4].span(), + data: array![5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 1, 1, 1, 1, 2, 2, + 2, 2, 3, 3, 3, 3, 4, 4, 4, 4].span(), + ); + + let indices = TensorTrait::::new( + shape: array![2, 1].span(), + data: array![0, 2].span(), + ); + + return tensor.scatter_nd( + updates: updates, + indices: indices, + reduction: Option::Some('add'), + ); +} +>>> [[[ 6., 7., 8., 9.], + [11., 12., 13., 14.], + [15., 14., 13., 12.], + [12., 11., 10., 9.]], + + [[ 1., 2., 3., 4.], + [ 5., 6., 7., 8.], + [ 8., 7., 6., 5.], + [ 4., 3., 2., 1.]], + + [[ 9., 8., 7., 6.], + [ 6., 5., 4., 3.], + [ 4., 5., 6., 7.], + [ 9., 10., 11., 12.]], + + [[ 8., 7., 6., 5.], + [ 4., 3., 2., 1.], + [ 1., 2., 3., 4.], + [ 5., 6., 7., 8.]]] +``` From b7785464a8382d136fd1455ca58a40f246e20e62 Mon Sep 17 00:00:00 2001 From: Hakeem Kazeem Date: Wed, 17 Jan 2024 19:46:05 +0100 Subject: [PATCH 03/46] removed in scatter_nd.cairo test --- src/operators/tensor/math/scatter_nd.cairo | 268 +-------------------- 1 file changed, 1 insertion(+), 267 deletions(-) diff --git a/src/operators/tensor/math/scatter_nd.cairo b/src/operators/tensor/math/scatter_nd.cairo index f406c9ac7..ba8dba1c6 100644 --- a/src/operators/tensor/math/scatter_nd.cairo +++ b/src/operators/tensor/math/scatter_nd.cairo @@ -109,12 +109,10 @@ fn scatter_nd< }; }; - let mut output_data = ArrayTrait::::new(); - let mut data = *self.data; let mut index: usize = 0; let mut inner_index: usize = 0; - let num = *data_shape_first.unwrap(); 
loop { if (index == num){ @@ -134,7 +132,6 @@ fn scatter_nd< inner_index += 1; }; } - else { loop { if (inner_index == indexer) { @@ -150,13 +147,11 @@ fn scatter_nd< let data_val = *data.at((index * indexer) + inner_index); output_data.append(*val + data_val); } - if (reduction == 'mul') { let val = data_updates.at(((comp_index-1) * indexer) + inner_index); let data_val = *data.at((index * indexer) + inner_index); output_data.append((*val) * data_val); } - if (reduction == 'max') { let val = data_updates.at(((comp_index-1) * indexer) + inner_index); let data_val = *data.at((index * indexer) + inner_index); @@ -167,7 +162,6 @@ fn scatter_nd< output_data.append(data_val); } } - if (reduction == 'min') { let val = data_updates.at(((comp_index-1) * indexer) + inner_index); let data_val = *data.at((index * indexer) + inner_index); @@ -178,273 +172,13 @@ fn scatter_nd< output_data.append(*val); } } - - inner_index += 1; } } index += 1; - }; - let mut output_tensor = TensorTrait::::new(*self.shape, output_data.span()); - // let mut output_tensor = TensorTrait::::new(*self.shape, *self.data); return output_tensor; -} - - - - - - - - - - -// Tests-------------------------------------------------------------------------------------------------------------- - -use orion::utils::assert_eq; - -fn indices() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(2); - - TensorTrait::new(shape.span(), data.span()) -} - -fn data() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(4); - sizes.append(4); - sizes.append(4); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - - data.append(8); - data.append(7); - data.append(6); - data.append(5); - data.append(4); - data.append(3); - data.append(2); - data.append(1); - - data.append(1); - 
data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - - data.append(8); - data.append(7); - data.append(6); - data.append(5); - data.append(4); - data.append(3); - data.append(2); - data.append(1); - - data.append(8); - data.append(7); - data.append(6); - data.append(5); - data.append(4); - data.append(3); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - - data.append(8); - data.append(7); - data.append(6); - data.append(5); - data.append(4); - data.append(3); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - - - let tensor = TensorTrait::::new(sizes.span(), data.span()); - - return tensor; -} - -fn updates() -> Tensor { - let mut sizes = ArrayTrait::new(); - sizes.append(2); - sizes.append(4); - sizes.append(4); - - let mut data = ArrayTrait::new(); - data.append(5); - data.append(5); - data.append(5); - data.append(5); - - data.append(6); - data.append(6); - data.append(6); - data.append(6); - data.append(7); - data.append(7); - data.append(7); - data.append(7); - data.append(8); - data.append(8); - data.append(8); - data.append(8); - - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(3); - data.append(3); - data.append(3); - data.append(3); - data.append(4); - data.append(4); - data.append(4); - data.append(4); - - let tensor = TensorTrait::::new(sizes.span(), data.span()); - - return tensor; -} - -fn data2() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(8); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - 
data.append(7); - data.append(8); - - TensorTrait::new(shape.span(), data.span()) -} - -fn indices2() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(4); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(4); - data.append(3); - data.append(1); - data.append(7); - - TensorTrait::new(shape.span(), data.span()) -} - -fn updates2() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - - TensorTrait::new(shape.span(), data.span()) -} - -#[test] -#[available_gas(20000000000)] -fn test_scatter_default() { - let data = data(); - let indices = indices(); - let updates = updates(); - - // let y = data.scatter_nd(updates:updates, indices: indices, reduction:Option::None(())); - let y = data.scatter_nd(updates:updates, indices: indices, reduction:Option::Some('add')); - let mut output = y.data; - - // loop { - // match output.pop_front() { - // Option::Some(val) => { - // (*val).print(); - // }, - // Option::None(_) => { break; } - // }; - // }; - -} - -// #[test] -// #[available_gas(20000000000)] -// fn test_scatter_nd_example() { -// let tensor = TensorTrait::::new( -// shape: array![4, 4, 4].span(), -// data: array![1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, -// 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, -// 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 7, 8].span() -// ); - -// let updates = TensorTrait::::new( -// shape: array![2, 4, 4].span(), -// data: array![5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 1, 1, 1, 1, 2, 2, -// 2, 2, 3, 3, 3, 3, 4, 4, 4, 4].span(), -// ); - -// let indices = TensorTrait::::new( -// shape: array![2, 1].span(), -// data: array![0, 2].span(), -// ); - -// let y = tensor.scatter_nd(updates:updates, indices: indices, reduction:Option::Some('add')); -// let mut output = y.data; - -// loop { -// match output.pop_front() { 
-// Option::Some(val) => { -// (*val).print(); -// }, -// Option::None(_) => { break; } -// }; -// }; -// } \ No newline at end of file +} \ No newline at end of file From 1a48ae28063699799d9c91ca56ab145d52ede85b Mon Sep 17 00:00:00 2001 From: Hakeem Kazeem Date: Wed, 17 Jan 2024 19:47:56 +0100 Subject: [PATCH 04/46] uncomment node --- tests/nodes.cairo | 1877 ++++++++++++++++++++++----------------------- 1 file changed, 938 insertions(+), 939 deletions(-) diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 0fe87669b..b499cd993 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,942 +1,941 @@ -// mod abs_fp16x16; -// mod abs_fp8x23; -// mod abs_i32; -// mod abs_i8; -// mod acos_fp16x16; -// mod acos_fp8x23; -// mod acosh_fp16x16; -// mod acosh_fp8x23; -// mod add_fp16x16; -// mod add_fp16x16_broadcast; -// mod add_fp8x23; -// mod add_fp8x23_broadcast; -// mod add_i32; -// mod add_i32_broadcast; -// mod add_i8; -// mod add_i8_broadcast; -// mod add_u32; -// mod add_u32_broadcast; -// mod argmax_fp16x16_1D_default; -// mod argmax_fp16x16_1D_keepdims_false; -// mod argmax_fp16x16_1D_last_index; -// mod argmax_fp16x16_2D_default; -// mod argmax_fp16x16_2D_keepdims_false; -// mod argmax_fp16x16_2D_last_index; -// mod argmax_fp16x16_3D_default; -// mod argmax_fp16x16_3D_keepdims_false; -// mod argmax_fp16x16_3D_last_index; -// mod argmax_fp8x23_1D_default; -// mod argmax_fp8x23_1D_keepdims_false; -// mod argmax_fp8x23_1D_last_index; -// mod argmax_fp8x23_2D_default; -// mod argmax_fp8x23_2D_keepdims_false; -// mod argmax_fp8x23_2D_last_index; -// mod argmax_fp8x23_3D_default; -// mod argmax_fp8x23_3D_keepdims_false; -// mod argmax_fp8x23_3D_last_index; -// mod argmax_i32_1D_default; -// mod argmax_i32_1D_keepdims_false; -// mod argmax_i32_1D_last_index; -// mod argmax_i32_2D_default; -// mod argmax_i32_2D_keepdims_false; -// mod argmax_i32_2D_last_index; -// mod argmax_i32_3D_default; -// mod argmax_i32_3D_keepdims_false; -// mod argmax_i32_3D_last_index; 
-// mod argmax_i8_1D_default; -// mod argmax_i8_1D_keepdims_false; -// mod argmax_i8_1D_last_index; -// mod argmax_i8_2D_default; -// mod argmax_i8_2D_keepdims_false; -// mod argmax_i8_2D_last_index; -// mod argmax_i8_3D_default; -// mod argmax_i8_3D_keepdims_false; -// mod argmax_i8_3D_last_index; -// mod argmax_u32_1D_default; -// mod argmax_u32_1D_keepdims_false; -// mod argmax_u32_1D_last_index; -// mod argmax_u32_2D_default; -// mod argmax_u32_2D_keepdims_false; -// mod argmax_u32_2D_last_index; -// mod argmax_u32_3D_default; -// mod argmax_u32_3D_keepdims_false; -// mod argmax_u32_3D_last_index; -// mod argmin_fp16x16_1D_default; -// mod argmin_fp16x16_1D_keepdims_false; -// mod argmin_fp16x16_1D_last_index; -// mod argmin_fp16x16_2D_default; -// mod argmin_fp16x16_2D_keepdims_false; -// mod argmin_fp16x16_2D_last_index; -// mod argmin_fp16x16_3D_default; -// mod argmin_fp16x16_3D_keepdims_false; -// mod argmin_fp16x16_3D_last_index; -// mod argmin_fp8x23_1D_default; -// mod argmin_fp8x23_1D_keepdims_false; -// mod argmin_fp8x23_1D_last_index; -// mod argmin_fp8x23_2D_default; -// mod argmin_fp8x23_2D_keepdims_false; -// mod argmin_fp8x23_2D_last_index; -// mod argmin_fp8x23_3D_default; -// mod argmin_fp8x23_3D_keepdims_false; -// mod argmin_fp8x23_3D_last_index; -// mod argmin_i32_1D_default; -// mod argmin_i32_1D_keepdims_false; -// mod argmin_i32_1D_last_index; -// mod argmin_i32_2D_default; -// mod argmin_i32_2D_keepdims_false; -// mod argmin_i32_2D_last_index; -// mod argmin_i32_3D_default; -// mod argmin_i32_3D_keepdims_false; -// mod argmin_i32_3D_last_index; -// mod argmin_i8_1D_default; -// mod argmin_i8_1D_keepdims_false; -// mod argmin_i8_1D_last_index; -// mod argmin_i8_2D_default; -// mod argmin_i8_2D_keepdims_false; -// mod argmin_i8_2D_last_index; -// mod argmin_i8_3D_default; -// mod argmin_i8_3D_keepdims_false; -// mod argmin_i8_3D_last_index; -// mod argmin_u32_1D_default; -// mod argmin_u32_1D_keepdims_false; -// mod 
argmin_u32_1D_last_index; -// mod argmin_u32_2D_default; -// mod argmin_u32_2D_keepdims_false; -// mod argmin_u32_2D_last_index; -// mod argmin_u32_3D_default; -// mod argmin_u32_3D_keepdims_false; -// mod argmin_u32_3D_last_index; -// mod asin_fp16x16; -// mod asin_fp8x23; -// mod asinh_fp16x16; -// mod asinh_fp8x23; -// mod atan_fp16x16; -// mod atan_fp8x23; -// mod ceil_fp16x16; -// mod ceil_fp8x23; -// mod concat_fp16x16_1d; -// mod concat_fp16x16_2d; -// mod concat_fp16x16_3d_default; -// mod concat_fp16x16_3d_axis_1; -// mod concat_fp16x16_3d_axis_2; -// mod concat_fp16x16_3d_three_tensors_axis_1; -// mod concat_fp16x16_3d_three_tensors_axis_2; -// mod concat_fp8x23_1d; -// mod concat_fp8x23_2d; -// mod concat_fp8x23_3d_default; -// mod concat_fp8x23_3d_axis_1; -// mod concat_fp8x23_3d_axis_2; -// mod concat_fp8x23_3d_three_tensors_axis_1; -// mod concat_fp8x23_3d_three_tensors_axis_2; -// mod concat_i32_1d; -// mod concat_i32_2d; -// mod concat_i32_3d_default; -// mod concat_i32_3d_axis_1; -// mod concat_i32_3d_axis_2; -// mod concat_i32_3d_three_tensors_axis_1; -// mod concat_i32_3d_three_tensors_axis_2; -// mod concat_i8_1d; -// mod concat_i8_2d; -// mod concat_i8_3d_default; -// mod concat_i8_3d_axis_1; -// mod concat_i8_3d_axis_2; -// mod concat_i8_3d_three_tensors_axis_1; -// mod concat_i8_3d_three_tensors_axis_2; -// mod concat_u32_1d; -// mod concat_u32_2d; -// mod concat_u32_3d_default; -// mod concat_u32_3d_axis_1; -// mod concat_u32_3d_axis_2; -// mod concat_u32_3d_three_tensors_axis_1; -// mod concat_u32_3d_three_tensors_axis_2; -// mod cos_fp16x16; -// mod cos_fp8x23; -// mod cosh_fp16x16; -// mod cosh_fp8x23; -// mod cumsum_fp16x16_1d_default; -// mod cumsum_fp16x16_1d_exclusive; -// mod cumsum_fp16x16_1d_reverse; -// mod cumsum_fp16x16_1d_reverse_exclusive; -// mod cumsum_fp16x16_2d_axis_0; -// mod cumsum_fp16x16_2d_axis_1; -// mod cumsum_fp8x23_1d_default; -// mod cumsum_fp8x23_1d_exclusive; -// mod cumsum_fp8x23_1d_reverse; -// mod 
cumsum_fp8x23_1d_reverse_exclusive; -// mod cumsum_fp8x23_2d_axis_0; -// mod cumsum_fp8x23_2d_axis_1; -// mod cumsum_i32_1d_default; -// mod cumsum_i32_1d_exclusive; -// mod cumsum_i32_1d_reverse; -// mod cumsum_i32_1d_reverse_exclusive; -// mod cumsum_i32_2d_axis_0; -// mod cumsum_i32_2d_axis_1; -// mod cumsum_i8_1d_default; -// mod cumsum_i8_1d_exclusive; -// mod cumsum_i8_1d_reverse; -// mod cumsum_i8_1d_reverse_exclusive; -// mod cumsum_i8_2d_axis_0; -// mod cumsum_i8_2d_axis_1; -// mod cumsum_u32_1d_default; -// mod cumsum_u32_1d_exclusive; -// mod cumsum_u32_1d_reverse; -// mod cumsum_u32_1d_reverse_exclusive; -// mod cumsum_u32_2d_axis_0; -// mod cumsum_u32_2d_axis_1; -// mod div_fp16x16; -// mod div_fp16x16_broadcast; -// mod div_fp8x23; -// mod div_fp8x23_broadcast; -// mod div_i32; -// mod div_i32_broadcast; -// mod div_i8; -// mod div_i8_broadcast; -// mod div_u32; -// mod div_u32_broadcast; -// mod equal_fp16x16; -// mod equal_fp16x16_broadcast; -// mod equal_fp8x23; -// mod equal_fp8x23_broadcast; -// mod equal_i32; -// mod equal_i32_broadcast; -// mod equal_i8; -// mod equal_i8_broadcast; -// mod equal_u32; -// mod equal_u32_broadcast; -// mod exp_fp16x16; -// mod exp_fp8x23; -// mod less_equal_fp16x16; -// mod less_equal_fp16x16_broadcast; -// mod less_equal_fp8x23; -// mod less_equal_fp8x23_broadcast; -// mod less_equal_i32; -// mod less_equal_i32_broadcast; -// mod less_equal_i8; -// mod less_equal_i8_broadcast; -// mod less_equal_u32; -// mod less_equal_u32_broadcast; -// mod greater_fp16x16; -// mod greater_fp16x16_broadcast; -// mod greater_fp8x23; -// mod greater_fp8x23_broadcast; -// mod greater_i32; -// mod greater_i32_broadcast; -// mod greater_i8; -// mod greater_i8_broadcast; -// mod greater_u32; -// mod greater_u32_broadcast; -// mod leaky_relu_fp16x16; -// mod leaky_relu_fp8x23; -// mod linear_fp16x16; -// mod linear_fp8x23; -// mod linear_i32; -// mod linear_i8; -// mod linear_u32; -// mod log_fp16x16; -// mod log_fp8x23; -// mod 
logsoftmax_fp16x16_axis_0; -// mod logsoftmax_fp16x16_axis_1; -// mod logsoftmax_fp8x23_axis_0; -// mod logsoftmax_fp8x23_axis_1; -// mod matmul_fp16x16_1d; -// mod matmul_fp16x16_2x2; -// mod matmul_fp16x16_2x1; -// mod matmul_fp16x16_1x2; -// mod matmul_fp8x23_1d; -// mod matmul_fp8x23_2x2; -// mod matmul_fp8x23_2x1; -// mod matmul_fp8x23_1x2; -// mod matmul_i32_1d; -// mod matmul_i32_2x2; -// mod matmul_i32_2x1; -// mod matmul_i32_1x2; -// mod matmul_i8_1d; -// mod matmul_i8_2x2; -// mod matmul_i8_2x1; -// mod matmul_i8_1x2; -// mod matmul_u32_1d; -// mod matmul_u32_2x2; -// mod matmul_u32_2x1; -// mod matmul_u32_1x2; -// mod mul_fp16x16; -// mod mul_fp16x16_broadcast; -// mod mul_fp8x23; -// mod mul_fp8x23_broadcast; -// mod mul_i32; -// mod mul_i32_broadcast; -// mod mul_i8; -// mod mul_i8_broadcast; -// mod mul_u32; -// mod mul_u32_broadcast; -// mod or_fp16x16; -// mod or_fp16x16_broadcast; -// mod or_fp8x23; -// mod or_fp8x23_broadcast; -// mod or_i32; -// mod or_i32_broadcast; -// mod or_i8; -// mod or_i8_broadcast; -// mod or_u32; -// mod or_u32_broadcast; -// mod reduce_sum_fp16x16_1D; -// mod reduce_sum_fp16x16_2D_default; -// mod reduce_sum_fp16x16_2D_keepdims; -// mod reduce_sum_fp16x16_2D_axis_1; -// mod reduce_sum_fp8x23_1D; -// mod reduce_sum_fp8x23_2D_default; -// mod reduce_sum_fp8x23_2D_keepdims; -// mod reduce_sum_fp8x23_2D_axis_1; -// mod reduce_sum_i32_1D; -// mod reduce_sum_i32_2D_default; -// mod reduce_sum_i32_2D_keepdims; -// mod reduce_sum_i32_2D_axis_1; -// mod reduce_sum_i8_1D; -// mod reduce_sum_i8_2D_default; -// mod reduce_sum_i8_2D_keepdims; -// mod reduce_sum_i8_2D_axis_1; -// mod reduce_sum_u32_1D; -// mod reduce_sum_u32_2D_default; -// mod reduce_sum_u32_2D_keepdims; -// mod reduce_sum_u32_2D_axis_1; -// mod relu_fp16x16; -// mod relu_fp8x23; -// mod relu_i32; -// mod relu_i8; -// mod sigmoid_fp16x16; -// mod sigmoid_fp8x23; -// mod sin_fp16x16; -// mod sin_fp8x23; -// mod sinh_fp16x16; -// mod sinh_fp8x23; -// mod 
softmax_fp16x16; -// mod softmax_fp8x23; -// mod softplus_fp8x23; -// mod softplus_fp16x16; -// mod softsign_fp8x23; -// mod softsign_fp16x16; -// mod sqrt_fp16x16; -// mod sqrt_fp8x23; -// mod sub_fp16x16; -// mod sub_fp16x16_broadcast; -// mod sub_fp8x23; -// mod sub_fp8x23_broadcast; -// mod sub_i32; -// mod sub_i32_broadcast; -// mod sub_i8; -// mod sub_i8_broadcast; -// mod sub_u32; -// mod sub_u32_broadcast; -// mod tanh_fp16x16; -// mod tanh_fp8x23; -// mod transpose_fp16x16_2d; -// mod transpose_fp16x16_3d; -// mod transpose_fp8x23_2d; -// mod transpose_fp8x23_3d; -// mod transpose_i32_2d; -// mod transpose_i32_3d; -// mod transpose_i8_2d; -// mod transpose_i8_3d; -// mod transpose_u32_2d; -// mod transpose_u32_3d; -// mod xor_fp16x16; -// mod xor_fp16x16_broadcast; -// mod xor_fp8x23; -// mod xor_fp8x23_broadcast; -// mod xor_i32; -// mod xor_i32_broadcast; -// mod xor_i8; -// mod xor_i8_broadcast; -// mod xor_u32; -// mod xor_u32_broadcast; -// mod less_fp16x16; -// mod less_fp16x16_broadcast; -// mod less_fp8x23; -// mod less_fp8x23_broadcast; -// mod less_i32; -// mod less_i32_broadcast; -// mod less_i8; -// mod less_i8_broadcast; -// mod less_u32; -// mod less_u32_broadcast; -// mod greater_equal_fp16x16; -// mod greater_equal_fp16x16_broadcast; -// mod greater_equal_fp8x23; -// mod greater_equal_fp8x23_broadcast; -// mod greater_equal_i32; -// mod greater_equal_i32_broadcast; -// mod greater_equal_i8; -// mod greater_equal_i8_broadcast; -// mod greater_equal_u32; -// mod greater_equal_u32_broadcast; -// mod slice_fp16x16_2d; -// mod slice_fp16x16_3d; -// mod slice_fp8x23_2d; -// mod slice_fp8x23_3d; -// mod slice_i32_2d; -// mod slice_i32_3d; -// mod slice_i8_2d; -// mod slice_i8_3d; -// mod slice_u32_2d; -// mod slice_u32_3d; -// mod gather_fp8x23_3d_default; -// mod gather_fp8x23_3d_axis1; -// mod gather_fp8x23_3d_axis2; -// mod gather_fp16x16_3d_default; -// mod gather_fp16x16_3d_axis1; -// mod gather_fp16x16_3d_axis2; -// mod gather_i8_3d_default; 
-// mod gather_i8_3d_axis1; -// mod gather_i8_3d_axis2; -// mod gather_i32_3d_default; -// mod gather_i32_3d_axis1; -// mod gather_i32_3d_axis2; -// mod gather_u32_3d_default; -// mod gather_u32_3d_axis1; -// mod gather_u32_3d_axis2; -// mod nonzero_fp16x16_2d; -// mod nonzero_fp16x16_3d; -// mod nonzero_fp8x23_2d; -// mod nonzero_fp8x23_3d; -// mod nonzero_i32_2d; -// mod nonzero_i32_3d; -// mod nonzero_i8_2d; -// mod nonzero_i8_3d; -// mod nonzero_u32_2d; -// mod nonzero_u32_3d; -// mod squeeze_fP16x16; -// mod squeeze_fP8x23; -// mod squeeze_i32; -// mod squeeze_i8; -// mod squeeze_u32; -// mod unsqueeze_fp16x16_2d; -// mod unsqueeze_fp16x16_3d; -// mod unsqueeze_fp8x23_2d; -// mod unsqueeze_fp8x23_3d; -// mod unsqueeze_i32_2d; -// mod unsqueeze_i32_3d; -// mod unsqueeze_i8_2d; -// mod unsqueeze_i8_3d; -// mod unsqueeze_u32_2d; -// mod unsqueeze_u32_3d; -// mod sign_fP16x16; -// mod sign_fP8x23; -// mod sign_fail; -// mod sign_i32; -// mod sign_i8; -// mod clip_fp16x16_2d; -// mod clip_fp16x16_3d; -// mod clip_fp8x23_2d; -// mod clip_fp8x23_3d; -// mod clip_i32_2d; -// mod clip_i32_3d; -// mod clip_i8_2d; -// mod clip_i8_3d; -// mod clip_u32_2d; -// mod clip_u32_3d; -// mod identity_fP16x16; -// mod identity_fP8x23; -// mod identity_i32; -// mod identity_i8; -// mod identity_u32; -// mod thresholded_relu_fp16x16; -// mod thresholded_relu_fp8x23; -// mod hard_sigmoid_fp8x23; -// mod hard_sigmoid_fp16x16; -// mod neg_fp16x16; -// mod neg_fp8x23; -// mod neg_i32; -// mod neg_i8; -// mod gemm_all_attributes; -// mod gemm_alpha; -// mod gemm_beta; -// mod gemm_default_matrix_bias; -// mod gemm_default_vector_bias; -// mod gemm_default_no_bias; -// mod gemm_transposeA; -// mod gemm_transposeB; -// mod min_fp16x16_three_tensors; -// mod min_fp16x16_broadcast_three_tensors; -// mod min_fp16x16_two_tensors; -// mod min_fp16x16_broadcast_two_tensors; -// mod min_fp8x23_three_tensors; -// mod min_fp8x23_broadcast_three_tensors; -// mod min_fp8x23_two_tensors; -// mod 
min_fp8x23_broadcast_two_tensors; -// mod min_i32_three_tensors; -// mod min_i32_broadcast_three_tensors; -// mod min_i32_two_tensors; -// mod min_i32_broadcast_two_tensors; -// mod min_i8_three_tensors; -// mod min_i8_broadcast_three_tensors; -// mod min_i8_two_tensors; -// mod min_i8_broadcast_two_tensors; -// mod min_u32_three_tensors; -// mod min_u32_broadcast_three_tensors; -// mod min_u32_two_tensors; -// mod min_u32_broadcast_two_tensors; -// mod where_fp16x16; -// mod where_fp16x16_broadcast; -// mod where_fp8x23; -// mod where_fp8x23_broadcast; -// mod where_i32; -// mod where_i32_broadcast; -// mod where_i8; -// mod where_i8_broadcast; -// mod where_u32; -// mod where_u32_broadcast; -// mod not_bool; -// mod round_fp16x16; -// mod round_fp8x23; -// mod max_fp16x16_three_tensors; -// mod max_fp16x16_broadcast_three_tensors; -// mod max_fp16x16_two_tensors; -// mod max_fp16x16_broadcast_two_tensors; -// mod max_fp8x23_three_tensors; -// mod max_fp8x23_broadcast_three_tensors; -// mod max_fp8x23_two_tensors; -// mod max_fp8x23_broadcast_two_tensors; -// mod max_i32_three_tensors; -// mod max_i32_broadcast_three_tensors; -// mod max_i32_two_tensors; -// mod max_i32_broadcast_two_tensors; -// mod max_i8_three_tensors; -// mod max_i8_broadcast_three_tensors; -// mod max_i8_two_tensors; -// mod max_i8_broadcast_two_tensors; -// mod max_u32_three_tensors; -// mod max_u32_broadcast_three_tensors; -// mod max_u32_two_tensors; -// mod max_u32_broadcast_two_tensors; -// mod scatter_fp16x16_3d_default; -// mod scatter_fp16x16_3d_axis1; -// mod scatter_fp16x16_3d_axis1_add; -// mod scatter_fp8x23_default; -// mod scatter_fp8x23_axis1; -// mod scatter_fp8x23_mul; -// mod scatter_i8_default; -// mod scatter_i8_axis1; -// mod scatter_i8_axis1_max; -// mod scatter_u32_default; -// mod scatter_u32_axis1; -// mod scatter_u32_add; -// mod array_feature_extractor_1D_i32; -// mod array_feature_extractor_1D_fp8x23; -// mod array_feature_extractor_1D_fp16x16; -// mod 
array_feature_extractor_2D_i32; -// mod array_feature_extractor_2D_fp8x23; -// mod array_feature_extractor_2D_fp16x16; -// mod array_feature_extractor_3D_i32; -// mod array_feature_extractor_3D_fp8x23; -// mod array_feature_extractor_3D_fp16x16; -// mod binarizer_fp16x16; -// mod binarizer_fp8x23; -// mod tril_fp16x16; -// mod tril_fp16x16_neg; -// mod tril_fp16x16_one_row; -// mod tril_fp16x16_out_neg; -// mod tril_fp16x16_out_pos; -// mod tril_fp16x16_pos; -// mod tril_fp16x16_square; -// mod tril_fp16x16_square_neg; -// mod tril_fp16x16_zero; -// mod triu_fp16x16; -// mod triu_fp16x16_neg; -// mod triu_fp16x16_one_row; -// mod triu_fp16x16_out_neg; -// mod triu_fp16x16_out_pos; -// mod triu_fp16x16_pos; -// mod triu_fp16x16_square; -// mod triu_fp16x16_square_neg; -// mod triu_fp16x16_zero; -// mod tril_fp8x23; -// mod tril_fp8x23_neg; -// mod tril_fp8x23_one_row; -// mod tril_fp8x23_out_neg; -// mod tril_fp8x23_out_pos; -// mod tril_fp8x23_pos; -// mod tril_fp8x23_square; -// mod tril_fp8x23_square_neg; -// mod tril_fp8x23_zero; -// mod triu_fp8x23; -// mod triu_fp8x23_neg; -// mod triu_fp8x23_one_row; -// mod triu_fp8x23_out_neg; -// mod triu_fp8x23_out_pos; -// mod triu_fp8x23_pos; -// mod triu_fp8x23_square; -// mod triu_fp8x23_square_neg; -// mod triu_fp8x23_zero; -// mod tril_i32; -// mod tril_neg_i32; -// mod tril_i32_one_row; -// mod tril_i32_out_neg; -// mod tril_i32_out_pos; -// mod tril_i32_pos; -// mod tril_i32_square; -// mod tril_i32_square_neg; -// mod tril_i32_zero; -// mod triu_i32; -// mod triu_i32_neg; -// mod triu_i32_one_row; -// mod triu_i32_out_neg; -// mod triu_i32_out_pos; -// mod triu_i32_pos; -// mod triu_i32_square; -// mod triu_i32_square_neg; -// mod triu_i32_zero; -// mod tril_i8; -// mod tril_i8_neg; -// mod tril_i8_one_row; -// mod tril_i8_out_neg; -// mod tril_i8_out_pos; -// mod tril_i8_pos; -// mod tril_i8_square; -// mod tril_i8_square_neg; -// mod tril_i8_zero; -// mod triu_i8; -// mod triu_i8_neg; -// mod triu_i8_one_row; 
-// mod triu_i8_out_neg; -// mod triu_i8_out_pos; -// mod triu_i8_pos; -// mod triu_i8_square; -// mod triu_i8_square_neg; -// mod triu_i8_zero; -// mod tril_u32; -// mod tril_u32_neg; -// mod tril_u32_one_row; -// mod tril_u32_out_neg; -// mod tril_u32_out_pos; -// mod tril_u32_pos; -// mod tril_u32_square; -// mod tril_u32_square_neg; -// mod tril_u32_zero; -// mod triu_u32; -// mod triu_u32_neg; -// mod triu_u32_one_row; -// mod triu_u32_out_neg; -// mod triu_u32_out_pos; -// mod triu_u32_pos; -// mod triu_u32_square; -// mod triu_u32_square_neg; -// mod triu_u32_zero; -// mod reduce_sum_square_fp16x16_export_do_not_keepdims; -// mod reduce_sum_square_fp16x16_export_keepdims; -// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -// mod reduce_sum_square_fp8x23_export_do_not_keepdims; -// mod reduce_sum_square_fp8x23_export_keepdims; -// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; -// mod reduce_sum_square_i32_export_do_not_keepdims; -// mod reduce_sum_square_i32_export_keepdims; -// mod reduce_sum_square_i32_export_negative_axes_keepdims; -// mod reduce_sum_square_i8_export_do_not_keepdims; -// mod reduce_sum_square_i8_export_keepdims; -// mod reduce_sum_square_i8_export_negative_axes_keepdims; -// mod reduce_sum_square_u32_export_do_not_keepdims; -// mod reduce_sum_square_u32_export_keepdims; -// mod reduce_sum_square_u32_export_negative_axes_keepdims; -// mod reduce_l2_fp16x16_export_do_not_keepdims; -// mod reduce_l2_fp16x16_export_keepdims; -// mod reduce_l2_fp16x16_export_negative_axes_keepdims; -// mod reduce_l2_fp8x23_export_do_not_keepdims; -// mod reduce_l2_fp8x23_export_keepdims; -// mod reduce_l2_fp8x23_export_negative_axes_keepdims; -// mod reduce_l1_fp16x16_export_do_not_keepdims; -// mod reduce_l1_fp16x16_export_keepdims; -// mod reduce_l1_fp16x16_export_negative_axes_keepdims; -// mod reduce_l1_fp8x23_export_do_not_keepdims; -// mod reduce_l1_fp8x23_export_keepdims; -// mod 
reduce_l1_fp8x23_export_negative_axes_keepdims; -// mod reduce_l1_i32_export_do_not_keepdims; -// mod reduce_l1_i32_export_keepdims; -// mod reduce_l1_i32_export_negative_axes_keepdims; -// mod reduce_l1_i8_export_do_not_keepdims; -// mod reduce_l1_i8_export_keepdims; -// mod reduce_l1_i8_export_negative_axes_keepdims; -// mod reduce_l1_u32_export_do_not_keepdims; -// mod reduce_l1_u32_export_keepdims; -// mod reduce_l1_u32_export_negative_axes_keepdims; -// mod reduce_prod_fp16x16_1D; -// mod reduce_prod_fp16x16_2D_default; -// mod reduce_prod_fp16x16_2D_keepdims; -// mod reduce_prod_fp16x16_2D_axis_1; -// mod reduce_prod_fp8x23_1D; -// mod reduce_prod_fp8x23_2D_default; -// mod reduce_prod_fp8x23_2D_keepdims; -// mod reduce_prod_fp8x23_2D_axis_1; -// mod reduce_prod_i32_1D; -// mod reduce_prod_i32_2D_default; -// mod reduce_prod_i32_2D_keepdims; -// mod reduce_prod_i32_2D_axis_1; -// mod reduce_prod_i8_1D; -// mod reduce_prod_i8_2D_default; -// mod reduce_prod_i8_2D_keepdims; -// mod reduce_prod_i8_2D_axis_1; -// mod reduce_prod_u32_1D; -// mod reduce_prod_u32_2D_default; -// mod reduce_prod_u32_2D_keepdims; -// mod reduce_prod_u32_2D_axis_1; -// mod gather_elements_fp16x16_3d_default; -// mod gather_elements_fp16x16_3d_axis1; -// mod gather_elements_fp16x16_3d_axis2; -// mod gather_elements_fp8x23_3d_default; -// mod gather_elements_fp8x23_3d_axis1; -// mod gather_elements_fp8x23_3d_axis2; -// mod gather_elements_i8_3d_default; -// mod gather_elements_i8_3d_axis1; -// mod gather_elements_i32_3d_default; -// mod gather_elements_i32_3d_axis1; -// mod gather_elements_i32_3d_axis2; -// mod gather_elements_u32_default; -// mod gather_elements_u32_axis1; -// mod gather_elements_u32_axis2; -// mod gather_elements_u32_axis3; -// mod sequence_length_fp16x16; -// mod sequence_length_fp16x16_broadcast; -// mod sequence_length_fp8x23; -// mod sequence_length_fp8x23_broadcast; -// mod sequence_length_i32; -// mod sequence_length_i32_broadcast; -// mod sequence_length_i8; -// 
mod sequence_length_i8_broadcast; -// mod sequence_length_u32; -// mod sequence_length_u32_broadcast; -// mod sequence_at_u32_positive; -// mod sequence_at_u32_negative; -// mod sequence_at_fp16x16_positive; -// mod sequence_at_fp16x16_negative; -// mod sequence_at_fp8x23_positive; -// mod sequence_at_fp8x23_negative; -// mod sequence_at_i32_positive; -// mod sequence_at_i32_negative; -// mod sequence_at_i8_positive; -// mod sequence_at_i8_negative; -// mod reduce_min_fp16x16_1D; -// mod reduce_min_fp16x16_2D_default; -// mod reduce_min_fp16x16_2D_keepdims; -// mod reduce_min_fp16x16_2D_axis_1; -// mod reduce_min_fp8x23_1D; -// mod reduce_min_fp8x23_2D_default; -// mod reduce_min_fp8x23_2D_keepdims; -// mod reduce_min_fp8x23_2D_axis_1; -// mod reduce_min_i32_1D; -// mod reduce_min_i32_2D_default; -// mod reduce_min_i32_2D_keepdims; -// mod reduce_min_i32_2D_axis_1; -// mod reduce_min_i8_1D; -// mod reduce_min_i8_2D_default; -// mod reduce_min_i8_2D_keepdims; -// mod reduce_min_i8_2D_axis_1; -// mod reduce_min_u32_1D; -// mod reduce_min_u32_2D_default; -// mod reduce_min_u32_2D_keepdims; -// mod reduce_min_u32_2D_axis_1; -// mod sequence_construct_fp16x16; -// mod sequence_construct_fp8x23; -// mod sequence_construct_i32; -// mod sequence_construct_i8; -// mod sequence_construct_u32; -// mod shrink_hard_fp16x16; -// mod shrink_soft_fp16x16; -// mod shrink_hard_fp8x23; -// mod shrink_soft_fp8x23; -// mod sequence_empty_fp16x16; -// mod sequence_empty_fp8x23; -// mod sequence_empty_i32; -// mod sequence_empty_i8; -// mod sequence_empty_u32; -// mod reduce_mean_fp16x16_1D; -// mod reduce_mean_fp16x16_2D_default; -// mod reduce_mean_fp16x16_2D_keepdims; -// mod reduce_mean_fp16x16_2D_axis_1; -// mod reduce_mean_fp8x23_1D; -// mod reduce_mean_fp8x23_2D_default; -// mod reduce_mean_fp8x23_2D_keepdims; -// mod reduce_mean_fp8x23_2D_axis_1; -// mod reduce_mean_i32_1D; -// mod reduce_mean_i32_2D_default; -// mod reduce_mean_i32_2D_keepdims; -// mod reduce_mean_i32_2D_axis_1; 
-// mod reduce_mean_i8_1D; -// mod reduce_mean_i8_2D_default; -// mod reduce_mean_i8_2D_keepdims; -// mod reduce_mean_i8_2D_axis_1; -// mod reduce_mean_u32_1D; -// mod reduce_mean_u32_2D_default; -// mod reduce_mean_u32_2D_keepdims; -// mod reduce_mean_u32_2D_axis_1; -// mod pow_fp16x16; -// mod pow_fp16x16_broadcast; -// mod pow_fp8x23; -// mod pow_fp8x23_broadcast; -// mod sequence_erase_u32_positive; -// mod sequence_erase_u32_negative; -// mod sequence_erase_u32_empty; -// mod sequence_erase_fp16x16_positive; -// mod sequence_erase_fp16x16_negative; -// mod sequence_erase_fp16x16_empty; -// mod sequence_erase_fp8x23_positive; -// mod sequence_erase_fp8x23_negative; -// mod sequence_erase_fp8x23_empty; -// mod sequence_erase_i32_positive; -// mod sequence_erase_i32_negative; -// mod sequence_erase_i32_empty; -// mod sequence_erase_i8_positive; -// mod sequence_erase_i8_negative; -// mod sequence_erase_i8_empty; -// mod sequence_insert_fp16x16; -// mod sequence_insert_fp8x23; -// mod sequence_insert_i32; -// mod sequence_insert_i8; -// mod sequence_insert_u32; -// mod concat_from_sequence_fp8x23_new_axis_zero; -// mod concat_from_sequence_fp8x23_new_axis_one; -// mod concat_from_sequence_fp8x23_new_axis_default; -// mod concat_from_sequence_fp16x16_new_axis_zero; -// mod concat_from_sequence_fp16x16_new_axis_one; -// mod concat_from_sequence_fp16x16_new_axis_default; -// mod concat_from_sequence_i32_new_axis_zero; -// mod concat_from_sequence_i32_new_axis_one; -// mod concat_from_sequence_i32_new_axis_default; -// mod concat_from_sequence_i8_new_axis_zero; -// mod concat_from_sequence_i8_new_axis_one; -// mod concat_from_sequence_i8_new_axis_default; -// mod concat_from_sequence_u32_new_axis_zero; -// mod concat_from_sequence_u32_new_axis_one; -// mod concat_from_sequence_u32_new_axis_default; -// mod is_nan_fp16x16; -// mod is_nan_fp8x23; -// mod is_inf_fp16x16; -// mod is_inf_fp8x23; -// mod is_inf_i32; -// mod is_inf_i8; -// mod is_inf_u32; -// mod 
is_pos_inf_fp16x16; -// mod is_neg_inf_fp16x16; -// mod is_pos_inf_fp8x23; -// mod is_neg_inf_fp8x23; -// mod is_pos_inf_i32; -// mod is_neg_inf_i32; -// mod is_pos_inf_i8; -// mod is_neg_inf_i8; -// mod reduce_log_sum_fp8x23_export_do_not_keepdims; -// mod reduce_log_sum_fp8x23_export_keepdims; -// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -// mod reduce_log_sum_fp16x16_export_do_not_keepdims; -// mod reduce_log_sum_fp16x16_export_keepdims; -// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -// mod and_bool; -// mod erf_fp16x16; -// mod erf_fp8x23; -// mod unique_fp16x16_without_axis_sorted; -// mod unique_fp16x16_with_axis_zero_sorted; -// mod unique_u32_without_axis_sorted; -// mod unique_u32_without_axis_not_sorted; -// mod unique_u32_with_axis_zero_sorted; -// mod unique_u32_with_axis_zero_not_sorted; -// mod unique_u32_with_axis_one_sorted; -// mod unique_u32_with_axis_one_not_sorted; -// mod gather_nd_fp16x16_3d_default; -// mod gather_nd_fp16x16_3d_batch_dims1; -// mod gather_nd_fp16x16_3d_batch_dims2; -// mod gather_nd_fp8x23_3d_default; -// mod gather_nd_fp8x23_3d_batch_dims1; -// mod gather_nd_fp8x23_3d_batch_dims2; -// mod gather_nd_i32_3d_default; -// mod gather_nd_i32_3d_batch_dims1; -// mod gather_nd_i32_3d_batch_dims2; -// mod gather_nd_i8_3d_default; -// mod gather_nd_i8_3d_batch_dims1; -// mod gather_nd_u32_default; -// mod gather_nd_u32_batch_dims1; -// mod gather_nd_u32_batch_dims2; -// mod resize_upsample_scales_nearest; -// mod resize_downsample_scales_cubic; -// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -// mod resize_downsample_scales_cubic_align_corners; -// mod resize_upsample_scales_linear; -// mod resize_downsample_scales_linear_align_corners; -// mod resize_downsample_scales_nearest; -// mod resize_upsample_scales_cubic; -// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -// mod resize_upsample_scales_cubic_align_corners; -// mod resize_upsample_scales_cubic_asymmetric; -// mod 
resize_upsample_scales_linear_align_corners; -// mod resize_upsample_sizes_nearest; -// mod resize_upsample_sizes_cubic; -// mod resize_downsample_sizes_cubic; -// mod resize_downsample_sizes_nearest; -// mod resize_upsample_scales_linear_half_pixel_symmetric; -// mod resize_downsample_scales_cubic_antialias; -// mod resize_downsample_scales_linear_antialias; -// mod resize_downsample_sizes_cubic_antialias; -// mod resize_downsample_sizes_linear_pytorch_half_pixel; -// mod resize_tf_crop_and_resize; -// mod resize_tf_crop_and_resize_extrapolation_value; -// mod resize_upsample_scales_nearest_axes_2_3; -// mod resize_upsample_scales_nearest_axes_3_2; -// mod resize_upsample_sizes_nearest_axes_2_3; -// mod resize_upsample_sizes_nearest_ceil_half_pixel; -// mod resize_upsample_sizes_nearest_floor_align_corners; -// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -// mod resize_downsample_scales_linear_half_pixel_symmetric; -// mod resize_downsample_sizes_nearest_not_larger; -// mod resize_downsample_sizes_nearest_not_smaller; -// mod resize_tf_crop_and_resize_axes_2_3; -// mod resize_tf_crop_and_resize_axes_3_2; -// mod resize_upsample_sizes_nearest_axes_3_2; -// mod resize_upsample_sizes_nearest_not_larger; -// mod resize_upsample_sizes_nearest_not_smaller; -// mod compress_fp16x16_3d_default; -// mod compress_fp16x16_3d_axis1; -// mod compress_fp16x16_3d_axis2; -// mod compress_fp16x16_3d_axis3; -// mod compress_fp16x16_3d_noaxis; -// mod compress_fp8x23_3d_default; -// mod compress_fp8x23_3d_axis1; -// mod compress_fp8x23_3d_axis2; -// mod compress_i32_3d_default; -// mod compress_i32_3d_axis1; -// mod compress_i32_3d_axis2; -// mod compress_i8_3d_default; -// mod compress_i8_3d_axis1; -// mod compress_i8_3d_axis2; -// mod compress_u32_3d_default; -// mod compress_u32_3d_axis1; -// mod compress_u32_3d_axis2; -// mod compress_u32_3d_axis2_2; -// mod compress_u32_3d_axis3; -// mod layer_normalization_default_axis; -// mod layer_normalization_4d_axis0; 
-// mod layer_normalization_4d_axis1; -// mod layer_normalization_4d_axis2; -// mod layer_normalization_4d_axis3; -// mod layer_normalization_3d_axis0_epsilon; -// mod layer_normalization_3d_axis1_epsilon; -// mod layer_normalization_3d_axis2_epsilon; -// mod layer_normalization_4d_axis_negative_4; -// mod layer_normalization_4d_axis_negative_3; -// mod layer_normalization_4d_axis_negative_2; -// mod layer_normalization_4d_axis_negative_1; -// mod layer_normalization_3d_axis_negative_3_epsilon; -// mod layer_normalization_3d_axis_negative_2_epsilon; -// mod layer_normalization_3d_axis_negative_1_epsilon; -// mod layer_normalization_test; -// mod split_u32_1d_equal_parts; -// mod split_u32_2d_equal_parts; -// mod split_u32_zero_size; -// mod split_u32_1d_variable_parts; -// mod split_u32_2d_variable_parts; -// mod split_u32_1d_uneven; -// mod split_u32_2d_uneven; -// mod split_fp16x16_1d_equal_parts; -// mod split_fp16x16_1d_variable_parts; -// mod split_fp16x16_2d_equal_parts; -// mod split_fp16x16_2d_variable_parts; -// mod split_fp16x16_zero_size; -// mod split_fp16x16_1d_uneven; -// mod split_fp16x16_2d_uneven; - +mod abs_fp16x16; +mod abs_fp8x23; +mod abs_i32; +mod abs_i8; +mod acos_fp16x16; +mod acos_fp8x23; +mod acosh_fp16x16; +mod acosh_fp8x23; +mod add_fp16x16; +mod add_fp16x16_broadcast; +mod add_fp8x23; +mod add_fp8x23_broadcast; +mod add_i32; +mod add_i32_broadcast; +mod add_i8; +mod add_i8_broadcast; +mod add_u32; +mod add_u32_broadcast; +mod argmax_fp16x16_1D_default; +mod argmax_fp16x16_1D_keepdims_false; +mod argmax_fp16x16_1D_last_index; +mod argmax_fp16x16_2D_default; +mod argmax_fp16x16_2D_keepdims_false; +mod argmax_fp16x16_2D_last_index; +mod argmax_fp16x16_3D_default; +mod argmax_fp16x16_3D_keepdims_false; +mod argmax_fp16x16_3D_last_index; +mod argmax_fp8x23_1D_default; +mod argmax_fp8x23_1D_keepdims_false; +mod argmax_fp8x23_1D_last_index; +mod argmax_fp8x23_2D_default; +mod argmax_fp8x23_2D_keepdims_false; +mod argmax_fp8x23_2D_last_index; 
+mod argmax_fp8x23_3D_default; +mod argmax_fp8x23_3D_keepdims_false; +mod argmax_fp8x23_3D_last_index; +mod argmax_i32_1D_default; +mod argmax_i32_1D_keepdims_false; +mod argmax_i32_1D_last_index; +mod argmax_i32_2D_default; +mod argmax_i32_2D_keepdims_false; +mod argmax_i32_2D_last_index; +mod argmax_i32_3D_default; +mod argmax_i32_3D_keepdims_false; +mod argmax_i32_3D_last_index; +mod argmax_i8_1D_default; +mod argmax_i8_1D_keepdims_false; +mod argmax_i8_1D_last_index; +mod argmax_i8_2D_default; +mod argmax_i8_2D_keepdims_false; +mod argmax_i8_2D_last_index; +mod argmax_i8_3D_default; +mod argmax_i8_3D_keepdims_false; +mod argmax_i8_3D_last_index; +mod argmax_u32_1D_default; +mod argmax_u32_1D_keepdims_false; +mod argmax_u32_1D_last_index; +mod argmax_u32_2D_default; +mod argmax_u32_2D_keepdims_false; +mod argmax_u32_2D_last_index; +mod argmax_u32_3D_default; +mod argmax_u32_3D_keepdims_false; +mod argmax_u32_3D_last_index; +mod argmin_fp16x16_1D_default; +mod argmin_fp16x16_1D_keepdims_false; +mod argmin_fp16x16_1D_last_index; +mod argmin_fp16x16_2D_default; +mod argmin_fp16x16_2D_keepdims_false; +mod argmin_fp16x16_2D_last_index; +mod argmin_fp16x16_3D_default; +mod argmin_fp16x16_3D_keepdims_false; +mod argmin_fp16x16_3D_last_index; +mod argmin_fp8x23_1D_default; +mod argmin_fp8x23_1D_keepdims_false; +mod argmin_fp8x23_1D_last_index; +mod argmin_fp8x23_2D_default; +mod argmin_fp8x23_2D_keepdims_false; +mod argmin_fp8x23_2D_last_index; +mod argmin_fp8x23_3D_default; +mod argmin_fp8x23_3D_keepdims_false; +mod argmin_fp8x23_3D_last_index; +mod argmin_i32_1D_default; +mod argmin_i32_1D_keepdims_false; +mod argmin_i32_1D_last_index; +mod argmin_i32_2D_default; +mod argmin_i32_2D_keepdims_false; +mod argmin_i32_2D_last_index; +mod argmin_i32_3D_default; +mod argmin_i32_3D_keepdims_false; +mod argmin_i32_3D_last_index; +mod argmin_i8_1D_default; +mod argmin_i8_1D_keepdims_false; +mod argmin_i8_1D_last_index; +mod argmin_i8_2D_default; +mod 
argmin_i8_2D_keepdims_false; +mod argmin_i8_2D_last_index; +mod argmin_i8_3D_default; +mod argmin_i8_3D_keepdims_false; +mod argmin_i8_3D_last_index; +mod argmin_u32_1D_default; +mod argmin_u32_1D_keepdims_false; +mod argmin_u32_1D_last_index; +mod argmin_u32_2D_default; +mod argmin_u32_2D_keepdims_false; +mod argmin_u32_2D_last_index; +mod argmin_u32_3D_default; +mod argmin_u32_3D_keepdims_false; +mod argmin_u32_3D_last_index; +mod asin_fp16x16; +mod asin_fp8x23; +mod asinh_fp16x16; +mod asinh_fp8x23; +mod atan_fp16x16; +mod atan_fp8x23; +mod ceil_fp16x16; +mod ceil_fp8x23; +mod concat_fp16x16_1d; +mod concat_fp16x16_2d; +mod concat_fp16x16_3d_default; +mod concat_fp16x16_3d_axis_1; +mod concat_fp16x16_3d_axis_2; +mod concat_fp16x16_3d_three_tensors_axis_1; +mod concat_fp16x16_3d_three_tensors_axis_2; +mod concat_fp8x23_1d; +mod concat_fp8x23_2d; +mod concat_fp8x23_3d_default; +mod concat_fp8x23_3d_axis_1; +mod concat_fp8x23_3d_axis_2; +mod concat_fp8x23_3d_three_tensors_axis_1; +mod concat_fp8x23_3d_three_tensors_axis_2; +mod concat_i32_1d; +mod concat_i32_2d; +mod concat_i32_3d_default; +mod concat_i32_3d_axis_1; +mod concat_i32_3d_axis_2; +mod concat_i32_3d_three_tensors_axis_1; +mod concat_i32_3d_three_tensors_axis_2; +mod concat_i8_1d; +mod concat_i8_2d; +mod concat_i8_3d_default; +mod concat_i8_3d_axis_1; +mod concat_i8_3d_axis_2; +mod concat_i8_3d_three_tensors_axis_1; +mod concat_i8_3d_three_tensors_axis_2; +mod concat_u32_1d; +mod concat_u32_2d; +mod concat_u32_3d_default; +mod concat_u32_3d_axis_1; +mod concat_u32_3d_axis_2; +mod concat_u32_3d_three_tensors_axis_1; +mod concat_u32_3d_three_tensors_axis_2; +mod cos_fp16x16; +mod cos_fp8x23; +mod cosh_fp16x16; +mod cosh_fp8x23; +mod cumsum_fp16x16_1d_default; +mod cumsum_fp16x16_1d_exclusive; +mod cumsum_fp16x16_1d_reverse; +mod cumsum_fp16x16_1d_reverse_exclusive; +mod cumsum_fp16x16_2d_axis_0; +mod cumsum_fp16x16_2d_axis_1; +mod cumsum_fp8x23_1d_default; +mod cumsum_fp8x23_1d_exclusive; +mod 
cumsum_fp8x23_1d_reverse; +mod cumsum_fp8x23_1d_reverse_exclusive; +mod cumsum_fp8x23_2d_axis_0; +mod cumsum_fp8x23_2d_axis_1; +mod cumsum_i32_1d_default; +mod cumsum_i32_1d_exclusive; +mod cumsum_i32_1d_reverse; +mod cumsum_i32_1d_reverse_exclusive; +mod cumsum_i32_2d_axis_0; +mod cumsum_i32_2d_axis_1; +mod cumsum_i8_1d_default; +mod cumsum_i8_1d_exclusive; +mod cumsum_i8_1d_reverse; +mod cumsum_i8_1d_reverse_exclusive; +mod cumsum_i8_2d_axis_0; +mod cumsum_i8_2d_axis_1; +mod cumsum_u32_1d_default; +mod cumsum_u32_1d_exclusive; +mod cumsum_u32_1d_reverse; +mod cumsum_u32_1d_reverse_exclusive; +mod cumsum_u32_2d_axis_0; +mod cumsum_u32_2d_axis_1; +mod div_fp16x16; +mod div_fp16x16_broadcast; +mod div_fp8x23; +mod div_fp8x23_broadcast; +mod div_i32; +mod div_i32_broadcast; +mod div_i8; +mod div_i8_broadcast; +mod div_u32; +mod div_u32_broadcast; +mod equal_fp16x16; +mod equal_fp16x16_broadcast; +mod equal_fp8x23; +mod equal_fp8x23_broadcast; +mod equal_i32; +mod equal_i32_broadcast; +mod equal_i8; +mod equal_i8_broadcast; +mod equal_u32; +mod equal_u32_broadcast; +mod exp_fp16x16; +mod exp_fp8x23; +mod less_equal_fp16x16; +mod less_equal_fp16x16_broadcast; +mod less_equal_fp8x23; +mod less_equal_fp8x23_broadcast; +mod less_equal_i32; +mod less_equal_i32_broadcast; +mod less_equal_i8; +mod less_equal_i8_broadcast; +mod less_equal_u32; +mod less_equal_u32_broadcast; +mod greater_fp16x16; +mod greater_fp16x16_broadcast; +mod greater_fp8x23; +mod greater_fp8x23_broadcast; +mod greater_i32; +mod greater_i32_broadcast; +mod greater_i8; +mod greater_i8_broadcast; +mod greater_u32; +mod greater_u32_broadcast; +mod leaky_relu_fp16x16; +mod leaky_relu_fp8x23; +mod linear_fp16x16; +mod linear_fp8x23; +mod linear_i32; +mod linear_i8; +mod linear_u32; +mod log_fp16x16; +mod log_fp8x23; +mod logsoftmax_fp16x16_axis_0; +mod logsoftmax_fp16x16_axis_1; +mod logsoftmax_fp8x23_axis_0; +mod logsoftmax_fp8x23_axis_1; +mod matmul_fp16x16_1d; +mod matmul_fp16x16_2x2; +mod 
matmul_fp16x16_2x1; +mod matmul_fp16x16_1x2; +mod matmul_fp8x23_1d; +mod matmul_fp8x23_2x2; +mod matmul_fp8x23_2x1; +mod matmul_fp8x23_1x2; +mod matmul_i32_1d; +mod matmul_i32_2x2; +mod matmul_i32_2x1; +mod matmul_i32_1x2; +mod matmul_i8_1d; +mod matmul_i8_2x2; +mod matmul_i8_2x1; +mod matmul_i8_1x2; +mod matmul_u32_1d; +mod matmul_u32_2x2; +mod matmul_u32_2x1; +mod matmul_u32_1x2; +mod mul_fp16x16; +mod mul_fp16x16_broadcast; +mod mul_fp8x23; +mod mul_fp8x23_broadcast; +mod mul_i32; +mod mul_i32_broadcast; +mod mul_i8; +mod mul_i8_broadcast; +mod mul_u32; +mod mul_u32_broadcast; +mod or_fp16x16; +mod or_fp16x16_broadcast; +mod or_fp8x23; +mod or_fp8x23_broadcast; +mod or_i32; +mod or_i32_broadcast; +mod or_i8; +mod or_i8_broadcast; +mod or_u32; +mod or_u32_broadcast; +mod reduce_sum_fp16x16_1D; +mod reduce_sum_fp16x16_2D_default; +mod reduce_sum_fp16x16_2D_keepdims; +mod reduce_sum_fp16x16_2D_axis_1; +mod reduce_sum_fp8x23_1D; +mod reduce_sum_fp8x23_2D_default; +mod reduce_sum_fp8x23_2D_keepdims; +mod reduce_sum_fp8x23_2D_axis_1; +mod reduce_sum_i32_1D; +mod reduce_sum_i32_2D_default; +mod reduce_sum_i32_2D_keepdims; +mod reduce_sum_i32_2D_axis_1; +mod reduce_sum_i8_1D; +mod reduce_sum_i8_2D_default; +mod reduce_sum_i8_2D_keepdims; +mod reduce_sum_i8_2D_axis_1; +mod reduce_sum_u32_1D; +mod reduce_sum_u32_2D_default; +mod reduce_sum_u32_2D_keepdims; +mod reduce_sum_u32_2D_axis_1; +mod relu_fp16x16; +mod relu_fp8x23; +mod relu_i32; +mod relu_i8; +mod sigmoid_fp16x16; +mod sigmoid_fp8x23; +mod sin_fp16x16; +mod sin_fp8x23; +mod sinh_fp16x16; +mod sinh_fp8x23; +mod softmax_fp16x16; +mod softmax_fp8x23; +mod softplus_fp8x23; +mod softplus_fp16x16; +mod softsign_fp8x23; +mod softsign_fp16x16; +mod sqrt_fp16x16; +mod sqrt_fp8x23; +mod sub_fp16x16; +mod sub_fp16x16_broadcast; +mod sub_fp8x23; +mod sub_fp8x23_broadcast; +mod sub_i32; +mod sub_i32_broadcast; +mod sub_i8; +mod sub_i8_broadcast; +mod sub_u32; +mod sub_u32_broadcast; +mod tanh_fp16x16; +mod tanh_fp8x23; +mod 
transpose_fp16x16_2d; +mod transpose_fp16x16_3d; +mod transpose_fp8x23_2d; +mod transpose_fp8x23_3d; +mod transpose_i32_2d; +mod transpose_i32_3d; +mod transpose_i8_2d; +mod transpose_i8_3d; +mod transpose_u32_2d; +mod transpose_u32_3d; +mod xor_fp16x16; +mod xor_fp16x16_broadcast; +mod xor_fp8x23; +mod xor_fp8x23_broadcast; +mod xor_i32; +mod xor_i32_broadcast; +mod xor_i8; +mod xor_i8_broadcast; +mod xor_u32; +mod xor_u32_broadcast; +mod less_fp16x16; +mod less_fp16x16_broadcast; +mod less_fp8x23; +mod less_fp8x23_broadcast; +mod less_i32; +mod less_i32_broadcast; +mod less_i8; +mod less_i8_broadcast; +mod less_u32; +mod less_u32_broadcast; +mod greater_equal_fp16x16; +mod greater_equal_fp16x16_broadcast; +mod greater_equal_fp8x23; +mod greater_equal_fp8x23_broadcast; +mod greater_equal_i32; +mod greater_equal_i32_broadcast; +mod greater_equal_i8; +mod greater_equal_i8_broadcast; +mod greater_equal_u32; +mod greater_equal_u32_broadcast; +mod slice_fp16x16_2d; +mod slice_fp16x16_3d; +mod slice_fp8x23_2d; +mod slice_fp8x23_3d; +mod slice_i32_2d; +mod slice_i32_3d; +mod slice_i8_2d; +mod slice_i8_3d; +mod slice_u32_2d; +mod slice_u32_3d; +mod gather_fp8x23_3d_default; +mod gather_fp8x23_3d_axis1; +mod gather_fp8x23_3d_axis2; +mod gather_fp16x16_3d_default; +mod gather_fp16x16_3d_axis1; +mod gather_fp16x16_3d_axis2; +mod gather_i8_3d_default; +mod gather_i8_3d_axis1; +mod gather_i8_3d_axis2; +mod gather_i32_3d_default; +mod gather_i32_3d_axis1; +mod gather_i32_3d_axis2; +mod gather_u32_3d_default; +mod gather_u32_3d_axis1; +mod gather_u32_3d_axis2; +mod nonzero_fp16x16_2d; +mod nonzero_fp16x16_3d; +mod nonzero_fp8x23_2d; +mod nonzero_fp8x23_3d; +mod nonzero_i32_2d; +mod nonzero_i32_3d; +mod nonzero_i8_2d; +mod nonzero_i8_3d; +mod nonzero_u32_2d; +mod nonzero_u32_3d; +mod squeeze_fP16x16; +mod squeeze_fP8x23; +mod squeeze_i32; +mod squeeze_i8; +mod squeeze_u32; +mod unsqueeze_fp16x16_2d; +mod unsqueeze_fp16x16_3d; +mod unsqueeze_fp8x23_2d; +mod unsqueeze_fp8x23_3d; 
+mod unsqueeze_i32_2d; +mod unsqueeze_i32_3d; +mod unsqueeze_i8_2d; +mod unsqueeze_i8_3d; +mod unsqueeze_u32_2d; +mod unsqueeze_u32_3d; +mod sign_fP16x16; +mod sign_fP8x23; +mod sign_fail; +mod sign_i32; +mod sign_i8; +mod clip_fp16x16_2d; +mod clip_fp16x16_3d; +mod clip_fp8x23_2d; +mod clip_fp8x23_3d; +mod clip_i32_2d; +mod clip_i32_3d; +mod clip_i8_2d; +mod clip_i8_3d; +mod clip_u32_2d; +mod clip_u32_3d; +mod identity_fP16x16; +mod identity_fP8x23; +mod identity_i32; +mod identity_i8; +mod identity_u32; +mod thresholded_relu_fp16x16; +mod thresholded_relu_fp8x23; +mod hard_sigmoid_fp8x23; +mod hard_sigmoid_fp16x16; +mod neg_fp16x16; +mod neg_fp8x23; +mod neg_i32; +mod neg_i8; +mod gemm_all_attributes; +mod gemm_alpha; +mod gemm_beta; +mod gemm_default_matrix_bias; +mod gemm_default_vector_bias; +mod gemm_default_no_bias; +mod gemm_transposeA; +mod gemm_transposeB; +mod min_fp16x16_three_tensors; +mod min_fp16x16_broadcast_three_tensors; +mod min_fp16x16_two_tensors; +mod min_fp16x16_broadcast_two_tensors; +mod min_fp8x23_three_tensors; +mod min_fp8x23_broadcast_three_tensors; +mod min_fp8x23_two_tensors; +mod min_fp8x23_broadcast_two_tensors; +mod min_i32_three_tensors; +mod min_i32_broadcast_three_tensors; +mod min_i32_two_tensors; +mod min_i32_broadcast_two_tensors; +mod min_i8_three_tensors; +mod min_i8_broadcast_three_tensors; +mod min_i8_two_tensors; +mod min_i8_broadcast_two_tensors; +mod min_u32_three_tensors; +mod min_u32_broadcast_three_tensors; +mod min_u32_two_tensors; +mod min_u32_broadcast_two_tensors; +mod where_fp16x16; +mod where_fp16x16_broadcast; +mod where_fp8x23; +mod where_fp8x23_broadcast; +mod where_i32; +mod where_i32_broadcast; +mod where_i8; +mod where_i8_broadcast; +mod where_u32; +mod where_u32_broadcast; +mod not_bool; +mod round_fp16x16; +mod round_fp8x23; +mod max_fp16x16_three_tensors; +mod max_fp16x16_broadcast_three_tensors; +mod max_fp16x16_two_tensors; +mod max_fp16x16_broadcast_two_tensors; +mod max_fp8x23_three_tensors; +mod 
max_fp8x23_broadcast_three_tensors; +mod max_fp8x23_two_tensors; +mod max_fp8x23_broadcast_two_tensors; +mod max_i32_three_tensors; +mod max_i32_broadcast_three_tensors; +mod max_i32_two_tensors; +mod max_i32_broadcast_two_tensors; +mod max_i8_three_tensors; +mod max_i8_broadcast_three_tensors; +mod max_i8_two_tensors; +mod max_i8_broadcast_two_tensors; +mod max_u32_three_tensors; +mod max_u32_broadcast_three_tensors; +mod max_u32_two_tensors; +mod max_u32_broadcast_two_tensors; +mod scatter_fp16x16_3d_default; +mod scatter_fp16x16_3d_axis1; +mod scatter_fp16x16_3d_axis1_add; +mod scatter_fp8x23_default; +mod scatter_fp8x23_axis1; +mod scatter_fp8x23_mul; +mod scatter_i8_default; +mod scatter_i8_axis1; +mod scatter_i8_axis1_max; +mod scatter_u32_default; +mod scatter_u32_axis1; +mod scatter_u32_add; +mod array_feature_extractor_1D_i32; +mod array_feature_extractor_1D_fp8x23; +mod array_feature_extractor_1D_fp16x16; +mod array_feature_extractor_2D_i32; +mod array_feature_extractor_2D_fp8x23; +mod array_feature_extractor_2D_fp16x16; +mod array_feature_extractor_3D_i32; +mod array_feature_extractor_3D_fp8x23; +mod array_feature_extractor_3D_fp16x16; +mod binarizer_fp16x16; +mod binarizer_fp8x23; +mod tril_fp16x16; +mod tril_fp16x16_neg; +mod tril_fp16x16_one_row; +mod tril_fp16x16_out_neg; +mod tril_fp16x16_out_pos; +mod tril_fp16x16_pos; +mod tril_fp16x16_square; +mod tril_fp16x16_square_neg; +mod tril_fp16x16_zero; +mod triu_fp16x16; +mod triu_fp16x16_neg; +mod triu_fp16x16_one_row; +mod triu_fp16x16_out_neg; +mod triu_fp16x16_out_pos; +mod triu_fp16x16_pos; +mod triu_fp16x16_square; +mod triu_fp16x16_square_neg; +mod triu_fp16x16_zero; +mod tril_fp8x23; +mod tril_fp8x23_neg; +mod tril_fp8x23_one_row; +mod tril_fp8x23_out_neg; +mod tril_fp8x23_out_pos; +mod tril_fp8x23_pos; +mod tril_fp8x23_square; +mod tril_fp8x23_square_neg; +mod tril_fp8x23_zero; +mod triu_fp8x23; +mod triu_fp8x23_neg; +mod triu_fp8x23_one_row; +mod triu_fp8x23_out_neg; +mod triu_fp8x23_out_pos; 
+mod triu_fp8x23_pos; +mod triu_fp8x23_square; +mod triu_fp8x23_square_neg; +mod triu_fp8x23_zero; +mod tril_i32; +mod tril_neg_i32; +mod tril_i32_one_row; +mod tril_i32_out_neg; +mod tril_i32_out_pos; +mod tril_i32_pos; +mod tril_i32_square; +mod tril_i32_square_neg; +mod tril_i32_zero; +mod triu_i32; +mod triu_i32_neg; +mod triu_i32_one_row; +mod triu_i32_out_neg; +mod triu_i32_out_pos; +mod triu_i32_pos; +mod triu_i32_square; +mod triu_i32_square_neg; +mod triu_i32_zero; +mod tril_i8; +mod tril_i8_neg; +mod tril_i8_one_row; +mod tril_i8_out_neg; +mod tril_i8_out_pos; +mod tril_i8_pos; +mod tril_i8_square; +mod tril_i8_square_neg; +mod tril_i8_zero; +mod triu_i8; +mod triu_i8_neg; +mod triu_i8_one_row; +mod triu_i8_out_neg; +mod triu_i8_out_pos; +mod triu_i8_pos; +mod triu_i8_square; +mod triu_i8_square_neg; +mod triu_i8_zero; +mod tril_u32; +mod tril_u32_neg; +mod tril_u32_one_row; +mod tril_u32_out_neg; +mod tril_u32_out_pos; +mod tril_u32_pos; +mod tril_u32_square; +mod tril_u32_square_neg; +mod tril_u32_zero; +mod triu_u32; +mod triu_u32_neg; +mod triu_u32_one_row; +mod triu_u32_out_neg; +mod triu_u32_out_pos; +mod triu_u32_pos; +mod triu_u32_square; +mod triu_u32_square_neg; +mod triu_u32_zero; +mod reduce_sum_square_fp16x16_export_do_not_keepdims; +mod reduce_sum_square_fp16x16_export_keepdims; +mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +mod reduce_sum_square_fp8x23_export_do_not_keepdims; +mod reduce_sum_square_fp8x23_export_keepdims; +mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +mod reduce_sum_square_i32_export_do_not_keepdims; +mod reduce_sum_square_i32_export_keepdims; +mod reduce_sum_square_i32_export_negative_axes_keepdims; +mod reduce_sum_square_i8_export_do_not_keepdims; +mod reduce_sum_square_i8_export_keepdims; +mod reduce_sum_square_i8_export_negative_axes_keepdims; +mod reduce_sum_square_u32_export_do_not_keepdims; +mod reduce_sum_square_u32_export_keepdims; +mod 
reduce_sum_square_u32_export_negative_axes_keepdims; +mod reduce_l2_fp16x16_export_do_not_keepdims; +mod reduce_l2_fp16x16_export_keepdims; +mod reduce_l2_fp16x16_export_negative_axes_keepdims; +mod reduce_l2_fp8x23_export_do_not_keepdims; +mod reduce_l2_fp8x23_export_keepdims; +mod reduce_l2_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_fp16x16_export_do_not_keepdims; +mod reduce_l1_fp16x16_export_keepdims; +mod reduce_l1_fp16x16_export_negative_axes_keepdims; +mod reduce_l1_fp8x23_export_do_not_keepdims; +mod reduce_l1_fp8x23_export_keepdims; +mod reduce_l1_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_i32_export_do_not_keepdims; +mod reduce_l1_i32_export_keepdims; +mod reduce_l1_i32_export_negative_axes_keepdims; +mod reduce_l1_i8_export_do_not_keepdims; +mod reduce_l1_i8_export_keepdims; +mod reduce_l1_i8_export_negative_axes_keepdims; +mod reduce_l1_u32_export_do_not_keepdims; +mod reduce_l1_u32_export_keepdims; +mod reduce_l1_u32_export_negative_axes_keepdims; +mod reduce_prod_fp16x16_1D; +mod reduce_prod_fp16x16_2D_default; +mod reduce_prod_fp16x16_2D_keepdims; +mod reduce_prod_fp16x16_2D_axis_1; +mod reduce_prod_fp8x23_1D; +mod reduce_prod_fp8x23_2D_default; +mod reduce_prod_fp8x23_2D_keepdims; +mod reduce_prod_fp8x23_2D_axis_1; +mod reduce_prod_i32_1D; +mod reduce_prod_i32_2D_default; +mod reduce_prod_i32_2D_keepdims; +mod reduce_prod_i32_2D_axis_1; +mod reduce_prod_i8_1D; +mod reduce_prod_i8_2D_default; +mod reduce_prod_i8_2D_keepdims; +mod reduce_prod_i8_2D_axis_1; +mod reduce_prod_u32_1D; +mod reduce_prod_u32_2D_default; +mod reduce_prod_u32_2D_keepdims; +mod reduce_prod_u32_2D_axis_1; +mod gather_elements_fp16x16_3d_default; +mod gather_elements_fp16x16_3d_axis1; +mod gather_elements_fp16x16_3d_axis2; +mod gather_elements_fp8x23_3d_default; +mod gather_elements_fp8x23_3d_axis1; +mod gather_elements_fp8x23_3d_axis2; +mod gather_elements_i8_3d_default; +mod gather_elements_i8_3d_axis1; +mod gather_elements_i32_3d_default; +mod 
gather_elements_i32_3d_axis1; +mod gather_elements_i32_3d_axis2; +mod gather_elements_u32_default; +mod gather_elements_u32_axis1; +mod gather_elements_u32_axis2; +mod gather_elements_u32_axis3; +mod sequence_length_fp16x16; +mod sequence_length_fp16x16_broadcast; +mod sequence_length_fp8x23; +mod sequence_length_fp8x23_broadcast; +mod sequence_length_i32; +mod sequence_length_i32_broadcast; +mod sequence_length_i8; +mod sequence_length_i8_broadcast; +mod sequence_length_u32; +mod sequence_length_u32_broadcast; +mod sequence_at_u32_positive; +mod sequence_at_u32_negative; +mod sequence_at_fp16x16_positive; +mod sequence_at_fp16x16_negative; +mod sequence_at_fp8x23_positive; +mod sequence_at_fp8x23_negative; +mod sequence_at_i32_positive; +mod sequence_at_i32_negative; +mod sequence_at_i8_positive; +mod sequence_at_i8_negative; +mod reduce_min_fp16x16_1D; +mod reduce_min_fp16x16_2D_default; +mod reduce_min_fp16x16_2D_keepdims; +mod reduce_min_fp16x16_2D_axis_1; +mod reduce_min_fp8x23_1D; +mod reduce_min_fp8x23_2D_default; +mod reduce_min_fp8x23_2D_keepdims; +mod reduce_min_fp8x23_2D_axis_1; +mod reduce_min_i32_1D; +mod reduce_min_i32_2D_default; +mod reduce_min_i32_2D_keepdims; +mod reduce_min_i32_2D_axis_1; +mod reduce_min_i8_1D; +mod reduce_min_i8_2D_default; +mod reduce_min_i8_2D_keepdims; +mod reduce_min_i8_2D_axis_1; +mod reduce_min_u32_1D; +mod reduce_min_u32_2D_default; +mod reduce_min_u32_2D_keepdims; +mod reduce_min_u32_2D_axis_1; +mod sequence_construct_fp16x16; +mod sequence_construct_fp8x23; +mod sequence_construct_i32; +mod sequence_construct_i8; +mod sequence_construct_u32; +mod shrink_hard_fp16x16; +mod shrink_soft_fp16x16; +mod shrink_hard_fp8x23; +mod shrink_soft_fp8x23; +mod sequence_empty_fp16x16; +mod sequence_empty_fp8x23; +mod sequence_empty_i32; +mod sequence_empty_i8; +mod sequence_empty_u32; +mod reduce_mean_fp16x16_1D; +mod reduce_mean_fp16x16_2D_default; +mod reduce_mean_fp16x16_2D_keepdims; +mod reduce_mean_fp16x16_2D_axis_1; +mod 
reduce_mean_fp8x23_1D; +mod reduce_mean_fp8x23_2D_default; +mod reduce_mean_fp8x23_2D_keepdims; +mod reduce_mean_fp8x23_2D_axis_1; +mod reduce_mean_i32_1D; +mod reduce_mean_i32_2D_default; +mod reduce_mean_i32_2D_keepdims; +mod reduce_mean_i32_2D_axis_1; +mod reduce_mean_i8_1D; +mod reduce_mean_i8_2D_default; +mod reduce_mean_i8_2D_keepdims; +mod reduce_mean_i8_2D_axis_1; +mod reduce_mean_u32_1D; +mod reduce_mean_u32_2D_default; +mod reduce_mean_u32_2D_keepdims; +mod reduce_mean_u32_2D_axis_1; +mod pow_fp16x16; +mod pow_fp16x16_broadcast; +mod pow_fp8x23; +mod pow_fp8x23_broadcast; +mod sequence_erase_u32_positive; +mod sequence_erase_u32_negative; +mod sequence_erase_u32_empty; +mod sequence_erase_fp16x16_positive; +mod sequence_erase_fp16x16_negative; +mod sequence_erase_fp16x16_empty; +mod sequence_erase_fp8x23_positive; +mod sequence_erase_fp8x23_negative; +mod sequence_erase_fp8x23_empty; +mod sequence_erase_i32_positive; +mod sequence_erase_i32_negative; +mod sequence_erase_i32_empty; +mod sequence_erase_i8_positive; +mod sequence_erase_i8_negative; +mod sequence_erase_i8_empty; +mod sequence_insert_fp16x16; +mod sequence_insert_fp8x23; +mod sequence_insert_i32; +mod sequence_insert_i8; +mod sequence_insert_u32; +mod concat_from_sequence_fp8x23_new_axis_zero; +mod concat_from_sequence_fp8x23_new_axis_one; +mod concat_from_sequence_fp8x23_new_axis_default; +mod concat_from_sequence_fp16x16_new_axis_zero; +mod concat_from_sequence_fp16x16_new_axis_one; +mod concat_from_sequence_fp16x16_new_axis_default; +mod concat_from_sequence_i32_new_axis_zero; +mod concat_from_sequence_i32_new_axis_one; +mod concat_from_sequence_i32_new_axis_default; +mod concat_from_sequence_i8_new_axis_zero; +mod concat_from_sequence_i8_new_axis_one; +mod concat_from_sequence_i8_new_axis_default; +mod concat_from_sequence_u32_new_axis_zero; +mod concat_from_sequence_u32_new_axis_one; +mod concat_from_sequence_u32_new_axis_default; +mod is_nan_fp16x16; +mod is_nan_fp8x23; +mod 
is_inf_fp16x16; +mod is_inf_fp8x23; +mod is_inf_i32; +mod is_inf_i8; +mod is_inf_u32; +mod is_pos_inf_fp16x16; +mod is_neg_inf_fp16x16; +mod is_pos_inf_fp8x23; +mod is_neg_inf_fp8x23; +mod is_pos_inf_i32; +mod is_neg_inf_i32; +mod is_pos_inf_i8; +mod is_neg_inf_i8; +mod reduce_log_sum_fp8x23_export_do_not_keepdims; +mod reduce_log_sum_fp8x23_export_keepdims; +mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +mod reduce_log_sum_fp16x16_export_do_not_keepdims; +mod reduce_log_sum_fp16x16_export_keepdims; +mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +mod and_bool; +mod erf_fp16x16; +mod erf_fp8x23; +mod unique_fp16x16_without_axis_sorted; +mod unique_fp16x16_with_axis_zero_sorted; +mod unique_u32_without_axis_sorted; +mod unique_u32_without_axis_not_sorted; +mod unique_u32_with_axis_zero_sorted; +mod unique_u32_with_axis_zero_not_sorted; +mod unique_u32_with_axis_one_sorted; +mod unique_u32_with_axis_one_not_sorted; +mod gather_nd_fp16x16_3d_default; +mod gather_nd_fp16x16_3d_batch_dims1; +mod gather_nd_fp16x16_3d_batch_dims2; +mod gather_nd_fp8x23_3d_default; +mod gather_nd_fp8x23_3d_batch_dims1; +mod gather_nd_fp8x23_3d_batch_dims2; +mod gather_nd_i32_3d_default; +mod gather_nd_i32_3d_batch_dims1; +mod gather_nd_i32_3d_batch_dims2; +mod gather_nd_i8_3d_default; +mod gather_nd_i8_3d_batch_dims1; +mod gather_nd_u32_default; +mod gather_nd_u32_batch_dims1; +mod gather_nd_u32_batch_dims2; +mod resize_upsample_scales_nearest; +mod resize_downsample_scales_cubic; +mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_downsample_scales_cubic_align_corners; +mod resize_upsample_scales_linear; +mod resize_downsample_scales_linear_align_corners; +mod resize_downsample_scales_nearest; +mod resize_upsample_scales_cubic; +mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_upsample_scales_cubic_align_corners; +mod resize_upsample_scales_cubic_asymmetric; +mod resize_upsample_scales_linear_align_corners; +mod 
resize_upsample_sizes_nearest; +mod resize_upsample_sizes_cubic; +mod resize_downsample_sizes_cubic; +mod resize_downsample_sizes_nearest; +mod resize_upsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_scales_cubic_antialias; +mod resize_downsample_scales_linear_antialias; +mod resize_downsample_sizes_cubic_antialias; +mod resize_downsample_sizes_linear_pytorch_half_pixel; +mod resize_tf_crop_and_resize; +mod resize_tf_crop_and_resize_extrapolation_value; +mod resize_upsample_scales_nearest_axes_2_3; +mod resize_upsample_scales_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_axes_2_3; +mod resize_upsample_sizes_nearest_ceil_half_pixel; +mod resize_upsample_sizes_nearest_floor_align_corners; +mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +mod resize_downsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_sizes_nearest_not_larger; +mod resize_downsample_sizes_nearest_not_smaller; +mod resize_tf_crop_and_resize_axes_2_3; +mod resize_tf_crop_and_resize_axes_3_2; +mod resize_upsample_sizes_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_not_larger; +mod resize_upsample_sizes_nearest_not_smaller; +mod compress_fp16x16_3d_default; +mod compress_fp16x16_3d_axis1; +mod compress_fp16x16_3d_axis2; +mod compress_fp16x16_3d_axis3; +mod compress_fp16x16_3d_noaxis; +mod compress_fp8x23_3d_default; +mod compress_fp8x23_3d_axis1; +mod compress_fp8x23_3d_axis2; +mod compress_i32_3d_default; +mod compress_i32_3d_axis1; +mod compress_i32_3d_axis2; +mod compress_i8_3d_default; +mod compress_i8_3d_axis1; +mod compress_i8_3d_axis2; +mod compress_u32_3d_default; +mod compress_u32_3d_axis1; +mod compress_u32_3d_axis2; +mod compress_u32_3d_axis2_2; +mod compress_u32_3d_axis3; +mod layer_normalization_default_axis; +mod layer_normalization_4d_axis0; +mod layer_normalization_4d_axis1; +mod layer_normalization_4d_axis2; +mod layer_normalization_4d_axis3; +mod layer_normalization_3d_axis0_epsilon; +mod 
layer_normalization_3d_axis1_epsilon; +mod layer_normalization_3d_axis2_epsilon; +mod layer_normalization_4d_axis_negative_4; +mod layer_normalization_4d_axis_negative_3; +mod layer_normalization_4d_axis_negative_2; +mod layer_normalization_4d_axis_negative_1; +mod layer_normalization_3d_axis_negative_3_epsilon; +mod layer_normalization_3d_axis_negative_2_epsilon; +mod layer_normalization_3d_axis_negative_1_epsilon; +mod layer_normalization_test; +mod split_u32_1d_equal_parts; +mod split_u32_2d_equal_parts; +mod split_u32_zero_size; +mod split_u32_1d_variable_parts; +mod split_u32_2d_variable_parts; +mod split_u32_1d_uneven; +mod split_u32_2d_uneven; +mod split_fp16x16_1d_equal_parts; +mod split_fp16x16_1d_variable_parts; +mod split_fp16x16_2d_equal_parts; +mod split_fp16x16_2d_variable_parts; +mod split_fp16x16_zero_size; +mod split_fp16x16_1d_uneven; +mod split_fp16x16_2d_uneven; mod scatter_nd_fp16x16_3d_default; mod scatter_nd_fp16x16_3d_add; mod scatter_nd_fp16x16_3d_mul; From bfb5424c30fae292734e1cc39ff0b9baeb4f1628 Mon Sep 17 00:00:00 2001 From: Hakeem Kazeem Date: Wed, 17 Jan 2024 19:51:26 +0100 Subject: [PATCH 05/46] node fixed file defined multiple times --- tests/nodes.cairo | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/nodes.cairo b/tests/nodes.cairo index b499cd993..6ea5a7ac5 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -946,7 +946,6 @@ mod scatter_nd_fp8x23_3d_add; mod scatter_nd_fp8x23_3d_mul; mod scatter_nd_fp8x23_3d_max; mod scatter_nd_fp8x23_3d_min; -mod gather_nd_u32_default; mod scatter_nd_u32_default; mod scatter_nd_u32_add; mod scatter_nd_u32_mul; From 0cee3a915139c66249ea2fefb95195c9b0056770 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Thu, 18 Jan 2024 11:35:06 +0100 Subject: [PATCH 06/46] feat: SVM Classifier --- docgen/src/main.rs | 8 + docs/SUMMARY.md | 2 + .../machine-learning/svm-classifier/README.md | 23 + .../svm-classifier/svm_classifier.predict.md | 196 +++ src/operators/matrix.cairo | 41 + 
src/operators/ml/svm.cairo | 1 + src/operators/ml/svm/svm_classifier.cairo | 1240 +++++++++++++++++ .../implementations/tensor_fp64x64.cairo | 2 +- tests/ml.cairo | 2 + tests/ml/svm_classifier_test.cairo | 875 ++++++++++++ 10 files changed, 2389 insertions(+), 1 deletion(-) create mode 100644 docs/framework/operators/machine-learning/svm-classifier/README.md create mode 100644 docs/framework/operators/machine-learning/svm-classifier/svm_classifier.predict.md create mode 100644 src/operators/ml/svm/svm_classifier.cairo create mode 100644 tests/ml/svm_classifier_test.cairo diff --git a/docgen/src/main.rs b/docgen/src/main.rs index ed2a69460..59c58724d 100644 --- a/docgen/src/main.rs +++ b/docgen/src/main.rs @@ -90,6 +90,14 @@ fn main() { let trait_name: &str = "SVMRegressorTrait"; doc_trait(trait_path, doc_path, label); doc_functions(trait_path, doc_path, trait_name, label); + + // SVM CLASSIFIER DOC + let trait_path = "src/operators/ml/svm/svm_classifier.cairo"; + let doc_path = "docs/framework/operators/machine-learning/svm-classifier"; + let label = "svm_classifier"; + let trait_name: &str = "SVMClassifierTrait"; + doc_trait(trait_path, doc_path, label); + doc_functions(trait_path, doc_path, trait_name, label); } fn doc_trait(trait_path: &str, doc_path: &str, label: &str) { diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index f956159ec..80a937af0 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -147,6 +147,8 @@ * [linear\_regressor.predict](framework/operators/machine-learning/linear-regressor/linear\_regressor.predict.md) * [SVM Regressor](framework/operators/machine-learning/svm-regressor/README.md) * [svm\_regressor.predict](framework/operators/machine-learning/svm-regressor/svm\_regressor.predict.md) + * [SVM Classifier](framework/operators/machine-learning/svm-classifier/README.md) + * [svm\_classifier.predict](framework/operators/machine-learning/svm-classifier/svm\_classifier.predict.md) * [Sequence](framework/operators/sequence/README.md) * 
[sequence.sequence\_construct](framework/operators/sequence/sequence.sequence\_construct.md) * [sequence.sequence\_empty](framework/operators/sequence/sequence.sequence\_empty.md) diff --git a/docs/framework/operators/machine-learning/svm-classifier/README.md b/docs/framework/operators/machine-learning/svm-classifier/README.md new file mode 100644 index 000000000..46a46aeb8 --- /dev/null +++ b/docs/framework/operators/machine-learning/svm-classifier/README.md @@ -0,0 +1,23 @@ +# SVM Classifier + +`SVMClassifierTrait` provides a trait definition for svm classification problem. + +```rust +use orion::operators::ml::SVMClassifierTrait; +``` + +### Data types + +Orion supports currently only fixed point data types for `SVMClassifierTrait`. + +| Data type | dtype | +| -------------------- | ------------------------------------------------------------- | +| Fixed point (signed) | `SVMClassifierTrait` | + + +*** + +| function | description | +| --- | --- | +| [`svm_classifier.predict`](svm_classifier.predict.md) | Returns the top class for each of N inputs. | + diff --git a/docs/framework/operators/machine-learning/svm-classifier/svm_classifier.predict.md b/docs/framework/operators/machine-learning/svm-classifier/svm_classifier.predict.md new file mode 100644 index 000000000..4ea027b0a --- /dev/null +++ b/docs/framework/operators/machine-learning/svm-classifier/svm_classifier.predict.md @@ -0,0 +1,196 @@ +# SVMClassifierTrait::predict + +```rust + fn predict(ref self: SVMClassifier, X: Tensor) -> (Span, Tensor); +``` + +Support Vector Machine classification. + +## Args + +* `self`: SVMClassifier - A SVMClassifier object. +* `X`: Input 2D tensor. + +## Returns + +* N Top class for each point +* The class score Matrix for each class, for each point. If prob_a and prob_b are provided they are probabilities for each class, otherwise they are raw scores. 
+ +## Type Constraints + +`SVMClassifier` and `X` must be fixed points + +## Examples + +```rust +fn example_svm_classifier_noprob_linear_sv_none() -> (Span, Tensor) { + let coefficients: Span = array![ + FP16x16 { mag: 50226, sign: false }, + FP16x16 { mag: 5711, sign: false }, + FP16x16 { mag: 7236, sign: false }, + FP16x16 { mag: 63175, sign: true } + ] + .span(); + let kernel_params: Span = array![ + FP16x16 { mag: 8025, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 196608, sign: false } + ] + .span(); + let kernel_type = KERNEL_TYPE::LINEAR; + let prob_a: Span = array![].span(); + let prob_b: Span = array![].span(); + let rho: Span = array![FP16x16 { mag: 146479, sign: false }].span(); + + let support_vectors: Span = array![ + FP16x16 { mag: 314572, sign: false }, + FP16x16 { mag: 222822, sign: false }, + FP16x16 { mag: 124518, sign: false }, + FP16x16 { mag: 327680, sign: false }, + FP16x16 { mag: 196608, sign: false }, + FP16x16 { mag: 104857, sign: false }, + FP16x16 { mag: 294912, sign: false }, + FP16x16 { mag: 150732, sign: false }, + FP16x16 { mag: 85196, sign: false }, + FP16x16 { mag: 334233, sign: false }, + FP16x16 { mag: 163840, sign: false }, + FP16x16 { mag: 196608, sign: false } + ] + .span(); + let classlabels: Span = array![0, 1].span(); + + let vectors_per_class = Option::Some(array![3, 1].span()); + + let post_transform = POST_TRANSFORM::NONE; + + let mut classifier: SVMClassifier = SVMClassifier { + classlabels, + coefficients, + kernel_params, + kernel_type, + post_transform, + prob_a, + prob_b, + rho, + support_vectors, + vectors_per_class, + }; + + let mut X: Tensor = TensorTrait::new( + array![3, 3].span(), + array![ + FP16x16 { mag: 65536, sign: true }, + FP16x16 { mag: 52428, sign: true }, + FP16x16 { mag: 39321, sign: true }, + FP16x16 { mag: 26214, sign: true }, + FP16x16 { mag: 13107, sign: true }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 13107, sign: false }, + FP16x16 { mag: 26214, sign: false }, + 
FP16x16 { mag: 39321, sign: false }, + ] + .span() + ); + + return SVMClassifierTrait::predict(ref classifier, X); + +} +// >>> ([0, 0, 0], +// [[-2.662655, 2.662655], +// [-2.21481, 2.21481], +// [-1.766964, 1.766964]]) + + +fn example_svm_classifier_binary_softmax_fp64x64() -> (Span, Tensor) { + let coefficients: Span = array![ + FP64x64 { mag: 18446744073709551616, sign: false }, + FP64x64 { mag: 18446744073709551616, sign: false }, + FP64x64 { mag: 18446744073709551616, sign: false }, + FP64x64 { mag: 18446744073709551616, sign: false }, + FP64x64 { mag: 18446744073709551616, sign: true }, + FP64x64 { mag: 18446744073709551616, sign: true }, + FP64x64 { mag: 18446744073709551616, sign: true }, + FP64x64 { mag: 18446744073709551616, sign: true } + ] + .span(); + let kernel_params: Span = array![ + FP64x64 { mag: 7054933896252620800, sign: false }, + FP64x64 { mag: 0, sign: false }, + FP64x64 { mag: 55340232221128654848, sign: false } + ] + .span(); + let kernel_type = KERNEL_TYPE::RBF; + let prob_a: Span = array![FP64x64 { mag: 94799998099962986496, sign: true }].span(); + let prob_b: Span = array![FP64x64 { mag: 1180576833385529344, sign: false }].span(); + let rho: Span = array![FP64x64 { mag: 3082192501545631744, sign: false }].span(); + + let support_vectors: Span = array![ + FP64x64 { mag: 3528081300248330240, sign: false }, + FP64x64 { mag: 19594207602596118528, sign: true }, + FP64x64 { mag: 9235613999318433792, sign: false }, + FP64x64 { mag: 10869715877100519424, sign: true }, + FP64x64 { mag: 5897111318564962304, sign: true }, + FP64x64 { mag: 1816720038917308416, sign: false }, + FP64x64 { mag: 4564890528671334400, sign: false }, + FP64x64 { mag: 21278987070814027776, sign: true }, + FP64x64 { mag: 7581529597213147136, sign: false }, + FP64x64 { mag: 10953113834067329024, sign: true }, + FP64x64 { mag: 24318984989010034688, sign: true }, + FP64x64 { mag: 30296187483321270272, sign: true }, + FP64x64 { mag: 10305112258191032320, sign: false }, + 
FP64x64 { mag: 17005441559857987584, sign: true }, + FP64x64 { mag: 11555205301925838848, sign: false }, + FP64x64 { mag: 2962701975885447168, sign: true }, + FP64x64 { mag: 11741665981322231808, sign: true }, + FP64x64 { mag: 15376232508819505152, sign: false }, + FP64x64 { mag: 13908474645692022784, sign: false }, + FP64x64 { mag: 7323415394302033920, sign: true }, + FP64x64 { mag: 3284258824352956416, sign: true }, + FP64x64 { mag: 11374683084831064064, sign: true }, + FP64x64 { mag: 9087138148126818304, sign: false }, + FP64x64 { mag: 8247488946750095360, sign: false } + ] + .span(); + let classlabels: Span = array![0, 1].span(); + + let vectors_per_class = Option::Some(array![4, 4].span()); + let post_transform = POST_TRANSFORM::SOFTMAX; + + let mut classifier: SVMClassifier = SVMClassifier { + classlabels, + coefficients, + kernel_params, + kernel_type, + post_transform, + prob_a, + prob_b, + rho, + support_vectors, + vectors_per_class, + }; + + let mut X: Tensor = TensorTrait::new( + array![3, 3].span(), + array![ + FP64x64 { mag: 18446744073709551616, sign: true }, + FP64x64 { mag: 14757395258967642112, sign: true }, + FP64x64 { mag: 11068046444225730560, sign: true }, + FP64x64 { mag: 7378697629483821056, sign: true }, + FP64x64 { mag: 3689348814741910528, sign: true }, + FP64x64 { mag: 0, sign: false }, + FP64x64 { mag: 3689348814741910528, sign: false }, + FP64x64 { mag: 7378697629483821056, sign: false }, + FP64x64 { mag: 11068046444225730560, sign: false } + ] + .span() + ); + + + return SVMClassifierTrait::predict(ref classifier, X); + +} +>>> ([0, 1, 1], + [[0.728411, 0.271589], + [0.484705, 0.515295], + [0.274879, 0.725121]]) +``` \ No newline at end of file diff --git a/src/operators/matrix.cairo b/src/operators/matrix.cairo index 0d19a7a4d..755e13ce4 100644 --- a/src/operators/matrix.cairo +++ b/src/operators/matrix.cairo @@ -34,6 +34,46 @@ impl MutMatrixImpl< } } + /// Get the value at (row, col) + fn at(ref self: MutMatrix, row: usize, col: 
usize) -> T { + return match self.get(row, col) { + Option::Some(val) => val, + Option::None => NumberTrait::zero(), + }; + } + + /// Performs the product between a m x n `MutMatrix` and a n x 1 `NullableVec`. + /// Returns the resulta as a `NullableVec`. + fn matrix_vector_product<+Mul, +Add, +Div, +AddEq>( + ref self: MutMatrix, ref vec: NullableVec + ) -> NullableVec { + assert(self.cols == vec.len, 'wrong matrix shape for dot'); + let m = self.rows; + let n = self.cols; + + let mut result_vec = VecTrait::new(); + + let mut i = 0_usize; + loop { + if i == m { + break (); + } + let mut sum: T = NumberTrait::zero(); + let mut k = 0_usize; + loop { + if k == n { + break (); + } + sum += MutMatrixImpl::at(ref self, i, k) * VecTrait::at(ref vec, k); + k += 1; + }; + VecTrait::set(ref result_vec, i, sum); + + i += 1; + }; + return result_vec; + } + /// Set the value at (row, col) fn set(ref self: MutMatrix, row: usize, col: usize, value: T) { if row < self.rows && col < self.cols { @@ -341,3 +381,4 @@ impl MutMatrixImpl< result } } + diff --git a/src/operators/ml/svm.cairo b/src/operators/ml/svm.cairo index 93ab4515e..20e9793c4 100644 --- a/src/operators/ml/svm.cairo +++ b/src/operators/ml/svm.cairo @@ -1,2 +1,3 @@ mod core; mod svm_regressor; +mod svm_classifier; diff --git a/src/operators/ml/svm/svm_classifier.cairo b/src/operators/ml/svm/svm_classifier.cairo new file mode 100644 index 000000000..1c7f4dc2a --- /dev/null +++ b/src/operators/ml/svm/svm_classifier.cairo @@ -0,0 +1,1240 @@ +use core::array::ArrayTrait; +use orion::numbers::NumberTrait; +use orion::operators::tensor::{ + TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor +}; +use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; + +use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; +use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; + +use orion::numbers::{FP64x64, FP64x64Impl}; +use 
orion::operators::tensor::implementations::tensor_fp64x64::{FP64x64Tensor}; +use orion::operators::nn::{NNTrait, FP16x16NN, FP64x64NN}; +use orion::utils::get_row; + +use orion::operators::ml::svm::core::{kernel_dot, KERNEL_TYPE}; + + +#[derive(Copy, Drop, Destruct)] +struct SVMClassifier { + classlabels: Span, + coefficients: Span, + kernel_params: Span, + kernel_type: KERNEL_TYPE, + post_transform: POST_TRANSFORM, + prob_a: Span, + prob_b: Span, + rho: Span, + support_vectors: Span, + vectors_per_class: Option>, +} + + +#[derive(Copy, Drop)] +enum POST_TRANSFORM { + NONE, + SOFTMAX, + LOGISTIC, + SOFTMAXZERO, + PROBIT, +} + + +#[derive(Copy, Drop)] +enum MODE { + SVM_LINEAR, + SVM_SVC, +} + + +/// +/// predict - Returns the top class for each of N inputs. +trait SVMClassifierTrait { + /// # SVMClassifierTrait::predict + /// + /// ```rust + /// fn predict(ref self: SVMClassifier, X: Tensor) -> (Span, Tensor); + /// ``` + /// + /// Support Vector Machine classification. + /// + /// ## Args + /// + /// * `self`: SVMClassifier - A SVMClassifier object. + /// * `X`: Input 2D tensor. + /// + /// ## Returns + /// + /// * N Top class for each point + /// * The class score Matrix for each class, for each point. If prob_a and prob_b are provided they are probabilities for each class, otherwise they are raw scores. 
+ /// + /// ## Type Constraints + /// + /// `SVMClassifier` and `X` must be fixed points + /// + /// ## Examples + /// + /// ```rust + /// fn example_svm_classifier_noprob_linear_sv_none() -> (Span, Tensor) { + /// let coefficients: Span = array![ + /// FP16x16 { mag: 50226, sign: false }, + /// FP16x16 { mag: 5711, sign: false }, + /// FP16x16 { mag: 7236, sign: false }, + /// FP16x16 { mag: 63175, sign: true } + /// ] + /// .span(); + /// let kernel_params: Span = array![ + /// FP16x16 { mag: 8025, sign: false }, + /// FP16x16 { mag: 0, sign: false }, + /// FP16x16 { mag: 196608, sign: false } + /// ] + /// .span(); + /// let kernel_type = KERNEL_TYPE::LINEAR; + /// let prob_a: Span = array![].span(); + /// let prob_b: Span = array![].span(); + /// let rho: Span = array![FP16x16 { mag: 146479, sign: false }].span(); + /// + /// let support_vectors: Span = array![ + /// FP16x16 { mag: 314572, sign: false }, + /// FP16x16 { mag: 222822, sign: false }, + /// FP16x16 { mag: 124518, sign: false }, + /// FP16x16 { mag: 327680, sign: false }, + /// FP16x16 { mag: 196608, sign: false }, + /// FP16x16 { mag: 104857, sign: false }, + /// FP16x16 { mag: 294912, sign: false }, + /// FP16x16 { mag: 150732, sign: false }, + /// FP16x16 { mag: 85196, sign: false }, + /// FP16x16 { mag: 334233, sign: false }, + /// FP16x16 { mag: 163840, sign: false }, + /// FP16x16 { mag: 196608, sign: false } + /// ] + /// .span(); + /// let classlabels: Span = array![0, 1].span(); + /// + /// let vectors_per_class = Option::Some(array![3, 1].span()); + /// + /// let post_transform = POST_TRANSFORM::NONE; + /// + /// let mut classifier: SVMClassifier = SVMClassifier { + /// classlabels, + /// coefficients, + /// kernel_params, + /// kernel_type, + /// post_transform, + /// prob_a, + /// prob_b, + /// rho, + /// support_vectors, + /// vectors_per_class, + /// }; + /// + /// let mut X: Tensor = TensorTrait::new( + /// array![3, 3].span(), + /// array![ + /// FP16x16 { mag: 65536, sign: true }, + 
/// FP16x16 { mag: 52428, sign: true }, + /// FP16x16 { mag: 39321, sign: true }, + /// FP16x16 { mag: 26214, sign: true }, + /// FP16x16 { mag: 13107, sign: true }, + /// FP16x16 { mag: 0, sign: false }, + /// FP16x16 { mag: 13107, sign: false }, + /// FP16x16 { mag: 26214, sign: false }, + /// FP16x16 { mag: 39321, sign: false }, + /// ] + /// .span() + /// ); + /// + /// return SVMClassifierTrait::predict(ref classifier, X); + /// + /// } + /// // >>> ([0, 0, 0], + /// // [[-2.662655, 2.662655], + /// // [-2.21481, 2.21481], + /// // [-1.766964, 1.766964]]) + /// + /// + /// fn example_svm_classifier_binary_softmax_fp64x64() -> (Span, Tensor) { + /// let coefficients: Span = array![ + /// FP64x64 { mag: 18446744073709551616, sign: false }, + /// FP64x64 { mag: 18446744073709551616, sign: false }, + /// FP64x64 { mag: 18446744073709551616, sign: false }, + /// FP64x64 { mag: 18446744073709551616, sign: false }, + /// FP64x64 { mag: 18446744073709551616, sign: true }, + /// FP64x64 { mag: 18446744073709551616, sign: true }, + /// FP64x64 { mag: 18446744073709551616, sign: true }, + /// FP64x64 { mag: 18446744073709551616, sign: true } + /// ] + /// .span(); + /// let kernel_params: Span = array![ + /// FP64x64 { mag: 7054933896252620800, sign: false }, + /// FP64x64 { mag: 0, sign: false }, + /// FP64x64 { mag: 55340232221128654848, sign: false } + /// ] + /// .span(); + /// let kernel_type = KERNEL_TYPE::RBF; + /// let prob_a: Span = array![FP64x64 { mag: 94799998099962986496, sign: true }].span(); + /// let prob_b: Span = array![FP64x64 { mag: 1180576833385529344, sign: false }].span(); + /// let rho: Span = array![FP64x64 { mag: 3082192501545631744, sign: false }].span(); + /// + /// let support_vectors: Span = array![ + /// FP64x64 { mag: 3528081300248330240, sign: false }, + /// FP64x64 { mag: 19594207602596118528, sign: true }, + /// FP64x64 { mag: 9235613999318433792, sign: false }, + /// FP64x64 { mag: 10869715877100519424, sign: true }, + /// FP64x64 { 
mag: 5897111318564962304, sign: true }, + /// FP64x64 { mag: 1816720038917308416, sign: false }, + /// FP64x64 { mag: 4564890528671334400, sign: false }, + /// FP64x64 { mag: 21278987070814027776, sign: true }, + /// FP64x64 { mag: 7581529597213147136, sign: false }, + /// FP64x64 { mag: 10953113834067329024, sign: true }, + /// FP64x64 { mag: 24318984989010034688, sign: true }, + /// FP64x64 { mag: 30296187483321270272, sign: true }, + /// FP64x64 { mag: 10305112258191032320, sign: false }, + /// FP64x64 { mag: 17005441559857987584, sign: true }, + /// FP64x64 { mag: 11555205301925838848, sign: false }, + /// FP64x64 { mag: 2962701975885447168, sign: true }, + /// FP64x64 { mag: 11741665981322231808, sign: true }, + /// FP64x64 { mag: 15376232508819505152, sign: false }, + /// FP64x64 { mag: 13908474645692022784, sign: false }, + /// FP64x64 { mag: 7323415394302033920, sign: true }, + /// FP64x64 { mag: 3284258824352956416, sign: true }, + /// FP64x64 { mag: 11374683084831064064, sign: true }, + /// FP64x64 { mag: 9087138148126818304, sign: false }, + /// FP64x64 { mag: 8247488946750095360, sign: false } + /// ] + /// .span(); + /// let classlabels: Span = array![0, 1].span(); + /// + /// let vectors_per_class = Option::Some(array![4, 4].span()); + /// let post_transform = POST_TRANSFORM::SOFTMAX; + /// + /// let mut classifier: SVMClassifier = SVMClassifier { + /// classlabels, + /// coefficients, + /// kernel_params, + /// kernel_type, + /// post_transform, + /// prob_a, + /// prob_b, + /// rho, + /// support_vectors, + /// vectors_per_class, + /// }; + /// + /// let mut X: Tensor = TensorTrait::new( + /// array![3, 3].span(), + /// array![ + /// FP64x64 { mag: 18446744073709551616, sign: true }, + /// FP64x64 { mag: 14757395258967642112, sign: true }, + /// FP64x64 { mag: 11068046444225730560, sign: true }, + /// FP64x64 { mag: 7378697629483821056, sign: true }, + /// FP64x64 { mag: 3689348814741910528, sign: true }, + /// FP64x64 { mag: 0, sign: false }, + /// 
FP64x64 { mag: 3689348814741910528, sign: false }, + /// FP64x64 { mag: 7378697629483821056, sign: false }, + /// FP64x64 { mag: 11068046444225730560, sign: false } + /// ] + /// .span() + /// ); + /// + /// + /// return SVMClassifierTrait::predict(ref classifier, X); + /// + /// } + /// >>> ([0, 1, 1], + /// [[0.728411, 0.271589], + /// [0.484705, 0.515295], + /// [0.274879, 0.725121]]) + /// ``` + fn predict(ref self: SVMClassifier, X: Tensor) -> (Span, Tensor); +} + + +impl SVMClassifierImpl< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +Into, + +PartialOrd, + +PartialEq, + +Add, + +TensorTrait, + +AddEq, + +Div, + +Mul, + +Neg, + +Sub, + +NNTrait, +> of SVMClassifierTrait { + fn predict(ref self: SVMClassifier, X: Tensor) -> (Span, Tensor) { + let mut vector_count_ = 0; + let class_count_ = max(self.classlabels.len(), 1); + let mut starting_vector_ = ArrayTrait::new(); + + let (vectors_per_class_, starting_vector_) = match self.vectors_per_class { + Option::Some(vectors_per_class) => { + let mut i = 0; + loop { + if i == vectors_per_class.len() { + break; + } + starting_vector_.append(vector_count_); + vector_count_ += *vectors_per_class.at(i); + i += 1; + }; + (vectors_per_class, starting_vector_.span()) + }, + Option::None => { (array![].span(), array![].span()) }, + }; + + let (mode, kernel_type_, sv, coefs) = if vector_count_ > 0 { + let mode = MODE::SVM_SVC; + let kernel_type_ = self.kernel_type; + let sv = TensorTrait::new( + array![vector_count_, self.support_vectors.len() / vector_count_].span(), + self.support_vectors + ); + let coefs = TensorTrait::new( + array![self.coefficients.len() / vector_count_, vector_count_].span(), + self.coefficients + ); + (mode, kernel_type_, sv, coefs) + } else { + let mode = MODE::SVM_LINEAR; + let kernel_type_ = KERNEL_TYPE::LINEAR; + let sv = TensorTrait::new( + array![self.support_vectors.len()].span(), self.support_vectors + ); + let coefs = TensorTrait::new( + array![class_count_, self.coefficients.len() / 
class_count_].span(), + self.coefficients + ); + (mode, kernel_type_, sv, coefs) + }; + + let weights_are_all_positive_ = (min(self.coefficients) >= NumberTrait::zero()); + + // SVM + let (res, votes) = match mode { + MODE::SVM_LINEAR => { + let mut res = ArrayTrait::new(); + let mut n = 0; + loop { + if n == *X.shape.at(0) { + break; + } + let mut x_n = get_row(@X, n); + let scores = run_linear(ref self, x_n, coefs, class_count_, kernel_type_); + let mut i = 0; + loop { + if i == scores.len() { + break; + } + res.append(*scores.at(i)); + i += 1; + }; + n += 1; + }; + + ( + TensorTrait::new(array![*X.shape.at(0), class_count_].span(), res.span()), + Option::None + ) + }, + MODE::SVM_SVC => { + let mut res = ArrayTrait::new(); + let mut votes = ArrayTrait::new(); + let mut n = 0; + loop { + if n == *X.shape.at(0) { + break; + } + let mut x_n = get_row(@X, n); + let (scores, mut vote) = run_svm( + ref self, + x_n, + sv, + vector_count_, + kernel_type_, + class_count_, + starting_vector_, + coefs, + vectors_per_class_ + ); + let mut i = 0; + loop { + if i == scores.len() { + break; + } + res.append(*scores.at(i)); + i += 1; + }; + let mut i = 0; + loop { + if i == vote.len() { + break; + } + votes.append(vote.at(i)); + i += 1; + }; + n += 1; + }; + + ( + TensorTrait::new( + array![*X.shape.at(0), class_count_ * (class_count_ - 1) / 2].span(), + res.span() + ), + Option::Some( + TensorTrait::new(array![*X.shape.at(0), class_count_].span(), votes.span()) + ) + ) + }, + }; + + // Proba + let (scores, has_proba) = match mode { + MODE::SVM_LINEAR => { (res, false) }, + MODE::SVM_SVC => { + let (scores, has_proba) = if self.prob_a.len() > 0 { + let mut scores = ArrayTrait::new(); + let mut n = 0; + loop { + if n == *res.shape.at(0) { + break; + } + let res_n = get_row(@res, n); + let mut s = probablities(ref self, res_n, class_count_); + + let mut i = 0; + loop { + if i == s.len() { + break; + } + scores.append(s.at(i)); + i += 1; + }; + + n += 1; + }; + ( + 
TensorTrait::new( + array![*res.shape.at(0), scores.len() / *res.shape.at(0)].span(), + scores.span() + ), + true + ) + } else { + (res, false) + }; + (scores, has_proba) + }, + }; + + // Finalization + let mut labels = ArrayTrait::new(); + let mut final_scores = ArrayTrait::new(); + + let mut n = 0; + loop { + if n == *scores.shape.at(0) { + break; + } + let mut scores_n = get_row(@scores, n); + match votes { + Option::Some(votes) => { + let mut votes_n = get_row(@votes, n); + let (label, new_scores) = compute_final_scores( + ref self, + votes_n, + scores_n, + weights_are_all_positive_, + has_proba, + self.classlabels + ); + let mut i = 0; + loop { + if i == new_scores.data.len() { + break; + } + final_scores.append(*new_scores.data.at(i)); + i += 1; + }; + labels.append(label); + }, + Option::None => { + let (label, new_scores) = compute_final_scores( + ref self, + array![].span(), + scores_n, + weights_are_all_positive_, + has_proba, + self.classlabels + ); + let mut i = 0; + loop { + if i == new_scores.data.len() { + break; + } + final_scores.append(*new_scores.data.at(i)); + i += 1; + }; + labels.append(label); + }, + } + n += 1; + }; + let labels = labels.span(); + + // Labels + if self.classlabels.len() > 0 { + let mut class_labels = ArrayTrait::new(); + let mut i = 0; + loop { + if i == labels.len() { + break; + } + class_labels.append(*self.classlabels.at(*labels.at(i))); + i += 1; + }; + return ( + class_labels.span(), + TensorTrait::new( + array![*X.shape.at(0), final_scores.len() / *X.shape.at(0)].span(), + final_scores.span() + ) + ); + } + return ( + labels, + TensorTrait::new( + array![*X.shape.at(0), final_scores.len() / *X.shape.at(0)].span(), + final_scores.span() + ) + ); + } +} + + +fn run_svm< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +Add, + +TensorTrait, + +AddEq, + +Mul, + +Neg, + +Sub, + +PartialOrd, +>( + ref self: SVMClassifier, + X: Span, + sv: Tensor, + vector_count_: usize, + kernel: KERNEL_TYPE, + class_count_: usize, + 
starting_vector_: Span, + coefs: Tensor, + vectors_per_class_: Span +) -> (Array, NullableVec) { + let mut evals = 0; + let mut kernels = ArrayTrait::new(); + + let mut j = 0; + loop { + if j == vector_count_ { + break; + } + let sv_j = get_row(@sv, j); + kernels.append(kernel_dot(self.kernel_params, X, sv_j, kernel)); + j += 1; + }; + + let kernels = kernels.span(); + + let mut scores = ArrayTrait::new(); + + let mut votes = VecTrait::new(); + VecTrait::set(ref votes, class_count_ - 1, NumberTrait::zero()); + + let mut i = 0; + loop { + if i == class_count_ { + break; + } + + let si_i = *starting_vector_.at(i); + let class_i_sc = *vectors_per_class_.at(i); + + let mut j = i + 1; + loop { + if j == class_count_ { + break; + } + let si_j = *starting_vector_.at(j); + let class_j_sc = *vectors_per_class_.at(j); + + let s1 = dot_start_end( + coefs.data, + kernels, + (j - 1) * *coefs.shape.at(0) + si_i, + (j - 1) * *coefs.shape.at(0) + si_i + class_i_sc, + si_i, + si_i + class_i_sc + ); + + let s2 = dot_start_end( + coefs.data, + kernels, + i * *coefs.shape.at(0) + si_j, + i * *coefs.shape.at(0) + si_j + class_j_sc, + si_j, + si_j + class_j_sc + ); + + let s = *self.rho.at(evals) + s1 + s2; + scores.append(s); + + if s > NumberTrait::zero() { + VecTrait::set(ref votes, i, VecTrait::at(ref votes, i) + NumberTrait::one()); + } else { + VecTrait::set(ref votes, j, VecTrait::at(ref votes, j) + NumberTrait::one()); + } + evals += 1; + j += 1; + }; + i += 1; + }; + return (scores, votes); +} + +fn run_linear< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +Add, + +TensorTrait, + +AddEq, + +Mul, + +Neg, + +Sub, +>( + ref self: SVMClassifier, + X: Span, + coefs: Tensor, + class_count_: usize, + kernel: KERNEL_TYPE +) -> Array { + let mut scores = ArrayTrait::new(); + + let mut j = 0; + loop { + if j == class_count_ { + break; + } + + let coefs_j = get_row(@coefs, j); + + let d = kernel_dot(self.kernel_params, X, coefs_j, kernel); + + let score = *self.rho.at(0) + d; + + 
scores.append(score); + j += 1; + }; + return scores; +} + + +fn compute_final_scores< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +NNTrait, + +Into, + +Add, + +TensorTrait, + +AddEq, + +Mul, + +Neg, + +Sub, + +Div, + +PartialOrd, +>( + ref self: SVMClassifier, + votes: Span, + scores: Span, + weights_are_all_positive_: bool, + has_proba: bool, + classlabels: Span +) -> (usize, Tensor) { + let mut max_weight = 0; + + let (max_class, max_weight) = if votes.len() > 0 { + let max_class = argmax_span(votes); + let max_weight = *votes.at(max_class); + (max_class, max_weight) + } else { + let max_class = argmax_span(scores); + let max_weight = *scores.at(max_class); + (max_class, max_weight) + }; + + let (label, write_additional_scores) = if self.rho.len() == 1 { + let (label, write_additional_scores) = set_score_svm( + max_weight, max_class, weights_are_all_positive_, has_proba, classlabels, 1, 0 + ); + (label, write_additional_scores) + } else if classlabels.len() > 0 { + let label = *classlabels.at(max_class); + (label, 4) + } else { + (max_class, 4) + }; + + let new_scores = write_scores( + scores.len(), + TensorTrait::new(array![scores.len()].span(), scores), + self.post_transform, + write_additional_scores + ); + + return (label, new_scores); +} + +fn write_scores< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +TensorTrait, + +PartialOrd, + +NNTrait, + +Neg, + +Sub, +>( + n_classes: usize, scores: Tensor, post_transform: POST_TRANSFORM, add_second_class: usize +) -> Tensor { + let mut write_additional_scores = 0; + + let new_scores = if n_classes >= 2 { + let new_scores = match post_transform { + POST_TRANSFORM::NONE => scores, + POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@scores, 0), + POST_TRANSFORM::LOGISTIC => NNTrait::sigmoid(@scores), + POST_TRANSFORM::SOFTMAXZERO => NNTrait::softmax_zero(@scores, 0), + POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), + }; + new_scores + } else { //if n_classes == 1 + let new_scores = 
match post_transform { + POST_TRANSFORM::NONE => { + let scores = if add_second_class == 0 || add_second_class == 1 { + TensorTrait::new( + array![2].span(), + array![NumberTrait::one() - *scores.data.at(0), *scores.data.at(0)].span() + ) + } else if add_second_class == 2 || add_second_class == 3 { + TensorTrait::new( + array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span() + ) + } else { + TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) + }; + scores + }, + POST_TRANSFORM::SOFTMAX => { + let scores = if add_second_class == 0 || add_second_class == 1 { + TensorTrait::new( + array![2].span(), + array![NumberTrait::one() - *scores.data.at(0), *scores.data.at(0)].span() + ) + } else if add_second_class == 2 || add_second_class == 3 { + // + NNTrait::softmax( + @TensorTrait::new( + array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span() + ), + 0 + ) + } else { + TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) + }; + scores + }, + POST_TRANSFORM::LOGISTIC => { + let scores = if add_second_class == 0 || add_second_class == 1 { + TensorTrait::new( + array![2].span(), + array![NumberTrait::one() - *scores.data.at(0), *scores.data.at(0)].span() + ) + } else if add_second_class == 2 || add_second_class == 3 { + // + NNTrait::sigmoid( + @TensorTrait::new( + array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span() + ) + ) + } else { + TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) + }; + scores + }, + POST_TRANSFORM::SOFTMAXZERO => { + let scores = if add_second_class == 0 || add_second_class == 1 { + TensorTrait::new( + array![2].span(), + array![NumberTrait::one() - *scores.data.at(0), *scores.data.at(0)].span() + ) + } else if add_second_class == 2 || add_second_class == 3 { + // + NNTrait::softmax_zero( + @TensorTrait::new( + array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span() + ), + 0 + ) + } else { + TensorTrait::new(array![1].span(), 
array![*scores.data.at(0)].span()) + }; + scores + }, + POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not applicable here.'), + }; + new_scores + }; + return new_scores; +} + +fn set_score_svm< + T, MAG, +Drop, +Copy, +NumberTrait, +TensorTrait, +PartialOrd, +>( + max_weight: T, + maxclass: usize, + weights_are_all_positive_: bool, + has_proba: bool, + classlabels: Span, + posclass: usize, + negclass: usize +) -> (usize, usize) { + let mut write_additional_scores = 0; + + if classlabels.len() == 2 { + write_additional_scores = 2; + if !has_proba { + if weights_are_all_positive_ && max_weight >= NumberTrait::half() { + return (*classlabels.at(1), write_additional_scores); + }; + }; + return (*classlabels.at(maxclass), write_additional_scores); + } + if max_weight >= NumberTrait::zero() { + return (posclass, write_additional_scores); + }; + return (negclass, write_additional_scores); +} + +fn argmax_span, +Copy, +PartialOrd,>(span: Span) -> usize { + let mut max = 0; + let mut i = 0; + loop { + if i == span.len() { + break; + } + if *span.at(i) > *span.at(max) { + max = i; + } + i += 1; + }; + return max; +} + + +fn probablities< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +Into, + +Add, + +TensorTrait, + +AddEq, + +Mul, + +Neg, + +Sub, + +Div, + +PartialOrd, +>( + ref self: SVMClassifier, scores: Span, class_count_: usize +) -> NullableVec { + let mut probsp2: MutMatrix = MutMatrixImpl::new(class_count_, class_count_); + let mut index = 0; + let mut i = 0; + loop { + if i == class_count_ { + break; + } + let mut j = i + 1; + loop { + if j == class_count_ { + break; + } + let val1 = sigmoid_probability( + *scores.at(index), *self.prob_a.at(index), *self.prob_b.at(index) + ); + + let mut val2 = NumberTrait::max(val1, NumberTrait::zero()); // ONNX : max(val1, 1.0e-7) + let mut val2 = NumberTrait::min( + val1, NumberTrait::one() + ); // ONNX : min(val2, (1 - 1.0e-7)) + probsp2.set(i, j, val2); + probsp2.set(j, i, NumberTrait::one() - val2); + + j += 1; 
+ index += 1; + }; + i += 1; + }; + return multiclass_probability(class_count_, ref probsp2); +} + +fn multiclass_probability< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +PartialOrd, + +Add, + +Mul, + +Div, + +Sub, + +Neg, + +AddEq, + +Into, +>( + k: usize, ref R: MutMatrix +) -> NullableVec { + let max_iter = max(100, k); + let k_fp = NumberTrait::::new_unscaled(k.into(), false); + + let mut Q: MutMatrix = MutMatrixImpl::new(k, k); + MutMatrixImpl::set(ref Q, k - 1, k - 1, NumberTrait::zero()); + + let mut P = VecTrait::new(); + VecTrait::set(ref P, k - 1, NumberTrait::zero()); + + let a: usize = 100; + let eps = (NumberTrait::half() / NumberTrait::new_unscaled(a.into(), false)) / k_fp; + let mut t = 0; + + loop { + if t == k { + break; + } + VecTrait::set(ref P, t, NumberTrait::one() / k_fp); + + let mut i = 0; + let mut acc1 = NumberTrait::zero(); + loop { + if i == t { + break; + } + let r_i = MutMatrixImpl::at(ref R, i, t); + acc1 += r_i * r_i; + i += 1; + }; + MutMatrixImpl::set(ref Q, t, t, acc1); + + let mut i = 0; + loop { + if i == t { + break; + } + MutMatrixImpl::set(ref Q, t, i, MutMatrixImpl::at(ref Q, i, t)); + i += 1; + }; + + let mut i = t + 1; + let mut acc2 = NumberTrait::zero(); + loop { + if i == k { + break; + } + let r_i = MutMatrixImpl::at(ref R, i, t); + acc2 += r_i * r_i; + i += 1; + }; + MutMatrixImpl::set(ref Q, t, t, acc1 + acc2); + + let mut i = t + 1; + let mut acc = NumberTrait::zero(); + loop { + if i == k { + break; + } + acc += -MutMatrixImpl::at(ref R, i, t) * MutMatrixImpl::at(ref R, t, i); + i += 1; + }; + + let mut i = t + 1; + loop { + if i == k { + break; + } + MutMatrixImpl::set(ref Q, t, i, acc); + i += 1; + }; + t += 1; + }; + + let mut i = 0; + loop { + if i == max_iter { + break; + } + + let mut Qp = MutMatrixImpl::matrix_vector_product(ref Q, ref P); + let mut pQp = dot(ref P, ref Qp); + + let mut max_error = NumberTrait::zero(); + let mut t = 0; + loop { + if t == k { + break; + } + let error = 
NumberTrait::abs(Qp.at(t) - pQp); + if error > max_error { + max_error = error; + } + t += 1; + }; + + if max_error < eps { + break; + } + + let mut t = 0; + loop { + if t == k { + break; + } + + let diff = (-VecTrait::at(ref Qp, t) + pQp) / MutMatrixImpl::at(ref Q, t, t); + VecTrait::set(ref P, t, VecTrait::at(ref P, t) + diff); + + pQp = + (pQp + + diff + * (diff * MutMatrixImpl::at(ref Q, t, t) + + (NumberTrait::one() + NumberTrait::one()) * VecTrait::at(ref Qp, t))) + / ((NumberTrait::one() + diff) * (NumberTrait::one() + diff)); + + div_element_wise(ref P, NumberTrait::one() + diff); + + Qp_computation(ref Q, ref Qp, diff, t); + + t += 1; + }; + i += 1; + }; + return P; +} + +/// Computation of the matrix Qb in the multiclass_probability computation +/// +/// Qp[:] = (Qp + diff * Q[t, :]) / (1 + diff) +/// +fn Qp_computation< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +PartialOrd, + +Mul, + +Add, + +Div, + +AddEq +>( + ref Q: MutMatrix, ref Qp: NullableVec, diff: T, t: usize +) { + let m = Qp.len; + + let mut i = 0_usize; + loop { + if i == m { + break (); + } + let elem = (VecTrait::at(ref Qp, i) + diff * MutMatrixImpl::at(ref Q, t, i)) + / (NumberTrait::one() + diff); + + VecTrait::set(ref Qp, i, elem); + i += 1; + }; +} + + +fn sigmoid_probability< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +PartialOrd, + +Add, + +Mul, + +Div, + +Sub, + +Neg, +>( + score: T, prob_a: T, prob_b: T +) -> T { + let val = score * prob_a + prob_b; + + let mut v = NumberTrait::one() + / (NumberTrait::one() + NumberTrait::exp(-NumberTrait::abs(val))); + + v = if val < NumberTrait::zero() { + NumberTrait::one() - v + } else { + v + }; + + return NumberTrait::one() - v; +} + + +fn max(a: usize, b: usize) -> usize { + if a > b { + return a; + }; + b +} + +fn min, +Drop, +PartialOrd,>(a: Span) -> T { + let mut min = *a.at(0); + + let mut i = 0; + loop { + if i == a.len() { + break; + } + if min > *a.at(i) { + min = *a.at(i); + } + i += 1; + }; + return min; +} + + +fn 
dot_start_end< + T, MAG, +Drop, +Copy, +NumberTrait, +Add, +TensorTrait, +AddEq, +Mul, +>( + pA: Span, pB: Span, a_start: usize, a_end: usize, b_start: usize, b_end: usize +) -> T { + let mut sum = NumberTrait::zero(); + let mut index_a = a_start; + let mut index_b = b_start; + loop { + if index_a == a_end || index_b == b_end { + break; + } + sum = sum + *pA.at(index_a) * *pB.at(index_b); + index_a += 1; + index_b += 1; + }; + + return sum; +} + + +fn sv_dot< + T, MAG, +Drop, +Copy, +NumberTrait, +Add, +TensorTrait, +AddEq, +Mul, +>( + pA: Span, pB: Span +) -> T { + let mut i = 0; + let mut sum = NumberTrait::zero(); + loop { + if i == pA.len() { + break; + } + sum = sum + *pA.at(i) * *pB.at(i); + i += 1; + }; + + return sum; +} + +fn squared_diff< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +Add, + +TensorTrait, + +AddEq, + +Mul, + +Sub, +>( + pA: Span, pB: Span +) -> T { + let mut i = 0; + let mut sum = NumberTrait::zero(); + loop { + if i == pA.len() { + break; + } + sum = sum + (*pA.at(i) - *pB.at(i)).pow(NumberTrait::one() + NumberTrait::one()); + i += 1; + }; + return sum; +} + +fn dot, +Copy, +NumberTrait, +Mul, +AddEq, +Add, +Div>( + ref self: NullableVec, ref vec: NullableVec +) -> T { + assert(self.len == vec.len, 'wrong vec len for dot prod'); + let n = self.len; + let mut sum: T = NumberTrait::zero(); + let mut i = 0_usize; + loop { + if i == n { + break (); + } + sum += self.at(i) * vec.at(i); + i += 1; + }; + return sum; +} + +fn div_element_wise, +Add, +Div, +NumberTrait, +Drop, +Copy>( + ref self: NullableVec, elem: T +) { + let m = self.len; + + let mut i = 0_usize; + loop { + if i == m { + break (); + } + VecTrait::set(ref self, i, VecTrait::at(ref self, i) / elem); + i += 1; + }; +} + diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index a8121fc31..63d736771 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ 
b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -671,7 +671,7 @@ impl FP64x64TensorPartialOrd of PartialOrd> { // Internals -const PRECISION: u128 = 75497; // 0.009 +const PRECISION: u128 = 1660000000000000; // 9e-05 fn relative_eq(lhs: @FP64x64, rhs: @FP64x64) -> bool { let diff = *lhs - *rhs; diff --git a/tests/ml.cairo b/tests/ml.cairo index 4a5abf9a8..3f071f13d 100644 --- a/tests/ml.cairo +++ b/tests/ml.cairo @@ -3,3 +3,5 @@ mod tree_ensemble_regressor; mod linear_regressor_test; mod linear_classifier_test; mod svm_regressor_test; +mod svm_classifier_test; + diff --git a/tests/ml/svm_classifier_test.cairo b/tests/ml/svm_classifier_test.cairo new file mode 100644 index 000000000..ee50c7931 --- /dev/null +++ b/tests/ml/svm_classifier_test.cairo @@ -0,0 +1,875 @@ +use orion::numbers::FP16x16; +use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +use orion::numbers::FP64x64; +use orion::operators::tensor::implementations::tensor_fp64x64::{ + FP64x64Tensor, FP64x64TensorPartialEq +}; + +use orion::operators::ml::svm::svm_classifier::{SVMClassifierTrait, POST_TRANSFORM, SVMClassifier}; +use orion::operators::ml::svm::core::{KERNEL_TYPE}; + + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_noprob_linear_sv_none() { + let post_transform = POST_TRANSFORM::NONE; + let (mut classifier, X) = svm_classifier_binary_noprob_linear_sv(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 0, 'labels[0]'); + assert(*labels[1] == 0, 'labels[1]'); + assert(*labels[2] == 0, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 2].span(), + array![ + FP16x16 { mag: 174499, sign: true }, + FP16x16 { mag: 174499, sign: false }, + FP16x16 { mag: 145149, 
sign: true }, + FP16x16 { mag: 145149, sign: false }, + FP16x16 { mag: 115799, sign: true }, + FP16x16 { mag: 115799, sign: false } + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_noprob_linear_sv_logistic() { + let post_transform = POST_TRANSFORM::LOGISTIC; + let (mut classifier, X) = svm_classifier_binary_noprob_linear_sv(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 0, 'labels[0]'); + assert(*labels[1] == 0, 'labels[1]'); + assert(*labels[2] == 0, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 2].span(), + array![ + FP16x16 { mag: 4273, sign: false }, + FP16x16 { mag: 61262, sign: false }, + FP16x16 { mag: 6450, sign: false }, + FP16x16 { mag: 59085, sign: false }, + FP16x16 { mag: 9563, sign: false }, + FP16x16 { mag: 55972, sign: false } + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_noprob_linear_sv_softmax() { + let post_transform = POST_TRANSFORM::SOFTMAX; + let (mut classifier, X) = svm_classifier_binary_noprob_linear_sv(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 0, 'labels[0]'); + assert(*labels[1] == 0, 'labels[1]'); + assert(*labels[2] == 0, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 2].span(), + array![ + FP16x16 { mag: 317, sign: false }, + FP16x16 { mag: 65218, sign: false }, + FP16x16 { mag: 771, sign: false }, + FP16x16 { mag: 64764, sign: false }, + FP16x16 { mag: 1858, sign: false }, + FP16x16 { mag: 63677, sign: false } + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + +#[test] 
+#[available_gas(200000000000)] +fn test_svm_classifier_noprob_linear_sv_softmax_zero() { + let post_transform = POST_TRANSFORM::SOFTMAXZERO; + let (mut classifier, X) = svm_classifier_binary_noprob_linear_sv(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 0, 'labels[0]'); + assert(*labels[1] == 0, 'labels[1]'); + assert(*labels[2] == 0, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 2].span(), + array![ + FP16x16 { mag: 317, sign: false }, + FP16x16 { mag: 65218, sign: false }, + FP16x16 { mag: 771, sign: false }, + FP16x16 { mag: 64764, sign: false }, + FP16x16 { mag: 1858, sign: false }, + FP16x16 { mag: 63677, sign: false } + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_noprob_linear_none() { + let post_transform = POST_TRANSFORM::NONE; + let (mut classifier, X) = svm_classifier_helper_noprob_linear(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 2, 'labels[0]'); + assert(*labels[1] == 3, 'labels[1]'); + assert(*labels[2] == 0, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 4].span(), + array![ + FP16x16 { mag: 7738, sign: true }, + FP16x16 { mag: 29929, sign: true }, + FP16x16 { mag: 27248, sign: false }, + FP16x16 { mag: 21922, sign: false }, + FP16x16 { mag: 4021, sign: true }, + FP16x16 { mag: 15167, sign: true }, + FP16x16 { mag: 4843, sign: false }, + FP16x16 { mag: 5979, sign: false }, + FP16x16 { mag: 304, sign: true }, + FP16x16 { mag: 406, sign: true }, + FP16x16 { mag: 17562, sign: true }, + FP16x16 { mag: 9962, sign: true }, + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + + +#[test] 
+#[available_gas(200000000000)] +fn test_svm_classifier_noprob_linear_logistic() { + let post_transform = POST_TRANSFORM::LOGISTIC; + let (mut classifier, X) = svm_classifier_helper_noprob_linear(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 2, 'labels[0]'); + assert(*labels[1] == 3, 'labels[1]'); + assert(*labels[2] == 0, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 4].span(), + array![ + FP16x16 { mag: 30835, sign: false }, + FP16x16 { mag: 25413, sign: false }, + FP16x16 { mag: 39483, sign: false }, + FP16x16 { mag: 38197, sign: false }, + FP16x16 { mag: 31762, sign: false }, + FP16x16 { mag: 28992, sign: false }, + FP16x16 { mag: 33978, sign: false }, + FP16x16 { mag: 34261, sign: false }, + FP16x16 { mag: 32691, sign: false }, + FP16x16 { mag: 32666, sign: false }, + FP16x16 { mag: 28403, sign: false }, + FP16x16 { mag: 30282, sign: false } + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_noprob_linear_softmax() { + let post_transform = POST_TRANSFORM::SOFTMAX; + let (mut classifier, X) = svm_classifier_helper_noprob_linear(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 2, 'labels[0]'); + assert(*labels[1] == 3, 'labels[1]'); + assert(*labels[2] == 0, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 4].span(), + array![ + FP16x16 { mag: 13131, sign: false }, + FP16x16 { mag: 9359, sign: false }, + FP16x16 { mag: 22396, sign: false }, + FP16x16 { mag: 20648, sign: false }, + FP16x16 { mag: 15779, sign: false }, + FP16x16 { mag: 13311, sign: false }, + FP16x16 { mag: 18064, sign: false }, + FP16x16 { mag: 18380, 
sign: false }, + FP16x16 { mag: 18054, sign: false }, + FP16x16 { mag: 18026, sign: false }, + FP16x16 { mag: 13874, sign: false }, + FP16x16 { mag: 15580, sign: false }, + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_noprob_linear_softmax_zero() { + let post_transform = POST_TRANSFORM::SOFTMAXZERO; + let (mut classifier, X) = svm_classifier_helper_noprob_linear(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 2, 'labels[0]'); + assert(*labels[1] == 3, 'labels[1]'); + assert(*labels[2] == 0, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 4].span(), + array![ + FP16x16 { mag: 13131, sign: false }, + FP16x16 { mag: 9359, sign: false }, + FP16x16 { mag: 22396, sign: false }, + FP16x16 { mag: 20648, sign: false }, + FP16x16 { mag: 15779, sign: false }, + FP16x16 { mag: 13311, sign: false }, + FP16x16 { mag: 18064, sign: false }, + FP16x16 { mag: 18380, sign: false }, + FP16x16 { mag: 18054, sign: false }, + FP16x16 { mag: 18026, sign: false }, + FP16x16 { mag: 13874, sign: false }, + FP16x16 { mag: 15580, sign: false }, + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_linear_none() { + let post_transform = POST_TRANSFORM::NONE; + let (mut classifier, X) = svm_classifier_helper_linear(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 2, 'labels[0]'); + assert(*labels[1] == 3, 'labels[1]'); + assert(*labels[2] == 0, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 4].span(), + array![ + FP16x16 { mag: 7738, sign: true }, + FP16x16 { mag: 29929, sign: 
true }, + FP16x16 { mag: 27248, sign: false }, + FP16x16 { mag: 21922, sign: false }, + FP16x16 { mag: 4021, sign: true }, + FP16x16 { mag: 15167, sign: true }, + FP16x16 { mag: 4843, sign: false }, + FP16x16 { mag: 5979, sign: false }, + FP16x16 { mag: 304, sign: true }, + FP16x16 { mag: 406, sign: true }, + FP16x16 { mag: 17562, sign: true }, + FP16x16 { mag: 9962, sign: true }, + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_linear_logistic() { + let post_transform = POST_TRANSFORM::LOGISTIC; + let (mut classifier, X) = svm_classifier_helper_linear(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 2, 'labels[0]'); + assert(*labels[1] == 3, 'labels[1]'); + assert(*labels[2] == 0, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 4].span(), + array![ + FP16x16 { mag: 30835, sign: false }, + FP16x16 { mag: 25413, sign: false }, + FP16x16 { mag: 39483, sign: false }, + FP16x16 { mag: 38197, sign: false }, + FP16x16 { mag: 31762, sign: false }, + FP16x16 { mag: 28992, sign: false }, + FP16x16 { mag: 33978, sign: false }, + FP16x16 { mag: 34261, sign: false }, + FP16x16 { mag: 32691, sign: false }, + FP16x16 { mag: 32666, sign: false }, + FP16x16 { mag: 28403, sign: false }, + FP16x16 { mag: 30282, sign: false } + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_linear_softmax() { + let post_transform = POST_TRANSFORM::SOFTMAX; + let (mut classifier, X) = svm_classifier_helper_linear(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 2, 'labels[0]'); + assert(*labels[1] == 3, 'labels[1]'); + assert(*labels[2] == 0, 'labels[2]'); + assert(labels.len() 
== 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 4].span(), + array![ + FP16x16 { mag: 13131, sign: false }, + FP16x16 { mag: 9359, sign: false }, + FP16x16 { mag: 22396, sign: false }, + FP16x16 { mag: 20648, sign: false }, + FP16x16 { mag: 15779, sign: false }, + FP16x16 { mag: 13311, sign: false }, + FP16x16 { mag: 18064, sign: false }, + FP16x16 { mag: 18380, sign: false }, + FP16x16 { mag: 18054, sign: false }, + FP16x16 { mag: 18026, sign: false }, + FP16x16 { mag: 13874, sign: false }, + FP16x16 { mag: 15580, sign: false }, + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_linear_softmax_zero() { + let post_transform = POST_TRANSFORM::SOFTMAXZERO; + let (mut classifier, X) = svm_classifier_helper_linear(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 2, 'labels[0]'); + assert(*labels[1] == 3, 'labels[1]'); + assert(*labels[2] == 0, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 4].span(), + array![ + FP16x16 { mag: 13131, sign: false }, + FP16x16 { mag: 9359, sign: false }, + FP16x16 { mag: 22396, sign: false }, + FP16x16 { mag: 20648, sign: false }, + FP16x16 { mag: 15779, sign: false }, + FP16x16 { mag: 13311, sign: false }, + FP16x16 { mag: 18064, sign: false }, + FP16x16 { mag: 18380, sign: false }, + FP16x16 { mag: 18054, sign: false }, + FP16x16 { mag: 18026, sign: false }, + FP16x16 { mag: 13874, sign: false }, + FP16x16 { mag: 15580, sign: false }, + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_binary_none_fp64x64() { + let post_transform = POST_TRANSFORM::NONE; + let (mut classifier, X) = svm_classifier_helper_fp64x64(post_transform); + + let 
(labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 0, 'labels[0]'); + assert(*labels[1] == 1, 'labels[1]'); + assert(*labels[2] == 1, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 2].span(), + array![ + FP64x64 { mag: 18322911080742739968, sign: false }, + FP64x64 { mag: 123832992966812224, sign: false }, + FP64x64 { mag: 8658920114943337472, sign: false }, + FP64x64 { mag: 9787823958766215168, sign: false }, + FP64x64 { mag: 276645820873422144, sign: false }, + FP64x64 { mag: 18170098252836128768, sign: false } + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_binary_logistic_fp64x64() { + let post_transform = POST_TRANSFORM::LOGISTIC; + let (mut classifier, X) = svm_classifier_helper_fp64x64(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 0, 'labels[0]'); + assert(*labels[1] == 1, 'labels[1]'); + assert(*labels[2] == 1, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 2].span(), + array![ + FP64x64 { mag: 13461271680116586496, sign: false }, + FP64x64 { mag: 9254325673410459648, sign: false }, + FP64x64 { mag: 11349211717397211136, sign: false }, + FP64x64 { mag: 11614494343921229824, sign: false }, + FP64x64 { mag: 9292528880387112960, sign: false }, + FP64x64 { mag: 13431074360067923968, sign: false } + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_binary_softmax_fp64x64() { + let post_transform = POST_TRANSFORM::SOFTMAX; + let (mut classifier, X) = svm_classifier_helper_fp64x64(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, 
X); + + // ASSERT LABELS + assert(*labels[0] == 0, 'labels[0]'); + assert(*labels[1] == 1, 'labels[1]'); + assert(*labels[2] == 1, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 2].span(), + array![ + FP64x64 { mag: 13436811297474848768, sign: false }, + FP64x64 { mag: 5009932776234703872, sign: false }, + FP64x64 { mag: 8941229086247388160, sign: false }, + FP64x64 { mag: 9505514987462162432, sign: false }, + FP64x64 { mag: 5070622564237207552, sign: false }, + FP64x64 { mag: 13376121509472344064, sign: false } + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + +#[test] +#[available_gas(200000000000)] +fn test_svm_classifier_binary_softmax_zero_fp64x64() { + let post_transform = POST_TRANSFORM::SOFTMAXZERO; + let (mut classifier, X) = svm_classifier_helper_fp64x64(post_transform); + + let (labels, scores) = SVMClassifierTrait::predict(ref classifier, X); + + // ASSERT LABELS + assert(*labels[0] == 0, 'labels[0]'); + assert(*labels[1] == 1, 'labels[1]'); + assert(*labels[2] == 1, 'labels[2]'); + assert(labels.len() == 3, 'len(labels)'); + + // ASSERT SCORES + let mut expected_scores: Tensor = TensorTrait::new( + array![3, 2].span(), + array![ + FP64x64 { mag: 13436811297474848768, sign: false }, + FP64x64 { mag: 5009932776234703872, sign: false }, + FP64x64 { mag: 8941229086247388160, sign: false }, + FP64x64 { mag: 9505514987462162432, sign: false }, + FP64x64 { mag: 5070622564237207552, sign: false }, + FP64x64 { mag: 13376121509472344064, sign: false } + ] + .span() + ); + + assert_eq(scores, expected_scores); +} + + +// ============ HELPER ============ // + +fn svm_classifier_helper_linear( + post_transform: POST_TRANSFORM +) -> (SVMClassifier, Tensor) { + let coefficients: Span = array![ + FP16x16 { mag: 10169, sign: true }, + FP16x16 { mag: 15905, sign: false }, + FP16x16 { mag: 459, sign: false }, + FP16x16 { mag: 26713, sign: false }, + FP16x16 { 
mag: 2129, sign: true }, + FP16x16 { mag: 18, sign: false }, + FP16x16 { mag: 12830, sign: true }, + FP16x16 { mag: 23097, sign: true }, + FP16x16 { mag: 1415, sign: true }, + FP16x16 { mag: 28717, sign: true }, + FP16x16 { mag: 2994, sign: false }, + FP16x16 { mag: 847, sign: true } + ] + .span(); + let kernel_params: Span = array![ + FP16x16 { mag: 65, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 196608, sign: false } + ] + .span(); + let kernel_type = KERNEL_TYPE::LINEAR; + let prob_a: Span = array![FP16x16 { mag: 336797, sign: true }].span(); + let prob_b: Span = array![FP16x16 { mag: 4194, sign: false }].span(); + let rho: Span = array![ + FP16x16 { mag: 4908, sign: true }, + FP16x16 { mag: 11563, sign: true }, + FP16x16 { mag: 13872, sign: true }, + FP16x16 { mag: 33829, sign: true } + ] + .span(); + + let support_vectors: Span = array![].span(); + let classlabels: Span = array![0, 1, 2, 3].span(); + + let vectors_per_class = Option::None; + + let mut classifier: SVMClassifier = SVMClassifier { + classlabels, + coefficients, + kernel_params, + kernel_type, + post_transform, + prob_a, + prob_b, + rho, + support_vectors, + vectors_per_class, + }; + + let mut X: Tensor = TensorTrait::new( + array![3, 3].span(), + array![ + FP16x16 { mag: 65536, sign: true }, + FP16x16 { mag: 52428, sign: true }, + FP16x16 { mag: 39321, sign: true }, + FP16x16 { mag: 26214, sign: true }, + FP16x16 { mag: 13107, sign: true }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 13107, sign: false }, + FP16x16 { mag: 26214, sign: false }, + FP16x16 { mag: 39321, sign: false }, + ] + .span() + ); + + (classifier, X) +} + + +fn svm_classifier_binary_noprob_linear_sv( + post_transform: POST_TRANSFORM +) -> (SVMClassifier, Tensor) { + let coefficients: Span = array![ + FP16x16 { mag: 50226, sign: false }, + FP16x16 { mag: 5711, sign: false }, + FP16x16 { mag: 7236, sign: false }, + FP16x16 { mag: 63175, sign: true } + ] + .span(); + let kernel_params: Span = 
array![ + FP16x16 { mag: 8025, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 196608, sign: false } + ] + .span(); + let kernel_type = KERNEL_TYPE::LINEAR; + let prob_a: Span = array![].span(); + let prob_b: Span = array![].span(); + let rho: Span = array![FP16x16 { mag: 146479, sign: false }].span(); + + let support_vectors: Span = array![ + FP16x16 { mag: 314572, sign: false }, + FP16x16 { mag: 222822, sign: false }, + FP16x16 { mag: 124518, sign: false }, + FP16x16 { mag: 327680, sign: false }, + FP16x16 { mag: 196608, sign: false }, + FP16x16 { mag: 104857, sign: false }, + FP16x16 { mag: 294912, sign: false }, + FP16x16 { mag: 150732, sign: false }, + FP16x16 { mag: 85196, sign: false }, + FP16x16 { mag: 334233, sign: false }, + FP16x16 { mag: 163840, sign: false }, + FP16x16 { mag: 196608, sign: false } + ] + .span(); + let classlabels: Span = array![0, 1].span(); + + let vectors_per_class = Option::Some(array![3, 1].span()); + + let mut classifier: SVMClassifier = SVMClassifier { + classlabels, + coefficients, + kernel_params, + kernel_type, + post_transform, + prob_a, + prob_b, + rho, + support_vectors, + vectors_per_class, + }; + + let mut X: Tensor = TensorTrait::new( + array![3, 3].span(), + array![ + FP16x16 { mag: 65536, sign: true }, + FP16x16 { mag: 52428, sign: true }, + FP16x16 { mag: 39321, sign: true }, + FP16x16 { mag: 26214, sign: true }, + FP16x16 { mag: 13107, sign: true }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 13107, sign: false }, + FP16x16 { mag: 26214, sign: false }, + FP16x16 { mag: 39321, sign: false }, + ] + .span() + ); + + (classifier, X) +} + + +fn svm_classifier_helper_noprob_linear( + post_transform: POST_TRANSFORM +) -> (SVMClassifier, Tensor) { + let coefficients: Span = array![ + FP16x16 { mag: 10169, sign: true }, + FP16x16 { mag: 15905, sign: false }, + FP16x16 { mag: 459, sign: false }, + FP16x16 { mag: 26713, sign: false }, + FP16x16 { mag: 2129, sign: true }, + FP16x16 { mag: 18, sign: 
false }, + FP16x16 { mag: 12830, sign: true }, + FP16x16 { mag: 23097, sign: true }, + FP16x16 { mag: 1415, sign: true }, + FP16x16 { mag: 28717, sign: true }, + FP16x16 { mag: 2994, sign: false }, + FP16x16 { mag: 847, sign: true } + ] + .span(); + let kernel_params: Span = array![ + FP16x16 { mag: 65, sign: false }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 196608, sign: false } + ] + .span(); + let kernel_type = KERNEL_TYPE::LINEAR; + let prob_a: Span = array![].span(); + let prob_b: Span = array![].span(); + let rho: Span = array![ + FP16x16 { mag: 4908, sign: true }, + FP16x16 { mag: 11563, sign: true }, + FP16x16 { mag: 13872, sign: true }, + FP16x16 { mag: 33829, sign: true } + ] + .span(); + + let support_vectors: Span = array![].span(); + let classlabels: Span = array![0, 1, 2, 3].span(); + + let vectors_per_class = Option::None; + + let mut classifier: SVMClassifier = SVMClassifier { + classlabels, + coefficients, + kernel_params, + kernel_type, + post_transform, + prob_a, + prob_b, + rho, + support_vectors, + vectors_per_class, + }; + + let mut X: Tensor = TensorTrait::new( + array![3, 3].span(), + array![ + FP16x16 { mag: 65536, sign: true }, + FP16x16 { mag: 52428, sign: true }, + FP16x16 { mag: 39321, sign: true }, + FP16x16 { mag: 26214, sign: true }, + FP16x16 { mag: 13107, sign: true }, + FP16x16 { mag: 0, sign: false }, + FP16x16 { mag: 13107, sign: false }, + FP16x16 { mag: 26214, sign: false }, + FP16x16 { mag: 39321, sign: false }, + ] + .span() + ); + + (classifier, X) +} + + +fn svm_classifier_helper_fp64x64( + post_transform: POST_TRANSFORM +) -> (SVMClassifier, Tensor) { + let coefficients: Span = array![ + FP64x64 { mag: 18446744073709551616, sign: false }, + FP64x64 { mag: 18446744073709551616, sign: false }, + FP64x64 { mag: 18446744073709551616, sign: false }, + FP64x64 { mag: 18446744073709551616, sign: false }, + FP64x64 { mag: 18446744073709551616, sign: true }, + FP64x64 { mag: 18446744073709551616, sign: true }, + FP64x64 
{ mag: 18446744073709551616, sign: true }, + FP64x64 { mag: 18446744073709551616, sign: true } + ] + .span(); + let kernel_params: Span = array![ + FP64x64 { mag: 7054933896252620800, sign: false }, + FP64x64 { mag: 0, sign: false }, + FP64x64 { mag: 55340232221128654848, sign: false } + ] + .span(); + let kernel_type = KERNEL_TYPE::RBF; + let prob_a: Span = array![FP64x64 { mag: 94799998099962986496, sign: true }].span(); + let prob_b: Span = array![FP64x64 { mag: 1180576833385529344, sign: false }].span(); + let rho: Span = array![FP64x64 { mag: 3082192501545631744, sign: false }].span(); + + let support_vectors: Span = array![ + FP64x64 { mag: 3528081300248330240, sign: false }, + FP64x64 { mag: 19594207602596118528, sign: true }, + FP64x64 { mag: 9235613999318433792, sign: false }, + FP64x64 { mag: 10869715877100519424, sign: true }, + FP64x64 { mag: 5897111318564962304, sign: true }, + FP64x64 { mag: 1816720038917308416, sign: false }, + FP64x64 { mag: 4564890528671334400, sign: false }, + FP64x64 { mag: 21278987070814027776, sign: true }, + FP64x64 { mag: 7581529597213147136, sign: false }, + FP64x64 { mag: 10953113834067329024, sign: true }, + FP64x64 { mag: 24318984989010034688, sign: true }, + FP64x64 { mag: 30296187483321270272, sign: true }, + FP64x64 { mag: 10305112258191032320, sign: false }, + FP64x64 { mag: 17005441559857987584, sign: true }, + FP64x64 { mag: 11555205301925838848, sign: false }, + FP64x64 { mag: 2962701975885447168, sign: true }, + FP64x64 { mag: 11741665981322231808, sign: true }, + FP64x64 { mag: 15376232508819505152, sign: false }, + FP64x64 { mag: 13908474645692022784, sign: false }, + FP64x64 { mag: 7323415394302033920, sign: true }, + FP64x64 { mag: 3284258824352956416, sign: true }, + FP64x64 { mag: 11374683084831064064, sign: true }, + FP64x64 { mag: 9087138148126818304, sign: false }, + FP64x64 { mag: 8247488946750095360, sign: false } + ] + .span(); + let classlabels: Span = array![0, 1].span(); + + let vectors_per_class = 
Option::Some(array![4, 4].span()); + + let mut classifier: SVMClassifier = SVMClassifier { + classlabels, + coefficients, + kernel_params, + kernel_type, + post_transform, + prob_a, + prob_b, + rho, + support_vectors, + vectors_per_class, + }; + + let mut X: Tensor = TensorTrait::new( + array![3, 3].span(), + array![ + FP64x64 { mag: 18446744073709551616, sign: true }, + FP64x64 { mag: 14757395258967642112, sign: true }, + FP64x64 { mag: 11068046444225730560, sign: true }, + FP64x64 { mag: 7378697629483821056, sign: true }, + FP64x64 { mag: 3689348814741910528, sign: true }, + FP64x64 { mag: 0, sign: false }, + FP64x64 { mag: 3689348814741910528, sign: false }, + FP64x64 { mag: 7378697629483821056, sign: false }, + FP64x64 { mag: 11068046444225730560, sign: false } + ] + .span() + ); + + (classifier, X) +} + From 5553329134cea476b5da6b76e8f5fe0781e0b315 Mon Sep 17 00:00:00 2001 From: Canace Date: Tue, 23 Jan 2024 15:20:31 +0800 Subject: [PATCH 07/46] Fixed: Merge dynamic_quantize_linear operator in v 0.2.1 1. Fixed use new cairo integers and testcase failed 2. 
Add TryInto in FP23wide and FP16wide --- .../tensor/tensor.dynamic_quantize_linear.md | 54 ++++++++++++ .../implementations/fp16x16wide/core.cairo | 6 +- .../implementations/fp8x23wide/core.cairo | 10 +++ src/operators/tensor/core.cairo | 65 +++++++++++++- .../tensor/implementations/tensor_bool.cairo | 6 ++ .../implementations/tensor_complex64.cairo | 6 ++ .../implementations/tensor_fp16x16.cairo | 12 +++ .../implementations/tensor_fp16x16wide.cairo | 12 +++ .../implementations/tensor_fp32x32.cairo | 13 +++ .../implementations/tensor_fp64x64.cairo | 12 +++ .../implementations/tensor_fp8x23.cairo | 12 +++ .../implementations/tensor_fp8x23wide.cairo | 12 +++ .../tensor/implementations/tensor_i32.cairo | 6 ++ .../tensor/implementations/tensor_i8.cairo | 6 ++ .../tensor/implementations/tensor_u32.cairo | 6 ++ src/operators/tensor/quantization.cairo | 1 + .../dynamic_quantize_linear.cairo | 78 +++++++++++++++++ tests/performance.cairo | 1 + .../dynamic_quantize_linear_test.cairo | 1 + .../dynamic_quantize_linear_fp_test.cairo | 84 +++++++++++++++++++ 20 files changed, 397 insertions(+), 6 deletions(-) create mode 100644 docs/framework/operators/tensor/tensor.dynamic_quantize_linear.md create mode 100644 src/operators/tensor/quantization/dynamic_quantize_linear.cairo create mode 100644 tests/performance/dynamic_quantize_linear_test.cairo create mode 100644 tests/performance/dynamic_quantize_linear_test/dynamic_quantize_linear_fp_test.cairo diff --git a/docs/framework/operators/tensor/tensor.dynamic_quantize_linear.md b/docs/framework/operators/tensor/tensor.dynamic_quantize_linear.md new file mode 100644 index 000000000..38f4fe367 --- /dev/null +++ b/docs/framework/operators/tensor/tensor.dynamic_quantize_linear.md @@ -0,0 +1,54 @@ +# tensor.dynamic_quantize_linear + +```rust +fn dynamic_quantize_linear(self: @Tensor) -> (Tensor::, Tensor, Tensor); +``` + +Quantizes a Tensor using dynamic linear quantization. + +The dynamic linear quantization operator. 
It consumes a high precision tensor +to compute the low precision / quantized tensor dynamicly. +Right now only uint8 is supported, it saturates to [0, 255]. + +## Args + +* `self`(`@Tensor`) - The input tensor. + +## Returns + +A new `Tensor` with the same shape as the input tensor, containing the quantized values. +* `y_scale`(`@Tensor`) - Scale for doing quantization to get `y`. +* `y_zero_point`(`@Tensor`) - Zero point for doing quantization to get `y`. + +## Type Constraints + +* `T` in (`Tensor`, `Tensor`, `Tensor`, `tensor`) +* `Q` in (`Tensor`)- Constrain `y` to 8-bit unsigned integer tensor. + +## Examples + +```rust +use array::{ArrayTrait, SpanTrait}; + +use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, I32Tensor}; +use orion::numbers::{u8, i32, IntegerTrait}; + +fn dynamic_quantize_linear_example() -> (Tensor, Tensor, Tensor) { + // We instantiate a 1D Tensor here. + let x = TensorTrait::::new( + shape: array![6].span(), + data: array![ + FP16x16 { mag: 10945, sign: false }, + FP16x16 { mag: 190054, sign: false }, + FP16x16 { mag: 196608, sign: false }, + FP16x16 { mag: 229376, sign: false }, + FP16x16 { mag: 196608, sign: true }, + FP16x16 { mag: 229376, sign: true }, + ] + .span(), + ); + + return x.dynamic_quantize_linear(); +} +>>> [133, 233, 236, 255, -18, -0] +``` diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo index b3fe4d39b..cc31554b2 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo @@ -296,13 +296,13 @@ impl FP16x16WTryIntoU64 of TryInto { } } -impl FP16x16WTryIntoU32 of TryInto { - fn try_into(self: FP16x16W) -> Option { +impl FP16x16WTryIntoU32 of TryInto { + fn try_into(self: FP16x16W) -> Option { if self.sign { return Option::None(()); } else { // Unscale the magnitude and round down - return Option::Some(self.mag / ONE); + return (self.mag / 
ONE).try_into(); } } } diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo index c4b49c798..e45625531 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo @@ -283,6 +283,16 @@ impl FP8x23WTryIntoU64 of TryInto { } } +impl FP8x23WTryIntoU32 of TryInto { + fn try_into(self: FP8x23W) -> Option { + if self.sign { + Option::None(()) + } else { + // Unscale the magnitude and round down + return (self.mag / ONE).try_into(); + } + } +} impl FP8x23WTryIntoU16 of TryInto { fn try_into(self: FP8x23W) -> Option { diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 70344eb97..ed818b735 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -118,6 +118,7 @@ impl TensorSerde, impl TDrop: Drop> of Serde8Bit conversion of FP32 Input data. trait TensorTrait { /// # tensor.new /// @@ -2459,7 +2460,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` with the same shape as the input tensor, containing the quantized values. + /// A new `Tensor` with the same shape as the input tensor, containing the quantized values. /// /// ## Type Constraints /// @@ -2512,7 +2513,7 @@ trait TensorTrait { /// /// ## Args /// - /// * `self`(`@Tensor`) - The input tensor. + /// * `self`(`@Tensor`) - The input tensor. /// * `x_scale`(`@Tensor`) - Scale for input `x`. /// * `x_zero_point`(`@Tensor`) - Zero point for input `x`. 
/// @@ -5117,7 +5118,7 @@ trait TensorTrait { /// fn split(self: @Tensor, axis: usize, num_outputs: Option, split: Option> /// ) -> Array>; /// ``` - /// + /// ## Args /// Split a tensor into a list of tensors, along the specified ‘axis’ /// /// @@ -5162,6 +5163,64 @@ trait TensorTrait { fn split( self: @Tensor, axis: usize, num_outputs: Option, spl: Option> ) -> Array>; + /// # tensor.dynamic_quantize_linear + /// + /// ```rust + /// fn dynamic_quantize_linear(self: @Tensor) -> (Tensor::, Tensor, Tensor); + /// ``` + /// + /// Quantizes a Tensor using dynamic linear quantization. + /// + /// The dynamic linear quantization operator. It consumes a high precision tensor + /// to compute the low precision / quantized tensor dynamicly. + /// Right now only uint8 is supported, it saturates to [0, 255]. + /// + /// ## Args + /// + /// * `self`(`@Tensor`) - The input tensor. + /// + /// ## Returns + /// + /// A new `Tensor` with the same shape as the input tensor, containing the quantized values. + /// * `y_scale`(`@Tensor`) - Scale for doing quantization to get `y`. + /// * `y_zero_point`(`@Tensor`) - Zero point for doing quantization to get `y`. + /// + /// ## Type Constraints + /// + /// * `T` in (`Tensor`, `Tensor`, `Tensor`, `tensor`) + /// * `Q` in (`Tensor`)- Constrain `y` to 8-bit unsigned integer tensor. + /// + /// ## Examples + /// + /// ```rust + /// use array::{ArrayTrait, SpanTrait}; + /// + /// use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, I32Tensor}; + /// use orion::numbers::{u8, i32, IntegerTrait}; + /// + /// fn dynamic_quantize_linear_example() -> (Tensor, Tensor, Tensor) { + /// // We instantiate a 1D Tensor here. 
+ /// let x = TensorTrait::::new( + /// shape: array![6].span(), + /// data: array![ + /// FP16x16 { mag: 10945, sign: false }, + /// FP16x16 { mag: 190054, sign: false }, + /// FP16x16 { mag: 196608, sign: false }, + /// FP16x16 { mag: 229376, sign: false }, + /// FP16x16 { mag: 196608, sign: true }, + /// FP16x16 { mag: 229376, sign: true }, + /// ] + /// .span(), + /// ); + /// + /// return x.dynamic_quantize_linear(); + /// } + /// >>> [133, 233, 236, 255, -18, -0] + /// ``` + /// + fn dynamic_quantize_linear( + self: @Tensor + ) -> (Tensor, Tensor, Tensor); } /// Cf: TensorTrait::new docstring diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 3da518ec8..7e34dabef 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -484,6 +484,12 @@ impl BoolTensor of TensorTrait { ) -> Array> { panic(array!['not supported!']) } + + fn dynamic_quantize_linear( + self: @Tensor + ) -> (Tensor::, Tensor::, Tensor){ + panic(array!['not supported!']) + } } /// Implements partial equal for two `Tensor` using the `PartialEq` trait. diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 74acba5c6..315fc71a0 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -515,6 +515,12 @@ impl Complex64Tensor of TensorTrait { ) -> Tensor { panic(array!['not supported!']) } + + fn dynamic_quantize_linear( + self: @Tensor + ) -> (Tensor::, Tensor::, Tensor){ + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index cdc50bc4f..90e6f4c9a 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -560,6 +560,18 @@ impl FP16x16Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn dynamic_quantize_linear( + self: @Tensor + ) -> (Tensor::, Tensor::, Tensor){ + quantization::dynamic_quantize_linear::dynamic_quantize_linear( + self, + NumberTrait::new_unscaled(0, false), + NumberTrait::new_unscaled(255, false), + NumberTrait::new_unscaled(0, false), + NumberTrait::new_unscaled(1, false), + ) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index b0dc2d858..7954d49f3 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -512,6 +512,18 @@ impl FP16x16WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn dynamic_quantize_linear( + self: @Tensor + ) -> (Tensor::, Tensor::, Tensor){ + quantization::dynamic_quantize_linear::dynamic_quantize_linear( + self, + NumberTrait::new_unscaled(0, false), + NumberTrait::new_unscaled(255, false), + NumberTrait::new_unscaled(0, false), + NumberTrait::new_unscaled(1, false), + ) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 4f862fd0e..f8c602fb0 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -561,6 +561,18 @@ impl FP32x32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn dynamic_quantize_linear( + self: @Tensor + ) -> (Tensor::, Tensor::, Tensor){ + quantization::dynamic_quantize_linear::dynamic_quantize_linear( + self, + NumberTrait::new_unscaled(0, false), + NumberTrait::new_unscaled(255, false), + NumberTrait::new_unscaled(0, false), + NumberTrait::new_unscaled(1, false), + ) + } } /// Implements addition for `Tensor` using the `Add` trait. @@ -644,6 +656,7 @@ impl FP32x32TryIntoI8 of TryInto { Option::Some(number_i8) } } + impl TensorI8IntoTensorFP32x32 of Into, Tensor> { fn into(self: Tensor) -> Tensor { tensor_i8_to_tensor_fp32x32(@self) diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 1fe5591fc..cd82a8106 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -561,6 +561,18 @@ impl FP64x64Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn dynamic_quantize_linear( + self: @Tensor + ) -> (Tensor::, Tensor::, Tensor){ + quantization::dynamic_quantize_linear::dynamic_quantize_linear( + self, + NumberTrait::new_unscaled(0, false), + NumberTrait::new_unscaled(255, false), + NumberTrait::new_unscaled(0, false), + NumberTrait::new_unscaled(1, false), + ) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 77d183c21..169b60232 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -559,6 +559,18 @@ impl FP8x23Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn dynamic_quantize_linear( + self: @Tensor + ) -> (Tensor::, Tensor::, Tensor){ + quantization::dynamic_quantize_linear::dynamic_quantize_linear( + self, + NumberTrait::new_unscaled(0, false), + NumberTrait::new_unscaled(255, false), + NumberTrait::new_unscaled(0, false), + NumberTrait::new_unscaled(1, false), + ) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index ff6069087..b8792fb67 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -498,6 +498,18 @@ impl FP8x23WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn dynamic_quantize_linear( + self: @Tensor + ) -> (Tensor::, Tensor::, Tensor){ + quantization::dynamic_quantize_linear::dynamic_quantize_linear( + self, + NumberTrait::new_unscaled(0, false), + NumberTrait::new_unscaled(255, false), + NumberTrait::new_unscaled(0, false), + NumberTrait::new_unscaled(1, false), + ) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 50383d2df..7309ce22b 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -541,6 +541,12 @@ impl I32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn dynamic_quantize_linear( + self: @Tensor + ) -> (Tensor::, Tensor::, Tensor){ + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 7e81d90eb..ec325d4a0 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -539,6 +539,12 @@ impl I8Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn dynamic_quantize_linear( + self: @Tensor + ) -> (Tensor::, Tensor::, Tensor){ + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 5a926a538..357adbe9d 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -482,6 +482,12 @@ impl U32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn dynamic_quantize_linear( + self: @Tensor + ) -> (Tensor::, Tensor::, Tensor){ + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/quantization.cairo b/src/operators/tensor/quantization.cairo index 559dc061e..4f56fc5cd 100644 --- a/src/operators/tensor/quantization.cairo +++ b/src/operators/tensor/quantization.cairo @@ -1,4 +1,5 @@ mod quantize_linear; +mod dynamic_quantize_linear; mod dequantize_linear; mod qlinear_matmul; mod qlinear_concat; diff --git a/src/operators/tensor/quantization/dynamic_quantize_linear.cairo b/src/operators/tensor/quantization/dynamic_quantize_linear.cairo new file mode 100644 index 000000000..085132e92 --- /dev/null +++ b/src/operators/tensor/quantization/dynamic_quantize_linear.cairo @@ -0,0 +1,78 @@ +use core::array::ArrayTrait; +use core::array::SpanTrait; +use core::option::OptionTrait; +use core::debug::PrintTrait; +use orion::numbers::NumberTrait; +use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear; +use orion::operators::tensor::quantization::quantize_linear::quantize_linear; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::saturate; + +fn dynamic_quantize_linear< + T, + Q, + impl TTensor: TensorTrait, + impl QTensor: TensorTrait, + impl TAdd: Add, + impl TSub: Sub, + impl TDiv: Div, + impl TTensorDiv: Div>, + impl TPartialOrd: PartialOrd, + impl TPartialEq: PartialEq, + impl TTryInto: TryInto, + impl TCopy: Copy, + impl TDrop: Drop, + impl QCopy: Copy, + impl QDrop: Drop, +>( + x: @Tensor, min: T, max: T, zero: T, one: T +) -> (Tensor, Tensor, Tensor) { + // y_scale = (maximum(0, max(x)) - minimum(0, min(x))) / (qmax - qmin) + let mut x_max: T = x.max_in_tensor(); + let mut x_min: T = x.min_in_tensor(); + if x_max < zero { + x_max = zero; + } + if x_min > zero { + x_min = zero + } + + // scale = max == min ? 
1.0f : (max - min) / float(qmax - qmin); + let mut y_scale_values = ArrayTrait::new(); + let y_scale_value: T = (x_max - x_min) / (max - min); + if x_max == x_min { + y_scale_values.append(one); + }else{ + y_scale_values.append(y_scale_value); + } + + + let mut y_scale_tensor_shape = ArrayTrait::new(); + y_scale_tensor_shape.append(y_scale_values.len()); + + let y_scale = TensorTrait::::new( + shape: y_scale_tensor_shape.span(), data: y_scale_values.span(), + ); + + // intermediate_zero_point = qmin - min(x)/y_scale + let intermediate_zero_point: T = min - x_min / y_scale_value; + + // y_zero_point = cast(round(saturate(itermediate_zero_point))) + let mut y_zero_point_value: T = saturate(min, max, intermediate_zero_point); + let mut y_zero_point_values = ArrayTrait::new(); + y_zero_point_values.append(y_zero_point_value); + + let mut y_zero_point_tensor_shape = ArrayTrait::new(); + y_zero_point_tensor_shape.append(y_zero_point_values.len()); + + let mut y_zero_point_values = ArrayTrait::new(); + y_zero_point_values.append(y_zero_point_value); + let mut y_zero_point = TensorTrait::::new( + shape: y_zero_point_tensor_shape.span(), data: y_zero_point_values.span(), + ); + // y_zero_point = y_zero_point.round(); // tensor only supported! 
+ + // y = saturate (round (x / y_scale) + y_zero_point) + + return (quantize_linear(x, @y_scale, @y_zero_point, min, max), y_scale, y_zero_point); +} \ No newline at end of file diff --git a/tests/performance.cairo b/tests/performance.cairo index a5ac0ba2f..da71869ed 100644 --- a/tests/performance.cairo +++ b/tests/performance.cairo @@ -1,2 +1,3 @@ mod quantize_linear_test; mod dequantize_linear_test; +mod dynamic_quantize_linear_test; \ No newline at end of file diff --git a/tests/performance/dynamic_quantize_linear_test.cairo b/tests/performance/dynamic_quantize_linear_test.cairo new file mode 100644 index 000000000..bbd43eb29 --- /dev/null +++ b/tests/performance/dynamic_quantize_linear_test.cairo @@ -0,0 +1 @@ +mod dynamic_quantize_linear_fp_test; \ No newline at end of file diff --git a/tests/performance/dynamic_quantize_linear_test/dynamic_quantize_linear_fp_test.cairo b/tests/performance/dynamic_quantize_linear_test/dynamic_quantize_linear_fp_test.cairo new file mode 100644 index 000000000..e1817dff9 --- /dev/null +++ b/tests/performance/dynamic_quantize_linear_test/dynamic_quantize_linear_fp_test.cairo @@ -0,0 +1,84 @@ +#[cfg(test)] +mod fp8x23 { + use core::array::ArrayTrait; + use core::array::SpanTrait; + use core::traits::Into; + use core::debug::PrintTrait; + + use orion::numbers::fixed_point::core::{FixedTrait}; + use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23Impl; + use orion::operators::tensor::implementations::tensor_fp8x23::FP8x23Tensor; + use orion::operators::tensor::{TensorTrait, Tensor}; + use orion::numbers::FP8x23; + + #[test] + #[available_gas(2000000)] + fn dynamic_quantize_linear() { + // X + let mut shape = ArrayTrait::::new(); + shape.append(6); + let mut data = ArrayTrait::::new(); + data.append(FixedTrait::new(0, false)); + data.append(FixedTrait::new(587203, false)); // 0.07 + data.append(FixedTrait::new(838861, false)); // 0.1 + data.append(FixedTrait::new(1677722, false)); // 0.2 + 
data.append(FixedTrait::new(4194304, false)); // 0.5 + data.append(FixedTrait::new(7549747, false)); // 0.9 + + let x = TensorTrait::new(shape.span(), data.span()); + + let (y, y_scale, y_zero_point) = x.dynamic_quantize_linear(); + + assert((*(y_scale.data).at(0)).into() == 29606, '*y_scale[0].mag == 0.00353'); + assert((*(y_zero_point.data).at(0)).into() == 0, '*y_zero_point[0].mag == 0'); + assert((*(y.data).at(0)).into() == 0, '*result[0] == 0'); + assert((*(y.data).at(1)).into() == 19, '*result[1] == 19'); + assert((*(y.data).at(2)).into() == 28, '*result[2] == 28'); + assert((*(y.data).at(3)).into() == 56, '*result[3] == 56'); + assert((*(y.data).at(4)).into() == 141, '*result[4] == 141'); + assert((*(y.data).at(5)).into() == 255, '*result[5] == 255'); + } +} + + +#[cfg(test)] +mod fp16x16 { + use core::array::ArrayTrait; + use core::array::SpanTrait; + use core::traits::Into; + use core::debug::PrintTrait; + + use orion::numbers::fixed_point::core::{FixedTrait}; + use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16Impl; + use orion::operators::tensor::implementations::tensor_fp16x16::FP16x16Tensor; + use orion::operators::tensor::{TensorTrait, Tensor}; + use orion::numbers::FP16x16; + + #[test] + #[available_gas(2000000)] + fn dynamic_quantize_linear() { + // X + let mut shape = ArrayTrait::::new(); + shape.append(6); + let mut data = ArrayTrait::::new(); + data.append(FixedTrait::new(10945, false)); // 0.167 + data.append(FixedTrait::new(190054, false)); // 2.9 + data.append(FixedTrait::new_unscaled(3, false)); // 3.0 + data.append(FixedTrait::new(229376, false)); // 3.5 + data.append(FixedTrait::new_unscaled(3, true)); // -3.0 + data.append(FixedTrait::new(229376, true)); // -3.5 + + let x = TensorTrait::new(shape.span(), data.span()); + + let (y, y_scale, y_zero_point) = x.dynamic_quantize_linear(); + + assert((*(y_scale.data).at(0)).into() == 1799, '*y_scale[0].mag == 0.02745'); + assert((*(y_zero_point.data).at(0)).into() == 
8355967, '*y_zero_point[0].mag == 128'); + assert((*(y.data).at(0)).into() == 133, '*result[0] == 134'); + assert((*(y.data).at(1)).into() == 233, '*result[1] == 233'); + assert((*(y.data).at(2)).into() == 236, '*result[2] == 237'); + assert((*(y.data).at(3)).into() == 255, '*result[3] == 255'); + assert((*(y.data).at(4)).into() == 18, '*result[4] == -18'); + assert((*(y.data).at(5)).into() == 0, '*result[5] == -0'); + } +} From 4ea4c987acba25c915e51ffdea248cf5694bba4b Mon Sep 17 00:00:00 2001 From: Canace Date: Tue, 23 Jan 2024 15:59:49 +0800 Subject: [PATCH 08/46] docs: `.md` modification --- docs/framework/operators/tensor/tensor.dequantize_linear.md | 2 +- .../operators/tensor/tensor.dynamic_quantize_linear.md | 4 ++-- docs/framework/operators/tensor/tensor.quantize_linear.md | 2 +- docs/framework/operators/tensor/tensor.split.md | 2 +- src/operators/tensor/core.cairo | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.dequantize_linear.md b/docs/framework/operators/tensor/tensor.dequantize_linear.md index 745d98782..e1dd6b594 100644 --- a/docs/framework/operators/tensor/tensor.dequantize_linear.md +++ b/docs/framework/operators/tensor/tensor.dequantize_linear.md @@ -13,7 +13,7 @@ or a 1-D tensor for per-axis quantization. ## Args -* `self`(`@Tensor`) - The input tensor. +* `self`(`@Tensor`) - The input tensor. * `x_scale`(`@Tensor`) - Scale for input `x`. * `x_zero_point`(`@Tensor`) - Zero point for input `x`. 
diff --git a/docs/framework/operators/tensor/tensor.dynamic_quantize_linear.md b/docs/framework/operators/tensor/tensor.dynamic_quantize_linear.md index 38f4fe367..d79fa2d98 100644 --- a/docs/framework/operators/tensor/tensor.dynamic_quantize_linear.md +++ b/docs/framework/operators/tensor/tensor.dynamic_quantize_linear.md @@ -33,7 +33,7 @@ use array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, I32Tensor}; use orion::numbers::{u8, i32, IntegerTrait}; -fn dynamic_quantize_linear_example() -> (Tensor, Tensor, Tensor) { +fn dynamic_quantize_linear_example() -> (Tensor, Tensor, Tensor) { // We instantiate a 1D Tensor here. let x = TensorTrait::::new( shape: array![6].span(), @@ -50,5 +50,5 @@ fn dynamic_quantize_linear_example() -> (Tensor, Tensor, Tensor return x.dynamic_quantize_linear(); } ->>> [133, 233, 236, 255, -18, -0] +>>> ([133, 233, 236, 255, -18, -0], [0.02745], [128] ``` diff --git a/docs/framework/operators/tensor/tensor.quantize_linear.md b/docs/framework/operators/tensor/tensor.quantize_linear.md index aae8e088b..1d1702645 100644 --- a/docs/framework/operators/tensor/tensor.quantize_linear.md +++ b/docs/framework/operators/tensor/tensor.quantize_linear.md @@ -20,7 +20,7 @@ For (x / y_scale), it's rounding to the nearest even. ## Returns -A new `Tensor` with the same shape as the input tensor, containing the quantized values. +A new `Tensor` with the same shape as the input tensor, containing the quantized values. 
## Type Constraints diff --git a/docs/framework/operators/tensor/tensor.split.md b/docs/framework/operators/tensor/tensor.split.md index 26b4a546f..7c0e8b157 100644 --- a/docs/framework/operators/tensor/tensor.split.md +++ b/docs/framework/operators/tensor/tensor.split.md @@ -4,7 +4,7 @@ fn split(self: @Tensor, axis: usize, num_outputs: Option, split: Option> ) -> Array>; ``` - +## Args Split a tensor into a list of tensors, along the specified ‘axis’ diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index ed818b735..12f3b9c54 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -5198,7 +5198,7 @@ trait TensorTrait { /// use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, I32Tensor}; /// use orion::numbers::{u8, i32, IntegerTrait}; /// - /// fn dynamic_quantize_linear_example() -> (Tensor, Tensor, Tensor) { + /// fn dynamic_quantize_linear_example() -> (Tensor, Tensor, Tensor) { /// // We instantiate a 1D Tensor here. 
/// let x = TensorTrait::::new( /// shape: array![6].span(), @@ -5215,7 +5215,7 @@ trait TensorTrait { /// /// return x.dynamic_quantize_linear(); /// } - /// >>> [133, 233, 236, 255, -18, -0] + /// >>> ([133, 233, 236, 255, -18, -0], [0.02745], [128] /// ``` /// fn dynamic_quantize_linear( From d4eb668d3746f0d7c0c78b11bc7798e6b5cf2683 Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Wed, 24 Jan 2024 14:38:38 +0800 Subject: [PATCH 09/46] fixed: Implement space to depth operator --- .../operators/neural-network/nn.gemm.md | 11 +- .../neural-network/nn.space_to_depth.md | 57 +++++++++ nodegen/node/space_to_depth.py | 108 ++++++++++++++++++ src/operators/nn/core.cairo | 59 ++++++++++ src/operators/nn/functional.cairo | 1 + .../nn/functional/space_to_depth.cairo | 37 ++++++ .../nn/implementations/nn_fp16x16.cairo | 4 + .../nn/implementations/nn_fp32x32.cairo | 4 + .../nn/implementations/nn_fp64x64.cairo | 4 + .../nn/implementations/nn_fp8x23.cairo | 4 + src/operators/nn/implementations/nn_i32.cairo | 4 + src/operators/nn/implementations/nn_i8.cairo | 4 + src/operators/nn/implementations/nn_u32.cairo | 4 + tests/nodes.cairo | 5 + tests/nodes/space_to_depth_fp16x16.cairo | 20 ++++ .../space_to_depth_fp16x16/input_0.cairo | 31 +++++ .../space_to_depth_fp16x16/output_0.cairo | 31 +++++ tests/nodes/space_to_depth_fp8x23.cairo | 20 ++++ .../nodes/space_to_depth_fp8x23/input_0.cairo | 31 +++++ .../space_to_depth_fp8x23/output_0.cairo | 31 +++++ tests/nodes/space_to_depth_i32.cairo | 20 ++++ tests/nodes/space_to_depth_i32/input_0.cairo | 31 +++++ tests/nodes/space_to_depth_i32/output_0.cairo | 31 +++++ tests/nodes/space_to_depth_i8.cairo | 20 ++++ tests/nodes/space_to_depth_i8/input_0.cairo | 31 +++++ tests/nodes/space_to_depth_i8/output_0.cairo | 31 +++++ tests/nodes/space_to_depth_u32.cairo | 20 ++++ tests/nodes/space_to_depth_u32/input_0.cairo | 31 +++++ tests/nodes/space_to_depth_u32/output_0.cairo | 31 +++++ 29 files changed, 711 insertions(+), 5 deletions(-) create mode 
100644 docs/framework/operators/neural-network/nn.space_to_depth.md create mode 100644 nodegen/node/space_to_depth.py create mode 100644 src/operators/nn/functional/space_to_depth.cairo create mode 100644 tests/nodes/space_to_depth_fp16x16.cairo create mode 100644 tests/nodes/space_to_depth_fp16x16/input_0.cairo create mode 100644 tests/nodes/space_to_depth_fp16x16/output_0.cairo create mode 100644 tests/nodes/space_to_depth_fp8x23.cairo create mode 100644 tests/nodes/space_to_depth_fp8x23/input_0.cairo create mode 100644 tests/nodes/space_to_depth_fp8x23/output_0.cairo create mode 100644 tests/nodes/space_to_depth_i32.cairo create mode 100644 tests/nodes/space_to_depth_i32/input_0.cairo create mode 100644 tests/nodes/space_to_depth_i32/output_0.cairo create mode 100644 tests/nodes/space_to_depth_i8.cairo create mode 100644 tests/nodes/space_to_depth_i8/input_0.cairo create mode 100644 tests/nodes/space_to_depth_i8/output_0.cairo create mode 100644 tests/nodes/space_to_depth_u32.cairo create mode 100644 tests/nodes/space_to_depth_u32/input_0.cairo create mode 100644 tests/nodes/space_to_depth_u32/output_0.cairo diff --git a/docs/framework/operators/neural-network/nn.gemm.md b/docs/framework/operators/neural-network/nn.gemm.md index 4ac734d73..b89d884fc 100644 --- a/docs/framework/operators/neural-network/nn.gemm.md +++ b/docs/framework/operators/neural-network/nn.gemm.md @@ -1,4 +1,4 @@ -# nn.gemm +# NNTrait::gemm ```rust fn gemm( @@ -12,18 +12,19 @@ ) -> Tensor; ``` -Performs General Matrix multiplication: [https://en.wikipedia.org/wiki/Basic\_Linear\_Algebra\_Subprograms#Level\_3](https://en.wikipedia.org/wiki/Basic\_Linear\_Algebra\_Subprograms#Level\_3) +Performs General Matrix multiplication: https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 * A' = transpose(A) if transA else A * B' = transpose(B) if transB else B -Compute `Y = alpha * A' * B' + beta * C`, where input tensor A has shape (M, K) or (K, M), input tensor B has shape (K, N) or 
(N, K), input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N). `A` will be transposed before doing the computation if attribute `transA` is `true`, same for `B` and `transB`. +Compute `Y = alpha * A' * B' + beta * C`, where input tensor A has shape (M, K) or (K, M), input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N). +`A` will be transposed before doing the computation if attribute `transA` is `true`, same for `B` and `transB`. ## Args * `A`(`Tensor`) - Input tensor A. The shape of `A` should be (M, K) if `transA` is `false`, or (K, M) if `transA` is `true`. * `B`(`Tensor`) - Input tensor B. The shape of `B` should be (K, N) if `transB` is `false`, or (N, K) if `transB` is `true`. -* `C`(`Option>`) - Optional input tensor C. The shape of C should be unidirectional broadcastable to (M, N). +* `C`(`Option>`) - Optional input tensor C. The shape of C should be unidirectional broadcastable to (M, N). * `alpha`(`Option`) - Optional scalar multiplier for the product of input tensors `A * B`. * `beta`(`Option`) - Optional scalar multiplier for input tensor `C`. * `transA`(`bool`) - Whether `A` should be transposed. @@ -63,4 +64,4 @@ A `Tensor` of shape (M, N). return y; } >>> tensor of shape [3;5] -``` +```` diff --git a/docs/framework/operators/neural-network/nn.space_to_depth.md b/docs/framework/operators/neural-network/nn.space_to_depth.md new file mode 100644 index 000000000..fa44bf6e6 --- /dev/null +++ b/docs/framework/operators/neural-network/nn.space_to_depth.md @@ -0,0 +1,57 @@ +# NNTrait::space_to_depth + +```rust + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor; +``` + +SpaceToDepth rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the depth dimension. 
+ +## Args + +* `tensor`(`@Tensor`) - The input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width. +* `blocksize`(`usize`) - The size of the blocks to move along [blocksize, blocksize]. + +## Returns + +A `Tensor` of [N, C * blocksize * blocksize, H/blocksize, W/blocksize]. + +## Examples + +```rust +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; +use orion::operators::nn::NNTrait; +use orion::operators::nn::I8NN; +use orion::numbers::FixedTrait; + +fn space_to_depth_example() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(-3); + data.append(0); + data.append(0); + data.append(0); + data.append(-1); + data.append(1); + data.append(-2); + data.append(-3); + data.append(2); + data.append(-2); + data.append(-3); + data.append(-3); + data.append(-1); + data.append(0); + data.append(1); + data.append(-3); + let tensor = TensorTrait::new(shape.span(), data.span()); + return NNTrait::space_to_depth(@tensor, 2); +} +>>> [[[[-3, 0]], [[2, -3]], [[0, 0]], [[-2, -3]], [[-1, -2]], [[-1, 1]], [[1, -3]], [[0, -3]]]] +``` diff --git a/nodegen/node/space_to_depth.py b/nodegen/node/space_to_depth.py new file mode 100644 index 000000000..2101d543b --- /dev/null +++ b/nodegen/node/space_to_depth.py @@ -0,0 +1,108 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait + +def space_to_depth(data: np.ndarray, blocksize: int = 2) -> np.ndarray: + if len(data.shape) != 4: + raise RuntimeError(f"Unexpected shape {data.shape!r}.") + b, C, H, W = data.shape + tmpshape = ( + b, + C, + H // blocksize, + blocksize, + W // blocksize, + blocksize, + ) + reshaped = np.reshape(data, tmpshape) + 
transposed = np.transpose(reshaped, [0, 3, 5, 1, 2, 4]) + finalshape = ( + b, + C * blocksize * blocksize, + H // blocksize, + W // blocksize, + ) + y = np.reshape(transposed, finalshape).astype(data.dtype) + return y + +class Space_to_depth(RunAll): + + + @staticmethod + def fp8x23(): + x = np.random.uniform(-3, 3, (1, 2, 2, 4)).astype(np.float64) + y = space_to_depth(x) + + x = Tensor(Dtype.FP8x23, x.shape, to_fp( + x.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "space_to_depth_fp8x23" + make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)", + name, Trait.NN) + + @staticmethod + def fp16x16(): + x = np.random.uniform(-3, 3, (1, 2, 2, 4)).astype(np.float16) + y = space_to_depth(x) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "space_to_depth_fp16x16" + make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)", + name, Trait.NN) + + # @staticmethod + # def fp64x64(): + # x = np.random.uniform(-3, 3, (1, 2, 2, 4)).astype(np.float64) + # y = space_to_depth(x) + + # x = Tensor(Dtype.FP64x64, x.shape, to_fp( + # x.flatten(), FixedImpl.FP64x64)) + # y = Tensor(Dtype.FP64x64, y.shape, to_fp( + # y.flatten(), FixedImpl.FP64x64)) + + # name = "space_to_depth_fp64x64" + # make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)", + # name, Trait.NN) + + @staticmethod + def fpi8(): + x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.int8) + y = space_to_depth(x) + + x = Tensor(Dtype.I8, x.shape, x.flatten()) + y = Tensor(Dtype.I8, y.shape, y.flatten()) + + name = "space_to_depth_i8" + make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)", + name, Trait.NN) + + @staticmethod + def fpi32(): + x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.int32) + y = space_to_depth(x) + + x = Tensor(Dtype.I32, x.shape, x.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name 
= "space_to_depth_i32" + make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)", + name, Trait.NN) + + + @staticmethod + def fpu32(): + x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.uint32) + y = space_to_depth(x) + + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "space_to_depth_u32" + make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)", + name, Trait.NN) diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 3c99f4733..86a5b9e05 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -617,6 +617,65 @@ trait NNTrait { /// ``` /// fn thresholded_relu(tensor: @Tensor, alpha: @T) -> Tensor; + /// # NNTrait::space_to_depth + /// + /// ```rust + /// fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor; + /// ``` + /// + /// SpaceToDepth rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the depth dimension. + /// + /// ## Args + /// + /// * `tensor`(`@Tensor`) - The input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width. + /// * `blocksize`(`usize`) - The size of the blocks to move along [blocksize, blocksize]. + /// + /// ## Returns + /// + /// A `Tensor` of [N, C * blocksize * blocksize, H/blocksize, W/blocksize]. 
+ /// + /// ## Examples + /// + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::{TensorTrait, Tensor}; + /// use orion::operators::tensor::{I8Tensor, I8TensorAdd}; + /// use orion::numbers::NumberTrait; + /// use orion::operators::nn::NNTrait; + /// use orion::operators::nn::I8NN; + /// use orion::numbers::FixedTrait; + /// + /// fn space_to_depth_example() -> Tensor { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(2); + /// shape.append(2); + /// shape.append(4); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(-3); + /// data.append(0); + /// data.append(0); + /// data.append(0); + /// data.append(-1); + /// data.append(1); + /// data.append(-2); + /// data.append(-3); + /// data.append(2); + /// data.append(-2); + /// data.append(-3); + /// data.append(-3); + /// data.append(-1); + /// data.append(0); + /// data.append(1); + /// data.append(-3); + /// let tensor = TensorTrait::new(shape.span(), data.span()); + /// return NNTrait::space_to_depth(@tensor, 2); + /// } + /// >>> [[[[-3, 0]], [[2, -3]], [[0, 0]], [[-2, -3]], [[-1, -2]], [[-1, 1]], [[1, -3]], [[0, -3]]]] + /// ``` + /// + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor; /// # NNTrait::gemm /// /// ```rust diff --git a/src/operators/nn/functional.cairo b/src/operators/nn/functional.cairo index a0fd96cc8..5716b10ae 100644 --- a/src/operators/nn/functional.cairo +++ b/src/operators/nn/functional.cairo @@ -10,3 +10,4 @@ mod logsoftmax; mod thresholded_relu; mod hard_sigmoid; mod gemm; +mod space_to_depth; diff --git a/src/operators/nn/functional/space_to_depth.cairo b/src/operators/nn/functional/space_to_depth.cairo new file mode 100644 index 000000000..6b0881d8b --- /dev/null +++ b/src/operators/nn/functional/space_to_depth.cairo @@ -0,0 +1,37 @@ +use core::traits::Into; +use core::traits::TryInto; +use orion::operators::tensor::core::{Tensor, TensorTrait}; +use core::option::OptionTrait; + 
+use orion::numbers::fixed_point::core::FixedTrait; +use orion::numbers::NumberTrait; + +use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; +use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; + + +/// Cf: NNTrait::space_to_depth docstring +fn space_to_depth< + T, + impl TTensor: TensorTrait, + impl TAdd: Add, + impl TMul: Mul, + impl TTensorAdd: Add>, + impl TPartialOrd: PartialOrd, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop, +>( + tensor: Tensor, blocksize: usize +) -> Tensor { + assert!((tensor.shape).len() == 4, "Unexpected shape 4."); + let b = (tensor.shape).at(0); + let C = (tensor.shape).at(1); + let H = (tensor.shape).at(2); + let W = (tensor.shape).at(3); + let tmpshape = array![*b, *C, *H / blocksize, blocksize, *W / blocksize, blocksize]; + let reshaped = (tensor).reshape(target_shape: tmpshape.span()); + let transposed = reshaped.transpose(axes: array![0, 3, 5, 1, 2, 4].span()); + let finalshape = array![*b, *C * blocksize * blocksize, *H / blocksize, *W / blocksize]; + return transposed.reshape(target_shape: finalshape.span()); +} diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 785d3c9fa..fa9c2f563 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -61,6 +61,10 @@ impl FP16x16NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { + functional::space_to_depth::space_to_depth(*tensor, blocksize) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index 0427ea5f7..03dcbe5f9 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -55,6 +55,10 @@ impl FP32x32NN 
of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { + functional::space_to_depth::space_to_depth(*tensor, blocksize) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index fec810679..a83f607f4 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -55,6 +55,10 @@ impl FP64x64NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { + functional::space_to_depth::space_to_depth(*tensor, blocksize) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index 9f5416121..d0c92d7ea 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -59,6 +59,10 @@ impl FP8x23NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { + functional::space_to_depth::space_to_depth(*tensor, blocksize) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index 1db66a1c6..7875feb70 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -50,6 +50,10 @@ impl I32NN of NNTrait { panic(array!['not supported!']) } + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { + functional::space_to_depth::space_to_depth(*tensor, blocksize) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index e67bb7504..5c11ebba1 100644 --- 
a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -50,6 +50,10 @@ impl I8NN of NNTrait { panic(array!['not supported!']) } + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { + functional::space_to_depth::space_to_depth(*tensor, blocksize) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 370880e8d..3278b57b0 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -50,6 +50,10 @@ impl U32NN of NNTrait { panic(array!['not supported!']) } + fn space_to_depth(tensor: @Tensor, blocksize: usize) -> Tensor { + functional::space_to_depth::space_to_depth(*tensor, blocksize) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 6c70b42cb..6ea5f4c45 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -936,3 +936,8 @@ mod split_fp16x16_2d_variable_parts; mod split_fp16x16_zero_size; mod split_fp16x16_1d_uneven; mod split_fp16x16_2d_uneven; +mod space_to_depth_fp16x16; +mod space_to_depth_fp8x23; +mod space_to_depth_i32; +mod space_to_depth_i8; +mod space_to_depth_u32; diff --git a/tests/nodes/space_to_depth_fp16x16.cairo b/tests/nodes/space_to_depth_fp16x16.cairo new file mode 100644 index 000000000..794e34a85 --- /dev/null +++ b/tests/nodes/space_to_depth_fp16x16.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; + +#[test] +#[available_gas(2000000000)] +fn test_space_to_depth_fp16x16() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::space_to_depth(@input_0, 2); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/space_to_depth_fp16x16/input_0.cairo 
b/tests/nodes/space_to_depth_fp16x16/input_0.cairo new file mode 100644 index 000000000..558cbe98b --- /dev/null +++ b/tests/nodes/space_to_depth_fp16x16/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 35648, sign: false }); + data.append(FP16x16 { mag: 93312, sign: false }); + data.append(FP16x16 { mag: 68608, sign: false }); + data.append(FP16x16 { mag: 93888, sign: true }); + data.append(FP16x16 { mag: 180864, sign: false }); + data.append(FP16x16 { mag: 7268, sign: false }); + data.append(FP16x16 { mag: 188800, sign: true }); + data.append(FP16x16 { mag: 104576, sign: true }); + data.append(FP16x16 { mag: 84288, sign: true }); + data.append(FP16x16 { mag: 44864, sign: false }); + data.append(FP16x16 { mag: 180480, sign: false }); + data.append(FP16x16 { mag: 147584, sign: true }); + data.append(FP16x16 { mag: 179584, sign: true }); + data.append(FP16x16 { mag: 172800, sign: false }); + data.append(FP16x16 { mag: 182912, sign: true }); + data.append(FP16x16 { mag: 19408, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/space_to_depth_fp16x16/output_0.cairo b/tests/nodes/space_to_depth_fp16x16/output_0.cairo new file mode 100644 index 000000000..a03e2cf00 --- /dev/null +++ b/tests/nodes/space_to_depth_fp16x16/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + 
shape.append(8); + shape.append(1); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 35648, sign: false }); + data.append(FP16x16 { mag: 68608, sign: false }); + data.append(FP16x16 { mag: 84288, sign: true }); + data.append(FP16x16 { mag: 180480, sign: false }); + data.append(FP16x16 { mag: 93312, sign: false }); + data.append(FP16x16 { mag: 93888, sign: true }); + data.append(FP16x16 { mag: 44864, sign: false }); + data.append(FP16x16 { mag: 147584, sign: true }); + data.append(FP16x16 { mag: 180864, sign: false }); + data.append(FP16x16 { mag: 188800, sign: true }); + data.append(FP16x16 { mag: 179584, sign: true }); + data.append(FP16x16 { mag: 182912, sign: true }); + data.append(FP16x16 { mag: 7268, sign: false }); + data.append(FP16x16 { mag: 104576, sign: true }); + data.append(FP16x16 { mag: 172800, sign: false }); + data.append(FP16x16 { mag: 19408, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/space_to_depth_fp8x23.cairo b/tests/nodes/space_to_depth_fp8x23.cairo new file mode 100644 index 000000000..7ccdb6576 --- /dev/null +++ b/tests/nodes/space_to_depth_fp8x23.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::nn::FP8x23NN; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; + +#[test] +#[available_gas(2000000000)] +fn test_space_to_depth_fp8x23() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::space_to_depth(@input_0, 2); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/space_to_depth_fp8x23/input_0.cairo b/tests/nodes/space_to_depth_fp8x23/input_0.cairo new file mode 100644 index 000000000..5a462058e --- /dev/null +++ b/tests/nodes/space_to_depth_fp8x23/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, 
Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 19097721, sign: true }); + data.append(FP8x23 { mag: 20388727, sign: false }); + data.append(FP8x23 { mag: 18733446, sign: false }); + data.append(FP8x23 { mag: 5803068, sign: false }); + data.append(FP8x23 { mag: 21193100, sign: false }); + data.append(FP8x23 { mag: 7531714, sign: false }); + data.append(FP8x23 { mag: 16983892, sign: false }); + data.append(FP8x23 { mag: 18182574, sign: true }); + data.append(FP8x23 { mag: 3066595, sign: false }); + data.append(FP8x23 { mag: 17329855, sign: false }); + data.append(FP8x23 { mag: 14812767, sign: true }); + data.append(FP8x23 { mag: 5408423, sign: false }); + data.append(FP8x23 { mag: 23872828, sign: true }); + data.append(FP8x23 { mag: 19363658, sign: false }); + data.append(FP8x23 { mag: 6503203, sign: false }); + data.append(FP8x23 { mag: 6090326, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/space_to_depth_fp8x23/output_0.cairo b/tests/nodes/space_to_depth_fp8x23/output_0.cairo new file mode 100644 index 000000000..b7efd2acf --- /dev/null +++ b/tests/nodes/space_to_depth_fp8x23/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(1); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 19097721, sign: true }); + data.append(FP8x23 { mag: 18733446, sign: false }); + data.append(FP8x23 { mag: 3066595, sign: false }); + 
data.append(FP8x23 { mag: 14812767, sign: true }); + data.append(FP8x23 { mag: 20388727, sign: false }); + data.append(FP8x23 { mag: 5803068, sign: false }); + data.append(FP8x23 { mag: 17329855, sign: false }); + data.append(FP8x23 { mag: 5408423, sign: false }); + data.append(FP8x23 { mag: 21193100, sign: false }); + data.append(FP8x23 { mag: 16983892, sign: false }); + data.append(FP8x23 { mag: 23872828, sign: true }); + data.append(FP8x23 { mag: 6503203, sign: false }); + data.append(FP8x23 { mag: 7531714, sign: false }); + data.append(FP8x23 { mag: 18182574, sign: true }); + data.append(FP8x23 { mag: 19363658, sign: false }); + data.append(FP8x23 { mag: 6090326, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/space_to_depth_i32.cairo b/tests/nodes/space_to_depth_i32.cairo new file mode 100644 index 000000000..e90f4ce72 --- /dev/null +++ b/tests/nodes/space_to_depth_i32.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::I32NN; +use orion::operators::tensor::I32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; + +#[test] +#[available_gas(2000000000)] +fn test_space_to_depth_i32() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::space_to_depth(@input_0, 2); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/space_to_depth_i32/input_0.cairo b/tests/nodes/space_to_depth_i32/input_0.cairo new file mode 100644 index 000000000..c38670056 --- /dev/null +++ b/tests/nodes/space_to_depth_i32/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(4); + + let mut data = 
ArrayTrait::new(); + data.append(-1); + data.append(-3); + data.append(0); + data.append(2); + data.append(1); + data.append(-3); + data.append(2); + data.append(0); + data.append(2); + data.append(2); + data.append(1); + data.append(-3); + data.append(-3); + data.append(-1); + data.append(-2); + data.append(-2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/space_to_depth_i32/output_0.cairo b/tests/nodes/space_to_depth_i32/output_0.cairo new file mode 100644 index 000000000..fc9691b9e --- /dev/null +++ b/tests/nodes/space_to_depth_i32/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(1); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(-1); + data.append(0); + data.append(2); + data.append(1); + data.append(-3); + data.append(2); + data.append(2); + data.append(-3); + data.append(1); + data.append(2); + data.append(-3); + data.append(-2); + data.append(-3); + data.append(0); + data.append(-1); + data.append(-2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/space_to_depth_i8.cairo b/tests/nodes/space_to_depth_i8.cairo new file mode 100644 index 000000000..0931f5172 --- /dev/null +++ b/tests/nodes/space_to_depth_i8.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::I8NN; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; +use orion::operators::tensor::I8TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_space_to_depth_i8() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::space_to_depth(@input_0, 2); + + assert_eq(y_0, z_0); +} diff --git 
a/tests/nodes/space_to_depth_i8/input_0.cairo b/tests/nodes/space_to_depth_i8/input_0.cairo new file mode 100644 index 000000000..b5f4cadf1 --- /dev/null +++ b/tests/nodes/space_to_depth_i8/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(-3); + data.append(0); + data.append(0); + data.append(0); + data.append(-1); + data.append(1); + data.append(-2); + data.append(-3); + data.append(2); + data.append(-2); + data.append(-3); + data.append(-3); + data.append(-1); + data.append(0); + data.append(1); + data.append(-3); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/space_to_depth_i8/output_0.cairo b/tests/nodes/space_to_depth_i8/output_0.cairo new file mode 100644 index 000000000..3b7c78547 --- /dev/null +++ b/tests/nodes/space_to_depth_i8/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(1); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(-3); + data.append(0); + data.append(2); + data.append(-3); + data.append(0); + data.append(0); + data.append(-2); + data.append(-3); + data.append(-1); + data.append(-2); + data.append(-1); + data.append(1); + data.append(1); + data.append(-3); + data.append(0); + data.append(-3); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/space_to_depth_u32.cairo b/tests/nodes/space_to_depth_u32.cairo new file mode 100644 
index 000000000..edc40a62b --- /dev/null +++ b/tests/nodes/space_to_depth_u32.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::U32NN; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_space_to_depth_u32() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::space_to_depth(@input_0, 2); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/space_to_depth_u32/input_0.cairo b/tests/nodes/space_to_depth_u32/input_0.cairo new file mode 100644 index 000000000..dd5c8e219 --- /dev/null +++ b/tests/nodes/space_to_depth_u32/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(4294967295); + data.append(4294967295); + data.append(0); + data.append(4294967294); + data.append(2); + data.append(4294967295); + data.append(4294967294); + data.append(4294967294); + data.append(4294967295); + data.append(4294967295); + data.append(2); + data.append(2); + data.append(1); + data.append(4294967293); + data.append(4294967294); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/space_to_depth_u32/output_0.cairo b/tests/nodes/space_to_depth_u32/output_0.cairo new file mode 100644 index 000000000..66124611e --- /dev/null +++ b/tests/nodes/space_to_depth_u32/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use 
orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(1); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(4294967295); + data.append(0); + data.append(4294967295); + data.append(2); + data.append(4294967295); + data.append(4294967294); + data.append(4294967295); + data.append(2); + data.append(2); + data.append(4294967294); + data.append(1); + data.append(4294967294); + data.append(4294967295); + data.append(4294967294); + data.append(4294967293); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} From 1a638bd89c0cf54afb3c2c735464d99064c70402 Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Wed, 24 Jan 2024 15:19:52 +0800 Subject: [PATCH 10/46] feat: Implement DepthToSpace operator --- .../neural-network/nn.depth_to_space.md | 54 ++++++++ nodegen/node/depth_to_space.py | 120 ++++++++++++++++++ src/operators/nn/core.cairo | 60 +++++++++ src/operators/nn/functional.cairo | 1 + .../nn/functional/depth_to_space.cairo | 46 +++++++ .../nn/implementations/nn_fp16x16.cairo | 4 + .../nn/implementations/nn_fp32x32.cairo | 4 + .../nn/implementations/nn_fp64x64.cairo | 4 + .../nn/implementations/nn_fp8x23.cairo | 4 + src/operators/nn/implementations/nn_i32.cairo | 4 + src/operators/nn/implementations/nn_i8.cairo | 4 + src/operators/nn/implementations/nn_u32.cairo | 4 + tests/nodes.cairo | 6 + tests/nodes/depth_to_space_fp16x16.cairo | 20 +++ .../depth_to_space_fp16x16/input_0.cairo | 31 +++++ .../depth_to_space_fp16x16/output_0.cairo | 31 +++++ tests/nodes/depth_to_space_fp8x23.cairo | 20 +++ .../nodes/depth_to_space_fp8x23/input_0.cairo | 31 +++++ .../depth_to_space_fp8x23/output_0.cairo | 31 +++++ tests/nodes/depth_to_space_i32.cairo | 20 +++ tests/nodes/depth_to_space_i32/input_0.cairo | 31 +++++ tests/nodes/depth_to_space_i32/output_0.cairo | 31 +++++ tests/nodes/depth_to_space_i8.cairo | 20 +++ tests/nodes/depth_to_space_i8/input_0.cairo 
| 31 +++++ tests/nodes/depth_to_space_i8/output_0.cairo | 31 +++++ tests/nodes/depth_to_space_u32.cairo | 20 +++ tests/nodes/depth_to_space_u32/input_0.cairo | 31 +++++ tests/nodes/depth_to_space_u32/output_0.cairo | 31 +++++ 28 files changed, 725 insertions(+) create mode 100644 docs/framework/operators/neural-network/nn.depth_to_space.md create mode 100644 nodegen/node/depth_to_space.py create mode 100644 src/operators/nn/functional/depth_to_space.cairo create mode 100644 tests/nodes/depth_to_space_fp16x16.cairo create mode 100644 tests/nodes/depth_to_space_fp16x16/input_0.cairo create mode 100644 tests/nodes/depth_to_space_fp16x16/output_0.cairo create mode 100644 tests/nodes/depth_to_space_fp8x23.cairo create mode 100644 tests/nodes/depth_to_space_fp8x23/input_0.cairo create mode 100644 tests/nodes/depth_to_space_fp8x23/output_0.cairo create mode 100644 tests/nodes/depth_to_space_i32.cairo create mode 100644 tests/nodes/depth_to_space_i32/input_0.cairo create mode 100644 tests/nodes/depth_to_space_i32/output_0.cairo create mode 100644 tests/nodes/depth_to_space_i8.cairo create mode 100644 tests/nodes/depth_to_space_i8/input_0.cairo create mode 100644 tests/nodes/depth_to_space_i8/output_0.cairo create mode 100644 tests/nodes/depth_to_space_u32.cairo create mode 100644 tests/nodes/depth_to_space_u32/input_0.cairo create mode 100644 tests/nodes/depth_to_space_u32/output_0.cairo diff --git a/docs/framework/operators/neural-network/nn.depth_to_space.md b/docs/framework/operators/neural-network/nn.depth_to_space.md new file mode 100644 index 000000000..7e23e27d0 --- /dev/null +++ b/docs/framework/operators/neural-network/nn.depth_to_space.md @@ -0,0 +1,54 @@ +# NNTrait::depth_to_space + +```rust + fn depth_to_space(tensor: @Tensor, blocksize: usize) -> Tensor; +``` + +DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. 
More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. By default, mode = DCR. In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the following order: depth, column, and then row. + +## Args + +* `tensor`(`@Tensor`) - The input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width. +* `blocksize`(`usize`) - The size of the blocks to move along [blocksize, blocksize]. +* `mode`(felt252) - DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order. + +## Returns + +A `Tensor` of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]. + +## Examples + +```rust +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I8Tensor; +use orion::numbers::{IntegerTrait, i8}; + +fn relu_example() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(4); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(i8 { mag: 1, sign: false }); + data.append(i8 { mag: 3, sign: true }); + data.append(i8 { mag: 3, sign: true }); + data.append(i8 { mag: 1, sign: false }); + data.append(i8 { mag: 1, sign: true }); + data.append(i8 { mag: 3, sign: true }); + data.append(i8 { mag: 2, sign: true }); + data.append(i8 { mag: 1, sign: true }); + data.append(i8 { mag: 1, sign: true }); + data.append(i8 { mag: 2, sign: false }); + data.append(i8 { mag: 1, sign: true }); + data.append(i8 { mag: 2, sign: true }); + data.append(i8 { mag: 3, sign: true }); + data.append(i8 { mag: 3, sign: true }); + data.append(i8 { mag: 2, sign: false }); + data.append(i8 { mag: 2, sign: false }); + let tensor = TensorTrait::new(shape.span(), data.span()); +} +>>> [[[[1, 1, 3, 3], [1, 3, 2, 3], [3, 2, 1, 1], [1, 2, 2, 2]]]] +``` diff --git 
a/nodegen/node/depth_to_space.py b/nodegen/node/depth_to_space.py new file mode 100644 index 000000000..c07af64a4 --- /dev/null +++ b/nodegen/node/depth_to_space.py @@ -0,0 +1,120 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait + +def depth_to_space(data: np.ndarray, blocksize: int = 2, mode = "DCR") -> np.ndarray: + if len(data.shape) != 4: + raise RuntimeError(f"Unexpected shape {data.shape!r}.") + b, c, h, w = data.shape + if mode == "DCR": + tmpshape = ( + b, + blocksize, + blocksize, + c // (blocksize * blocksize), + h, + w, + ) + reshaped = data.reshape(tmpshape) + transposed = np.transpose(reshaped, [0, 3, 4, 1, 5, 2]) + else: + # assert mode == "CRD" + tmpshape = ( + b, + c // (blocksize * blocksize), + blocksize, + blocksize, + h, + w, + ) + reshaped = data.reshape(tmpshape) + transposed = np.transpose(reshaped, [0, 1, 4, 2, 5, 3]) + finalshape = ( + b, + c // (blocksize * blocksize), + h * blocksize, + w * blocksize, + ) + y = np.reshape(transposed, finalshape) + return y + +class Depth_to_space(RunAll): + + @staticmethod + def fp8x23(): + x = np.random.uniform(-3, 3, (1, 4, 2, 2)).astype(np.float64) + y = depth_to_space(x) + + x = Tensor(Dtype.FP8x23, x.shape, to_fp( + x.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "depth_to_space_fp8x23" + make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'DCR')", + name, Trait.NN) + + @staticmethod + def fp16x16(): + x = np.random.uniform(-3, 3, (1, 4, 2, 2)).astype(np.float16) + y = depth_to_space(x) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "depth_to_space_fp16x16" + make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'DCR')", + name, Trait.NN) + + # @staticmethod + # def fp64x64(): + # x = np.random.uniform(-3, 3, (1, 4, 2, 
2)).astype(np.float64) + # y = depth_to_space(x) + + # x = Tensor(Dtype.FP64x64, x.shape, to_fp( + # x.flatten(), FixedImpl.FP64x64)) + # y = Tensor(Dtype.FP64x64, y.shape, to_fp( + # y.flatten(), FixedImpl.FP64x64)) + + # name = "depth_to_space_fp64x64" + # make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'DCR')", + # name, Trait.NN) + + @staticmethod + def fpi8(): + x = np.random.randint(-3, 3, (1, 4, 2, 2)).astype(np.int8) + y = depth_to_space(x) + + x = Tensor(Dtype.I8, x.shape, x.flatten()) + y = Tensor(Dtype.I8, y.shape, y.flatten()) + + name = "depth_to_space_i8" + make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'DCR')", + name, Trait.NN) + + @staticmethod + def fpi32(): + x = np.random.randint(-3, 3, (1, 4, 2, 2)).astype(np.int32) + y = depth_to_space(x) + + x = Tensor(Dtype.I32, x.shape, x.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "depth_to_space_i32" + make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'CRD')", + name, Trait.NN) + + + @staticmethod + def fpu32(): + x = np.random.randint(-3, 3, (1, 4, 2, 2)).astype(np.uint32) + y = depth_to_space(x) + + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "depth_to_space_u32" + make_test([x], y, "NNTrait::depth_to_space(@input_0, 2, 'CRD')", + name, Trait.NN) diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 3c99f4733..24df7f84b 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -617,6 +617,66 @@ trait NNTrait { /// ``` /// fn thresholded_relu(tensor: @Tensor, alpha: @T) -> Tensor; + /// # NNTrait::depth_to_space + /// + /// ```rust + /// fn depth_to_space(tensor: @Tensor, blocksize: usize) -> Tensor; + /// ``` + /// + /// DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. 
More specifically, this op outputs a copy of the input tensor where values from the depth dimension are moved in spatial blocks to the height and width dimensions. By default, mode = DCR. In the DCR mode, elements along the depth dimension from the input tensor are rearranged in the following order: depth, column, and then row. + /// + /// ## Args + /// + /// * `tensor`(`@Tensor`) - The input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width. + /// * `blocksize`(`usize`) - The size of the blocks to move along [blocksize, blocksize]. + /// * `mode`(felt252) - DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order. + /// + /// ## Returns + /// + /// A `Tensor` of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]. + /// + /// ## Examples + /// + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::{TensorTrait, Tensor}; + /// use orion::operators::tensor::{I8Tensor, I8TensorAdd}; + /// use orion::numbers::NumberTrait; + /// use orion::operators::nn::NNTrait; + /// use orion::operators::nn::I8NN; + /// use orion::numbers::FixedTrait; + /// + /// fn depth_to_space_example() -> Tensor { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(4); + /// shape.append(2); + /// shape.append(2); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(-2); + /// data.append(0); + /// data.append(-1); + /// data.append(0); + /// data.append(0); + /// data.append(-3); + /// data.append(2); + /// data.append(1); + /// data.append(-2); + /// data.append(-2); + /// data.append(0); + /// data.append(-2); + /// data.append(-1); + /// data.append(-1); + /// data.append(2); + /// data.append(2); + /// let tensor = TensorTrait::new(shape.span(), data.span()); + /// return NNTrait::depth_to_space(@tensor, 2, 'DCR'); + /// } + /// >>> [[[[-2, 0, 0, -3], [-2, -1, -2, -1], [-1, 2, 0, 1], [0, 2, -2, 2]]]] + 
/// ``` + /// + fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor; /// # NNTrait::gemm /// /// ```rust diff --git a/src/operators/nn/functional.cairo b/src/operators/nn/functional.cairo index a0fd96cc8..bcbbc78a5 100644 --- a/src/operators/nn/functional.cairo +++ b/src/operators/nn/functional.cairo @@ -10,3 +10,4 @@ mod logsoftmax; mod thresholded_relu; mod hard_sigmoid; mod gemm; +mod depth_to_space; diff --git a/src/operators/nn/functional/depth_to_space.cairo b/src/operators/nn/functional/depth_to_space.cairo new file mode 100644 index 000000000..c9efe3f66 --- /dev/null +++ b/src/operators/nn/functional/depth_to_space.cairo @@ -0,0 +1,46 @@ +use core::traits::Into; +use core::traits::TryInto; +use orion::operators::tensor::core::{Tensor, TensorTrait}; +use core::option::OptionTrait; + +use orion::numbers::fixed_point::core::FixedTrait; +use orion::numbers::NumberTrait; + +use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; +use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; + + +/// Cf: NNTrait::depth_to_space docstring +fn depth_to_space< + T, + impl TTensor: TensorTrait, + impl TAdd: Add, + impl TMul: Mul, + impl TTensorAdd: Add>, + impl TPartialOrd: PartialOrd, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop, +>( + tensor: Tensor, blocksize: usize, mode: felt252 +) -> Tensor { + assert!((tensor.shape).len() == 4, "Unexpected shape 4."); + let b = (tensor.shape).at(0); + let C = (tensor.shape).at(1); + let H = (tensor.shape).at(2); + let W = (tensor.shape).at(3); + let finalshape = array![*b, *C / (blocksize * blocksize), *H * blocksize, *W * blocksize]; + if mode == 'DCR' { + let tmpshape = array![*b, blocksize, blocksize, *C / (blocksize * blocksize), *H, *W]; + let reshaped = (tensor).reshape(target_shape: tmpshape.span()); + let transposed = reshaped.transpose(axes: array![0, 3, 4, 1, 5, 2].span()); + return 
transposed.reshape(target_shape: finalshape.span()); + } + else { + // assert mode == "CRD" + let tmpshape = array![*b, *C / (blocksize * blocksize), blocksize, blocksize, *H, *W]; + let reshaped = (tensor).reshape(target_shape: tmpshape.span()); + let transposed = reshaped.transpose(axes: array![0, 1, 4, 2, 5, 3].span()); + return transposed.reshape(target_shape: finalshape.span()); + } +} diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 785d3c9fa..254f763ff 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -61,6 +61,10 @@ impl FP16x16NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } + fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index 0427ea5f7..c22b8e54b 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -55,6 +55,10 @@ impl FP32x32NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } + fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index fec810679..ec573855d 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -55,6 +55,10 @@ impl FP64x64NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } + fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + 
functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index 9f5416121..17bc9388d 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -59,6 +59,10 @@ impl FP8x23NN of NNTrait { functional::hard_sigmoid::hard_sigmoid(*tensor, alpha, beta) } + fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index 1db66a1c6..d121a23df 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -50,6 +50,10 @@ impl I32NN of NNTrait { panic(array!['not supported!']) } + fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index e67bb7504..8d636c138 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -50,6 +50,10 @@ impl I8NN of NNTrait { panic(array!['not supported!']) } + fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 370880e8d..36ba938c4 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -50,6 +50,10 @@ impl U32NN of NNTrait { panic(array!['not 
supported!']) } + fn depth_to_space(tensor: @Tensor, blocksize: usize, mode: felt252) -> Tensor { + functional::depth_to_space::depth_to_space(*tensor, blocksize, mode) + } + fn gemm( A: Tensor, B: Tensor, diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 6c70b42cb..989aafedc 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -936,3 +936,9 @@ mod split_fp16x16_2d_variable_parts; mod split_fp16x16_zero_size; mod split_fp16x16_1d_uneven; mod split_fp16x16_2d_uneven; +mod depth_to_space_fp16x16; +mod depth_to_space_fp8x23; +mod depth_to_space_i32; +mod depth_to_space_i8; +mod depth_to_space_u32; + diff --git a/tests/nodes/depth_to_space_fp16x16.cairo b/tests/nodes/depth_to_space_fp16x16.cairo new file mode 100644 index 000000000..626bc484f --- /dev/null +++ b/tests/nodes/depth_to_space_fp16x16.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::FP16x16NN; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::utils::{assert_eq, assert_seq_eq}; + +#[test] +#[available_gas(2000000000)] +fn test_depth_to_space_fp16x16() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::depth_to_space(@input_0, 2, 'DCR'); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/depth_to_space_fp16x16/input_0.cairo b/tests/nodes/depth_to_space_fp16x16/input_0.cairo new file mode 100644 index 000000000..aa9c1825f --- /dev/null +++ b/tests/nodes/depth_to_space_fp16x16/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(4); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 172672, sign: false 
}); + data.append(FP16x16 { mag: 110976, sign: true }); + data.append(FP16x16 { mag: 102912, sign: true }); + data.append(FP16x16 { mag: 146944, sign: true }); + data.append(FP16x16 { mag: 159232, sign: false }); + data.append(FP16x16 { mag: 130112, sign: false }); + data.append(FP16x16 { mag: 106304, sign: false }); + data.append(FP16x16 { mag: 26832, sign: false }); + data.append(FP16x16 { mag: 26800, sign: false }); + data.append(FP16x16 { mag: 172928, sign: true }); + data.append(FP16x16 { mag: 177280, sign: true }); + data.append(FP16x16 { mag: 102208, sign: false }); + data.append(FP16x16 { mag: 11808, sign: true }); + data.append(FP16x16 { mag: 111488, sign: true }); + data.append(FP16x16 { mag: 53120, sign: true }); + data.append(FP16x16 { mag: 165888, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/depth_to_space_fp16x16/output_0.cairo b/tests/nodes/depth_to_space_fp16x16/output_0.cairo new file mode 100644 index 000000000..ea5dc09bd --- /dev/null +++ b/tests/nodes/depth_to_space_fp16x16/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 172672, sign: false }); + data.append(FP16x16 { mag: 159232, sign: false }); + data.append(FP16x16 { mag: 110976, sign: true }); + data.append(FP16x16 { mag: 130112, sign: false }); + data.append(FP16x16 { mag: 26800, sign: false }); + data.append(FP16x16 { mag: 11808, sign: true }); + data.append(FP16x16 { mag: 172928, sign: true }); + data.append(FP16x16 { mag: 111488, sign: true }); + data.append(FP16x16 { mag: 102912, sign: true }); + data.append(FP16x16 { mag: 106304, sign: false }); + 
data.append(FP16x16 { mag: 146944, sign: true }); + data.append(FP16x16 { mag: 26832, sign: false }); + data.append(FP16x16 { mag: 177280, sign: true }); + data.append(FP16x16 { mag: 53120, sign: true }); + data.append(FP16x16 { mag: 102208, sign: false }); + data.append(FP16x16 { mag: 165888, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/depth_to_space_fp8x23.cairo b/tests/nodes/depth_to_space_fp8x23.cairo new file mode 100644 index 000000000..7069e0eaa --- /dev/null +++ b/tests/nodes/depth_to_space_fp8x23.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::FP8x23NN; +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_depth_to_space_fp8x23() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::depth_to_space(@input_0, 2, 'DCR'); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/depth_to_space_fp8x23/input_0.cairo b/tests/nodes/depth_to_space_fp8x23/input_0.cairo new file mode 100644 index 000000000..f9309afc8 --- /dev/null +++ b/tests/nodes/depth_to_space_fp8x23/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(4); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 2107888, sign: true }); + data.append(FP8x23 { mag: 18609267, sign: false }); + data.append(FP8x23 { mag: 21110896, sign: false }); + data.append(FP8x23 { mag: 20658169, sign: true }); + data.append(FP8x23 { mag: 15019497, sign: false }); + data.append(FP8x23 { mag: 18600854, sign: false 
}); + data.append(FP8x23 { mag: 17219045, sign: false }); + data.append(FP8x23 { mag: 5826906, sign: false }); + data.append(FP8x23 { mag: 1835376, sign: false }); + data.append(FP8x23 { mag: 3485937, sign: false }); + data.append(FP8x23 { mag: 23249935, sign: true }); + data.append(FP8x23 { mag: 428809, sign: false }); + data.append(FP8x23 { mag: 20996700, sign: false }); + data.append(FP8x23 { mag: 7565588, sign: true }); + data.append(FP8x23 { mag: 15581476, sign: true }); + data.append(FP8x23 { mag: 7136954, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/depth_to_space_fp8x23/output_0.cairo b/tests/nodes/depth_to_space_fp8x23/output_0.cairo new file mode 100644 index 000000000..c81501ad1 --- /dev/null +++ b/tests/nodes/depth_to_space_fp8x23/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 2107888, sign: true }); + data.append(FP8x23 { mag: 15019497, sign: false }); + data.append(FP8x23 { mag: 18609267, sign: false }); + data.append(FP8x23 { mag: 18600854, sign: false }); + data.append(FP8x23 { mag: 1835376, sign: false }); + data.append(FP8x23 { mag: 20996700, sign: false }); + data.append(FP8x23 { mag: 3485937, sign: false }); + data.append(FP8x23 { mag: 7565588, sign: true }); + data.append(FP8x23 { mag: 21110896, sign: false }); + data.append(FP8x23 { mag: 17219045, sign: false }); + data.append(FP8x23 { mag: 20658169, sign: true }); + data.append(FP8x23 { mag: 5826906, sign: false }); + data.append(FP8x23 { mag: 23249935, sign: true }); + data.append(FP8x23 { mag: 15581476, sign: true }); + data.append(FP8x23 { mag: 428809, 
sign: false }); + data.append(FP8x23 { mag: 7136954, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/depth_to_space_i32.cairo b/tests/nodes/depth_to_space_i32.cairo new file mode 100644 index 000000000..5c237c959 --- /dev/null +++ b/tests/nodes/depth_to_space_i32.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::I32NN; +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_depth_to_space_i32() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::depth_to_space(@input_0, 2, 'CRD'); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/depth_to_space_i32/input_0.cairo b/tests/nodes/depth_to_space_i32/input_0.cairo new file mode 100644 index 000000000..5c8487dd4 --- /dev/null +++ b/tests/nodes/depth_to_space_i32/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(4); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(-1); + data.append(-2); + data.append(2); + data.append(0); + data.append(-1); + data.append(-3); + data.append(-3); + data.append(-2); + data.append(0); + data.append(-3); + data.append(2); + data.append(2); + data.append(1); + data.append(0); + data.append(1); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/depth_to_space_i32/output_0.cairo b/tests/nodes/depth_to_space_i32/output_0.cairo new file mode 100644 index 000000000..0c44c753c --- /dev/null +++ b/tests/nodes/depth_to_space_i32/output_0.cairo @@ -0,0 +1,31 @@ +use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(-1); + data.append(-1); + data.append(-2); + data.append(-3); + data.append(0); + data.append(1); + data.append(-3); + data.append(0); + data.append(2); + data.append(-3); + data.append(0); + data.append(-2); + data.append(2); + data.append(1); + data.append(2); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/depth_to_space_i8.cairo b/tests/nodes/depth_to_space_i8.cairo new file mode 100644 index 000000000..0f6572c79 --- /dev/null +++ b/tests/nodes/depth_to_space_i8.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::nn::I8NN; + +#[test] +#[available_gas(2000000000)] +fn test_depth_to_space_i8() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::depth_to_space(@input_0, 2, 'DCR'); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/depth_to_space_i8/input_0.cairo b/tests/nodes/depth_to_space_i8/input_0.cairo new file mode 100644 index 000000000..c63d595a0 --- /dev/null +++ b/tests/nodes/depth_to_space_i8/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(4); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(-2); + 
data.append(-3); + data.append(0); + data.append(-1); + data.append(0); + data.append(-1); + data.append(2); + data.append(1); + data.append(2); + data.append(-3); + data.append(-2); + data.append(0); + data.append(2); + data.append(0); + data.append(1); + data.append(1); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/depth_to_space_i8/output_0.cairo b/tests/nodes/depth_to_space_i8/output_0.cairo new file mode 100644 index 000000000..dcb54c3ea --- /dev/null +++ b/tests/nodes/depth_to_space_i8/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(-2); + data.append(0); + data.append(-3); + data.append(-1); + data.append(2); + data.append(2); + data.append(-3); + data.append(0); + data.append(0); + data.append(2); + data.append(-1); + data.append(1); + data.append(-2); + data.append(1); + data.append(0); + data.append(1); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/depth_to_space_u32.cairo b/tests/nodes/depth_to_space_u32.cairo new file mode 100644 index 000000000..298e66cef --- /dev/null +++ b/tests/nodes/depth_to_space_u32.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::nn::U32NN; + +#[test] +#[available_gas(2000000000)] +fn test_depth_to_space_u32() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::depth_to_space(@input_0, 2, 'CRD'); + + assert_eq(y_0, z_0); +} diff --git 
a/tests/nodes/depth_to_space_u32/input_0.cairo b/tests/nodes/depth_to_space_u32/input_0.cairo new file mode 100644 index 000000000..acd483661 --- /dev/null +++ b/tests/nodes/depth_to_space_u32/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(4); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(4294967293); + data.append(4294967294); + data.append(2); + data.append(4294967293); + data.append(2); + data.append(2); + data.append(2); + data.append(2); + data.append(4294967295); + data.append(4294967293); + data.append(4294967293); + data.append(0); + data.append(0); + data.append(4294967294); + data.append(2); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/depth_to_space_u32/output_0.cairo b/tests/nodes/depth_to_space_u32/output_0.cairo new file mode 100644 index 000000000..6321339c1 --- /dev/null +++ b/tests/nodes/depth_to_space_u32/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(4294967293); + data.append(2); + data.append(4294967294); + data.append(2); + data.append(4294967295); + data.append(0); + data.append(4294967293); + data.append(4294967294); + data.append(2); + data.append(2); + data.append(4294967293); + data.append(2); + data.append(4294967293); + data.append(2); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) 
+} From 35ed707ecc89be177d882f77a0f0644f296507ce Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Thu, 25 Jan 2024 14:29:12 +0800 Subject: [PATCH 11/46] feat: Implement ReverseSequence operator --- .../tensor/tensor.reverse_sequence.md | 44 +++++ nodegen/node/reverse_sequence.py | 94 +++++++++++ src/operators/tensor/core.cairo | 51 ++++++ .../tensor/implementations/tensor_bool.cairo | 6 + .../implementations/tensor_complex64.cairo | 6 + .../implementations/tensor_fp16x16.cairo | 6 + .../implementations/tensor_fp16x16wide.cairo | 6 + .../implementations/tensor_fp32x32.cairo | 6 + .../implementations/tensor_fp64x64.cairo | 6 + .../implementations/tensor_fp8x23.cairo | 6 + .../implementations/tensor_fp8x23wide.cairo | 6 + .../tensor/implementations/tensor_i32.cairo | 6 + .../tensor/implementations/tensor_i8.cairo | 6 + .../tensor/implementations/tensor_u32.cairo | 6 + src/operators/tensor/manipulation.cairo | 1 + .../manipulation/reverse_sequence.cairo | 152 ++++++++++++++++++ ...equence_fp16x16_2d_batch_equal_parts.cairo | 21 +++ .../input_0.cairo | 29 ++++ .../output_0.cairo | 29 ++++ ...sequence_fp16x16_2d_time_equal_parts.cairo | 21 +++ .../input_0.cairo | 29 ++++ .../output_0.cairo | 29 ++++ ...se_sequence_i32_2d_batch_equal_parts.cairo | 21 +++ .../input_0.cairo | 28 ++++ .../output_0.cairo | 30 ++++ ...rse_sequence_i32_2d_time_equal_parts.cairo | 21 +++ .../input_0.cairo | 29 ++++ .../output_0.cairo | 29 ++++ ...rse_sequence_i8_2d_batch_equal_parts.cairo | 21 +++ .../input_0.cairo | 29 ++++ .../output_0.cairo | 28 ++++ ...erse_sequence_i8_2d_time_equal_parts.cairo | 21 +++ .../input_0.cairo | 30 ++++ .../output_0.cairo | 29 ++++ ...se_sequence_u32_2d_batch_equal_parts.cairo | 20 +++ .../input_0.cairo | 28 ++++ .../output_0.cairo | 28 ++++ ...rse_sequence_u32_2d_time_equal_parts.cairo | 20 +++ .../input_0.cairo | 28 ++++ .../output_0.cairo | 28 ++++ .../reverse_sequence_u32_zero_size.cairo | 20 +++ .../input_0.cairo | 12 ++ .../output_0.cairo | 12 ++ 43 files 
changed, 1078 insertions(+) create mode 100644 docs/framework/operators/tensor/tensor.reverse_sequence.md create mode 100644 nodegen/node/reverse_sequence.py create mode 100644 src/operators/tensor/manipulation/reverse_sequence.cairo create mode 100644 tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_i32_2d_batch_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_i32_2d_batch_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_i32_2d_batch_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_i32_2d_time_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_i32_2d_time_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_i32_2d_time_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_i8_2d_batch_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_i8_2d_batch_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_i8_2d_batch_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_i8_2d_time_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_i8_2d_time_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_i8_2d_time_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_2d_batch_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_u32_2d_batch_equal_parts/input_0.cairo create mode 100644 
tests/nodes/reverse_sequence_u32_2d_batch_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_2d_time_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_u32_2d_time_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_2d_time_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_zero_size.cairo create mode 100644 tests/nodes/reverse_sequence_u32_zero_size/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_zero_size/output_0.cairo diff --git a/docs/framework/operators/tensor/tensor.reverse_sequence.md b/docs/framework/operators/tensor/tensor.reverse_sequence.md new file mode 100644 index 000000000..d03eaf7b8 --- /dev/null +++ b/docs/framework/operators/tensor/tensor.reverse_sequence.md @@ -0,0 +1,44 @@ +# tensor.reverse_sequence + +```rust + fn reverse_sequence(self: @Array>, sequence_lens: @Tensor, batch_axis: Option, time_axis: Option) -> + Array>; +``` + +Reverse batch of sequences having different lengths specified by sequence_lens. + +* `self`(`@Array>`) - Tensor of rank r >= 2. +* `sequence_lens`(`@Tensor`) - Tensor specifying lengths of the sequences in a batch. It has shape [batch_size]. +* `batch_axis`(`Option`) - (Optional) Specify which axis is batch axis. Must be one of 1 (default), or 0. +* `time_axis`(`Option`) - (Optional) Specify which axis is time axis. Must be one of 0 (default), or 1. + +## Panics + +* Panics if the 'batch_axis' == 'time_axis'. +* Panics if the 'batch_axis' and 'time_axis' are not 0 and 1. +* Panics if the 'sequence_len' exceeding the sequence range. + +## Returns + +Tensor with same shape of input. 
+ +## Example +```rust +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; +use core::option::OptionTrait; +fn reverse_sequence_example() -> Tensor { + let tensor: Tensor = TensorTrait::::new( + shape: array![4,4].span(), + data: array![ + 0, 1, 2, 3, 4, 5, 6, 7,8,9,10,11,12,13,14,15,16 + ].span(), + ); + let sequence_lens = TensorTrait::::new(array![4,4].span(), array![1,2,3,4].span()); + let batch_axis = Option::Some(0); + let time_axis = Option::Some(1); + // We can call `split` function as follows. + return tensor.reverse_sequence(sequence_lens, batch_axis, time_axis); +} +>>> [0,1,2,3,5,4,6,7,10,9,8,11,15,14,13,12] +``` diff --git a/nodegen/node/reverse_sequence.py b/nodegen/node/reverse_sequence.py new file mode 100644 index 000000000..f446268b3 --- /dev/null +++ b/nodegen/node/reverse_sequence.py @@ -0,0 +1,94 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl + + +class Reverse_sequence(RunAll): + @staticmethod + def Reverse_sequence_u32(): + def reverse_sequence_2d_batch(): + x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=np.uint32).reshape((4, 4)) + y = np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 13, 12], dtype=np.uint32).reshape((4, 4)) + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = Tensor(Dtype.U32, y.shape, y.flatten()) + name = "reverse_sequence_u32_2d_batch_equal_parts" + + make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", name) + + def reverse_sequence_2d_time(): + x = np.array([0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], dtype=np.uint32).reshape((4, 4)) + y = np.array([3, 6, 9, 12, 2, 5, 8, 13, 1, 4, 10, 14, 0, 7, 11, 15], dtype=np.uint32).reshape((4, 4)) + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = Tensor(Dtype.U32, y.shape, y.flatten()) + name = 
"reverse_sequence_u32_2d_time_equal_parts" + make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", name) + reverse_sequence_2d_batch() + reverse_sequence_2d_time() + + @staticmethod + def Reverse_sequence_i32(): + def reverse_sequence_2d_batch(): + x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=np.int32).reshape((4, 4)) + y = np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 13, 12], dtype=np.int32).reshape((4, 4)) + _x = Tensor(Dtype.I32, x.shape, x.flatten()) + _y = Tensor(Dtype.I32, y.shape, y.flatten()) + name = "reverse_sequence_i32_2d_batch_equal_parts" + + make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", name) + + def reverse_sequence_2d_time(): + x = np.array([0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], dtype=np.int32).reshape((4, 4)) + y = np.array([3, 6, 9, 12, 2, 5, 8, 13, 1, 4, 10, 14, 0, 7, 11, 15], dtype=np.int32).reshape((4, 4)) + _x = Tensor(Dtype.I32, x.shape, x.flatten()) + _y = Tensor(Dtype.I32, y.shape, y.flatten()) + name = "reverse_sequence_i32_2d_time_equal_parts" + make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", name) + reverse_sequence_2d_batch() + reverse_sequence_2d_time() + + @staticmethod + def Reverse_sequence_i8(): + def reverse_sequence_2d_batch(): + x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=np.int8).reshape((4, 4)) + y = np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 13, 12], dtype=np.int8).reshape((4, 4)) + _x = Tensor(Dtype.I8, x.shape, x.flatten()) + _y = Tensor(Dtype.I8, y.shape, y.flatten()) + name = "reverse_sequence_i8_2d_batch_equal_parts" + + make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), 
Option::Some(0), Option::Some(1))", name) + + def reverse_sequence_2d_time(): + x = np.array([0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], dtype=np.uint32).reshape((4, 4)) + y = np.array([3, 6, 9, 12, 2, 5, 8, 13, 1, 4, 10, 14, 0, 7, 11, 15], dtype=np.uint32).reshape((4, 4)) + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = Tensor(Dtype.U32, y.shape, y.flatten()) + name = "reverse_sequence_i8_2d_time_equal_parts" + make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", name) + reverse_sequence_2d_batch() + reverse_sequence_2d_time() + + def Reverse_sequence_fp16x16(): + def reverse_sequence_2d_batch(): + x = to_fp(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=np.int64).reshape(4, 4), FixedImpl.FP16x16) + y = to_fp(np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 13, 12], dtype=np.int64).reshape(4, 4), FixedImpl.FP16x16) + _x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) + _y = Tensor(Dtype.FP16x16, y.shape, y.flatten()) + name = "reverse_sequence_fp16x16_2d_batch_equal_parts" + make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", name) + def reverse_sequence_2d_time(): + x = to_fp(np.array([0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], dtype=np.int64).reshape(4, 4), FixedImpl.FP16x16) + y = to_fp(np.array([3, 6, 9, 12, 2, 5, 8, 13, 1, 4, 10, 14, 0, 7, 11, 15], dtype=np.int64).reshape(4, 4), FixedImpl.FP16x16) + _x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) + _y = Tensor(Dtype.FP16x16, y.shape, y.flatten()) + name = "reverse_sequence_fp16x16_2d_time_equal_parts" + make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", name) + reverse_sequence_2d_batch() + reverse_sequence_2d_time() + + def reverse_sequence_zero_size(): + x = 
np.array([]).astype(np.uint32) + y = np.array([]).astype(np.uint32) + _x = Tensor(Dtype.U32, x.shape, y.flatten()) + _y = Tensor(Dtype.U32, x.shape, y.flatten()) + name = "reverse_sequence_u32_zero_size" + make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![].span(), array![].span()), Option::Some(1), Option::Some(0))", name) \ No newline at end of file diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 70344eb97..b959db427 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -118,6 +118,8 @@ impl TensorSerde, impl TDrop: Drop> of Serde { /// # tensor.new /// @@ -5162,6 +5164,55 @@ trait TensorTrait { fn split( self: @Tensor, axis: usize, num_outputs: Option, spl: Option> ) -> Array>; + + /// # tensor.reverse_sequence + /// + /// ```rust + /// fn reverse_sequence(self: @Array>, sequence_lens: @Tensor, batch_axis: Option, time_axis: Option) -> + /// Array>; + /// ``` + /// + /// Reverse batch of sequences having different lengths specified by sequence_lens. + /// + /// * `self`(`@Array>`) - Tensor of rank r >= 2. + /// * `sequence_lens`(`@Tensor`) - Tensor specifying lengths of the sequences in a batch. It has shape [batch_size]. + /// * `batch_axis`(`Option`) - (Optional) Specify which axis is batch axis. Must be one of 1 (default), or 0. + /// * `time_axis`(`Option`) - (Optional) Specify which axis is time axis. Must be one of 0 (default), or 1. + /// + /// ## Panics + /// + /// * Panics if the 'batch_axis' == 'time_axis'. + /// * Panics if the 'batch_axis' and 'time_axis' are not 0 and 1. + /// * Panics if the 'sequence_len' exceeding the sequence range. + /// + /// ## Returns + /// + /// Tensor with same shape of input. 
+ /// + /// ## Example + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; + /// use core::option::OptionTrait; + /// fn reverse_sequence_example() -> Tensor { + /// let tensor: Tensor = TensorTrait::::new( + /// shape: array![4,4].span(), + /// data: array![ + /// 0, 1, 2, 3, 4, 5, 6, 7,8,9,10,11,12,13,14,15,16 + /// ].span(), + /// ); + /// let sequence_lens = TensorTrait::::new(array![4,4].span(), array![1,2,3,4].span()); + /// let batch_axis = Option::Some(0); + /// let time_axis = Option::Some(1); + /// // We can call `split` function as follows. + /// return tensor.reverse_sequence(sequence_lens, batch_axis, time_axis); + /// } + /// >>> [0,1,2,3,5,4,6,7,10,9,8,11,15,14,13,12] + /// ``` + /// + fn reverse_sequence( + self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + ) -> Tensor; } /// Cf: TensorTrait::new docstring diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 3da518ec8..0ab62f3bb 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -484,6 +484,12 @@ impl BoolTensor of TensorTrait { ) -> Array> { panic(array!['not supported!']) } + + fn reverse_sequence( + self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + ) -> Tensor { + panic(array!['not supported!']) + } } /// Implements partial equal for two `Tensor` using the `PartialEq` trait. 
diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 74acba5c6..77cdca826 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -498,6 +498,12 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } + fn reverse_sequence( + self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + ) -> Tensor { + panic(array!['not supported!']) + } + fn resize( self: @Tensor, roi: Option>, diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index cdc50bc4f..ea28604e3 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -560,6 +560,12 @@ impl FP16x16Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn reverse_sequence( + self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + ) -> Tensor { + manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index b0dc2d858..6a62ad444 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -512,6 +512,12 @@ impl FP16x16WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn reverse_sequence( + self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + ) -> Tensor { + manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 4f862fd0e..36e1b82d0 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -561,6 +561,12 @@ impl FP32x32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn reverse_sequence( + self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + ) -> Tensor { + manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 1fe5591fc..0fbc8d3ad 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -561,6 +561,12 @@ impl FP64x64Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn reverse_sequence( + self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + ) -> Tensor { + manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 77d183c21..c8da0c128 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -559,6 +559,12 @@ impl FP8x23Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn reverse_sequence( + self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + ) -> Tensor { + manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index ff6069087..0de8e97fc 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -498,6 +498,12 @@ impl FP8x23WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn reverse_sequence( + self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + ) -> Tensor { + manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 50383d2df..091c97a3b 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -541,6 +541,12 @@ impl I32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn reverse_sequence( + self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + ) -> Tensor { + manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 7e81d90eb..996f728f9 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -539,6 +539,12 @@ impl I8Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn reverse_sequence( + self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + ) -> Tensor { + manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 5a926a538..0c6ec584c 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -482,6 +482,12 @@ impl U32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn reverse_sequence( + self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option + ) -> Tensor { + manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/manipulation.cairo b/src/operators/tensor/manipulation.cairo index 584eae027..47ba292fa 100644 --- a/src/operators/tensor/manipulation.cairo +++ b/src/operators/tensor/manipulation.cairo @@ -1,2 +1,3 @@ mod unique; mod split; +mod reverse_sequence; \ No newline at end of file diff --git a/src/operators/tensor/manipulation/reverse_sequence.cairo b/src/operators/tensor/manipulation/reverse_sequence.cairo new file mode 100644 index 000000000..5bfde0d60 --- /dev/null +++ b/src/operators/tensor/manipulation/reverse_sequence.cairo @@ -0,0 +1,152 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +/// Cf: TensorTrait::reverse_sequence docstring +fn reverse_sequence< + T, + impl TTensor: TensorTrait, + impl TCopy: Copy, + impl TDrop: Drop +>( + self: @Tensor, + sequence_lens: Tensor, + batch_axis: Option, + time_axis: Option +) -> Tensor{ + let shape = *self.shape; + let mut data: Array = array![]; + if (*self.data).len() == 0 { + data = ArrayTrait::::new(); + } else { + let has_batch_axis: usize = match batch_axis { + Option::Some(value) => { + assert!((value != 0) || (value != 1), "batch_axis must be one of 1 or 0."); + value + }, + Option::None => 0, + }; + let has_time_axis: usize = match time_axis { + Option::Some(value) => { + assert!((value != 0) || (value != 1), "time_axis must be one of 1 or 0."); + value + }, + Option::None => 1, + }; + assert!(has_batch_axis != has_time_axis, "batch_axis and time_axis cannot be equal"); + + let control: bool = if has_batch_axis == 0 && has_time_axis == 1 { + true + } else { + false + }; + + let mut index: Array = reverse_index(*self.shape, sequence_lens, control); + // let shape = self.shape; + // let mut data = ArrayTrait::::new(); + loop { + match index.pop_front() { + Option::Some(ele) => { + data.append(*((*self).data).at(ele)); + }, + Option::None(_) => { + break; + } + } + }; + } + + TensorTrait::::new(shape, data.span()) +} + + +fn 
reverse_index( + shape: Span, sequence_lens: Tensor, control: bool +) -> Array { + let mut result = ArrayTrait::::new(); + let x: usize = *shape.at(0); + let y: usize = *shape.at(1); + + if control { + //[i, slice] + assert!(sequence_lens.data.len() <= x,"The length of sequence_lens cannot exceed batch_axis"); + let mut i: usize = 0; + loop { + if i >= x { + break; + } + + let reverse: usize = *(sequence_lens.data).at(i); + assert!(reverse <= y && reverse >= 1, "sequence_lens must be greater than one and less than batch_size"); + let mut j: usize = reverse - 1; + loop { + + if j == 0 { + result.append(i * y + j); + break; + } + result.append(i * y + j); + j -= 1; + }; + + let current_index_len: usize = (i + 1) * y - 1; + let mut j: usize = result.len(); + loop { + if j > current_index_len { + break; + } + result.append(j); + j += 1; + }; + i += 1; + }; + } else { + // [slice, i] + assert!(sequence_lens.data.len() <= y,"The length of sequence_lens cannot exceed time_axis"); + let mut tmp = ArrayTrait::::new(); + let mut i: usize = 0; + loop { + if i >= y { + break; + } + let reverse: usize = *(sequence_lens.data).at(i); + assert!(reverse <= x && reverse >= 1, "sequence_lens must be greater than one and less than batch_size"); + + let mut j: usize = reverse - 1; + loop { + if j == 0 { + tmp.append(j * y + i); + break; + } + tmp.append(j * y + i); + j -= 1; + }; + + let mut j: usize = reverse; + loop { + if j > x - 1 { + break; + } + tmp.append(j * y + i); + j += 1; + }; + i += 1; + }; + let tmp = tmp.span(); + let mut i : usize = 0; + loop { + if i >= x { + break; + } + let mut j: usize = 0; + loop { + if j >= y { + break; + } + result.append(*tmp.at(j * y + i)); + j += 1; + }; + i += 1; + } + } + result +} \ No newline at end of file diff --git a/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts.cairo new file mode 100644 index 000000000..9dcfd511c --- /dev/null +++ 
b/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::FP16x16TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_fp16x16_2d_batch_equal_parts() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts/input_0.cairo new file mode 100644 index 000000000..be4a33330 --- /dev/null +++ b/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, 
sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts/output_0.cairo new file mode 100644 index 000000000..f306a58d5 --- /dev/null +++ b/tests/nodes/reverse_sequence_fp16x16_2d_batch_equal_parts/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts.cairo b/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts.cairo new file mode 100644 index 
000000000..518db31ca --- /dev/null +++ b/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::FP16x16TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_fp16x16_2d_time_equal_parts() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts/input_0.cairo new file mode 100644 index 000000000..b9a894740 --- /dev/null +++ b/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + 
data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts/output_0.cairo new file mode 100644 index 000000000..aefecdffe --- /dev/null +++ b/tests/nodes/reverse_sequence_fp16x16_2d_time_equal_parts/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16Tensor; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts.cairo new file mode 
100644 index 000000000..350607eae --- /dev/null +++ b/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::I32Tensor; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_i32_2d_batch_equal_parts() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts/input_0.cairo new file mode 100644 index 000000000..d6f64f2b9 --- /dev/null +++ b/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts/input_0.cairo @@ -0,0 +1,28 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + data.append(13); + data.append(14); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts/output_0.cairo new file mode 100644 index 000000000..9b7f776c8 --- /dev/null +++ 
b/tests/nodes/reverse_sequence_i32_2d_batch_equal_parts/output_0.cairo @@ -0,0 +1,30 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32Tensor; +use orion::numbers::NumberTrait; + + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(5); + data.append(4); + data.append(6); + data.append(7); + data.append(10); + data.append(9); + data.append(8); + data.append(11); + data.append(15); + data.append(14); + data.append(13); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i32_2d_time_equal_parts.cairo b/tests/nodes/reverse_sequence_i32_2d_time_equal_parts.cairo new file mode 100644 index 000000000..dd47c062e --- /dev/null +++ b/tests/nodes/reverse_sequence_i32_2d_time_equal_parts.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::I32Tensor; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_i32_2d_time_equal_parts() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_i32_2d_time_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_i32_2d_time_equal_parts/input_0.cairo new file mode 100644 index 000000000..70982f3c2 --- /dev/null +++ b/tests/nodes/reverse_sequence_i32_2d_time_equal_parts/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, 
SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(4); + data.append(8); + data.append(12); + data.append(1); + data.append(5); + data.append(9); + data.append(13); + data.append(2); + data.append(6); + data.append(10); + data.append(14); + data.append(3); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i32_2d_time_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_i32_2d_time_equal_parts/output_0.cairo new file mode 100644 index 000000000..e98c2ee03 --- /dev/null +++ b/tests/nodes/reverse_sequence_i32_2d_time_equal_parts/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32Tensor; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(3); + data.append(6); + data.append(9); + data.append(12); + data.append(2); + data.append(5); + data.append(8); + data.append(13); + data.append(1); + data.append(4); + data.append(10); + data.append(14); + data.append(0); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts.cairo new file mode 100644 index 000000000..7a9ebc438 --- /dev/null +++ b/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32Tensor; +use 
orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::I8Tensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_i8_2d_batch_equal_parts() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts/input_0.cairo new file mode 100644 index 000000000..692195bec --- /dev/null +++ b/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + data.append(13); + data.append(14); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts/output_0.cairo new file mode 100644 index 000000000..6b9dde9d7 --- /dev/null +++ b/tests/nodes/reverse_sequence_i8_2d_batch_equal_parts/output_0.cairo @@ -0,0 +1,28 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I8Tensor; + +fn output_0() -> 
Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0,); + data.append(1,); + data.append(2,); + data.append(3,); + data.append(5,); + data.append(4,); + data.append(6,); + data.append(7,); + data.append(10); + data.append(9,); + data.append(8,); + data.append(11); + data.append(15); + data.append(14); + data.append(13); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i8_2d_time_equal_parts.cairo b/tests/nodes/reverse_sequence_i8_2d_time_equal_parts.cairo new file mode 100644 index 000000000..e99616d89 --- /dev/null +++ b/tests/nodes/reverse_sequence_i8_2d_time_equal_parts.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::I8Tensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_i8_2d_time_equal_parts() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_i8_2d_time_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_i8_2d_time_equal_parts/input_0.cairo new file mode 100644 index 000000000..38f628042 --- /dev/null +++ b/tests/nodes/reverse_sequence_i8_2d_time_equal_parts/input_0.cairo @@ -0,0 +1,30 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = 
ArrayTrait::new(); + data.append(0); + data.append(4); + data.append(8); + data.append(12); + data.append(1); + data.append(5); + data.append(9); + data.append(13); + data.append(2); + data.append(6); + data.append(10); + data.append(14); + data.append(3); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i8_2d_time_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_i8_2d_time_equal_parts/output_0.cairo new file mode 100644 index 000000000..eea151ef2 --- /dev/null +++ b/tests/nodes/reverse_sequence_i8_2d_time_equal_parts/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(3); + data.append(6); + data.append(9); + data.append(12); + data.append(2); + data.append(5); + data.append(8); + data.append(13); + data.append(1); + data.append(4); + data.append(10); + data.append(14); + data.append(0); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts.cairo new file mode 100644 index 000000000..9dcfc9735 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_u32_2d_batch_equal_parts() { + let input_0 = 
input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts/input_0.cairo new file mode 100644 index 000000000..124eacb27 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts/input_0.cairo @@ -0,0 +1,28 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + data.append(13); + data.append(14); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts/output_0.cairo new file mode 100644 index 000000000..402b698c1 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_2d_batch_equal_parts/output_0.cairo @@ -0,0 +1,28 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(5); + data.append(4); + data.append(6); + data.append(7); + data.append(10); + data.append(9); + data.append(8); + data.append(11); + data.append(15); + data.append(14); + 
data.append(13); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_2d_time_equal_parts.cairo b/tests/nodes/reverse_sequence_u32_2d_time_equal_parts.cairo new file mode 100644 index 000000000..d89e73242 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_2d_time_equal_parts.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_u32_2d_time_equal_parts() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_u32_2d_time_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_u32_2d_time_equal_parts/input_0.cairo new file mode 100644 index 000000000..947e999de --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_2d_time_equal_parts/input_0.cairo @@ -0,0 +1,28 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(4); + data.append(8); + data.append(12); + data.append(1); + data.append(5); + data.append(9); + data.append(13); + data.append(2); + data.append(6); + data.append(10); + data.append(14); + data.append(3); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_2d_time_equal_parts/output_0.cairo 
b/tests/nodes/reverse_sequence_u32_2d_time_equal_parts/output_0.cairo new file mode 100644 index 000000000..38ac25940 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_2d_time_equal_parts/output_0.cairo @@ -0,0 +1,28 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32Tensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(3); + data.append(6); + data.append(9); + data.append(12); + data.append(2); + data.append(5); + data.append(8); + data.append(13); + data.append(1); + data.append(4); + data.append(10); + data.append(14); + data.append(0); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_zero_size.cairo b/tests/nodes/reverse_sequence_u32_zero_size.cairo new file mode 100644 index 000000000..cabda40b5 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_zero_size.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_u32_zero_size() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![0].span(), array![].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_u32_zero_size/input_0.cairo b/tests/nodes/reverse_sequence_u32_zero_size/input_0.cairo new file mode 100644 index 000000000..0d9d86004 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_zero_size/input_0.cairo @@ -0,0 +1,12 @@ +use core::array::{ArrayTrait, 
SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(0); + + let mut data = ArrayTrait::new(); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_zero_size/output_0.cairo b/tests/nodes/reverse_sequence_u32_zero_size/output_0.cairo new file mode 100644 index 000000000..ebe0ce3f2 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_zero_size/output_0.cairo @@ -0,0 +1,12 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(0); + + let mut data = ArrayTrait::new(); + TensorTrait::new(shape.span(), data.span()) +} From 071dd641ca5a890b353d190e523f6d963a6ac014 Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Fri, 26 Jan 2024 17:02:35 +0800 Subject: [PATCH 12/46] feat: Implement optional correlation operator optional, optional_has_element, optional_get_element --- .../operators/tensor/tensor.optional.md | 42 +++++++++++ src/numbers.cairo | 15 ++++ src/operators/tensor/core.cairo | 46 ++++++++++++ src/operators/tensor/helpers.cairo | 58 ++++++++++++++- .../tensor/implementations/tensor_bool.cairo | 5 ++ .../implementations/tensor_complex64.cairo | 4 ++ .../implementations/tensor_fp16x16.cairo | 4 ++ .../implementations/tensor_fp16x16wide.cairo | 4 ++ .../implementations/tensor_fp32x32.cairo | 4 ++ .../implementations/tensor_fp64x64.cairo | 4 ++ .../implementations/tensor_fp8x23.cairo | 4 ++ .../implementations/tensor_fp8x23wide.cairo | 4 ++ .../tensor/implementations/tensor_i32.cairo | 4 ++ .../tensor/implementations/tensor_i8.cairo | 4 ++ .../tensor/implementations/tensor_u32.cairo | 4 ++ 
src/operators/tensor/manipulation.cairo | 1 + .../tensor/manipulation/optional.cairo | 14 ++++ src/operators/tensor/manipulation/split.cairo | 2 +- tests/operators.cairo | 1 + tests/operators/optional.cairo | 3 + .../optional/optional_get_element_test.cairo | 70 +++++++++++++++++++ .../optional/optional_has_element_test.cairo | 67 ++++++++++++++++++ tests/operators/optional/optional_test.cairo | 70 +++++++++++++++++++ 23 files changed, 432 insertions(+), 2 deletions(-) create mode 100644 docs/framework/operators/tensor/tensor.optional.md create mode 100644 src/operators/tensor/manipulation/optional.cairo create mode 100644 tests/operators/optional.cairo create mode 100644 tests/operators/optional/optional_get_element_test.cairo create mode 100644 tests/operators/optional/optional_has_element_test.cairo create mode 100644 tests/operators/optional/optional_test.cairo diff --git a/docs/framework/operators/tensor/tensor.optional.md b/docs/framework/operators/tensor/tensor.optional.md new file mode 100644 index 000000000..7f11b175c --- /dev/null +++ b/docs/framework/operators/tensor/tensor.optional.md @@ -0,0 +1,42 @@ +# tensor.optional + +```rust + fn optional(self: @Tensor) -> Option>; +``` + +Constructs an optional-type value containing either an empty optional of a certain +type specified by the attribute, or a non-empty value containing the input element. + +## Args + +* `self`(`@Tensor`) - The input tensor. + +## Returns + +The optional output enclosing the input element. 
+ +## Examples + +```rust +use core::option::OptionTrait; +fn optional_example() -> Option> { + let a = TensorTrait::< + FP16x16 + >::new( + shape: array![4, 2].span(), + data: array![ + 1_i8, + 2_i8, + 3_i8, + 4_i8, + 5_i8, + 6_i8, + 7_i8, + 8_i8 + ].span(), + ); + a.optional() +} +>>> Option[Tensor[1,2,3,4,5,6,7,8]] + +``` diff --git a/src/numbers.cairo b/src/numbers.cairo index 936c128e1..25cbbbc22 100644 --- a/src/numbers.cairo +++ b/src/numbers.cairo @@ -1432,6 +1432,9 @@ impl FP32x32Number of NumberTrait { impl I8Number of NumberTrait { fn new(mag: i8, sign: bool) -> i8 { + if sign{ + return -mag; + } mag } @@ -1774,6 +1777,9 @@ impl I8IntoFP32x32 of Into { impl I16Number of NumberTrait { fn new(mag: i16, sign: bool) -> i16 { + if sign{ + return -mag; + } mag } @@ -2064,6 +2070,9 @@ impl I16DivEq of DivEq { impl I32Number of NumberTrait { fn new(mag: i32, sign: bool) -> i32 { + if sign{ + return -mag; + } mag } @@ -2367,6 +2376,9 @@ impl I32IntoU32 of Into { impl I64Number of NumberTrait { fn new(mag: i64, sign: bool) -> i64 { + if sign{ + return -mag; + } mag } @@ -2657,6 +2669,9 @@ impl I64DivEq of DivEq { impl I128Number of NumberTrait { fn new(mag: i128, sign: bool) -> i128 { + if sign{ + return -mag; + } mag } diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 70344eb97..e475e65c1 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -118,6 +118,7 @@ impl TensorSerde, impl TDrop: Drop> of Serde { /// # tensor.new /// @@ -5120,6 +5121,7 @@ trait TensorTrait { /// /// Split a tensor into a list of tensors, along the specified ‘axis’ /// + /// ## Args /// /// * `self`(`@Tensor`) - The input tensor. /// * `axis`(`usize`) - The axis along which to split on. 
@@ -5162,6 +5164,50 @@ trait TensorTrait { fn split( self: @Tensor, axis: usize, num_outputs: Option, spl: Option> ) -> Array>; + /// # tensor.optional + /// + /// ```rust + /// fn optional(self: @Tensor) -> Option>; + /// ``` + /// + /// Constructs an optional-type value containing either an empty optional of a certain + /// type specified by the attribute, or a non-empty value containing the input element. + /// + /// ## Args + /// + /// * `self`(`@Tensor`) - The input tensor. + /// + /// ## Returns + /// + /// The optional output enclosing the input element. + /// + /// ## Examples + /// + /// ```rust + /// use core::option::OptionTrait; + /// fn optional_example() -> Option> { + /// let a = TensorTrait::< + /// FP16x16 + /// >::new( + /// shape: array![4, 2].span(), + /// data: array![ + /// 1_i8, + /// 2_i8, + /// 3_i8, + /// 4_i8, + /// 5_i8, + /// 6_i8, + /// 7_i8, + /// 8_i8 + /// ].span(), + /// ); + /// a.optional() + /// } + /// >>> Option[Tensor[1,2,3,4,5,6,7,8]] + /// + /// ``` + /// + fn optional(self: @Tensor) -> Option>; } /// Cf: TensorTrait::new docstring diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo index 894dfc8d4..c8e606ebc 100644 --- a/src/operators/tensor/helpers.cairo +++ b/src/operators/tensor/helpers.cairo @@ -5,7 +5,7 @@ use core::option::OptionTrait; use alexandria_data_structures::array_ext::ArrayTraitExt; use orion::utils::u32_max; -use orion::operators::tensor::core::{stride, Tensor, TensorTrait}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, stride}, BoolTensor}; /// Calculates the number of elements in a tensor given its shape. /// @@ -497,3 +497,59 @@ impl SpanPartialOrd, +Copy, +PartialEq, +PartialOrd> of Par span_cmp(lhs, rhs) < 0 } } + +/// Returns true if (1) the input is an optional-type and contains an element, +/// or, (2) the input is a tensor type. +/// If the input is not provided or is an empty optional-type, this op returns false. 
+/// +/// # Arguments +/// * `x` - The optional input. +/// +/// # Returns +/// * A scalar boolean tensor. +/// If true, it indicates that optional-type input contains an element. Otherwise, it is empty. +fn optional_has_element, +Drop, +TensorTrait,>( + x: Option> +) -> Tensor { + match x{ + Option::Some(ele) => { + let mut shape = ArrayTrait::::new(); + shape.append(1); + let mut data = ArrayTrait::::new(); + data.append(true); + TensorTrait::new(shape.span(), data.span()) + }, + Option::None(_) => { + let mut shape = ArrayTrait::::new(); + shape.append(1); + let mut data = ArrayTrait::::new(); + data.append(false); + TensorTrait::new(shape.span(), data.span()) + } + } +} + +/// If the input is a tensor type, it returns the input. +/// If the input is an optional type, it outputs the element in the input. +/// +/// # Arguments +/// * `x` - The optional input. +/// +/// # Panics +/// * Panics if the input is an empty optional-type (i.e. does not have an element) +/// and the behavior is undefined in this case. +/// +/// # Returns +/// * Output element in the optional input. +fn optional_get_element, +Drop, +TensorTrait,>( + x: Option> +) -> Tensor { + match x{ + Option::Some(ele) => { + ele + }, + Option::None(_) => { + panic(array!['The input is an empty', 'optional-type.']) + } + } +} \ No newline at end of file diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 3da518ec8..ab92db9fa 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -484,6 +484,11 @@ impl BoolTensor of TensorTrait { ) -> Array> { panic(array!['not supported!']) } + + fn optional(self: @Tensor) -> Option>{ + manipulation::optional::optional(self) + } + } /// Implements partial equal for two `Tensor` using the `PartialEq` trait. 
diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 74acba5c6..39dc6d174 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -515,6 +515,10 @@ impl Complex64Tensor of TensorTrait { ) -> Tensor { panic(array!['not supported!']) } + + fn optional(self: @Tensor) -> Option>{ + manipulation::optional::optional(self) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index cdc50bc4f..5010ff471 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -560,6 +560,10 @@ impl FP16x16Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn optional(self: @Tensor) -> Option>{ + manipulation::optional::optional(self) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index b0dc2d858..3b634b4d5 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -512,6 +512,10 @@ impl FP16x16WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn optional(self: @Tensor) -> Option>{ + manipulation::optional::optional(self) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 4f862fd0e..dd18c67fd 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -561,6 +561,10 @@ impl FP32x32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn optional(self: @Tensor) -> Option>{ + manipulation::optional::optional(self) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 1fe5591fc..20a755469 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -561,6 +561,10 @@ impl FP64x64Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn optional(self: @Tensor) -> Option>{ + manipulation::optional::optional(self) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 77d183c21..d501bc087 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -559,6 +559,10 @@ impl FP8x23Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn optional(self: @Tensor) -> Option>{ + manipulation::optional::optional(self) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index ff6069087..85f7e1488 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -498,6 +498,10 @@ impl FP8x23WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn optional(self: @Tensor) -> Option>{ + manipulation::optional::optional(self) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 50383d2df..efaebc3ab 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -541,6 +541,10 @@ impl I32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn optional(self: @Tensor) -> Option>{ + manipulation::optional::optional(self) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 7e81d90eb..849d4d4b8 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -539,6 +539,10 @@ impl I8Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn optional(self: @Tensor) -> Option>{ + manipulation::optional::optional(self) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 5a926a538..38b903a7a 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -482,6 +482,10 @@ impl U32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn optional(self: @Tensor) -> Option> { + manipulation::optional::optional(self) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/manipulation.cairo b/src/operators/tensor/manipulation.cairo index 584eae027..ace5f5493 100644 --- a/src/operators/tensor/manipulation.cairo +++ b/src/operators/tensor/manipulation.cairo @@ -1,2 +1,3 @@ mod unique; mod split; +mod optional; diff --git a/src/operators/tensor/manipulation/optional.cairo b/src/operators/tensor/manipulation/optional.cairo new file mode 100644 index 000000000..e57e35e69 --- /dev/null +++ b/src/operators/tensor/manipulation/optional.cairo @@ -0,0 +1,14 @@ +use core::option::OptionTrait; +use orion::operators::tensor::{Tensor, TensorTrait}; + +/// Cf: TensorTrait::optional docstring +fn optional< + T, + +Copy, + +Drop, + impl TOption: OptionTrait +>( + self: @Tensor +) -> Option> { + Option::Some(*self) +} diff --git a/src/operators/tensor/manipulation/split.cairo b/src/operators/tensor/manipulation/split.cairo index bf0274aec..e765cc978 100644 --- a/src/operators/tensor/manipulation/split.cairo +++ b/src/operators/tensor/manipulation/split.cairo @@ -3,7 +3,7 @@ use core::array::{ArrayTrait, SpanTrait}; use core::option::OptionTrait; use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl}; -/// Cf: NNTrait::split docstring +/// Cf: TensorTrait::split docstring fn split< T, +Copy, diff --git a/tests/operators.cairo b/tests/operators.cairo index d02bf25a4..b2228224c 100644 --- a/tests/operators.cairo +++ b/tests/operators.cairo @@ -4,3 +4,4 @@ mod 
qlinear_concat_test; mod qlinear_add_test; mod constant_of_shape_test; mod qlinear_leakyrelu_test; +mod optional; diff --git a/tests/operators/optional.cairo b/tests/operators/optional.cairo new file mode 100644 index 000000000..a464a4a46 --- /dev/null +++ b/tests/operators/optional.cairo @@ -0,0 +1,3 @@ +mod optional_test; +mod optional_has_element_test; +mod optional_get_element_test; diff --git a/tests/operators/optional/optional_get_element_test.cairo b/tests/operators/optional/optional_get_element_test.cairo new file mode 100644 index 000000000..576e12cd6 --- /dev/null +++ b/tests/operators/optional/optional_get_element_test.cairo @@ -0,0 +1,70 @@ +use core::debug::PrintTrait; +use core::array::{ArrayTrait, SpanTrait}; +use core::option::OptionTrait; + +use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor}; +use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; +use orion::numbers::{NumberTrait}; +use orion::operators::tensor::helpers::{optional_has_element, optional_get_element}; + +#[test] +#[available_gas(200000000000)] +fn optional_get_element_i8_test() { + let a = TensorTrait::< + i8 + >::new( + shape: array![4, 2].span(), + data: array![ + 1_i8, + 2_i8, + 3_i8, + 4_i8, + 5_i8, + 6_i8, + 7_i8, + 8_i8 + ] + .span(), + ); + let ele = optional_get_element(a.optional()); + + assert(*(ele.data).at(0) == *(a.data).at(0), 'ele[0] == a[0]'); + assert(*(ele.data).at(1) == *(a.data).at(1), 'ele[1] == a[1]'); + assert(*(ele.data).at(2) == *(a.data).at(2), 'ele[2] == a[2]'); + assert(*(ele.data).at(3) == *(a.data).at(3), 'ele[3] == a[3]'); + assert(*(ele.data).at(4) == *(a.data).at(4), 'ele[4] == a[4]'); + assert(*(ele.data).at(5) == *(a.data).at(5), 'ele[5] == a[5]'); + assert(*(ele.data).at(6) == *(a.data).at(6), 'ele[6] == a[6]'); + assert(*(ele.data).at(7) == *(a.data).at(7), 'ele[7] == a[7]'); +} + +#[test] +#[available_gas(200000000000)] +fn optional_get_element_fp16x16_test() { + let a = 
TensorTrait::< + FP16x16 + >::new( + shape: array![4, 2].span(), + data: array![ + FixedTrait::::new_unscaled(1, false), + FixedTrait::::new_unscaled(2, false), + FixedTrait::::new_unscaled(3, false), + FixedTrait::::new_unscaled(4, false), + FixedTrait::::new_unscaled(5, false), + FixedTrait::::new_unscaled(6, false), + FixedTrait::::new_unscaled(7, false), + FixedTrait::::new_unscaled(8, false) + ] + .span(), + ); + let ele = optional_get_element(a.optional()); + + assert(*(ele.data).at(0) == *(a.data).at(0), 'ele[0] == a[0]'); + assert(*(ele.data).at(1) == *(a.data).at(1), 'ele[1] == a[1]'); + assert(*(ele.data).at(2) == *(a.data).at(2), 'ele[2] == a[2]'); + assert(*(ele.data).at(3) == *(a.data).at(3), 'ele[3] == a[3]'); + assert(*(ele.data).at(4) == *(a.data).at(4), 'ele[4] == a[4]'); + assert(*(ele.data).at(5) == *(a.data).at(5), 'ele[5] == a[5]'); + assert(*(ele.data).at(6) == *(a.data).at(6), 'ele[6] == a[6]'); + assert(*(ele.data).at(7) == *(a.data).at(7), 'ele[7] == a[7]'); +} \ No newline at end of file diff --git a/tests/operators/optional/optional_has_element_test.cairo b/tests/operators/optional/optional_has_element_test.cairo new file mode 100644 index 000000000..f08bdcc73 --- /dev/null +++ b/tests/operators/optional/optional_has_element_test.cairo @@ -0,0 +1,67 @@ +use core::debug::PrintTrait; +use core::array::{ArrayTrait, SpanTrait}; +use core::option::OptionTrait; + +use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor}; +use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; +use orion::numbers::{NumberTrait}; +use orion::operators::tensor::helpers::{optional_has_element, optional_get_element}; + +#[test] +#[available_gas(200000000000)] +fn optional_has_element_i8_test() { + let a = TensorTrait::< + i8 + >::new( + shape: array![4, 2].span(), + data: array![ + 1_i8, + 2_i8, + 3_i8, + 4_i8, + 5_i8, + 6_i8, + 7_i8, + 8_i8 + ] + .span(), + ); + let a_optional = a.optional(); + let 
has_ele = optional_has_element(a_optional); + + assert(*(has_ele.data).at(0) == true, 'has_ele[0] == true'); +} + +#[test] +#[available_gas(200000000000)] +fn optional_has_element_fp16x16_test() { + let a = TensorTrait::< + FP16x16 + >::new( + shape: array![4, 2].span(), + data: array![ + FixedTrait::::new_unscaled(1, false), + FixedTrait::::new_unscaled(2, false), + FixedTrait::::new_unscaled(3, false), + FixedTrait::::new_unscaled(4, false), + FixedTrait::::new_unscaled(5, false), + FixedTrait::::new_unscaled(6, false), + FixedTrait::::new_unscaled(7, false), + FixedTrait::::new_unscaled(8, false) + ] + .span(), + ); + let a_optional = a.optional(); + let has_ele = optional_has_element(a_optional); + + assert(*(has_ele.data).at(0) == true, 'has_ele[0] == true'); +} + +#[test] +#[available_gas(200000000000)] +fn optional_has_element_none_test() { + let a: Option> = Option::None(()); + let has_ele = optional_has_element(a); + + assert(*(has_ele.data).at(0) == false, 'has_ele[0] == false'); +} \ No newline at end of file diff --git a/tests/operators/optional/optional_test.cairo b/tests/operators/optional/optional_test.cairo new file mode 100644 index 000000000..3632e173a --- /dev/null +++ b/tests/operators/optional/optional_test.cairo @@ -0,0 +1,70 @@ +use core::debug::PrintTrait; +use core::array::{ArrayTrait, SpanTrait}; +use core::option::OptionTrait; + +use orion::operators::tensor::{TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor}; +use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; +use orion::numbers::{NumberTrait}; +use orion::operators::tensor::helpers::{optional_has_element, optional_get_element}; + +#[test] +#[available_gas(200000000000)] +fn optional_i8_test() { + let a = TensorTrait::< + i8 + >::new( + shape: array![4, 2].span(), + data: array![ + 1_i8, + 2_i8, + 3_i8, + 4_i8, + 5_i8, + 6_i8, + 7_i8, + 8_i8 + ] + .span(), + ); + let a_optional = a.optional(); + + 
assert(*(optional_get_element(a_optional).data).at(0) == *(a.data).at(0), 'a_optional[0] == Option(a)[0]'); + assert(*(optional_get_element(a_optional).data).at(1) == *(a.data).at(1), 'a_optional[1] == Option(a)[1]'); + assert(*(optional_get_element(a_optional).data).at(2) == *(a.data).at(2), 'a_optional[2] == Option(a)[2]'); + assert(*(optional_get_element(a_optional).data).at(3) == *(a.data).at(3), 'a_optional[3] == Option(a)[3]'); + assert(*(optional_get_element(a_optional).data).at(4) == *(a.data).at(4), 'a_optional[4] == Option(a)[4]'); + assert(*(optional_get_element(a_optional).data).at(5) == *(a.data).at(5), 'a_optional[5] == Option(a)[5]'); + assert(*(optional_get_element(a_optional).data).at(6) == *(a.data).at(6), 'a_optional[6] == Option(a)[6]'); + assert(*(optional_get_element(a_optional).data).at(7) == *(a.data).at(7), 'a_optional[7] == Option(a)[7]'); +} + +#[test] +#[available_gas(200000000000)] +fn optional_fp16x16_test() { + let a = TensorTrait::< + FP16x16 + >::new( + shape: array![4, 2].span(), + data: array![ + FixedTrait::::new_unscaled(1, false), + FixedTrait::::new_unscaled(2, false), + FixedTrait::::new_unscaled(3, false), + FixedTrait::::new_unscaled(4, false), + FixedTrait::::new_unscaled(5, false), + FixedTrait::::new_unscaled(6, false), + FixedTrait::::new_unscaled(7, false), + FixedTrait::::new_unscaled(8, false) + ] + .span(), + ); + let a_optional = a.optional(); + + assert(*(optional_get_element(a_optional).data).at(0) == *(a.data).at(0), 'a_optional[0] == Option(a)[0]'); + assert(*(optional_get_element(a_optional).data).at(1) == *(a.data).at(1), 'a_optional[1] == Option(a)[1]'); + assert(*(optional_get_element(a_optional).data).at(2) == *(a.data).at(2), 'a_optional[2] == Option(a)[2]'); + assert(*(optional_get_element(a_optional).data).at(3) == *(a.data).at(3), 'a_optional[3] == Option(a)[3]'); + assert(*(optional_get_element(a_optional).data).at(4) == *(a.data).at(4), 'a_optional[4] == Option(a)[4]'); + 
assert(*(optional_get_element(a_optional).data).at(5) == *(a.data).at(5), 'a_optional[5] == Option(a)[5]'); + assert(*(optional_get_element(a_optional).data).at(6) == *(a.data).at(6), 'a_optional[6] == Option(a)[6]'); + assert(*(optional_get_element(a_optional).data).at(7) == *(a.data).at(7), 'a_optional[7] == Option(a)[7]'); +} \ No newline at end of file From 448621f0acb9cc5971eac13ca97469cda98776ef Mon Sep 17 00:00:00 2001 From: chachaleo Date: Sun, 28 Jan 2024 14:37:59 +0100 Subject: [PATCH 13/46] feat: conv_transpose (group = 1) --- docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 1 + .../operators/neural-network/README.md | 1 + .../neural-network/nn.conv_transpose.md | 128 ++++ nodegen/node/conv_transpose.py | 500 ++++++++++++++ src/numbers.cairo | 69 +- .../implementations/fp16x16/core.cairo | 3 +- .../implementations/fp16x16wide/core.cairo | 2 +- .../implementations/fp32x32/core.cairo | 3 +- .../implementations/fp64x64/core.cairo | 3 +- .../implementations/fp8x23/core.cairo | 4 +- .../implementations/fp8x23wide/core.cairo | 2 +- src/operators/nn/core.cairo | 143 ++++ src/operators/nn/functional.cairo | 1 + .../nn/functional/conv_transpose.cairo | 648 ++++++++++++++++++ .../nn/implementations/nn_fp16x16.cairo | 28 + .../nn/implementations/nn_fp32x32.cairo | 28 + .../nn/implementations/nn_fp64x64.cairo | 28 + .../nn/implementations/nn_fp8x23.cairo | 28 + src/operators/nn/implementations/nn_i32.cairo | 28 + src/operators/nn/implementations/nn_i8.cairo | 28 + src/operators/nn/implementations/nn_u32.cairo | 28 + .../sequence/functional/sequence_at.cairo | 4 +- .../sequence/functional/sequence_erase.cairo | 3 +- .../sequence/functional/sequence_insert.cairo | 4 +- src/operators/tensor/core.cairo | 8 +- src/operators/tensor/helpers.cairo | 2 +- .../tensor/implementations/tensor_i32.cairo | 10 +- .../tensor/implementations/tensor_i8.cairo | 2 +- .../tensor/math/layer_normalization.cairo | 3 +- src/test_helper/tensor/i32.cairo | 3 +- 
src/test_helper/tensor/i8.cairo | 3 +- tests/nodes.cairo | 7 + tests/nodes/clip_fp16x16_2d.cairo | 6 +- tests/nodes/clip_fp16x16_3d.cairo | 6 +- tests/nodes/clip_fp8x23_2d.cairo | 6 +- tests/nodes/clip_fp8x23_3d.cairo | 6 +- tests/nodes/compress_fp16x16_3d_axis1.cairo | 2 +- tests/nodes/compress_fp16x16_3d_axis2.cairo | 2 +- tests/nodes/compress_fp16x16_3d_axis3.cairo | 2 +- tests/nodes/compress_fp16x16_3d_default.cairo | 2 +- tests/nodes/compress_fp16x16_3d_noaxis.cairo | 2 +- tests/nodes/compress_fp8x23_3d_axis1.cairo | 2 +- tests/nodes/compress_fp8x23_3d_axis2.cairo | 2 +- tests/nodes/compress_fp8x23_3d_default.cairo | 2 +- tests/nodes/compress_i32_3d_axis1.cairo | 2 +- tests/nodes/compress_i32_3d_axis2.cairo | 2 +- tests/nodes/compress_i32_3d_default.cairo | 2 +- tests/nodes/compress_i8_3d_axis1.cairo | 2 +- tests/nodes/compress_i8_3d_axis2.cairo | 2 +- tests/nodes/compress_i8_3d_default.cairo | 2 +- tests/nodes/compress_u32_3d_axis1.cairo | 2 +- tests/nodes/compress_u32_3d_axis2.cairo | 2 +- tests/nodes/compress_u32_3d_axis2_2.cairo | 2 +- tests/nodes/compress_u32_3d_axis3.cairo | 2 +- tests/nodes/compress_u32_3d_default.cairo | 2 +- tests/nodes/conv_transpose.cairo | 34 + tests/nodes/conv_transpose/input_0.cairo | 24 + tests/nodes/conv_transpose/input_1.cairo | 33 + tests/nodes/conv_transpose/output_0.cairo | 65 ++ tests/nodes/conv_transpose_1d.cairo | 34 + tests/nodes/conv_transpose_1d/input_0.cairo | 17 + tests/nodes/conv_transpose_1d/input_1.cairo | 20 + tests/nodes/conv_transpose_1d/output_0.cairo | 24 + tests/nodes/conv_transpose_3d.cairo | 34 + tests/nodes/conv_transpose_3d/input_0.cairo | 76 ++ tests/nodes/conv_transpose_3d/input_1.cairo | 70 ++ tests/nodes/conv_transpose_3d/output_0.cairo | 436 ++++++++++++ tests/nodes/conv_transpose_attributes.cairo | 34 + .../conv_transpose_attributes/input_0.cairo | 24 + .../conv_transpose_attributes/input_1.cairo | 33 + .../conv_transpose_attributes/output_0.cairo | 65 ++ 
tests/nodes/conv_transpose_autopad_same.cairo | 36 + .../conv_transpose_autopad_same/input_0.cairo | 24 + .../conv_transpose_autopad_same/input_1.cairo | 33 + .../output_0.cairo | 87 +++ tests/nodes/conv_transpose_dilations.cairo | 34 + .../conv_transpose_dilations/input_0.cairo | 24 + .../conv_transpose_dilations/input_1.cairo | 19 + .../conv_transpose_dilations/output_0.cairo | 40 ++ tests/nodes/conv_transpose_pads.cairo | 34 + tests/nodes/conv_transpose_pads/input_0.cairo | 24 + tests/nodes/conv_transpose_pads/input_1.cairo | 33 + .../nodes/conv_transpose_pads/output_0.cairo | 175 +++++ tests/nodes/gather_fp16x16_3d_axis1.cairo | 2 +- tests/nodes/gather_fp16x16_3d_axis2.cairo | 2 +- tests/nodes/gather_fp16x16_3d_default.cairo | 2 +- tests/nodes/gather_fp8x23_3d_axis1.cairo | 2 +- tests/nodes/gather_fp8x23_3d_axis2.cairo | 2 +- tests/nodes/gather_fp8x23_3d_default.cairo | 2 +- tests/nodes/gather_i32_3d_axis1.cairo | 2 +- tests/nodes/gather_i32_3d_axis2.cairo | 2 +- tests/nodes/gather_i32_3d_default.cairo | 2 +- tests/nodes/gather_i8_3d_axis1.cairo | 2 +- tests/nodes/gather_i8_3d_axis2.cairo | 2 +- tests/nodes/gather_i8_3d_default.cairo | 2 +- .../gather_nd_fp16x16_3d_batch_dims1.cairo | 2 +- .../gather_nd_fp16x16_3d_batch_dims2.cairo | 2 +- .../nodes/gather_nd_fp16x16_3d_default.cairo | 2 +- .../gather_nd_fp8x23_3d_batch_dims1.cairo | 2 +- .../gather_nd_fp8x23_3d_batch_dims2.cairo | 2 +- tests/nodes/gather_nd_fp8x23_3d_default.cairo | 2 +- .../nodes/gather_nd_i32_3d_batch_dims1.cairo | 2 +- .../nodes/gather_nd_i32_3d_batch_dims2.cairo | 2 +- tests/nodes/gather_nd_i32_3d_default.cairo | 2 +- tests/nodes/gather_nd_i8_3d_batch_dims1.cairo | 2 +- tests/nodes/gather_nd_i8_3d_default.cairo | 2 +- tests/nodes/gather_nd_u32_batch_dims1.cairo | 2 +- tests/nodes/gather_nd_u32_batch_dims2.cairo | 2 +- tests/nodes/gather_nd_u32_default.cairo | 2 +- tests/nodes/gather_u32_3d_axis1.cairo | 2 +- tests/nodes/gather_u32_3d_axis2.cairo | 2 +- 
tests/nodes/gather_u32_3d_default.cairo | 2 +- tests/nodes/gemm_all_attributes.cairo | 10 +- tests/nodes/gemm_alpha.cairo | 10 +- tests/nodes/gemm_beta.cairo | 10 +- tests/nodes/gemm_default_matrix_bias.cairo | 4 +- tests/nodes/gemm_default_no_bias.cairo | 4 +- tests/nodes/gemm_default_vector_bias.cairo | 4 +- tests/nodes/gemm_transposeA.cairo | 4 +- tests/nodes/gemm_transposeB.cairo | 4 +- tests/nodes/hard_sigmoid_fp16x16.cairo | 4 +- tests/nodes/hard_sigmoid_fp8x23.cairo | 4 +- tests/nodes/is_nan_fp16x16/input_0.cairo | 2 +- ...layer_normalization_3d_axis0_epsilon.cairo | 9 +- ...layer_normalization_3d_axis1_epsilon.cairo | 9 +- ...layer_normalization_3d_axis2_epsilon.cairo | 9 +- ...alization_3d_axis_negative_1_epsilon.cairo | 9 +- ...alization_3d_axis_negative_2_epsilon.cairo | 9 +- ...alization_3d_axis_negative_3_epsilon.cairo | 9 +- .../nodes/layer_normalization_4d_axis0.cairo | 5 +- .../nodes/layer_normalization_4d_axis1.cairo | 5 +- .../nodes/layer_normalization_4d_axis2.cairo | 5 +- .../nodes/layer_normalization_4d_axis3.cairo | 5 +- ...yer_normalization_4d_axis_negative_1.cairo | 5 +- ...yer_normalization_4d_axis_negative_2.cairo | 5 +- ...yer_normalization_4d_axis_negative_3.cairo | 5 +- ...yer_normalization_4d_axis_negative_4.cairo | 5 +- .../layer_normalization_default_axis.cairo | 5 +- tests/nodes/layer_normalization_test.cairo | 5 +- tests/nodes/scatter_fp16x16_3d_axis1.cairo | 8 +- .../nodes/scatter_fp16x16_3d_axis1_add.cairo | 8 +- tests/nodes/scatter_fp16x16_3d_default.cairo | 8 +- tests/nodes/scatter_fp8x23_axis1.cairo | 8 +- tests/nodes/scatter_fp8x23_default.cairo | 8 +- tests/nodes/scatter_fp8x23_mul.cairo | 8 +- tests/nodes/scatter_i8_axis1.cairo | 8 +- tests/nodes/scatter_i8_axis1_max.cairo | 8 +- tests/nodes/scatter_i8_default.cairo | 8 +- tests/nodes/scatter_u32_add.cairo | 8 +- tests/nodes/scatter_u32_axis1.cairo | 8 +- tests/nodes/scatter_u32_default.cairo | 8 +- tests/nodes/sequence_insert_fp16x16.cairo | 2 +- 
tests/nodes/sequence_insert_fp8x23.cairo | 2 +- tests/nodes/sequence_insert_i32.cairo | 2 +- tests/nodes/sequence_insert_i8.cairo | 2 +- tests/nodes/sequence_insert_u32.cairo | 2 +- tests/nodes/sequence_length_fp16x16.cairo | 4 +- tests/nodes/shrink_hard_fp16x16.cairo | 4 +- tests/nodes/shrink_hard_fp8x23.cairo | 4 +- tests/nodes/shrink_soft_fp16x16.cairo | 6 +- tests/nodes/shrink_soft_fp8x23.cairo | 6 +- tests/nodes/slice_fp16x16_2d.cairo | 8 +- tests/nodes/slice_fp16x16_3d.cairo | 8 +- tests/nodes/slice_fp8x23_2d.cairo | 8 +- tests/nodes/slice_fp8x23_3d.cairo | 8 +- tests/nodes/slice_i32_2d.cairo | 8 +- tests/nodes/slice_i32_3d.cairo | 8 +- tests/nodes/slice_i8_2d.cairo | 8 +- tests/nodes/slice_i8_3d.cairo | 8 +- tests/nodes/slice_u32_2d.cairo | 8 +- tests/nodes/slice_u32_3d.cairo | 8 +- tests/nodes/where_fp16x16.cairo | 2 +- tests/nodes/where_fp16x16_broadcast.cairo | 2 +- tests/nodes/where_fp8x23.cairo | 2 +- tests/nodes/where_fp8x23_broadcast.cairo | 2 +- tests/nodes/where_i32.cairo | 2 +- tests/nodes/where_i32_broadcast.cairo | 2 +- tests/nodes/where_i8.cairo | 2 +- tests/nodes/where_i8_broadcast.cairo | 2 +- tests/nodes/where_u32.cairo | 2 +- tests/nodes/where_u32_broadcast.cairo | 2 +- tests/operators/qlinear_add_test.cairo | 70 +- tests/operators/qlinear_concat_test.cairo | 77 +-- tests/operators/qlinear_leakyrelu_test.cairo | 10 +- tests/operators/qlinear_matmul_test.cairo | 76 +- tests/operators/qlinear_mul_test.cairo | 74 +- 187 files changed, 3691 insertions(+), 470 deletions(-) create mode 100644 docs/framework/operators/neural-network/nn.conv_transpose.md create mode 100644 nodegen/node/conv_transpose.py create mode 100644 src/operators/nn/functional/conv_transpose.cairo create mode 100644 tests/nodes/conv_transpose.cairo create mode 100644 tests/nodes/conv_transpose/input_0.cairo create mode 100644 tests/nodes/conv_transpose/input_1.cairo create mode 100644 tests/nodes/conv_transpose/output_0.cairo create mode 100644 
tests/nodes/conv_transpose_1d.cairo create mode 100644 tests/nodes/conv_transpose_1d/input_0.cairo create mode 100644 tests/nodes/conv_transpose_1d/input_1.cairo create mode 100644 tests/nodes/conv_transpose_1d/output_0.cairo create mode 100644 tests/nodes/conv_transpose_3d.cairo create mode 100644 tests/nodes/conv_transpose_3d/input_0.cairo create mode 100644 tests/nodes/conv_transpose_3d/input_1.cairo create mode 100644 tests/nodes/conv_transpose_3d/output_0.cairo create mode 100644 tests/nodes/conv_transpose_attributes.cairo create mode 100644 tests/nodes/conv_transpose_attributes/input_0.cairo create mode 100644 tests/nodes/conv_transpose_attributes/input_1.cairo create mode 100644 tests/nodes/conv_transpose_attributes/output_0.cairo create mode 100644 tests/nodes/conv_transpose_autopad_same.cairo create mode 100644 tests/nodes/conv_transpose_autopad_same/input_0.cairo create mode 100644 tests/nodes/conv_transpose_autopad_same/input_1.cairo create mode 100644 tests/nodes/conv_transpose_autopad_same/output_0.cairo create mode 100644 tests/nodes/conv_transpose_dilations.cairo create mode 100644 tests/nodes/conv_transpose_dilations/input_0.cairo create mode 100644 tests/nodes/conv_transpose_dilations/input_1.cairo create mode 100644 tests/nodes/conv_transpose_dilations/output_0.cairo create mode 100644 tests/nodes/conv_transpose_pads.cairo create mode 100644 tests/nodes/conv_transpose_pads/input_0.cairo create mode 100644 tests/nodes/conv_transpose_pads/input_1.cairo create mode 100644 tests/nodes/conv_transpose_pads/output_0.cairo diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 649e411f9..07a08e641 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -160,6 +160,7 @@ * [nn.hard\_sigmoid](framework/operators/neural-network/nn.hard\_sigmoid.md) * [nn.thresholded\_relu](framework/operators/neural-network/nn.thresholded\_relu.md) * [nn.gemm](framework/operators/neural-network/nn.gemm.md) + * 
[nn.conv_transpose](framework/operators/neural-network/nn.conv\_transpose.md) * [Machine Learning](framework/operators/machine-learning/README.md) * [Tree Ensemble Classifier](framework/operators/machine-learning/tree-ensemble-classifier/README.md) * [tree\_ensemble\_classifier.predict](framework/operators/machine-learning/tree-ensemble-classifier/tree\_ensemble\_classifier.predict.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index 0e0e5be17..ba911ea6b 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -43,6 +43,7 @@ You can see below the list of current supported ONNX Operators: | [Softplus](operators/neural-network/nn.softplus.md) | :white\_check\_mark: | | [Linear](operators/neural-network/nn.linear.md) | :white\_check\_mark: | | [HardSigmoid](operators/neural-network/nn.hard\_sigmoid.md) | :white\_check\_mark: | +| [ConvTranspose](operators/neural-network/nn.conv\_transpose_.md) | :white\_check\_mark: | | [Sinh](operators/tensor/tensor.sinh.md) | :white\_check\_mark: | | [Asinh](operators/tensor/tensor.asinh.md) | :white\_check\_mark: | | [Atanh](operators/tensor/tensor.atanh.md) | :white\_check\_mark: | diff --git a/docs/framework/operators/neural-network/README.md b/docs/framework/operators/neural-network/README.md index 8343d0c90..194c1f78b 100644 --- a/docs/framework/operators/neural-network/README.md +++ b/docs/framework/operators/neural-network/README.md @@ -35,4 +35,5 @@ Orion supports currently these `NN` types. | [`nn.hard_sigmoid`](nn.hard\_sigmoid.md) | Applies the Hard Sigmoid function to an n-dimensional input tensor. | | [`nn.thresholded_relu`](nn.thresholded\_relu.md) | Performs the thresholded relu activation function element-wise. | | [`nn.gemm`](nn.gemm.md) | Performs General Matrix multiplication. | +| [`nn.conv_transpose`](nn.conv\_transpose.md) | Performs the convolution transpose of the input data tensor and weigth tensor. 
| diff --git a/docs/framework/operators/neural-network/nn.conv_transpose.md b/docs/framework/operators/neural-network/nn.conv_transpose.md new file mode 100644 index 000000000..29ce733a0 --- /dev/null +++ b/docs/framework/operators/neural-network/nn.conv_transpose.md @@ -0,0 +1,128 @@ +# NNTrait::conv_transpose + +```rust + conv_transpose( + X: @Tensor, + W: @Tensor, + B: Option<@Tensor>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + output_padding: Option>, + output_shape: Option>, + pads: Option>, + strides: Option>, +) -> Tensor +``` + +The convolution transpose operator consumes an input tensor and a input weigth tensor, and computes the output. + +## Args + +* `X`(`@Tensor`) - Input data tensor, has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W if 2D, otherwise the size is (N x C x D1 x D2 ... x Dn). +* `W`(`@Tensor`) - The weight tensor, has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps if 2D, for more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x ... x kn). +* `B`(`Option<@Tensor>`) - Optional 1D bias to be added to the convolution, has size of M. +* `auto_pad`(`Option`) - Default is NOTSET, auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = input_shape[i] * strides[i]` for each axis `i`. +* `dilations`(`Option>`) - Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis. +* `group`(`Option`) - Default is 1, number of groups input channels and output channels are divided into. +* `kernel_shape`(`Option>`) - The shape of the convolution kernel. If not present, should be inferred from input W. 
+* `output_padding`(`Option>`) - Additional elements added to the side with higher coordinate indices in the output. Each padding value in "output_padding" must be less than the corresponding stride/dilation dimension. By default, this attribute is a zero vector. +* `output_shape`(`Option>`) - The shape of the output can be explicitly set which will cause pads values to be auto generated. If output_shape is specified pads values are ignored. See doc for details for equations to generate pads. +* `pads`(`Option>`) - Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis. +* `strides`(`Option>`) - Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. + +## Returns + +A `Tensor` of shape (M, N). 
+ +## Examples + +```rust +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::nn::FP16x16NN; +use orion::numbers::FP16x16; +use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; + +fn example_conv_transpose() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + let W = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + 
data.append(FP16x16 { mag: 524288, sign: false }); + let mut X = TensorTrait::new(shape.span(), data.span()); + + return NNTrait::conv_transpose( + @X, + @W, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + ); + +} +>>> [ + [ + [ + [0.0, 1.0, 3.0, 3.0, 2.0], + [3.0, 8.0, 15.0, 12.0, 7.0], + [9.0, 21.0, 36.0, 27.0, 15.0], + [9.0, 20.0, 33.0, 24.0, 13.0], + [6.0, 13.0, 21.0, 15.0, 8.0], + ], + [ + [0.0, 1.0, 3.0, 3.0, 2.0], + [3.0, 8.0, 15.0, 12.0, 7.0], + [9.0, 21.0, 36.0, 27.0, 15.0], + [9.0, 20.0, 33.0, 24.0, 13.0], + [6.0, 13.0, 21.0, 15.0, 8.0], + ], + ] + ] + +```` diff --git a/nodegen/node/conv_transpose.py b/nodegen/node/conv_transpose.py new file mode 100644 index 000000000..ebd320ce6 --- /dev/null +++ b/nodegen/node/conv_transpose.py @@ -0,0 +1,500 @@ + + +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait + + +def conv_transpose( + X, + W, + B=None, + auto_pad=None, + dilations=None, + group=None, + kernel_shape=None, + output_padding=None, + output_shape=None, + pads=None, + strides=None, +): + if dilations is None: + dilations = [1 for s in X.shape[2:]] + if kernel_shape is None: + kernel_shape = W.shape[2:] + if output_padding is None: + output_padding = [0 for s in X.shape[2:]] * 2 + if strides is None: + strides = [1 for s in X.shape[2:]] + if pads is None and auto_pad not in {"SAME_UPPER", "SAME_LOWER"}: + pads = [0 for i in range(2 * len(strides))] + if pads is None: + if output_shape is None: + output_shape = [ + X.shape[i + 2] * strides[i] for i in range(len(strides)) + ] + total_padding = [ + strides[i] * (X.shape[i + 2] - 1) + + output_padding[i] + + ((kernel_shape[i] - 1) * dilations[i] + 1) + - output_shape[i] + for i in range(len(output_shape)) + ] + pads_1 = [] + pads_2 = [] + for i in range(len(output_shape)): + if auto_pad == "SAME_UPPER": + pads_1.append(total_padding[i] 
// 2) + pads_2.append(total_padding[i] - (total_padding[i] // 2)) + else: + pads_1.append(total_padding[i] - (total_padding[i] // 2)) + pads_2.append(total_padding[i] // 2) + pads = pads_1 + pads_2 + n_dims = len(pads) // 2 + else: + n_dims = len(X.shape) - 2 + new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)]) + if output_shape is None: + output_shape = [ + strides[i] * (X.shape[i + 2] - 1) + + output_padding[i] + + ((kernel_shape[i] - 1) * dilations[i] + 1) + - new_pads[i, :].sum() + for i in range(n_dims) + ] + kernel_shape = W.shape[2:] + kernel_size = np.prod(kernel_shape) + num_output_channels = W.shape[1] * group + kernel_dim = num_output_channels // group * kernel_size + C = X.shape[1] # num_inputs_channels + m = kernel_dim # kernel_dim + n = np.prod(X.shape[2:]) # input_image_size + k = C // group + w_reshaped = W.reshape((group, k, m)) + final = None + + # N x C x H x W = X.shape + # C x M/group x k1 x k2 = W.shape + if group == 1: + for image_id in range(X.shape[0]): + w_t = w_reshaped[0].T + gemm = np.matmul(w_t, X[image_id].reshape((k, n))) + gemmc = gemm.reshape((num_output_channels, -1, gemm.shape[-1])) + for c in range(num_output_channels): + res = col2im_naive_implementation( + gemmc[c], output_shape, kernel_shape, dilations, pads, strides + ) + if final is None: + final = np.empty( + X.shape[:1] + (num_output_channels,) + res.shape, + dtype=X.dtype, + ) + if B is not None: + res += B[c] + final[image_id, c, ...] = res[...] + else: + raise NotImplementedError( + f"Implementation for group={group} > 1 is not available yet." 
+ ) + + + return (final.astype(X.dtype),) + + + +def _get_indices(i, shape): + res = np.empty((len(shape),), dtype=np.int64) + k = len(shape) - 1 + while k > 0: + m = i % shape[k] + res[k] = m + i -= m + i /= shape[k] + k -= 1 + res[0] = i + return res + +def _col2im_shape_check(X, output_shape, kernel_shape, dilations, pads, strides): # type: ignore + n_input_plane = X.shape[0] + + kernel_size = np.prod(kernel_shape) + + if n_input_plane % kernel_size != 0: + raise ValueError( + f"Expected size of input's dimension 1 to be divisible by the " + f"product of kernel_size={kernel_size}, " + f"but got input.size(1)={n_input_plane} " + f"and kernel_shape={kernel_shape}, X.shape={X.shape}, output_shape={output_shape}." + ) + + input_length = X.shape[1] + n_dims = len(output_shape) + n_blocks = [] + + + for i in range(n_dims): + n_block = ( + output_shape[i] + + pads[i, :].sum() + - dilations[i] * (kernel_shape[i] - 1) + - 1 + ) // strides[i] + 1 + n_blocks.append(n_block) + + block_size = np.prod(n_blocks) + if input_length != block_size: + raise ValueError( + f"Given n_input_plane={n_input_plane}, X.shape={X.shape}, " + f"output_shape={output_shape}, kernel_shape={kernel_shape}, " + f"dilations={dilations}, pads={pads}, strides={strides}, " + f"expected size of input's dimension 2 to match the calculated number of " + f"sliding blocks {n_blocks} = {block_size}, " + f"but got input.size(2)={input_length}.", + ) + + +def col2im_naive_implementation(data, image_shape, kernel_shape, dilations, pads, strides): # type: ignore + + n_dims = len(pads) // 2 + new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)]) + _col2im_shape_check(data, image_shape, kernel_shape, dilations, new_pads, strides) + + data_col = data + data_im = np.zeros(image_shape, dtype=data.dtype) + + dim_col = [] + for i in range(n_dims): + col = ( + image_shape[i] + + new_pads[i, :].sum() + - (dilations[i] * (kernel_shape[i] - 1) + 1) + ) // strides[i] + 1 + dim_col.append(col) + 
kernel_size = np.prod(kernel_shape) + col_size = np.prod(dim_col) + for c_col in range(kernel_size): + offset = _get_indices(c_col, kernel_shape) + + for col in range(col_size): + + ind_col = _get_indices(col, dim_col) + ind_im = [] + for i in range(n_dims): + ind = ( + ind_col[i] * strides[i] - new_pads[i, 0] + offset[i] * dilations[i] + ) + ind_im.append(ind) + if not _is_out(ind_im, data_im.shape): + data_im[tuple(ind_im)] += data_col[c_col, col] + + + return data_im + + +def _is_out(ind, shape): + for i, s in zip(ind, shape): + if i < 0: + return True + if i >= s: + return True + return False + + +class Conv_transpose(RunAll): + + @staticmethod + def export_conv_transpose() -> None: + x = np.array( + [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3) + ).astype(np.float32) + + w = np.array( + [ + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3) + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ] + ).astype(np.float32) + + y = conv_transpose(x, w, group=1)[0] + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_transpose" + func_sig = "NNTrait::conv_transpose(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + + @staticmethod + def export_convtranspose_1d() -> None: + x = np.array([[[0.0, 1.0, 2.0]]]).astype(np.float32) # (1, 1, 3) + + w = np.array([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]).astype( # (1, 2, 3) + np.float32 + ) + + y = conv_transpose(x, w, group=1)[0] + + x = Tensor(Dtype.FP16x16, 
x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_transpose_1d" + func_sig = "NNTrait::conv_transpose(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + + @staticmethod + def export_convtranspose_3d() -> None: + x = np.array( + [ + [ + [ + [ + [0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 3, 4, 5) + [5.0, 6.0, 7.0, 8.0, 9.0], + [10.0, 11.0, 12.0, 13.0, 14.0], + [15.0, 16.0, 17.0, 18.0, 19.0], + ], + [ + [20.0, 21.0, 22.0, 23.0, 24.0], + [25.0, 26.0, 27.0, 28.0, 29.0], + [30.0, 31.0, 32.0, 33.0, 34.0], + [35.0, 36.0, 37.0, 38.0, 39.0], + ], + [ + [40.0, 41.0, 42.0, 43.0, 44.0], + [45.0, 46.0, 47.0, 48.0, 49.0], + [50.0, 51.0, 52.0, 53.0, 54.0], + [55.0, 56.0, 57.0, 58.0, 59.0], + ], + ] + ] + ] + ).astype(np.float32) + + w = np.array( + [ + [ + [ + [ + [1.0, 1.0, 1.0], # (1, 2, 3, 3, 3) + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ], + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ], + ] + ] + ).astype(np.float32) + + y = conv_transpose(x, w, group=1)[0] + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "conv_transpose_3d" + func_sig = "NNTrait::conv_transpose(" + 
func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + + + @staticmethod + def export_convtranspose_attributes() -> None: + x = np.array( + [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3) + ).astype(np.float32) + + w = np.array( + [ + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3) + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ] + ).astype(np.float32) + + y = conv_transpose(x, w, group=1)[0] + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "conv_transpose_attributes" + func_sig = "NNTrait::conv_transpose(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + + + + @staticmethod + def export_convtranspose_pads() -> None: + x = np.array( + [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3) + ).astype(np.float32) + + w = np.array( + [ + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3) + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ] + ).astype(np.float32) + + y = conv_transpose(x, w, group=1,strides=[3, 2],output_shape=[10, 8], kernel_shape=[3, 3], output_padding=[1, 1],)[0] + + x = Tensor(Dtype.FP16x16, x.shape, 
to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "conv_transpose_pads" + func_sig = "NNTrait::conv_transpose(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![3, 3].span())," + func_sig += "Option::Some(array![1, 1].span())," + func_sig += "Option::Some(array![10, 8].span())," + func_sig += "Option::None," + func_sig += "Option::Some(array![3, 2].span()))" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + @staticmethod + def export_convtranspose_dilations() -> None: + x = np.array( + [[[[3.0, 8.0, 1.0], [9.0, 5.0, 7.0], [3.0, 2.0, 6.0]]]] # (1, 1, 3, 3) + ).astype(np.float32) + w = np.array([[[[7.0, 2.0], [1.0, 9.0]]]]).astype(np.float32) # (1, 1, 2, 2) + + y = conv_transpose(x, w, group=1, dilations=[2, 2])[0] + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_transpose_dilations" + func_sig = "NNTrait::conv_transpose(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None,)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + + @staticmethod + def export_convtranspose_autopad_same() -> None: + x = np.array( + [[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3) + ).astype(np.float32) + + w = np.array( + [ + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 
1.0, 1.0]], # (1, 2, 3, 3) + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ] + ).astype(np.float32) + + y = conv_transpose(x, w, group=1, auto_pad="SAME_UPPER", strides=[2, 2])[0] + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_transpose_autopad_same" + func_sig = "NNTrait::conv_transpose(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::Some(AUTO_PAD::SAME_UPPER)," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span()))" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + + + + + + diff --git a/src/numbers.cairo b/src/numbers.cairo index 936c128e1..1ce8a803d 100644 --- a/src/numbers.cairo +++ b/src/numbers.cairo @@ -2,10 +2,10 @@ mod fixed_point; mod complex_number; use orion::numbers::fixed_point::core::FixedTrait; -use orion::numbers::fixed_point::implementations::fp8x23::core::{ONE as ONE_fp8x23 }; -use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE as ONE_fp16x16 }; -use orion::numbers::fixed_point::implementations::fp64x64::core::{ONE as ONE_fp64x64 }; -use orion::numbers::fixed_point::implementations::fp32x32::core::{ONE as ONE_fp32x32 }; +use orion::numbers::fixed_point::implementations::fp8x23::core::{ONE as ONE_fp8x23}; +use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE as ONE_fp16x16}; +use orion::numbers::fixed_point::implementations::fp64x64::core::{ONE as ONE_fp64x64}; +use orion::numbers::fixed_point::implementations::fp32x32::core::{ONE as ONE_fp32x32}; // Common methods from Fixed Point and Signed Integers. 
trait NumberTrait { @@ -1535,7 +1535,7 @@ impl I8Number of NumberTrait { 0 } fn is_zero(self: i8) -> bool { - self == 0 + self == 0 } fn half() -> i8 { @@ -1571,7 +1571,7 @@ impl I8Number of NumberTrait { } fn max_value() -> i8 { - 127 + 127 } fn min(self: i8, other: i8) -> i8 { @@ -1661,7 +1661,7 @@ impl I8Number of NumberTrait { } fn is_neg_inf(self: i8) -> bool { - self == -127 + self == -127 } fn bitwise_and(lhs: i8, rhs: i8) -> i8 { @@ -1702,7 +1702,7 @@ impl I8Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i8 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -1729,7 +1729,7 @@ impl I8IntoFP8x23 of Into { } let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); - FP8x23 {mag: number_u32 * ONE_fp8x23, sign: number_sign} + FP8x23 { mag: number_u32 * ONE_fp8x23, sign: number_sign } } } @@ -1742,7 +1742,7 @@ impl I8IntoFP16x16 of Into { } let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); - FP16x16 {mag: number_u32 * ONE_fp16x16, sign: number_sign} + FP16x16 { mag: number_u32 * ONE_fp16x16, sign: number_sign } } } @@ -1755,7 +1755,7 @@ impl I8IntoFP64x64 of Into { } let number_felt: felt252 = self_positive.into(); let number_u128: u128 = number_felt.try_into().unwrap(); - FP64x64 {mag: number_u128 * ONE_fp64x64, sign: number_sign} + FP64x64 { mag: number_u128 * ONE_fp64x64, sign: number_sign } } } @@ -1768,7 +1768,7 @@ impl I8IntoFP32x32 of Into { } let number_felt: felt252 = self_positive.into(); let number_u128: u64 = number_felt.try_into().unwrap(); - FP32x32 {mag: number_u128 * ONE_fp32x32, sign: number_sign} + FP32x32 { mag: number_u128 * ONE_fp32x32, sign: number_sign } } } @@ -1877,7 +1877,7 @@ 
impl I16Number of NumberTrait { 0 } fn is_zero(self: i16) -> bool { - self == 0 + self == 0 } fn half() -> i16 { @@ -2003,7 +2003,7 @@ impl I16Number of NumberTrait { } fn is_neg_inf(self: i16) -> bool { - self == -32767 + self == -32767 } fn bitwise_and(lhs: i16, rhs: i16) -> i16 { @@ -2044,7 +2044,7 @@ impl I16Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i16 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -2167,7 +2167,7 @@ impl I32Number of NumberTrait { 0 } fn is_zero(self: i32) -> bool { - self == 0 + self == 0 } fn half() -> i32 { @@ -2203,7 +2203,7 @@ impl I32Number of NumberTrait { } fn max_value() -> i32 { - 2147483647 + 2147483647 } fn min(self: i32, other: i32) -> i32 { @@ -2281,7 +2281,7 @@ impl I32Number of NumberTrait { } fn INF() -> i32 { - 2147483647 + 2147483647 } fn is_inf(self: i32) -> bool { @@ -2289,11 +2289,11 @@ impl I32Number of NumberTrait { } fn is_pos_inf(self: i32) -> bool { - self == 2147483647 + self == 2147483647 } fn is_neg_inf(self: i32) -> bool { - self == -2147483647 + self == -2147483647 } fn bitwise_and(lhs: i32, rhs: i32) -> i32 { @@ -2334,7 +2334,7 @@ impl I32Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i32 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -2470,7 +2470,7 @@ impl I64Number of NumberTrait { 0 } fn is_zero(self: i64) -> bool { - self == 0 + self == 0 } fn half() -> i64 { @@ -2506,7 +2506,7 @@ impl I64Number of NumberTrait { } fn max_value() -> i64 { - 9223372036854775807 + 
9223372036854775807 } fn min(self: i64, other: i64) -> i64 { @@ -2584,7 +2584,7 @@ impl I64Number of NumberTrait { } fn INF() -> i64 { - 9223372036854775807 + 9223372036854775807 } fn is_inf(self: i64) -> bool { @@ -2592,11 +2592,11 @@ impl I64Number of NumberTrait { } fn is_pos_inf(self: i64) -> bool { - self == 9223372036854775807 + self == 9223372036854775807 } fn is_neg_inf(self: i64) -> bool { - self == -9223372036854775807 + self == -9223372036854775807 } fn bitwise_and(lhs: i64, rhs: i64) -> i64 { @@ -2637,7 +2637,7 @@ impl I64Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i64 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -2760,7 +2760,7 @@ impl I128Number of NumberTrait { 0 } fn is_zero(self: i128) -> bool { - self == 0 + self == 0 } fn half() -> i128 { @@ -2796,7 +2796,7 @@ impl I128Number of NumberTrait { } fn max_value() -> i128 { - 170141183460469231731687303715884105727 + 170141183460469231731687303715884105727 } fn min(self: i128, other: i128) -> i128 { @@ -2874,19 +2874,20 @@ impl I128Number of NumberTrait { } fn INF() -> i128 { - 170141183460469231731687303715884105727 + 170141183460469231731687303715884105727 } fn is_inf(self: i128) -> bool { - (self == 170141183460469231731687303715884105727 || self == -170141183460469231731687303715884105727) + (self == 170141183460469231731687303715884105727 + || self == -170141183460469231731687303715884105727) } fn is_pos_inf(self: i128) -> bool { - self == 170141183460469231731687303715884105727 + self == 170141183460469231731687303715884105727 } fn is_neg_inf(self: i128) -> bool { - self == -170141183460469231731687303715884105727 + self == -170141183460469231731687303715884105727 } fn bitwise_and(lhs: i128, rhs: i128) -> i128 { @@ -2927,7 
+2928,7 @@ impl I128Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i128 = felt_result.try_into().unwrap(); // assigning the sign and returning diff --git a/src/numbers/fixed_point/implementations/fp16x16/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/core.cairo index cff7996af..a260d886f 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/core.cairo @@ -436,9 +436,8 @@ fn _i8_try_from_fp(x: FP16x16) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, - Option::None(_) => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo index b3fe4d39b..176c1a115 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo @@ -451,7 +451,7 @@ fn _i8_try_from_fp(x: FP16x16W) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, Option::None(_) => Option::None(()) } diff --git a/src/numbers/fixed_point/implementations/fp32x32/core.cairo b/src/numbers/fixed_point/implementations/fp32x32/core.cairo index 9fa722e8e..34b06bc44 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/core.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/core.cairo @@ -402,9 +402,8 @@ fn _i8_try_from_fp(x: FP32x32) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, - Option::None(_) => Option::None(()) } } diff --git 
a/src/numbers/fixed_point/implementations/fp64x64/core.cairo b/src/numbers/fixed_point/implementations/fp64x64/core.cairo index c98cb7c57..d35cb9cfa 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/core.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/core.cairo @@ -402,9 +402,8 @@ fn _i8_try_from_fp(x: FP64x64) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, - Option::None(_) => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp8x23/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/core.cairo index b1ab1b6ac..6db9a5a43 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/core.cairo @@ -425,7 +425,7 @@ fn _i32_into_fp(x: FP8x23) -> i32 { fn _i8_try_from_fp(x: FP8x23) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); -// Option::Some(i8 { mag: unscaled_mag.unwrap(), sign: x.sign }) + // Option::Some(i8 { mag: unscaled_mag.unwrap(), sign: x.sign }) match unscaled_mag { Option::Some(val) => { let number_felt: felt252 = unscaled_mag.unwrap().into(); @@ -433,7 +433,7 @@ fn _i8_try_from_fp(x: FP8x23) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, Option::None(_) => Option::None(()) } diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo index c4b49c798..9d9b985de 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo @@ -439,7 +439,7 @@ fn _i8_try_from_fp(x: FP8x23W) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, Option::None(_) => Option::None(()) } diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 
3c99f4733..522aa3eb5 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -14,6 +14,7 @@ use orion::operators::tensor::core::Tensor; /// hard_sigmoid - Applies the Hard Sigmoid function to an n-dimensional input tensor. /// thresholded_relu - Performs the thresholded relu activation function element-wise. /// gemm - Performs General Matrix multiplication. +/// conv_transpose - Performs the convolution transpose of the input data tensor and weigth tensor. trait NNTrait { /// # NNTrait::relu /// @@ -694,4 +695,146 @@ trait NNTrait { transA: bool, transB: bool ) -> Tensor; + /// # NNTrait::conv_transpose + /// + /// ```rust + /// conv_transpose( + /// X: @Tensor, + /// W: @Tensor, + /// B: Option<@Tensor>, + /// auto_pad: Option, + /// dilations: Option>, + /// group: Option, + /// kernel_shape: Option>, + /// output_padding: Option>, + /// output_shape: Option>, + /// pads: Option>, + /// strides: Option>, + /// ) -> Tensor + /// ``` + /// + /// The convolution transpose operator consumes an input tensor and a input weigth tensor, and computes the output. + /// + /// ## Args + /// + /// * `X`(`@Tensor`) - Input data tensor, has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W if 2D, otherwise the size is (N x C x D1 x D2 ... x Dn). + /// * `W`(`@Tensor`) - The weight tensor, has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps if 2D, for more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x ... x kn). + /// * `B`(`Option<@Tensor>`) - Optional 1D bias to be added to the convolution, has size of M. + /// * `auto_pad`(`Option`) - Default is NOTSET, auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = input_shape[i] * strides[i]` for each axis `i`. 
+ /// * `dilations`(`Option>`) - Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis. + /// * `group`(`Option`) - Default is 1, number of groups input channels and output channels are divided into. + /// * `kernel_shape`(`Option>`) - The shape of the convolution kernel. If not present, should be inferred from input W. + /// * `output_padding`(`Option>`) - Additional elements added to the side with higher coordinate indices in the output. Each padding value in "output_padding" must be less than the corresponding stride/dilation dimension. By default, this attribute is a zero vector. + /// * `output_shape`(`Option>`) - The shape of the output can be explicitly set which will cause pads values to be auto generated. If output_shape is specified pads values are ignored. See doc for details for equations to generate pads. + /// * `pads`(`Option>`) - Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis. + /// * `strides`(`Option>`) - Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. + /// + /// ## Returns + /// + /// A `Tensor` of shape (M, N). 
+ /// + /// ## Examples + /// + /// ```rust + /// use orion::operators::nn::NNTrait; + /// use orion::numbers::FixedTrait; + /// use orion::operators::nn::FP16x16NN; + /// use orion::numbers::FP16x16; + /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; + /// + /// fn example_conv_transpose() -> Tensor { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(2); + /// shape.append(3); + /// shape.append(3); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// let W = TensorTrait::new(shape.span(), data.span()); + /// + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(1); + /// shape.append(3); + /// shape.append(3); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 131072, sign: false }); + /// data.append(FP16x16 { mag: 196608, sign: false }); + /// 
data.append(FP16x16 { mag: 262144, sign: false }); + /// data.append(FP16x16 { mag: 327680, sign: false }); + /// data.append(FP16x16 { mag: 393216, sign: false }); + /// data.append(FP16x16 { mag: 458752, sign: false }); + /// data.append(FP16x16 { mag: 524288, sign: false }); + /// let mut X = TensorTrait::new(shape.span(), data.span()); + /// + /// return NNTrait::conv_transpose( + /// @X, + /// @W, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// ); + /// + /// } + /// >>> [ + /// [ + /// [ + /// [0.0, 1.0, 3.0, 3.0, 2.0], + /// [3.0, 8.0, 15.0, 12.0, 7.0], + /// [9.0, 21.0, 36.0, 27.0, 15.0], + /// [9.0, 20.0, 33.0, 24.0, 13.0], + /// [6.0, 13.0, 21.0, 15.0, 8.0], + /// ], + /// [ + /// [0.0, 1.0, 3.0, 3.0, 2.0], + /// [3.0, 8.0, 15.0, 12.0, 7.0], + /// [9.0, 21.0, 36.0, 27.0, 15.0], + /// [9.0, 20.0, 33.0, 24.0, 13.0], + /// [6.0, 13.0, 21.0, 15.0, 8.0], + /// ], + /// ] + /// ] + /// + /// ```` + /// + fn conv_transpose( + X: @Tensor, + W: @Tensor, + B: Option<@Tensor>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + output_padding: Option>, + output_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor; } diff --git a/src/operators/nn/functional.cairo b/src/operators/nn/functional.cairo index a0fd96cc8..53e9591ed 100644 --- a/src/operators/nn/functional.cairo +++ b/src/operators/nn/functional.cairo @@ -10,3 +10,4 @@ mod logsoftmax; mod thresholded_relu; mod hard_sigmoid; mod gemm; +mod conv_transpose; diff --git a/src/operators/nn/functional/conv_transpose.cairo b/src/operators/nn/functional/conv_transpose.cairo new file mode 100644 index 000000000..adb933c23 --- /dev/null +++ b/src/operators/nn/functional/conv_transpose.cairo @@ -0,0 +1,648 @@ +use orion::numbers::NumberTrait; +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; +use 
orion::operators::vec::{NullableVec, NullableVecImpl}; +use orion::operators::tensor::core::{stride}; + +#[derive(Copy, Drop)] +enum AUTO_PAD { + NOTSET, + SAME_UPPER, + SAME_LOWER, + VALID +} + +fn conv_transpose< + T, MAG, +TensorTrait, +NumberTrait, +Copy, +Drop, +Add, +Mul, +>( + X: @Tensor, + W: @Tensor, + B: Option<@Tensor>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + output_padding: Option>, + output_shape: Option>, + pads: Option>, + strides: Option>, +) -> Tensor { + let auto_pad = match auto_pad { + Option::Some(auto_pad) => auto_pad, + Option::None => { AUTO_PAD::NOTSET }, + }; + let dilations = match dilations { + Option::Some(dilations) => dilations, + Option::None => { + let mut dilations = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*X).shape.len() { + break; + } + dilations.append(1); + i += 1; + }; + dilations.span() + }, + }; + let kernel_shape = match kernel_shape { + Option::Some(kernel_shape) => kernel_shape, + Option::None => { + let mut kernel_shape = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*W).shape.len() { + break; + } + kernel_shape.append(*(*W).shape.at(i)); + i += 1; + }; + kernel_shape.span() + }, + }; + let output_padding = match output_padding { + Option::Some(output_padding) => output_padding, + Option::None => { + let mut output_padding = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*X).shape.len() { + break; + } + output_padding.append(0); + output_padding.append(0); + i += 1; + }; + output_padding.span() + }, + }; + let strides = match strides { + Option::Some(strides) => strides, + Option::None => { + let mut strides = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*X).shape.len() { + break; + } + strides.append(1); + i += 1; + }; + strides.span() + }, + }; + let (pads, n_dims, output_shape) = match pads { + Option::Some(pads) => { + let n_dims = (*X).shape.len() - 2; + + let output_shape = match output_shape { + Option::Some(output_shape) 
=> output_shape, + Option::None => { + let mut output_shape = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + output_shape + .append( + (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + + *output_padding.at(i) + + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) + - (*pads.at(i) + *pads.at(i + n_dims)) + ); + i += 1; + }; + output_shape.span() + }, + }; + (pads, n_dims, output_shape) + }, + Option::None => { + let (pads, n_dims, output_shape) = match auto_pad { + AUTO_PAD::NOTSET => { + let mut pads = ArrayTrait::new(); + let mut i = 0; + loop { + if i == strides.len() * 2 { + break; + } + pads.append(0); + i += 1; + }; + let pads = pads.span(); + + let n_dims = (*X).shape.len() - 2; + + let output_shape = match output_shape { + Option::Some(output_shape) => output_shape, + Option::None => { + let mut output_shape = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + + output_shape + .append( + (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + + *output_padding.at(i) + + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) + - (*pads.at(i) + *pads.at(i + n_dims)) + ); + i += 1; + }; + output_shape.span() + }, + }; + + (pads, n_dims, output_shape) + }, + AUTO_PAD::SAME_UPPER => { + let output_shape = match output_shape { + Option::Some(output_shape) => output_shape, + Option::None => { + let mut output_shape = ArrayTrait::new(); + let mut i = 0; + loop { + if i == strides.len() { + break; + } + output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); + i += 1; + }; + output_shape.span() + }, + }; + let mut total_padding = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == output_shape.len() { + break; + } + total_padding + .append( + (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + + *output_padding.at(i) + + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) + - *output_shape.at(i) + ); + i += 1; + }; + let total_padding = total_padding.span(); + + let mut pads = ArrayTrait::new(); + + let mut i = 0; + 
loop { + if i == output_shape.len() { + break; + } + pads.append(*total_padding.at(i) / 2); + i += 1; + }; + let mut i = 0; + loop { + if i == output_shape.len() { + break; + } + pads.append(*total_padding.at(i) - (*total_padding.at(i) / 2)); + i += 1; + }; + (pads.span(), pads.len() / 2, output_shape) + }, + AUTO_PAD::SAME_LOWER => { + let output_shape = match output_shape { + Option::Some(output_shape) => output_shape, + Option::None => { + let mut output_shape = ArrayTrait::new(); + let mut i = 0; + loop { + if i == strides.len() { + break; + } + output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); + i += 1; + }; + output_shape.span() + }, + }; + let mut total_padding = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == output_shape.len() { + break; + } + total_padding + .append( + (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + + *output_padding.at(i) + + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) + - *output_shape.at(i) + ); + i += 1; + }; + let total_padding = total_padding.span(); + + let mut pads = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == output_shape.len() { + break; + } + pads.append(*total_padding.at(i) - *total_padding.at(i) / 2); + i += 1; + }; + let mut i = 0; + loop { + if i == output_shape.len() { + break; + } + pads.append(*total_padding.at(i) / 2); + i += 1; + }; + (pads.span(), pads.len() / 2, output_shape) + }, + AUTO_PAD::VALID => { + let mut pads = ArrayTrait::new(); + let mut i = 0; + loop { + if i == strides.len() * 2 { + break; + } + pads.append(0); + i += 1; + }; + let pads = pads.span(); + + let n_dims = (*X).shape.len() - 2; + let output_shape = match output_shape { + Option::Some(output_shape) => output_shape, + Option::None => { + let mut output_shape = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + output_shape + .append( + (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + + *output_padding.at(i) + + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) + - (*pads.at(i) + 
*pads.at(i + n_dims)) + ); + i += 1; + }; + output_shape.span() + }, + }; + (pads, n_dims, output_shape) + }, + }; + (pads, n_dims, output_shape) + }, + }; + let group = match group { + Option::Some(group) => group, + Option::None => { 1 }, + }; + + let mut kernel_shape = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*W).shape.len() { + break; + } + kernel_shape.append(*(*W).shape.at(i)); + i += 1; + }; + let kernel_shape = kernel_shape.span(); + let kernel_size = prod(kernel_shape, 0); + + let mut num_output_channels = *(*W).shape.at(1) * group; + let mut kernel_dim = (num_output_channels / group) * kernel_size; + + let C = *(*X).shape.at(1); + let m = kernel_dim; + let n = prod((*X).shape, 2); + let k = C / group; + + let w_reshaped = TensorTrait::new(array![group, k, m].span(), (*W).data); + + let mut final = ArrayTrait::new(); + + if group == 1 { + let mut image_id = 0; + loop { + if image_id == *(*X).shape.at(0) { + break; + } + let w_t = TensorTrait::new(array![k, m].span(), (*W).data) + .transpose(array![1, 0].span()); + + let image = SpanTrait::slice((*X).data, image_id * k * n, k * n); + let gemm = w_t.matmul(@TensorTrait::new(array![k, n].span(), image)); + + let gemmc = gemm + .reshape(array![num_output_channels, m / num_output_channels, n].span()); + let mut c = 0; + loop { + if c == num_output_channels { + break; + } + let gemmc_c = TensorTrait::new( + array![m / num_output_channels, n].span(), + SpanTrait::slice( + gemmc.data, (m / num_output_channels) * n * c, (m / num_output_channels) * n + ) + ); + + let mut res = col2im_naive_implementation( + @gemmc_c, output_shape, kernel_shape, dilations, pads, strides + ); + + match B { + Option::Some(B) => { + let mut i = 0; + loop { + if i == res.len() { + break; + } + res.set(i, res.at(i) + *(*B).data.at(c)); + i += 1; + }; + }, + Option::None => {}, + } + c += 1; + + let mut i = 0; + loop { + if i == res.len() { + break; + } + final.append(res.at(i)); + i += 1; + }; + }; + image_id += 1; + }; + } 
else { + panic(array!['group > 1 not supported']); + } + let mut shape = array![*(*X).shape.at(0), num_output_channels]; + + let mut i = 0; + loop { + if i == output_shape.len() { + break; + } + shape.append(*output_shape.at(i)); + i += 1; + }; + + return TensorTrait::new(shape.span(), final.span()); +} + +fn get_image, +Copy>(self: @Tensor, row: usize) -> Span { + assert((*self).shape.len() == 2, 'Expected a 2D tensor'); + + let row_length = *self.shape[1]; + let start = row * row_length; + + (*self).data.slice(start, row_length) +} + +fn col2im_naive_implementation< + T, MAG, +TensorTrait, +NumberTrait, +Copy, +Drop, +Add, +>( + data: @Tensor, + image_shape: Span, + kernel_shape: Span, + dilations: Span, + pads: Span, + strides: Span, +) -> NullableVec { + let n_dims = pads.len() / 2; + + col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides); + + let data_col = data; + let mut dim_col = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + dim_col + .append( + (*image_shape.at(i) + + (*pads.at(i) + *pads.at(i + n_dims)) + - (*dilations.at(i) * (*kernel_shape.at(i) - 1) + 1)) + / *strides.at(i) + + 1 + ); + + i += 1; + }; + let dim_col = dim_col.span(); + + let stride_img = stride(image_shape); + + let mut data_im = NullableVecImpl::new(); + data_im.set(*image_shape.at(0) * *stride_img.at(0) - 1, NumberTrait::zero()); + + let kernel_size = prod(kernel_shape, 0); + let col_size = prod(dim_col, 0); + let mut c_col = 0; + loop { + if c_col == kernel_size { + break; + } + let offset = get_indices(c_col, kernel_shape).span(); + + let mut col = 0; + loop { + if col == col_size { + break; + } + let ind_col = get_indices(col, dim_col).span(); + let mut ind_im = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + if (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)) < *pads + .at(i) { + let neg_index = *pads.at(i) + - (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * 
*dilations.at(i)); + ind_im.append(*image_shape.at(i) + neg_index); + } else { + ind_im + .append( + *ind_col.at(i) * *strides.at(i) + + *offset.at(i) * *dilations.at(i) + - *pads.at(i) + ); + } + + i += 1; + }; + let ind_im = ind_im.span(); + if !is_out(ind_im, image_shape) { + let mut index = 0; + let mut i = 0; + loop { + if i == image_shape.len() { + break; + } + index += *stride_img.at(i) * *ind_im.at(i); + i += 1; + }; + data_im.set(index, data_im.at(index) + *(*data).data.at(c_col * col_size + col)); + } + col += 1; + }; + c_col += 1; + }; + + return data_im; +} + +fn col2im_shape_check, +Copy, +Drop,>( + X: @Tensor, + output_shape: Span, + kernel_shape: Span, + dilations: Span, + pads: Span, + strides: Span, +) { + let n_input_plane = *(*X).shape.at(0); + + let kernel_size = prod(kernel_shape, 0); + + assert(n_input_plane % kernel_size == 0, 'wrong input dimension'); + + let input_length = *(*X).shape.at(1); + let n_dims = output_shape.len(); + let mut n_blocks = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == n_dims { + break; + } + n_blocks + .append( + (*output_shape.at(i) + + (*pads.at(i) + *pads.at(i + n_dims)) + - *dilations.at(i) * (*kernel_shape.at(i) - 1) + - 1) + / *strides.at(i) + + 1 + ); + i += 1; + }; + + let block_size = prod(n_blocks.span(), 0); + + assert(input_length == block_size, 'input_length != block_size'); +} + + +fn rec_add_chars(ref arr: Array, str_len: felt252, str: u128) { + if str_len == 0 { + return; + } + let (str, char) = DivRem::div_rem(str, 256_u128.try_into().unwrap()); + rec_add_chars(ref arr, str_len - 1, str); + if char != 0 { + arr.append(char); + } +} + +fn get_indices(index: usize, shape: Span,) -> Array { + let mut i = index; + let mut res = ArrayTrait::new(); + let mut k = shape.len() - 1; + loop { + if k == 0 { + break; + } + let m = i % *shape.at(k); + res.append(m); + i -= m; + i /= *shape.at(k); + k -= 1; + }; + + let mut new_res = ArrayTrait::new(); + new_res.append(i); + let mut i = shape.len() - 1; 
+ loop { + if i == 0 { + break; + } + new_res.append(*res.at(i - 1)); + i -= 1; + }; + return new_res; +} + +fn is_out(ind: Span, shape: Span,) -> bool { + let mut n = 0; + let is_out = loop { + if n == ind.len() { + break false; + } + let s = *shape.at(n); + let i = *ind.at(n); + if i < 0 { + break true; + } + if i >= s { + break true; + } + n += 1; + }; + return is_out; +} + + +fn rec_get_indices(ref arr: Array, mut i: usize, mut k: usize, shape: Span,) { + if k == 0 { + arr.append(i); + return; + } + let m = i % *shape.at(k); + i -= m; + i /= *shape.at(k); + k -= 1; + rec_get_indices(ref arr, i, k, shape); + if k != 0 { + arr.append(m); + } +} + + +fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( + pA: Span, start: usize +) -> T { + let mut i = start; + let mut prod = NumberTrait::one(); + loop { + if i == pA.len() { + break; + } + prod = prod * (*pA.at(i)); + i += 1; + }; + return prod; +} + diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 785d3c9fa..605c5234c 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -72,4 +72,32 @@ impl FP16x16NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv_transpose( + X: @Tensor, + W: @Tensor, + B: Option<@Tensor>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + output_padding: Option>, + output_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv_transpose::conv_transpose( + X, + W, + B, + auto_pad, + dilations, + group, + kernel_shape, + output_padding, + output_shape, + pads, + strides + ) + } } diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index 0427ea5f7..59c206c96 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ 
-66,4 +66,32 @@ impl FP32x32NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv_transpose( + X: @Tensor, + W: @Tensor, + B: Option<@Tensor>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + output_padding: Option>, + output_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv_transpose::conv_transpose( + X, + W, + B, + auto_pad, + dilations, + group, + kernel_shape, + output_padding, + output_shape, + pads, + strides + ) + } } diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index fec810679..edd69e9d5 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -66,4 +66,32 @@ impl FP64x64NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv_transpose( + X: @Tensor, + W: @Tensor, + B: Option<@Tensor>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + output_padding: Option>, + output_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv_transpose::conv_transpose( + X, + W, + B, + auto_pad, + dilations, + group, + kernel_shape, + output_padding, + output_shape, + pads, + strides + ) + } } diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index 9f5416121..b9b73cf15 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -70,4 +70,32 @@ impl FP8x23NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv_transpose( + X: @Tensor, + W: @Tensor, + B: Option<@Tensor>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + output_padding: Option>, + output_shape: Option>, + pads: Option>, + strides: 
Option>, + ) -> Tensor { + functional::conv_transpose::conv_transpose( + X, + W, + B, + auto_pad, + dilations, + group, + kernel_shape, + output_padding, + output_shape, + pads, + strides + ) + } } diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index 1db66a1c6..88ba7ff7f 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -61,4 +61,32 @@ impl I32NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv_transpose( + X: @Tensor, + W: @Tensor, + B: Option<@Tensor>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + output_padding: Option>, + output_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv_transpose::conv_transpose( + X, + W, + B, + auto_pad, + dilations, + group, + kernel_shape, + output_padding, + output_shape, + pads, + strides + ) + } } diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index e67bb7504..4902e26d7 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -61,4 +61,32 @@ impl I8NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv_transpose( + X: @Tensor, + W: @Tensor, + B: Option<@Tensor>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + output_padding: Option>, + output_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv_transpose::conv_transpose( + X, + W, + B, + auto_pad, + dilations, + group, + kernel_shape, + output_padding, + output_shape, + pads, + strides + ) + } } diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 370880e8d..96fa43d0c 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ 
b/src/operators/nn/implementations/nn_u32.cairo @@ -61,4 +61,32 @@ impl U32NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv_transpose( + X: @Tensor, + W: @Tensor, + B: Option<@Tensor>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + output_padding: Option>, + output_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv_transpose::conv_transpose( + X, + W, + B, + auto_pad, + dilations, + group, + kernel_shape, + output_padding, + output_shape, + pads, + strides + ) + } } diff --git a/src/operators/sequence/functional/sequence_at.cairo b/src/operators/sequence/functional/sequence_at.cairo index 7953abb9d..4a4aa9203 100644 --- a/src/operators/sequence/functional/sequence_at.cairo +++ b/src/operators/sequence/functional/sequence_at.cairo @@ -8,7 +8,9 @@ use orion::numbers::{NumberTrait, I32IntoU32, U32IntoI32}; fn sequence_at, impl TCopy: Copy, impl TDrop: Drop>( sequence: Array>, position: Tensor ) -> Tensor { - assert(position.shape.len() == 0 && position.data.len().into() == 1, 'Position must be a scalar'); + assert( + position.shape.len() == 0 && position.data.len().into() == 1, 'Position must be a scalar' + ); let position_value_i32: i32 = *position.data.at(0); let is_negative: bool = position_value_i32 < 0; diff --git a/src/operators/sequence/functional/sequence_erase.cairo b/src/operators/sequence/functional/sequence_erase.cairo index dd2a2aad6..573087b1f 100644 --- a/src/operators/sequence/functional/sequence_erase.cairo +++ b/src/operators/sequence/functional/sequence_erase.cairo @@ -3,7 +3,7 @@ use core::option::OptionTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::I32Tensor; -use orion::numbers::{ NumberTrait, I32IntoU32}; +use orion::numbers::{NumberTrait, I32IntoU32}; /// Cf: SequenceTrait::sequence_erase docstring fn sequence_erase, impl TCopy: Copy, impl TDrop: Drop>( @@ -56,4 
+56,3 @@ fn sequence_erase, impl TCopy: Copy, impl TDr return output_sequence; } - diff --git a/src/operators/sequence/functional/sequence_insert.cairo b/src/operators/sequence/functional/sequence_insert.cairo index 256a1b91c..412fc6c4b 100644 --- a/src/operators/sequence/functional/sequence_insert.cairo +++ b/src/operators/sequence/functional/sequence_insert.cairo @@ -3,7 +3,7 @@ use core::option::OptionTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::I32Tensor; -use orion::numbers::{ NumberTrait, I32IntoU32}; +use orion::numbers::{NumberTrait, I32IntoU32}; /// Cf: SequenceTrait::sequence_insert docstring fn sequence_insert, impl TCopy: Copy, impl TDrop: Drop>( @@ -55,4 +55,4 @@ fn sequence_insert, impl TCopy: Copy, impl TD }; return new_sequence; -} \ No newline at end of file +} diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 70344eb97..4245b418f 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -5559,10 +5559,14 @@ fn squeeze(self: @Tensor, axes: Option>) -> Tensor { let mut reshape: Array = ArrayTrait::new(); let mut index = 0_i32; let axis = if *axis < 0 { - assert(*axis <= (*self.shape).len().into(), 'axis out of accepted range'); + assert( + *axis <= (*self.shape).len().into(), 'axis out of accepted range' + ); (*self.shape).len().into() - *axis } else { - assert(*axis < (*self.shape).len().into(), 'axis out of accepted range'); + assert( + *axis < (*self.shape).len().into(), 'axis out of accepted range' + ); *axis }; diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo index 8c7e2b359..894dfc8d4 100644 --- a/src/operators/tensor/helpers.cairo +++ b/src/operators/tensor/helpers.cairo @@ -496,4 +496,4 @@ impl SpanPartialOrd, +Copy, +PartialEq, +PartialOrd> of Par fn lt(lhs: Span, rhs: Span) -> bool { span_cmp(lhs, rhs) < 0 } -} \ No newline at end of file +} diff --git 
a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 50383d2df..890a2d3b2 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -3,7 +3,7 @@ use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::{TryInto, Into}; -use orion::numbers::{ I32Div, I32DivEq }; +use orion::numbers::{I32Div, I32DivEq}; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -221,13 +221,7 @@ impl I32Tensor of TensorTrait { fn quantize_linear( self: @Tensor, y_scale: @Tensor, y_zero_point: @Tensor ) -> Tensor:: { - quantization::quantize_linear::quantize_linear( - self, - y_scale, - y_zero_point, - -127, - 127 - ) + quantization::quantize_linear::quantize_linear(self, y_scale, y_zero_point, -127, 127) } fn dequantize_linear( diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 7e81d90eb..9366a0347 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -3,7 +3,7 @@ use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::{TryInto, Into}; -use orion::numbers::{ I8Div, I8DivEq }; +use orion::numbers::{I8Div, I8DivEq}; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo index 372d5b1c2..bb0d9579b 100644 --- a/src/operators/tensor/math/layer_normalization.cairo +++ b/src/operators/tensor/math/layer_normalization.cairo @@ -3,7 +3,7 @@ use core::array::ArrayTrait; use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::Into; -use orion::numbers::{ 
NumberTrait, I32IntoU32}; +use orion::numbers::{NumberTrait, I32IntoU32}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; @@ -51,7 +51,6 @@ fn layer_normalization< Option::None => 1, }; - let axis = if axis < 0 { X_rank - axis.into() } else { diff --git a/src/test_helper/tensor/i32.cairo b/src/test_helper/tensor/i32.cairo index 0451fa442..89979eef0 100644 --- a/src/test_helper/tensor/i32.cairo +++ b/src/test_helper/tensor/i32.cairo @@ -93,7 +93,7 @@ fn i32_tensor_3x3_neg_helper() -> Tensor { sizes.append(3); let mut data = ArrayTrait::new(); - + data.append(0_i32); data.append(-1_i32); data.append(-2_i32); @@ -338,7 +338,6 @@ fn i32_tensor_3x3x3_helper() -> Tensor { data.append(24_i32); data.append(25_i32); data.append(26_i32); - let tensor = TensorTrait::new(sizes.span(), data.span()); diff --git a/src/test_helper/tensor/i8.cairo b/src/test_helper/tensor/i8.cairo index e492ad913..6d85e4b3e 100644 --- a/src/test_helper/tensor/i8.cairo +++ b/src/test_helper/tensor/i8.cairo @@ -93,7 +93,7 @@ fn i8_tensor_3x3_neg_helper() -> Tensor { sizes.append(3); let mut data = ArrayTrait::new(); - + data.append(0_i8); data.append(-1_i8); data.append(-2_i8); @@ -338,7 +338,6 @@ fn i8_tensor_3x3x3_helper() -> Tensor { data.append(24_i8); data.append(25_i8); data.append(26_i8); - let tensor = TensorTrait::new(sizes.span(), data.span()); diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 6c70b42cb..5853c3664 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -936,3 +936,10 @@ mod split_fp16x16_2d_variable_parts; mod split_fp16x16_zero_size; mod split_fp16x16_1d_uneven; mod split_fp16x16_2d_uneven; +mod conv_transpose; +mod conv_transpose_1d; +mod conv_transpose_3d; +mod conv_transpose_attributes; +mod conv_transpose_autopad_same; +mod conv_transpose_dilations; +mod conv_transpose_pads; diff --git a/tests/nodes/clip_fp16x16_2d.cairo b/tests/nodes/clip_fp16x16_2d.cairo index d779d2790..b576203eb 100644 --- 
a/tests/nodes/clip_fp16x16_2d.cairo +++ b/tests/nodes/clip_fp16x16_2d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp16x16_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP16x16 { mag: 655360, sign: true }), Option::Some(FP16x16 { mag: 1310720, sign: false })); + let y = input_0 + .clip( + Option::Some(FP16x16 { mag: 655360, sign: true }), + Option::Some(FP16x16 { mag: 1310720, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/clip_fp16x16_3d.cairo b/tests/nodes/clip_fp16x16_3d.cairo index d82de09dc..98bed1a61 100644 --- a/tests/nodes/clip_fp16x16_3d.cairo +++ b/tests/nodes/clip_fp16x16_3d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp16x16_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP16x16 { mag: 655360, sign: true }), Option::Some(FP16x16 { mag: 1310720, sign: false })); + let y = input_0 + .clip( + Option::Some(FP16x16 { mag: 655360, sign: true }), + Option::Some(FP16x16 { mag: 1310720, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/clip_fp8x23_2d.cairo b/tests/nodes/clip_fp8x23_2d.cairo index 64f1792a1..60b38b565 100644 --- a/tests/nodes/clip_fp8x23_2d.cairo +++ b/tests/nodes/clip_fp8x23_2d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp8x23_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP8x23 { mag: 83886080, sign: true }), Option::Some(FP8x23 { mag: 167772160, sign: false })); + let y = input_0 + .clip( + Option::Some(FP8x23 { mag: 83886080, sign: true }), + Option::Some(FP8x23 { mag: 167772160, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/clip_fp8x23_3d.cairo b/tests/nodes/clip_fp8x23_3d.cairo index 511b33859..cc80a61d7 100644 --- a/tests/nodes/clip_fp8x23_3d.cairo +++ b/tests/nodes/clip_fp8x23_3d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp8x23_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = 
input_0.clip(Option::Some(FP8x23 { mag: 83886080, sign: true }), Option::Some(FP8x23 { mag: 167772160, sign: false })); + let y = input_0 + .clip( + Option::Some(FP8x23 { mag: 83886080, sign: true }), + Option::Some(FP8x23 { mag: 167772160, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/compress_fp16x16_3d_axis1.cairo b/tests/nodes/compress_fp16x16_3d_axis1.cairo index 2463dfa93..4189bd1e9 100644 --- a/tests/nodes/compress_fp16x16_3d_axis1.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_axis2.cairo b/tests/nodes/compress_fp16x16_3d_axis2.cairo index a425e0988..e17e6bed4 100644 --- a/tests/nodes/compress_fp16x16_3d_axis2.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_axis3.cairo b/tests/nodes/compress_fp16x16_3d_axis3.cairo index 3ad15cc97..fa9efb511 100644 --- a/tests/nodes/compress_fp16x16_3d_axis3.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis3.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis3() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(3)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(3)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_default.cairo b/tests/nodes/compress_fp16x16_3d_default.cairo index 4bff29c09..0a8b68bf2 100644 
--- a/tests/nodes/compress_fp16x16_3d_default.cairo +++ b/tests/nodes/compress_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_noaxis.cairo b/tests/nodes/compress_fp16x16_3d_noaxis.cairo index e637f47c8..4e1b1620e 100644 --- a/tests/nodes/compress_fp16x16_3d_noaxis.cairo +++ b/tests/nodes/compress_fp16x16_3d_noaxis.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_noaxis() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::None(())); + let y_0 = input_0.compress(condition: input_1, axis: Option::None(())); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_axis1.cairo b/tests/nodes/compress_fp8x23_3d_axis1.cairo index 24829c58f..03bdc8815 100644 --- a/tests/nodes/compress_fp8x23_3d_axis1.cairo +++ b/tests/nodes/compress_fp8x23_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_axis2.cairo b/tests/nodes/compress_fp8x23_3d_axis2.cairo index c4cf9a814..ca6bc4ec6 100644 --- a/tests/nodes/compress_fp8x23_3d_axis2.cairo +++ b/tests/nodes/compress_fp8x23_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git 
a/tests/nodes/compress_fp8x23_3d_default.cairo b/tests/nodes/compress_fp8x23_3d_default.cairo index 6f590b622..f9acf8b7b 100644 --- a/tests/nodes/compress_fp8x23_3d_default.cairo +++ b/tests/nodes/compress_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_axis1.cairo b/tests/nodes/compress_i32_3d_axis1.cairo index e3d6a8072..6d3142fec 100644 --- a/tests/nodes/compress_i32_3d_axis1.cairo +++ b/tests/nodes/compress_i32_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_axis2.cairo b/tests/nodes/compress_i32_3d_axis2.cairo index 3ae5828c8..242aef0ae 100644 --- a/tests/nodes/compress_i32_3d_axis2.cairo +++ b/tests/nodes/compress_i32_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_default.cairo b/tests/nodes/compress_i32_3d_default.cairo index dde8e15cf..ab19213b0 100644 --- a/tests/nodes/compress_i32_3d_default.cairo +++ b/tests/nodes/compress_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: 
input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_axis1.cairo b/tests/nodes/compress_i8_3d_axis1.cairo index 8fd8bb267..4ab02896a 100644 --- a/tests/nodes/compress_i8_3d_axis1.cairo +++ b/tests/nodes/compress_i8_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_axis2.cairo b/tests/nodes/compress_i8_3d_axis2.cairo index 220210744..f0dbaef06 100644 --- a/tests/nodes/compress_i8_3d_axis2.cairo +++ b/tests/nodes/compress_i8_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_default.cairo b/tests/nodes/compress_i8_3d_default.cairo index b802e589c..e4ad1fbc8 100644 --- a/tests/nodes/compress_i8_3d_default.cairo +++ b/tests/nodes/compress_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis1.cairo b/tests/nodes/compress_u32_3d_axis1.cairo index 136f8b8ce..41a2adc63 100644 --- a/tests/nodes/compress_u32_3d_axis1.cairo +++ b/tests/nodes/compress_u32_3d_axis1.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = 
input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis2.cairo b/tests/nodes/compress_u32_3d_axis2.cairo index 347e36676..801886380 100644 --- a/tests/nodes/compress_u32_3d_axis2.cairo +++ b/tests/nodes/compress_u32_3d_axis2.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis2_2.cairo b/tests/nodes/compress_u32_3d_axis2_2.cairo index abc515486..c5a20dbc2 100644 --- a/tests/nodes/compress_u32_3d_axis2_2.cairo +++ b/tests/nodes/compress_u32_3d_axis2_2.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis2_2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis3.cairo b/tests/nodes/compress_u32_3d_axis3.cairo index 10e1e507e..4edd5c8dc 100644 --- a/tests/nodes/compress_u32_3d_axis3.cairo +++ b/tests/nodes/compress_u32_3d_axis3.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis3() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(3)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(3)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_default.cairo b/tests/nodes/compress_u32_3d_default.cairo index ce12adac8..32068f9b7 100644 --- a/tests/nodes/compress_u32_3d_default.cairo +++ b/tests/nodes/compress_u32_3d_default.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = 
input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/conv_transpose.cairo b/tests/nodes/conv_transpose.cairo new file mode 100644 index 000000000..a8e9e8a33 --- /dev/null +++ b/tests/nodes/conv_transpose.cairo @@ -0,0 +1,34 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_transpose() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv_transpose( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_transpose/input_0.cairo b/tests/nodes/conv_transpose/input_0.cairo new file mode 100644 index 000000000..5208e1993 --- /dev/null +++ b/tests/nodes/conv_transpose/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); 
+ data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose/input_1.cairo b/tests/nodes/conv_transpose/input_1.cairo new file mode 100644 index 000000000..7e2c62d2d --- /dev/null +++ b/tests/nodes/conv_transpose/input_1.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose/output_0.cairo b/tests/nodes/conv_transpose/output_0.cairo new file mode 100644 index 000000000..8c33a5189 --- /dev/null +++ b/tests/nodes/conv_transpose/output_0.cairo @@ -0,0 +1,65 @@ +use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 
196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_1d.cairo b/tests/nodes/conv_transpose_1d.cairo new file mode 100644 index 000000000..4971927db --- /dev/null +++ b/tests/nodes/conv_transpose_1d.cairo @@ -0,0 +1,34 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_transpose_1d() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv_transpose( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git 
a/tests/nodes/conv_transpose_1d/input_0.cairo b/tests/nodes/conv_transpose_1d/input_0.cairo new file mode 100644 index 000000000..25470794d --- /dev/null +++ b/tests/nodes/conv_transpose_1d/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_1d/input_1.cairo b/tests/nodes/conv_transpose_1d/input_1.cairo new file mode 100644 index 000000000..3a0149692 --- /dev/null +++ b/tests/nodes/conv_transpose_1d/input_1.cairo @@ -0,0 +1,20 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_1d/output_0.cairo b/tests/nodes/conv_transpose_1d/output_0.cairo new file mode 100644 index 000000000..e671c21d7 --- /dev/null +++ b/tests/nodes/conv_transpose_1d/output_0.cairo @@ -0,0 +1,24 @@ +use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_3d.cairo b/tests/nodes/conv_transpose_3d.cairo new file mode 100644 index 000000000..f1163fc99 --- /dev/null +++ b/tests/nodes/conv_transpose_3d.cairo @@ -0,0 +1,34 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_transpose_3d() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv_transpose( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_transpose_3d/input_0.cairo b/tests/nodes/conv_transpose_3d/input_0.cairo new file mode 100644 index 000000000..6c0e4afa1 --- /dev/null +++ 
b/tests/nodes/conv_transpose_3d/input_0.cairo @@ -0,0 +1,76 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1835008, 
sign: false }); + data.append(FP16x16 { mag: 1900544, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 2228224, sign: false }); + data.append(FP16x16 { mag: 2293760, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 2424832, sign: false }); + data.append(FP16x16 { mag: 2490368, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 2621440, sign: false }); + data.append(FP16x16 { mag: 2686976, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 2818048, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 2949120, sign: false }); + data.append(FP16x16 { mag: 3014656, sign: false }); + data.append(FP16x16 { mag: 3080192, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 3276800, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 3407872, sign: false }); + data.append(FP16x16 { mag: 3473408, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 3604480, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 3735552, sign: false }); + data.append(FP16x16 { mag: 3801088, sign: false }); + data.append(FP16x16 { mag: 3866624, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_3d/input_1.cairo b/tests/nodes/conv_transpose_3d/input_1.cairo new file mode 100644 index 000000000..763528767 --- /dev/null +++ b/tests/nodes/conv_transpose_3d/input_1.cairo @@ -0,0 +1,70 @@ +use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, 
sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_3d/output_0.cairo b/tests/nodes/conv_transpose_3d/output_0.cairo new file mode 100644 index 000000000..d2269f01e --- /dev/null +++ b/tests/nodes/conv_transpose_3d/output_0.cairo @@ -0,0 +1,436 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(5); + shape.append(6); + shape.append(7); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: 
false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 6488064, sign: false }); + data.append(FP16x16 { mag: 7077888, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 3407872, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 5701632, sign: false }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 4194304, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + 
data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 2424832, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 4325376, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 5111808, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + data.append(FP16x16 { mag: 3276800, sign: false }); + data.append(FP16x16 { mag: 6815744, sign: false }); + data.append(FP16x16 { mag: 10616832, sign: false }); + data.append(FP16x16 { mag: 11403264, sign: false }); + data.append(FP16x16 { mag: 12189696, sign: false }); + data.append(FP16x16 { mag: 8388608, sign: false }); + data.append(FP16x16 { mag: 4325376, sign: false }); + data.append(FP16x16 { mag: 5898240, sign: false }); + data.append(FP16x16 { mag: 12189696, sign: false }); + data.append(FP16x16 { mag: 18874368, sign: false }); + data.append(FP16x16 { mag: 20054016, sign: false }); + data.append(FP16x16 { mag: 21233664, sign: false }); + data.append(FP16x16 { mag: 14548992, sign: false }); + data.append(FP16x16 { mag: 7471104, sign: false }); + data.append(FP16x16 { mag: 7864320, sign: false }); + data.append(FP16x16 { mag: 16121856, sign: false }); + data.append(FP16x16 { mag: 24772608, sign: false }); + data.append(FP16x16 { mag: 25952256, sign: false }); + data.append(FP16x16 { mag: 27131904, sign: false }); + data.append(FP16x16 { mag: 18481152, sign: false }); + data.append(FP16x16 { mag: 9437184, sign: false }); + data.append(FP16x16 { mag: 5898240, sign: false }); + data.append(FP16x16 { mag: 12058624, sign: false }); + data.append(FP16x16 { mag: 18481152, sign: false }); + data.append(FP16x16 { mag: 19267584, sign: false }); + data.append(FP16x16 { mag: 20054016, sign: false }); + data.append(FP16x16 { mag: 13631488, 
sign: false }); + data.append(FP16x16 { mag: 6946816, sign: false }); + data.append(FP16x16 { mag: 3276800, sign: false }); + data.append(FP16x16 { mag: 6684672, sign: false }); + data.append(FP16x16 { mag: 10223616, sign: false }); + data.append(FP16x16 { mag: 10616832, sign: false }); + data.append(FP16x16 { mag: 11010048, sign: false }); + data.append(FP16x16 { mag: 7471104, sign: false }); + data.append(FP16x16 { mag: 3801088, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 8060928, sign: false }); + data.append(FP16x16 { mag: 12386304, sign: false }); + data.append(FP16x16 { mag: 12976128, sign: false }); + data.append(FP16x16 { mag: 13565952, sign: false }); + data.append(FP16x16 { mag: 9240576, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 8847360, sign: false }); + data.append(FP16x16 { mag: 18087936, sign: false }); + data.append(FP16x16 { mag: 27721728, sign: false }); + data.append(FP16x16 { mag: 28901376, sign: false }); + data.append(FP16x16 { mag: 30081024, sign: false }); + data.append(FP16x16 { mag: 20447232, sign: false }); + data.append(FP16x16 { mag: 10420224, sign: false }); + data.append(FP16x16 { mag: 14745600, sign: false }); + data.append(FP16x16 { mag: 30081024, sign: false }); + data.append(FP16x16 { mag: 46006272, sign: false }); + data.append(FP16x16 { mag: 47775744, sign: false }); + data.append(FP16x16 { mag: 49545216, sign: false }); + data.append(FP16x16 { mag: 33619968, sign: false }); + data.append(FP16x16 { mag: 17104896, sign: false }); + data.append(FP16x16 { mag: 17694720, sign: false }); + data.append(FP16x16 { mag: 35979264, sign: false }); + data.append(FP16x16 { mag: 54853632, sign: false }); + data.append(FP16x16 { mag: 56623104, sign: false }); + data.append(FP16x16 { mag: 58392576, sign: false }); + data.append(FP16x16 { mag: 39518208, sign: false }); + data.append(FP16x16 { mag: 20054016, sign: false }); + 
data.append(FP16x16 { mag: 12779520, sign: false }); + data.append(FP16x16 { mag: 25952256, sign: false }); + data.append(FP16x16 { mag: 39518208, sign: false }); + data.append(FP16x16 { mag: 40697856, sign: false }); + data.append(FP16x16 { mag: 41877504, sign: false }); + data.append(FP16x16 { mag: 28311552, sign: false }); + data.append(FP16x16 { mag: 14352384, sign: false }); + data.append(FP16x16 { mag: 6881280, sign: false }); + data.append(FP16x16 { mag: 13959168, sign: false }); + data.append(FP16x16 { mag: 21233664, sign: false }); + data.append(FP16x16 { mag: 21823488, sign: false }); + data.append(FP16x16 { mag: 22413312, sign: false }); + data.append(FP16x16 { mag: 15138816, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 7995392, sign: false }); + data.append(FP16x16 { mag: 12189696, sign: false }); + data.append(FP16x16 { mag: 12582912, sign: false }); + data.append(FP16x16 { mag: 12976128, sign: false }); + data.append(FP16x16 { mag: 8781824, sign: false }); + data.append(FP16x16 { mag: 4456448, sign: false }); + data.append(FP16x16 { mag: 8519680, sign: false }); + data.append(FP16x16 { mag: 17301504, sign: false }); + data.append(FP16x16 { mag: 26345472, sign: false }); + data.append(FP16x16 { mag: 27131904, sign: false }); + data.append(FP16x16 { mag: 27918336, sign: false }); + data.append(FP16x16 { mag: 18874368, sign: false }); + data.append(FP16x16 { mag: 9568256, sign: false }); + data.append(FP16x16 { mag: 13762560, sign: false }); + data.append(FP16x16 { mag: 27918336, sign: false }); + data.append(FP16x16 { mag: 42467328, sign: false }); + data.append(FP16x16 { mag: 43646976, sign: false }); + data.append(FP16x16 { mag: 44826624, sign: false }); + data.append(FP16x16 { mag: 30277632, sign: false }); + data.append(FP16x16 { mag: 15335424, sign: false }); + data.append(FP16x16 { mag: 15728640, sign: false }); + data.append(FP16x16 { mag: 
31850496, sign: false }); + data.append(FP16x16 { mag: 48365568, sign: false }); + data.append(FP16x16 { mag: 49545216, sign: false }); + data.append(FP16x16 { mag: 50724864, sign: false }); + data.append(FP16x16 { mag: 34209792, sign: false }); + data.append(FP16x16 { mag: 17301504, sign: false }); + data.append(FP16x16 { mag: 11141120, sign: false }); + data.append(FP16x16 { mag: 22544384, sign: false }); + data.append(FP16x16 { mag: 34209792, sign: false }); + data.append(FP16x16 { mag: 34996224, sign: false }); + data.append(FP16x16 { mag: 35782656, sign: false }); + data.append(FP16x16 { mag: 24117248, sign: false }); + data.append(FP16x16 { mag: 12189696, sign: false }); + data.append(FP16x16 { mag: 5898240, sign: false }); + data.append(FP16x16 { mag: 11927552, sign: false }); + data.append(FP16x16 { mag: 18087936, sign: false }); + data.append(FP16x16 { mag: 18481152, sign: false }); + data.append(FP16x16 { mag: 18874368, sign: false }); + data.append(FP16x16 { mag: 12713984, sign: false }); + data.append(FP16x16 { mag: 6422528, sign: false }); + data.append(FP16x16 { mag: 2621440, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 8060928, sign: false }); + data.append(FP16x16 { mag: 8257536, sign: false }); + data.append(FP16x16 { mag: 8454144, sign: false }); + data.append(FP16x16 { mag: 5701632, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 5570560, sign: false }); + data.append(FP16x16 { mag: 11272192, sign: false }); + data.append(FP16x16 { mag: 17104896, sign: false }); + data.append(FP16x16 { mag: 17498112, sign: false }); + data.append(FP16x16 { mag: 17891328, sign: false }); + data.append(FP16x16 { mag: 12058624, sign: false }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 8847360, sign: false }); + data.append(FP16x16 { mag: 17891328, sign: false }); + data.append(FP16x16 { mag: 27131904, sign: false }); + 
data.append(FP16x16 { mag: 27721728, sign: false }); + data.append(FP16x16 { mag: 28311552, sign: false }); + data.append(FP16x16 { mag: 19070976, sign: false }); + data.append(FP16x16 { mag: 9633792, sign: false }); + data.append(FP16x16 { mag: 9830400, sign: false }); + data.append(FP16x16 { mag: 19857408, sign: false }); + data.append(FP16x16 { mag: 30081024, sign: false }); + data.append(FP16x16 { mag: 30670848, sign: false }); + data.append(FP16x16 { mag: 31260672, sign: false }); + data.append(FP16x16 { mag: 21037056, sign: false }); + data.append(FP16x16 { mag: 10616832, sign: false }); + data.append(FP16x16 { mag: 6881280, sign: false }); + data.append(FP16x16 { mag: 13893632, sign: false }); + data.append(FP16x16 { mag: 21037056, sign: false }); + data.append(FP16x16 { mag: 21430272, sign: false }); + data.append(FP16x16 { mag: 21823488, sign: false }); + data.append(FP16x16 { mag: 14680064, sign: false }); + data.append(FP16x16 { mag: 7405568, sign: false }); + data.append(FP16x16 { mag: 3604480, sign: false }); + data.append(FP16x16 { mag: 7274496, sign: false }); + data.append(FP16x16 { mag: 11010048, sign: false }); + data.append(FP16x16 { mag: 11206656, sign: false }); + data.append(FP16x16 { mag: 11403264, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 3866624, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + 
data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 6488064, sign: false }); + data.append(FP16x16 { mag: 7077888, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 3407872, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 5701632, sign: false }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 4194304, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 2424832, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 4325376, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 5111808, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + 
data.append(FP16x16 { mag: 3276800, sign: false }); + data.append(FP16x16 { mag: 6815744, sign: false }); + data.append(FP16x16 { mag: 10616832, sign: false }); + data.append(FP16x16 { mag: 11403264, sign: false }); + data.append(FP16x16 { mag: 12189696, sign: false }); + data.append(FP16x16 { mag: 8388608, sign: false }); + data.append(FP16x16 { mag: 4325376, sign: false }); + data.append(FP16x16 { mag: 5898240, sign: false }); + data.append(FP16x16 { mag: 12189696, sign: false }); + data.append(FP16x16 { mag: 18874368, sign: false }); + data.append(FP16x16 { mag: 20054016, sign: false }); + data.append(FP16x16 { mag: 21233664, sign: false }); + data.append(FP16x16 { mag: 14548992, sign: false }); + data.append(FP16x16 { mag: 7471104, sign: false }); + data.append(FP16x16 { mag: 7864320, sign: false }); + data.append(FP16x16 { mag: 16121856, sign: false }); + data.append(FP16x16 { mag: 24772608, sign: false }); + data.append(FP16x16 { mag: 25952256, sign: false }); + data.append(FP16x16 { mag: 27131904, sign: false }); + data.append(FP16x16 { mag: 18481152, sign: false }); + data.append(FP16x16 { mag: 9437184, sign: false }); + data.append(FP16x16 { mag: 5898240, sign: false }); + data.append(FP16x16 { mag: 12058624, sign: false }); + data.append(FP16x16 { mag: 18481152, sign: false }); + data.append(FP16x16 { mag: 19267584, sign: false }); + data.append(FP16x16 { mag: 20054016, sign: false }); + data.append(FP16x16 { mag: 13631488, sign: false }); + data.append(FP16x16 { mag: 6946816, sign: false }); + data.append(FP16x16 { mag: 3276800, sign: false }); + data.append(FP16x16 { mag: 6684672, sign: false }); + data.append(FP16x16 { mag: 10223616, sign: false }); + data.append(FP16x16 { mag: 10616832, sign: false }); + data.append(FP16x16 { mag: 11010048, sign: false }); + data.append(FP16x16 { mag: 7471104, sign: false }); + data.append(FP16x16 { mag: 3801088, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 
8060928, sign: false }); + data.append(FP16x16 { mag: 12386304, sign: false }); + data.append(FP16x16 { mag: 12976128, sign: false }); + data.append(FP16x16 { mag: 13565952, sign: false }); + data.append(FP16x16 { mag: 9240576, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 8847360, sign: false }); + data.append(FP16x16 { mag: 18087936, sign: false }); + data.append(FP16x16 { mag: 27721728, sign: false }); + data.append(FP16x16 { mag: 28901376, sign: false }); + data.append(FP16x16 { mag: 30081024, sign: false }); + data.append(FP16x16 { mag: 20447232, sign: false }); + data.append(FP16x16 { mag: 10420224, sign: false }); + data.append(FP16x16 { mag: 14745600, sign: false }); + data.append(FP16x16 { mag: 30081024, sign: false }); + data.append(FP16x16 { mag: 46006272, sign: false }); + data.append(FP16x16 { mag: 47775744, sign: false }); + data.append(FP16x16 { mag: 49545216, sign: false }); + data.append(FP16x16 { mag: 33619968, sign: false }); + data.append(FP16x16 { mag: 17104896, sign: false }); + data.append(FP16x16 { mag: 17694720, sign: false }); + data.append(FP16x16 { mag: 35979264, sign: false }); + data.append(FP16x16 { mag: 54853632, sign: false }); + data.append(FP16x16 { mag: 56623104, sign: false }); + data.append(FP16x16 { mag: 58392576, sign: false }); + data.append(FP16x16 { mag: 39518208, sign: false }); + data.append(FP16x16 { mag: 20054016, sign: false }); + data.append(FP16x16 { mag: 12779520, sign: false }); + data.append(FP16x16 { mag: 25952256, sign: false }); + data.append(FP16x16 { mag: 39518208, sign: false }); + data.append(FP16x16 { mag: 40697856, sign: false }); + data.append(FP16x16 { mag: 41877504, sign: false }); + data.append(FP16x16 { mag: 28311552, sign: false }); + data.append(FP16x16 { mag: 14352384, sign: false }); + data.append(FP16x16 { mag: 6881280, sign: false }); + data.append(FP16x16 { mag: 13959168, sign: false }); + data.append(FP16x16 { mag: 21233664, sign: false 
}); + data.append(FP16x16 { mag: 21823488, sign: false }); + data.append(FP16x16 { mag: 22413312, sign: false }); + data.append(FP16x16 { mag: 15138816, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 7995392, sign: false }); + data.append(FP16x16 { mag: 12189696, sign: false }); + data.append(FP16x16 { mag: 12582912, sign: false }); + data.append(FP16x16 { mag: 12976128, sign: false }); + data.append(FP16x16 { mag: 8781824, sign: false }); + data.append(FP16x16 { mag: 4456448, sign: false }); + data.append(FP16x16 { mag: 8519680, sign: false }); + data.append(FP16x16 { mag: 17301504, sign: false }); + data.append(FP16x16 { mag: 26345472, sign: false }); + data.append(FP16x16 { mag: 27131904, sign: false }); + data.append(FP16x16 { mag: 27918336, sign: false }); + data.append(FP16x16 { mag: 18874368, sign: false }); + data.append(FP16x16 { mag: 9568256, sign: false }); + data.append(FP16x16 { mag: 13762560, sign: false }); + data.append(FP16x16 { mag: 27918336, sign: false }); + data.append(FP16x16 { mag: 42467328, sign: false }); + data.append(FP16x16 { mag: 43646976, sign: false }); + data.append(FP16x16 { mag: 44826624, sign: false }); + data.append(FP16x16 { mag: 30277632, sign: false }); + data.append(FP16x16 { mag: 15335424, sign: false }); + data.append(FP16x16 { mag: 15728640, sign: false }); + data.append(FP16x16 { mag: 31850496, sign: false }); + data.append(FP16x16 { mag: 48365568, sign: false }); + data.append(FP16x16 { mag: 49545216, sign: false }); + data.append(FP16x16 { mag: 50724864, sign: false }); + data.append(FP16x16 { mag: 34209792, sign: false }); + data.append(FP16x16 { mag: 17301504, sign: false }); + data.append(FP16x16 { mag: 11141120, sign: false }); + data.append(FP16x16 { mag: 22544384, sign: false }); + data.append(FP16x16 { mag: 34209792, sign: false }); + data.append(FP16x16 { mag: 34996224, sign: false }); + data.append(FP16x16 
{ mag: 35782656, sign: false }); + data.append(FP16x16 { mag: 24117248, sign: false }); + data.append(FP16x16 { mag: 12189696, sign: false }); + data.append(FP16x16 { mag: 5898240, sign: false }); + data.append(FP16x16 { mag: 11927552, sign: false }); + data.append(FP16x16 { mag: 18087936, sign: false }); + data.append(FP16x16 { mag: 18481152, sign: false }); + data.append(FP16x16 { mag: 18874368, sign: false }); + data.append(FP16x16 { mag: 12713984, sign: false }); + data.append(FP16x16 { mag: 6422528, sign: false }); + data.append(FP16x16 { mag: 2621440, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 8060928, sign: false }); + data.append(FP16x16 { mag: 8257536, sign: false }); + data.append(FP16x16 { mag: 8454144, sign: false }); + data.append(FP16x16 { mag: 5701632, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 5570560, sign: false }); + data.append(FP16x16 { mag: 11272192, sign: false }); + data.append(FP16x16 { mag: 17104896, sign: false }); + data.append(FP16x16 { mag: 17498112, sign: false }); + data.append(FP16x16 { mag: 17891328, sign: false }); + data.append(FP16x16 { mag: 12058624, sign: false }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 8847360, sign: false }); + data.append(FP16x16 { mag: 17891328, sign: false }); + data.append(FP16x16 { mag: 27131904, sign: false }); + data.append(FP16x16 { mag: 27721728, sign: false }); + data.append(FP16x16 { mag: 28311552, sign: false }); + data.append(FP16x16 { mag: 19070976, sign: false }); + data.append(FP16x16 { mag: 9633792, sign: false }); + data.append(FP16x16 { mag: 9830400, sign: false }); + data.append(FP16x16 { mag: 19857408, sign: false }); + data.append(FP16x16 { mag: 30081024, sign: false }); + data.append(FP16x16 { mag: 30670848, sign: false }); + data.append(FP16x16 { mag: 31260672, sign: false }); + data.append(FP16x16 { mag: 21037056, sign: false }); 
+ data.append(FP16x16 { mag: 10616832, sign: false }); + data.append(FP16x16 { mag: 6881280, sign: false }); + data.append(FP16x16 { mag: 13893632, sign: false }); + data.append(FP16x16 { mag: 21037056, sign: false }); + data.append(FP16x16 { mag: 21430272, sign: false }); + data.append(FP16x16 { mag: 21823488, sign: false }); + data.append(FP16x16 { mag: 14680064, sign: false }); + data.append(FP16x16 { mag: 7405568, sign: false }); + data.append(FP16x16 { mag: 3604480, sign: false }); + data.append(FP16x16 { mag: 7274496, sign: false }); + data.append(FP16x16 { mag: 11010048, sign: false }); + data.append(FP16x16 { mag: 11206656, sign: false }); + data.append(FP16x16 { mag: 11403264, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 3866624, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_attributes.cairo b/tests/nodes/conv_transpose_attributes.cairo new file mode 100644 index 000000000..47dfc368d --- /dev/null +++ b/tests/nodes/conv_transpose_attributes.cairo @@ -0,0 +1,34 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_transpose_attributes() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv_transpose( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_transpose_attributes/input_0.cairo b/tests/nodes/conv_transpose_attributes/input_0.cairo new file mode 100644 index 000000000..5208e1993 --- /dev/null +++ 
b/tests/nodes/conv_transpose_attributes/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_attributes/input_1.cairo b/tests/nodes/conv_transpose_attributes/input_1.cairo new file mode 100644 index 000000000..7e2c62d2d --- /dev/null +++ b/tests/nodes/conv_transpose_attributes/input_1.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + 
data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_attributes/output_0.cairo b/tests/nodes/conv_transpose_attributes/output_0.cairo new file mode 100644 index 000000000..8c33a5189 --- /dev/null +++ b/tests/nodes/conv_transpose_attributes/output_0.cairo @@ -0,0 +1,65 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: 
false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git 
a/tests/nodes/conv_transpose_autopad_same.cairo b/tests/nodes/conv_transpose_autopad_same.cairo new file mode 100644 index 000000000..6bed5de4e --- /dev/null +++ b/tests/nodes/conv_transpose_autopad_same.cairo @@ -0,0 +1,36 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +use orion::operators::nn::functional::conv_transpose::AUTO_PAD; + +#[test] +#[available_gas(2000000000)] +fn test_conv_transpose_autopad_same() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv_transpose( + @input_0, + @input_1, + Option::None, + Option::Some(AUTO_PAD::SAME_UPPER), + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![2, 2].span()) + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_transpose_autopad_same/input_0.cairo b/tests/nodes/conv_transpose_autopad_same/input_0.cairo new file mode 100644 index 000000000..5208e1993 --- /dev/null +++ b/tests/nodes/conv_transpose_autopad_same/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + 
data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_autopad_same/input_1.cairo b/tests/nodes/conv_transpose_autopad_same/input_1.cairo new file mode 100644 index 000000000..7e2c62d2d --- /dev/null +++ b/tests/nodes/conv_transpose_autopad_same/input_1.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_autopad_same/output_0.cairo b/tests/nodes/conv_transpose_autopad_same/output_0.cairo new file mode 100644 index 
000000000..6f0baa471 --- /dev/null +++ b/tests/nodes/conv_transpose_autopad_same/output_0.cairo @@ -0,0 +1,87 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(6); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 
1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 
393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_dilations.cairo b/tests/nodes/conv_transpose_dilations.cairo new file mode 100644 index 000000000..da3054a69 --- /dev/null +++ b/tests/nodes/conv_transpose_dilations.cairo @@ -0,0 +1,34 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_transpose_dilations() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv_transpose( + @input_0, + @input_1, + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_transpose_dilations/input_0.cairo b/tests/nodes/conv_transpose_dilations/input_0.cairo new file mode 100644 index 000000000..69a038182 --- /dev/null +++ b/tests/nodes/conv_transpose_dilations/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, 
sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_dilations/input_1.cairo b/tests/nodes/conv_transpose_dilations/input_1.cairo new file mode 100644 index 000000000..610fca73b --- /dev/null +++ b/tests/nodes/conv_transpose_dilations/input_1.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_dilations/output_0.cairo b/tests/nodes/conv_transpose_dilations/output_0.cairo new file mode 100644 index 000000000..ca1189011 --- /dev/null +++ b/tests/nodes/conv_transpose_dilations/output_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + 
data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 2293760, sign: false }); + data.append(FP16x16 { mag: 4390912, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 4980736, sign: false }); + data.append(FP16x16 { mag: 4980736, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 5767168, sign: false }); + data.append(FP16x16 { mag: 2949120, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_pads.cairo b/tests/nodes/conv_transpose_pads.cairo new file mode 100644 index 000000000..ccbd8b5d5 --- /dev/null +++ b/tests/nodes/conv_transpose_pads.cairo @@ -0,0 +1,34 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_transpose_pads() { + let input_0 = input_0::input_0(); + let input_1 = 
input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv_transpose( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![3, 3].span()), + Option::Some(array![1, 1].span()), + Option::Some(array![10, 8].span()), + Option::None, + Option::Some(array![3, 2].span()) + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_transpose_pads/input_0.cairo b/tests/nodes/conv_transpose_pads/input_0.cairo new file mode 100644 index 000000000..5208e1993 --- /dev/null +++ b/tests/nodes/conv_transpose_pads/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_pads/input_1.cairo b/tests/nodes/conv_transpose_pads/input_1.cairo new file mode 100644 index 000000000..7e2c62d2d --- /dev/null +++ b/tests/nodes/conv_transpose_pads/input_1.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + 
let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_pads/output_0.cairo b/tests/nodes/conv_transpose_pads/output_0.cairo new file mode 100644 index 000000000..391d3adf4 --- /dev/null +++ b/tests/nodes/conv_transpose_pads/output_0.cairo @@ -0,0 +1,175 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(10); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 
196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + 
data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + 
data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, 
sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { 
mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/gather_fp16x16_3d_axis1.cairo b/tests/nodes/gather_fp16x16_3d_axis1.cairo index 8c4af9664..429d085d4 100644 --- a/tests/nodes/gather_fp16x16_3d_axis1.cairo +++ b/tests/nodes/gather_fp16x16_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_axis2.cairo b/tests/nodes/gather_fp16x16_3d_axis2.cairo index 0b4f77ed8..cfb8a61d2 100644 --- a/tests/nodes/gather_fp16x16_3d_axis2.cairo +++ b/tests/nodes/gather_fp16x16_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_default.cairo b/tests/nodes/gather_fp16x16_3d_default.cairo index 91c9ebdd4..ee49aac75 100644 --- a/tests/nodes/gather_fp16x16_3d_default.cairo +++ b/tests/nodes/gather_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp8x23_3d_axis1.cairo b/tests/nodes/gather_fp8x23_3d_axis1.cairo index 6a5d1a046..c9c6dcf7f 100644 --- a/tests/nodes/gather_fp8x23_3d_axis1.cairo +++ b/tests/nodes/gather_fp8x23_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_fp8x23_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, 
axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp8x23_3d_axis2.cairo b/tests/nodes/gather_fp8x23_3d_axis2.cairo index d5a913163..726411dd2 100644 --- a/tests/nodes/gather_fp8x23_3d_axis2.cairo +++ b/tests/nodes/gather_fp8x23_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_fp8x23_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp8x23_3d_default.cairo b/tests/nodes/gather_fp8x23_3d_default.cairo index 7f9492f8d..e844827f9 100644 --- a/tests/nodes/gather_fp8x23_3d_default.cairo +++ b/tests/nodes/gather_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i32_3d_axis1.cairo b/tests/nodes/gather_i32_3d_axis1.cairo index 8b1777d8f..6dbb78c47 100644 --- a/tests/nodes/gather_i32_3d_axis1.cairo +++ b/tests/nodes/gather_i32_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_i32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i32_3d_axis2.cairo b/tests/nodes/gather_i32_3d_axis2.cairo index bdc557d7a..29bd217b3 100644 --- a/tests/nodes/gather_i32_3d_axis2.cairo +++ b/tests/nodes/gather_i32_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_i32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, 
axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i32_3d_default.cairo b/tests/nodes/gather_i32_3d_default.cairo index 9288c3dab..4c0b9c9bd 100644 --- a/tests/nodes/gather_i32_3d_default.cairo +++ b/tests/nodes/gather_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i8_3d_axis1.cairo b/tests/nodes/gather_i8_3d_axis1.cairo index 10dd5ce6f..140608123 100644 --- a/tests/nodes/gather_i8_3d_axis1.cairo +++ b/tests/nodes/gather_i8_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_i8_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i8_3d_axis2.cairo b/tests/nodes/gather_i8_3d_axis2.cairo index 35f50077a..992cee33e 100644 --- a/tests/nodes/gather_i8_3d_axis2.cairo +++ b/tests/nodes/gather_i8_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_i8_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i8_3d_default.cairo b/tests/nodes/gather_i8_3d_default.cairo index 5bc437a7b..0f8e6dec2 100644 --- a/tests/nodes/gather_i8_3d_default.cairo +++ b/tests/nodes/gather_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_i8_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = 
input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo b/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo index 86de6e9b9..037d2ad93 100644 --- a/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp16x16_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo b/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo index d2ac3b2ce..3661bb6c5 100644 --- a/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo +++ b/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp16x16_3d_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp16x16_3d_default.cairo b/tests/nodes/gather_nd_fp16x16_3d_default.cairo index 157266adb..60f116c86 100644 --- a/tests/nodes/gather_nd_fp16x16_3d_default.cairo +++ b/tests/nodes/gather_nd_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo b/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo index 6da924b6c..c523e0135 100644 --- a/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo +++ 
b/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp8x23_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo b/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo index 251d442ba..edb022910 100644 --- a/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo +++ b/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp8x23_3d_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp8x23_3d_default.cairo b/tests/nodes/gather_nd_fp8x23_3d_default.cairo index 8ce119604..70b25cea1 100644 --- a/tests/nodes/gather_nd_fp8x23_3d_default.cairo +++ b/tests/nodes/gather_nd_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo b/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo index 1d275fb4a..923c7f9ba 100644 --- a/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i32_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); 
assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo b/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo index 6bfa5cf4a..44ed06b2c 100644 --- a/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo +++ b/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i32_3d_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i32_3d_default.cairo b/tests/nodes/gather_nd_i32_3d_default.cairo index 4fa1c55f1..5268e13f4 100644 --- a/tests/nodes/gather_nd_i32_3d_default.cairo +++ b/tests/nodes/gather_nd_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo b/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo index b42d1a430..1d47f72ff 100644 --- a/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i8_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i8_3d_default.cairo b/tests/nodes/gather_nd_i8_3d_default.cairo index 6ee8e0a9e..f9152f412 100644 --- a/tests/nodes/gather_nd_i8_3d_default.cairo +++ b/tests/nodes/gather_nd_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i8_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); 
- let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_batch_dims1.cairo b/tests/nodes/gather_nd_u32_batch_dims1.cairo index d1bfb099c..7689359ee 100644 --- a/tests/nodes/gather_nd_u32_batch_dims1.cairo +++ b/tests/nodes/gather_nd_u32_batch_dims1.cairo @@ -16,7 +16,7 @@ fn test_gather_nd_u32_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_batch_dims2.cairo b/tests/nodes/gather_nd_u32_batch_dims2.cairo index 2cd029255..4659cfaa7 100644 --- a/tests/nodes/gather_nd_u32_batch_dims2.cairo +++ b/tests/nodes/gather_nd_u32_batch_dims2.cairo @@ -16,7 +16,7 @@ fn test_gather_nd_u32_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_default.cairo b/tests/nodes/gather_nd_u32_default.cairo index 5893b5017..e226d0eb0 100644 --- a/tests/nodes/gather_nd_u32_default.cairo +++ b/tests/nodes/gather_nd_u32_default.cairo @@ -16,7 +16,7 @@ fn test_gather_nd_u32_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_u32_3d_axis1.cairo b/tests/nodes/gather_u32_3d_axis1.cairo index 641d67f80..1a7a56d37 100644 --- a/tests/nodes/gather_u32_3d_axis1.cairo +++ b/tests/nodes/gather_u32_3d_axis1.cairo @@ -16,7 +16,7 @@ fn 
test_gather_u32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_u32_3d_axis2.cairo b/tests/nodes/gather_u32_3d_axis2.cairo index 94f91a138..30d5f6a61 100644 --- a/tests/nodes/gather_u32_3d_axis2.cairo +++ b/tests/nodes/gather_u32_3d_axis2.cairo @@ -16,7 +16,7 @@ fn test_gather_u32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_u32_3d_default.cairo b/tests/nodes/gather_u32_3d_default.cairo index 7931d3e27..8f223c4af 100644 --- a/tests/nodes/gather_u32_3d_default.cairo +++ b/tests/nodes/gather_u32_3d_default.cairo @@ -16,7 +16,7 @@ fn test_gather_u32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gemm_all_attributes.cairo b/tests/nodes/gemm_all_attributes.cairo index c543ddb3b..2cbd9cab3 100644 --- a/tests/nodes/gemm_all_attributes.cairo +++ b/tests/nodes/gemm_all_attributes.cairo @@ -18,7 +18,15 @@ fn test_gemm_all_attributes() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::Some(FixedTrait::new(16384, false)), Option::Some(FixedTrait::new(22938, false)), true, true); + let y = NNTrait::gemm( + input_0, + input_1, + Option::Some(input_2), + Option::Some(FixedTrait::new(16384, false)), + Option::Some(FixedTrait::new(22938, false)), + true, + true + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_alpha.cairo 
b/tests/nodes/gemm_alpha.cairo index 074392584..dad8187f4 100644 --- a/tests/nodes/gemm_alpha.cairo +++ b/tests/nodes/gemm_alpha.cairo @@ -16,7 +16,15 @@ fn test_gemm_alpha() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::Some(FixedTrait::new(32768, false)), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, + input_1, + Option::None(()), + Option::Some(FixedTrait::new(32768, false)), + Option::None(()), + false, + false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_beta.cairo b/tests/nodes/gemm_beta.cairo index 9ec8fe530..9f417e32a 100644 --- a/tests/nodes/gemm_beta.cairo +++ b/tests/nodes/gemm_beta.cairo @@ -18,7 +18,15 @@ fn test_gemm_beta() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::Some(FixedTrait::new(32768, false)), false, false); + let y = NNTrait::gemm( + input_0, + input_1, + Option::Some(input_2), + Option::None(()), + Option::Some(FixedTrait::new(32768, false)), + false, + false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_default_matrix_bias.cairo b/tests/nodes/gemm_default_matrix_bias.cairo index 76c6fff0c..16d00f933 100644 --- a/tests/nodes/gemm_default_matrix_bias.cairo +++ b/tests/nodes/gemm_default_matrix_bias.cairo @@ -18,7 +18,9 @@ fn test_gemm_default_matrix_bias() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_default_no_bias.cairo b/tests/nodes/gemm_default_no_bias.cairo index b702bcfc3..ea43cd0fe 100644 --- a/tests/nodes/gemm_default_no_bias.cairo +++ b/tests/nodes/gemm_default_no_bias.cairo @@ -16,7 +16,9 @@ 
fn test_gemm_default_no_bias() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_default_vector_bias.cairo b/tests/nodes/gemm_default_vector_bias.cairo index 7f4f2646b..24826f739 100644 --- a/tests/nodes/gemm_default_vector_bias.cairo +++ b/tests/nodes/gemm_default_vector_bias.cairo @@ -18,7 +18,9 @@ fn test_gemm_default_vector_bias() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_transposeA.cairo b/tests/nodes/gemm_transposeA.cairo index c0b49d799..76c4592e4 100644 --- a/tests/nodes/gemm_transposeA.cairo +++ b/tests/nodes/gemm_transposeA.cairo @@ -16,7 +16,9 @@ fn test_gemm_transposeA() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), true, false); + let y = NNTrait::gemm( + input_0, input_1, Option::None(()), Option::None(()), Option::None(()), true, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_transposeB.cairo b/tests/nodes/gemm_transposeB.cairo index 4c7ccbef4..1728fd014 100644 --- a/tests/nodes/gemm_transposeB.cairo +++ b/tests/nodes/gemm_transposeB.cairo @@ -16,7 +16,9 @@ fn test_gemm_transposeB() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, true); + let y = NNTrait::gemm( + input_0, input_1, Option::None(()), 
Option::None(()), Option::None(()), false, true + ); assert_eq(y, z); } diff --git a/tests/nodes/hard_sigmoid_fp16x16.cairo b/tests/nodes/hard_sigmoid_fp16x16.cairo index 8a8f8672a..6ad8c8c6c 100644 --- a/tests/nodes/hard_sigmoid_fp16x16.cairo +++ b/tests/nodes/hard_sigmoid_fp16x16.cairo @@ -14,7 +14,9 @@ fn test_hard_sigmoid_fp16x16() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = NNTrait::hard_sigmoid(@input_0, @FixedTrait::new(13107, false), @FixedTrait::new(32768, false)); + let y = NNTrait::hard_sigmoid( + @input_0, @FixedTrait::new(13107, false), @FixedTrait::new(32768, false) + ); assert_eq(y, z); } diff --git a/tests/nodes/hard_sigmoid_fp8x23.cairo b/tests/nodes/hard_sigmoid_fp8x23.cairo index 317c25425..3697b1d7a 100644 --- a/tests/nodes/hard_sigmoid_fp8x23.cairo +++ b/tests/nodes/hard_sigmoid_fp8x23.cairo @@ -14,7 +14,9 @@ fn test_hard_sigmoid_fp8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = NNTrait::hard_sigmoid(@input_0, @FixedTrait::new(1677721, false), @FixedTrait::new(4194304, false)); + let y = NNTrait::hard_sigmoid( + @input_0, @FixedTrait::new(1677721, false), @FixedTrait::new(4194304, false) + ); assert_eq(y, z); } diff --git a/tests/nodes/is_nan_fp16x16/input_0.cairo b/tests/nodes/is_nan_fp16x16/input_0.cairo index 576456503..8c86af4fb 100644 --- a/tests/nodes/is_nan_fp16x16/input_0.cairo +++ b/tests/nodes/is_nan_fp16x16/input_0.cairo @@ -15,4 +15,4 @@ fn input_0() -> Tensor { data.append(FixedTrait::NaN()); data.append(FixedTrait::NaN()); TensorTrait::new(shape.span(), data.span()) -} \ No newline at end of file +} diff --git a/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo index 6931c44ec..93373e675 100644 --- a/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis0_epsilon() { let input_2 = 
input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(0),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(0), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo index 1bdb8700d..72d384de1 100644 --- a/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis1_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(1),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(1), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo index 06505280b..44a5f550d 100644 --- a/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis2_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(2),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(2), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git 
a/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo index 4c095bf62..0b5b77e17 100644 --- a/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis_negative_1_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-1),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(-1), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo index 0be005ddd..5f632aa6e 100644 --- a/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis_negative_2_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-2),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(-2), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo index e3c602e1f..d08c443f8 100644 --- a/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo @@ -19,7 +19,14 @@ fn 
test_layer_normalization_3d_axis_negative_3_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-3),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(-3), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis0.cairo b/tests/nodes/layer_normalization_4d_axis0.cairo index 45a825cd5..279acc624 100644 --- a/tests/nodes/layer_normalization_4d_axis0.cairo +++ b/tests/nodes/layer_normalization_4d_axis0.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis0() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(0),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(0), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis1.cairo b/tests/nodes/layer_normalization_4d_axis1.cairo index e7ee8885c..d8e00b332 100644 --- a/tests/nodes/layer_normalization_4d_axis1.cairo +++ b/tests/nodes/layer_normalization_4d_axis1.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis1() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(1),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(1), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis2.cairo b/tests/nodes/layer_normalization_4d_axis2.cairo index 3bd45e907..65b738957 100644 --- a/tests/nodes/layer_normalization_4d_axis2.cairo +++ 
b/tests/nodes/layer_normalization_4d_axis2.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis2() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(2),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(2), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis3.cairo b/tests/nodes/layer_normalization_4d_axis3.cairo index 4b173b4f6..fae5a51c7 100644 --- a/tests/nodes/layer_normalization_4d_axis3.cairo +++ b/tests/nodes/layer_normalization_4d_axis3.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis3() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(3),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(3), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_1.cairo b/tests/nodes/layer_normalization_4d_axis_negative_1.cairo index d7b04e192..2f879f988 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_1.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_1.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_1() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-1),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(-1), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_2.cairo b/tests/nodes/layer_normalization_4d_axis_negative_2.cairo index 5e17a8b52..718c97ad5 100644 --- 
a/tests/nodes/layer_normalization_4d_axis_negative_2.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_2.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_2() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(2),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(2), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_3.cairo b/tests/nodes/layer_normalization_4d_axis_negative_3.cairo index 4188eec6c..b97678d38 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_3.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_3.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_3() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-3),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(-3), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_4.cairo b/tests/nodes/layer_normalization_4d_axis_negative_4.cairo index 5aa5971dc..94be87f32 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_4.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_4.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_4() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-4),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(-4), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git 
a/tests/nodes/layer_normalization_default_axis.cairo b/tests/nodes/layer_normalization_default_axis.cairo index dd792e731..994ab7106 100644 --- a/tests/nodes/layer_normalization_default_axis.cairo +++ b/tests/nodes/layer_normalization_default_axis.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_default_axis() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::None, Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_test.cairo b/tests/nodes/layer_normalization_test.cairo index 631dc6f46..ad8baa5f2 100644 --- a/tests/nodes/layer_normalization_test.cairo +++ b/tests/nodes/layer_normalization_test.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_test() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::None, Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_fp16x16_3d_axis1.cairo b/tests/nodes/scatter_fp16x16_3d_axis1.cairo index b471e028c..5173d8bd7 100644 --- a/tests/nodes/scatter_fp16x16_3d_axis1.cairo +++ b/tests/nodes/scatter_fp16x16_3d_axis1.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp16x16_3d_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo 
b/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo index c6fc48b15..be927416d 100644 --- a/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo +++ b/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp16x16_3d_axis1_add() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('add')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('add') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp16x16_3d_default.cairo b/tests/nodes/scatter_fp16x16_3d_default.cairo index c14bbc0a6..b106de54d 100644 --- a/tests/nodes/scatter_fp16x16_3d_default.cairo +++ b/tests/nodes/scatter_fp16x16_3d_default.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp16x16_3d_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp8x23_axis1.cairo b/tests/nodes/scatter_fp8x23_axis1.cairo index e0008d409..8ff871c7b 100644 --- a/tests/nodes/scatter_fp8x23_axis1.cairo +++ b/tests/nodes/scatter_fp8x23_axis1.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp8x23_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp8x23_default.cairo b/tests/nodes/scatter_fp8x23_default.cairo index bdaea6568..157aca0bb 100644 --- a/tests/nodes/scatter_fp8x23_default.cairo +++ 
b/tests/nodes/scatter_fp8x23_default.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp8x23_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp8x23_mul.cairo b/tests/nodes/scatter_fp8x23_mul.cairo index 4430bf041..5b2305aee 100644 --- a/tests/nodes/scatter_fp8x23_mul.cairo +++ b/tests/nodes/scatter_fp8x23_mul.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp8x23_mul() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('mul')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('mul') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_i8_axis1.cairo b/tests/nodes/scatter_i8_axis1.cairo index e143463f1..c42123f3d 100644 --- a/tests/nodes/scatter_i8_axis1.cairo +++ b/tests/nodes/scatter_i8_axis1.cairo @@ -20,7 +20,13 @@ fn test_scatter_i8_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_i8_axis1_max.cairo b/tests/nodes/scatter_i8_axis1_max.cairo index 53dabbe40..844911a8d 100644 --- a/tests/nodes/scatter_i8_axis1_max.cairo +++ b/tests/nodes/scatter_i8_axis1_max.cairo @@ -20,7 +20,13 @@ fn test_scatter_i8_axis1_max() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, 
axis:Option::Some(1), reduction:Option::Some('max')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('max') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_i8_default.cairo b/tests/nodes/scatter_i8_default.cairo index c41b29d7b..f658268ce 100644 --- a/tests/nodes/scatter_i8_default.cairo +++ b/tests/nodes/scatter_i8_default.cairo @@ -20,7 +20,13 @@ fn test_scatter_i8_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_u32_add.cairo b/tests/nodes/scatter_u32_add.cairo index 735b8fb5e..2b14d68d1 100644 --- a/tests/nodes/scatter_u32_add.cairo +++ b/tests/nodes/scatter_u32_add.cairo @@ -18,7 +18,13 @@ fn test_scatter_u32_add() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('add')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('add') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_u32_axis1.cairo b/tests/nodes/scatter_u32_axis1.cairo index e2a96e71b..2c85e2a6c 100644 --- a/tests/nodes/scatter_u32_axis1.cairo +++ b/tests/nodes/scatter_u32_axis1.cairo @@ -18,7 +18,13 @@ fn test_scatter_u32_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git 
a/tests/nodes/scatter_u32_default.cairo b/tests/nodes/scatter_u32_default.cairo index 1ccdac72f..5fb16207c 100644 --- a/tests/nodes/scatter_u32_default.cairo +++ b/tests/nodes/scatter_u32_default.cairo @@ -18,7 +18,13 @@ fn test_scatter_u32_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/sequence_insert_fp16x16.cairo b/tests/nodes/sequence_insert_fp16x16.cairo index d30b0d3e1..70316ebb9 100644 --- a/tests/nodes/sequence_insert_fp16x16.cairo +++ b/tests/nodes/sequence_insert_fp16x16.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_fp16x16() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_fp8x23.cairo b/tests/nodes/sequence_insert_fp8x23.cairo index ad4d12be4..fb474c6d4 100644 --- a/tests/nodes/sequence_insert_fp8x23.cairo +++ b/tests/nodes/sequence_insert_fp8x23.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_fp8x23() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_i32.cairo b/tests/nodes/sequence_insert_i32.cairo index 3a397715d..7bcadba2d 100644 --- a/tests/nodes/sequence_insert_i32.cairo +++ b/tests/nodes/sequence_insert_i32.cairo @@ -18,7 +18,7 @@ fn test_sequence_insert_i32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = 
input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_i8.cairo b/tests/nodes/sequence_insert_i8.cairo index a304ff2c4..ff1be34fe 100644 --- a/tests/nodes/sequence_insert_i8.cairo +++ b/tests/nodes/sequence_insert_i8.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_i8() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_u32.cairo b/tests/nodes/sequence_insert_u32.cairo index dcd905f72..079d6a4a0 100644 --- a/tests/nodes/sequence_insert_u32.cairo +++ b/tests/nodes/sequence_insert_u32.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_u32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_length_fp16x16.cairo b/tests/nodes/sequence_length_fp16x16.cairo index d971d5569..559ec3ff6 100644 --- a/tests/nodes/sequence_length_fp16x16.cairo +++ b/tests/nodes/sequence_length_fp16x16.cairo @@ -13,10 +13,10 @@ use orion::operators::sequence::SequenceTrait; #[test] #[available_gas(2000000000)] fn test_sequence_length_fp16x16() { - let input_0 = input_0::input_0(); + let input_0 = input_0::input_0(); let z = output_0::output_0(); let y = input_0.sequence_length(); assert_eq(y, z); -} +} diff --git a/tests/nodes/shrink_hard_fp16x16.cairo b/tests/nodes/shrink_hard_fp16x16.cairo index 0818844b2..2f5ec5312 100644 --- a/tests/nodes/shrink_hard_fp16x16.cairo +++ b/tests/nodes/shrink_hard_fp16x16.cairo @@ -15,7 +15,9 @@ fn test_shrink_hard_fp16x16() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::None(()), Option::Some(FixedTrait::new(65536, 
false))); + let y = TensorTrait::shrink( + input_0, Option::None(()), Option::Some(FixedTrait::new(65536, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/shrink_hard_fp8x23.cairo b/tests/nodes/shrink_hard_fp8x23.cairo index 3c054f433..c76eec1ec 100644 --- a/tests/nodes/shrink_hard_fp8x23.cairo +++ b/tests/nodes/shrink_hard_fp8x23.cairo @@ -15,7 +15,9 @@ fn test_shrink_hard_fp8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::None(()), Option::Some(FixedTrait::new(8388608, false))); + let y = TensorTrait::shrink( + input_0, Option::None(()), Option::Some(FixedTrait::new(8388608, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/shrink_soft_fp16x16.cairo b/tests/nodes/shrink_soft_fp16x16.cairo index 924ecfde5..aa975069c 100644 --- a/tests/nodes/shrink_soft_fp16x16.cairo +++ b/tests/nodes/shrink_soft_fp16x16.cairo @@ -15,7 +15,11 @@ fn test_shrink_soft_fp16x16() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::Some(FixedTrait::new(65536, false)), Option::Some(FixedTrait::new(65536, false))); + let y = TensorTrait::shrink( + input_0, + Option::Some(FixedTrait::new(65536, false)), + Option::Some(FixedTrait::new(65536, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/shrink_soft_fp8x23.cairo b/tests/nodes/shrink_soft_fp8x23.cairo index 01a314e10..8413beccd 100644 --- a/tests/nodes/shrink_soft_fp8x23.cairo +++ b/tests/nodes/shrink_soft_fp8x23.cairo @@ -15,7 +15,11 @@ fn test_shrink_soft_fp8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::Some(FixedTrait::new(8388608, false)), Option::Some(FixedTrait::new(8388608, false))); + let y = TensorTrait::shrink( + input_0, + Option::Some(FixedTrait::new(8388608, false)), + Option::Some(FixedTrait::new(8388608, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp16x16_2d.cairo 
b/tests/nodes/slice_fp16x16_2d.cairo index 5e3d593be..2a95e6e4b 100644 --- a/tests/nodes/slice_fp16x16_2d.cairo +++ b/tests/nodes/slice_fp16x16_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp16x16_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp16x16_3d.cairo b/tests/nodes/slice_fp16x16_3d.cairo index d0b5462c4..a681191ce 100644 --- a/tests/nodes/slice_fp16x16_3d.cairo +++ b/tests/nodes/slice_fp16x16_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp16x16_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp8x23_2d.cairo b/tests/nodes/slice_fp8x23_2d.cairo index 6a80a5422..56fed5a6a 100644 --- a/tests/nodes/slice_fp8x23_2d.cairo +++ b/tests/nodes/slice_fp8x23_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp8x23_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp8x23_3d.cairo b/tests/nodes/slice_fp8x23_3d.cairo index 5c2af30b7..fd5e95485 100644 --- a/tests/nodes/slice_fp8x23_3d.cairo +++ 
b/tests/nodes/slice_fp8x23_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp8x23_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i32_2d.cairo b/tests/nodes/slice_i32_2d.cairo index 082b8f15f..f26a2a809 100644 --- a/tests/nodes/slice_i32_2d.cairo +++ b/tests/nodes/slice_i32_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_i32_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i32_3d.cairo b/tests/nodes/slice_i32_3d.cairo index 1683e6987..16fd3f51b 100644 --- a/tests/nodes/slice_i32_3d.cairo +++ b/tests/nodes/slice_i32_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_i32_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i8_2d.cairo b/tests/nodes/slice_i8_2d.cairo index fc7f35364..2dc5f6ab4 100644 --- a/tests/nodes/slice_i8_2d.cairo +++ b/tests/nodes/slice_i8_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_i8_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), 
array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i8_3d.cairo b/tests/nodes/slice_i8_3d.cairo index ec8ea9ffd..a140d8681 100644 --- a/tests/nodes/slice_i8_3d.cairo +++ b/tests/nodes/slice_i8_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_i8_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_u32_2d.cairo b/tests/nodes/slice_u32_2d.cairo index 27678fc0c..c5ad63061 100644 --- a/tests/nodes/slice_u32_2d.cairo +++ b/tests/nodes/slice_u32_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_u32_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_u32_3d.cairo b/tests/nodes/slice_u32_3d.cairo index a3ca0e1bc..08a77cf55 100644 --- a/tests/nodes/slice_u32_3d.cairo +++ b/tests/nodes/slice_u32_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_u32_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 
1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/where_fp16x16.cairo b/tests/nodes/where_fp16x16.cairo index 05467ef51..ae3416d67 100644 --- a/tests/nodes/where_fp16x16.cairo +++ b/tests/nodes/where_fp16x16.cairo @@ -18,7 +18,7 @@ fn test_where_fp16x16() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_fp16x16_broadcast.cairo b/tests/nodes/where_fp16x16_broadcast.cairo index b0d9b9faa..5df239b78 100644 --- a/tests/nodes/where_fp16x16_broadcast.cairo +++ b/tests/nodes/where_fp16x16_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_fp16x16_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_fp8x23.cairo b/tests/nodes/where_fp8x23.cairo index 8661bf163..492db3766 100644 --- a/tests/nodes/where_fp8x23.cairo +++ b/tests/nodes/where_fp8x23.cairo @@ -18,7 +18,7 @@ fn test_where_fp8x23() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_fp8x23_broadcast.cairo b/tests/nodes/where_fp8x23_broadcast.cairo index 771c00bf4..112f9ef74 100644 --- a/tests/nodes/where_fp8x23_broadcast.cairo +++ b/tests/nodes/where_fp8x23_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_fp8x23_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i32.cairo b/tests/nodes/where_i32.cairo index 1662b010d..a455f8ac1 100644 --- a/tests/nodes/where_i32.cairo +++ b/tests/nodes/where_i32.cairo @@ -18,7 +18,7 @@ fn test_where_i32() { let 
input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i32_broadcast.cairo b/tests/nodes/where_i32_broadcast.cairo index 53aaf91e2..62891b235 100644 --- a/tests/nodes/where_i32_broadcast.cairo +++ b/tests/nodes/where_i32_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_i32_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i8.cairo b/tests/nodes/where_i8.cairo index 0627fd33b..6f54a1271 100644 --- a/tests/nodes/where_i8.cairo +++ b/tests/nodes/where_i8.cairo @@ -18,7 +18,7 @@ fn test_where_i8() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i8_broadcast.cairo b/tests/nodes/where_i8_broadcast.cairo index 69e02821f..4bcb86a3d 100644 --- a/tests/nodes/where_i8_broadcast.cairo +++ b/tests/nodes/where_i8_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_i8_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_u32.cairo b/tests/nodes/where_u32.cairo index a14d685ac..5f8a3119a 100644 --- a/tests/nodes/where_u32.cairo +++ b/tests/nodes/where_u32.cairo @@ -18,7 +18,7 @@ fn test_where_u32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_u32_broadcast.cairo b/tests/nodes/where_u32_broadcast.cairo index b810f7143..4aedc56a1 100644 --- a/tests/nodes/where_u32_broadcast.cairo +++ 
b/tests/nodes/where_u32_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_u32_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/operators/qlinear_add_test.cairo b/tests/operators/qlinear_add_test.cairo index 3163fb8e6..fe7f2af47 100644 --- a/tests/operators/qlinear_add_test.cairo +++ b/tests/operators/qlinear_add_test.cairo @@ -13,33 +13,13 @@ fn qlinearadd_test() { i8 >::new( shape: array![4, 2].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 >::new( shape: array![4, 2].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - 10_i8, - 12_i8, - 14_i8, - 16_i8 - ] - .span(), + data: array![2_i8, 4_i8, 6_i8, 8_i8, 10_i8, 12_i8, 14_i8, 16_i8].span(), ); let a_scale = TensorTrait::< @@ -82,30 +62,11 @@ fn qlinearadd_broadcast_test() { i8 >::new( shape: array![2, 4].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 - >::new( - shape: array![1, 4].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - ] - .span(), - ); + >::new(shape: array![1, 4].span(), data: array![2_i8, 4_i8, 6_i8, 8_i8,].span(),); let a_scale = TensorTrait::< FP16x16 @@ -146,29 +107,10 @@ fn qlinearadd_broadcast_test() { fn test_example_doc() { let a = TensorTrait::< i8 - >::new( - shape: array![2, 3].span(), - data: array![ - 6_i8, - 6_i8, - 6_i8, - 11_i8, - 11_i8, - 11_i8 - ] - .span(), - ); + >::new(shape: array![2, 3].span(), data: array![6_i8, 6_i8, 6_i8, 11_i8, 11_i8, 11_i8].span(),); let b = TensorTrait::< i8 - >::new( - shape: array![1, 3].span(), - data: array![ - 40_i8, - 40_i8, - 40_i8 - ] - .span(), - ); + >::new(shape: array![1, 
3].span(), data: array![40_i8, 40_i8, 40_i8].span(),); let a_scale = TensorTrait::< FP16x16 diff --git a/tests/operators/qlinear_concat_test.cairo b/tests/operators/qlinear_concat_test.cairo index 101cefaa8..4c86b3ff8 100644 --- a/tests/operators/qlinear_concat_test.cairo +++ b/tests/operators/qlinear_concat_test.cairo @@ -19,28 +19,10 @@ fn print_span(mut span: Span) { fn qlinear_concat_test() { let tensor1 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 10_i8, - 20_i8, - 30_i8, - 40_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![10_i8, 20_i8, 30_i8, 40_i8,].span(),); let tensor2 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 20_i8, - 40_i8, - 60_i8, - 80_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![20_i8, 40_i8, 60_i8, 80_i8,].span(),); let tensors = array![tensor1, tensor2].span(); @@ -90,40 +72,13 @@ fn qlinear_concat_test() { fn qlinear_concat_test_shape() { let tensor1 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 2_i8, - 2_i8, - 2_i8, - 2_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![2_i8, 2_i8, 2_i8, 2_i8,].span(),); let tensor2 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 8_i8, - 8_i8, - 8_i8, - 8_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![8_i8, 8_i8, 8_i8, 8_i8,].span(),); let tensor3 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 10_i8, - 10_i8, - 10_i8, - 10_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![10_i8, 10_i8, 10_i8, 10_i8,].span(),); let tensors = array![tensor1, tensor2, tensor3].span(); @@ -177,28 +132,10 @@ fn qlinear_concat_test_shape() { fn qlinear_concat_example_doc() { let tensor1 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 5_i8, - 5_i8, - 5_i8, - 5_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), 
data: array![5_i8, 5_i8, 5_i8, 5_i8,].span(),); let tensor2 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 1_i8, - 1_i8, - 1_i8, - 1_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![1_i8, 1_i8, 1_i8, 1_i8,].span(),); let tensors = array![tensor1, tensor2].span(); diff --git a/tests/operators/qlinear_leakyrelu_test.cairo b/tests/operators/qlinear_leakyrelu_test.cairo index 9e6473d06..e180ab33b 100644 --- a/tests/operators/qlinear_leakyrelu_test.cairo +++ b/tests/operators/qlinear_leakyrelu_test.cairo @@ -12,15 +12,7 @@ fn qlinear_leakyrelu_test() { i8 >::new( shape: array![2, 3].span(), - data: array![ - -10_i8, - -10_i8, - -10_i8, - 10_i8, - 10_i8, - 10_i8 - ] - .span(), + data: array![-10_i8, -10_i8, -10_i8, 10_i8, 10_i8, 10_i8].span(), ); let a_scale = TensorTrait::< diff --git a/tests/operators/qlinear_matmul_test.cairo b/tests/operators/qlinear_matmul_test.cairo index bfbe04714..9d3f8fa4b 100644 --- a/tests/operators/qlinear_matmul_test.cairo +++ b/tests/operators/qlinear_matmul_test.cairo @@ -15,36 +15,13 @@ fn qlinearmatmul_2D_test() { i8 >::new( shape: array![2, 4].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 >::new( shape: array![4, 3].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - 10_i8, - 12_i8, - 14_i8, - 16_i8, - 18_i8, - 20_i8, - 22_i8, - 24_i8 - ] + data: array![2_i8, 4_i8, 6_i8, 8_i8, 10_i8, 12_i8, 14_i8, 16_i8, 18_i8, 20_i8, 22_i8, 24_i8] .span(), ); @@ -90,18 +67,7 @@ fn qlinearmatmul_3D_test() { >::new( shape: array![2, 2, 3].span(), data: array![ - -1_i8, - -2_i8, - -2_i8, - -3_i8, - -4_i8, - -4_i8, - -5_i8, - -6_i8, - -6_i8, - -7_i8, - -8_i8, - -8_i8 + -1_i8, -2_i8, -2_i8, -3_i8, -4_i8, -4_i8, -5_i8, -6_i8, -6_i8, -7_i8, -8_i8, -8_i8 ] .span(), ); @@ -110,18 +76,7 @@ fn qlinearmatmul_3D_test() { >::new( shape: array![2, 3, 
2].span(), data: array![ - -2_i8, - -4_i8, - -6_i8, - -8_i8, - -10_i8, - -12_i8, - -2_i8, - -4_i8, - -6_i8, - -8_i8, - -10_i8, - -12_i8 + -2_i8, -4_i8, -6_i8, -8_i8, -10_i8, -12_i8, -2_i8, -4_i8, -6_i8, -8_i8, -10_i8, -12_i8 ] .span(), ); @@ -167,29 +122,10 @@ fn qlinearmatmul_3D_test() { fn test_example_doc() { let a = TensorTrait::< i8 - >::new( - shape: array![2, 3].span(), - data: array![ - 3_i8, - 4_i8, - 5_i8, - 2_i8, - 4_i8, - 3_i8 - ] - .span(), - ); + >::new(shape: array![2, 3].span(), data: array![3_i8, 4_i8, 5_i8, 2_i8, 4_i8, 3_i8].span(),); let b = TensorTrait::< i8 - >::new( - shape: array![3, 1].span(), - data: array![ - 4_i8, - 8_i8, - 4_i8 - ] - .span(), - ); + >::new(shape: array![3, 1].span(), data: array![4_i8, 8_i8, 4_i8].span(),); let a_scale = TensorTrait::< FP16x16 diff --git a/tests/operators/qlinear_mul_test.cairo b/tests/operators/qlinear_mul_test.cairo index 6bf292bcc..3babc1800 100644 --- a/tests/operators/qlinear_mul_test.cairo +++ b/tests/operators/qlinear_mul_test.cairo @@ -14,40 +14,14 @@ fn qlinearmul_test() { i8 >::new( shape: array![4, 3].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8, - 9_i8, - 10_i8, - 11_i8, - 12_i8 - ] + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8, 9_i8, 10_i8, 11_i8, 12_i8] .span(), ); let b = TensorTrait::< i8 >::new( shape: array![4, 3].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - 10_i8, - 12_i8, - 14_i8, - 16_i8, - 18_i8, - 20_i8, - 22_i8, - 24_i8 - ] + data: array![2_i8, 4_i8, 6_i8, 8_i8, 10_i8, 12_i8, 14_i8, 16_i8, 18_i8, 20_i8, 22_i8, 24_i8] .span(), ); @@ -96,30 +70,11 @@ fn qlinear_mul_broadcast_test() { i8 >::new( shape: array![2, 4].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 - >::new( - shape: array![1, 4].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - ] - .span(), 
- ); + >::new(shape: array![1, 4].span(), data: array![2_i8, 4_i8, 6_i8, 8_i8,].span(),); let a_scale = TensorTrait::< FP16x16 @@ -161,28 +116,11 @@ fn test_example_doc() { let a = TensorTrait::< i8 >::new( - shape: array![2, 3].span(), - data: array![ - 21_i8, - 21_i8, - 21_i8, - 41_i8, - 41_i8, - 41_i8 - ] - .span(), + shape: array![2, 3].span(), data: array![21_i8, 21_i8, 21_i8, 41_i8, 41_i8, 41_i8].span(), ); let b = TensorTrait::< i8 - >::new( - shape: array![1, 3].span(), - data: array![ - 4_i8, - 8_i8, - 12_i8 - ] - .span(), - ); + >::new(shape: array![1, 3].span(), data: array![4_i8, 8_i8, 12_i8].span(),); let a_scale = TensorTrait::< FP16x16 From e34c71302c7d8934ad6666770769a97746bc9fb1 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Sun, 28 Jan 2024 14:57:43 +0100 Subject: [PATCH 14/46] supports group >=1 --- nodegen/node/conv_transpose.py | 135 +++++++++++++- .../nn/functional/conv_transpose.cairo | 157 ++++++++++++++--- tests/nodes.cairo | 2 + tests/nodes/conv_transpose_group_2.cairo | 34 ++++ .../conv_transpose_group_2/input_0.cairo | 33 ++++ .../conv_transpose_group_2/input_1.cairo | 33 ++++ .../conv_transpose_group_2/output_0.cairo | 65 +++++++ .../conv_transpose_group_2_image_3.cairo | 34 ++++ .../input_0.cairo | 69 ++++++++ .../input_1.cairo | 33 ++++ .../output_0.cairo | 165 ++++++++++++++++++ 11 files changed, 730 insertions(+), 30 deletions(-) create mode 100644 tests/nodes/conv_transpose_group_2.cairo create mode 100644 tests/nodes/conv_transpose_group_2/input_0.cairo create mode 100644 tests/nodes/conv_transpose_group_2/input_1.cairo create mode 100644 tests/nodes/conv_transpose_group_2/output_0.cairo create mode 100644 tests/nodes/conv_transpose_group_2_image_3.cairo create mode 100644 tests/nodes/conv_transpose_group_2_image_3/input_0.cairo create mode 100644 tests/nodes/conv_transpose_group_2_image_3/input_1.cairo create mode 100644 tests/nodes/conv_transpose_group_2_image_3/output_0.cairo diff --git a/nodegen/node/conv_transpose.py 
b/nodegen/node/conv_transpose.py index ebd320ce6..2b9259397 100644 --- a/nodegen/node/conv_transpose.py +++ b/nodegen/node/conv_transpose.py @@ -93,9 +93,36 @@ def conv_transpose( res += B[c] final[image_id, c, ...] = res[...] else: - raise NotImplementedError( - f"Implementation for group={group} > 1 is not available yet." - ) + final = np.zeros((X.shape[0], num_output_channels ) + tuple(output_shape)) + + output_array = [] + + for group_id in range(group): + group_X = X[:, group_id * C // group : (group_id + 1) * C // group, ...] + group_W = W[group_id * num_output_channels // group : (group_id + 1) * num_output_channels // group, ...] + + group_output = conv_transpose( + group_X, + group_W, + B=B, + auto_pad=auto_pad, + dilations=dilations, + group=1, + kernel_shape=kernel_shape, + output_padding=output_padding, + output_shape=output_shape, + pads=pads, + strides=strides, + ) + group_output = np.array(group_output[0]) + + output_array.append(group_output) + + + for image_id in range(X.shape[0]): + for group_id in range(group): + group_output = output_array[group_id] + final[image_id, group_id:(group_id+1), ...] = group_output[image_id, ...] 
return (final.astype(X.dtype),) @@ -493,8 +520,110 @@ def export_convtranspose_autopad_same() -> None: [x, w], y, func_sig, name, Trait.NN) + @staticmethod + def export_convtranspose_group_2() -> None: + x = np.array( + [ + [ + [ + [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], + [ + [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0]] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ], + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ] + ).astype(np.float32) + + y = conv_transpose(x, w, group=2)[0] + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_transpose_group_2" + func_sig = "NNTrait::conv_transpose(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(2)," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None,)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + @staticmethod + def export_convtranspose_group_2_image_3() -> None: + x = np.array( + [ + [ + [ + [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], + [ + [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0] + ] + ], + [ + [ + [18.0, 19.0, 20.0], [21.0, 22.0, 23.0], [24.0, 25.0, 26.0] + ], + [ + [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0] + ] + ], + [ + [ + [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0] + ], + [ + [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0] + ] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ], + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ] + ).astype(np.float32) + y = 
conv_transpose(x, w, group=2)[0] + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + name = "conv_transpose_group_2_image_3" + func_sig = "NNTrait::conv_transpose(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(2)," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None,)" + make_test( + [x, w], y, func_sig, name, Trait.NN) diff --git a/src/operators/nn/functional/conv_transpose.cairo b/src/operators/nn/functional/conv_transpose.cairo index adb933c23..dd62b844f 100644 --- a/src/operators/nn/functional/conv_transpose.cairo +++ b/src/operators/nn/functional/conv_transpose.cairo @@ -393,7 +393,136 @@ fn conv_transpose< image_id += 1; }; } else { - panic(array!['group > 1 not supported']); + + let mut output_array = ArrayTrait::new(); + + let mut i = 0; + let mut output_size = 1; + loop { + if i == output_shape.len() { + break; + } + output_size *= *output_shape.at(i); + i += 1; + }; + + // Computation of conv transposition per group + let mut group_id = 0; + loop { + if group_id == group { + break; + } + let mut group_X = ArrayTrait::new(); + let mut group_W = ArrayTrait::new(); + + let mut image_id = 0; + loop { + if image_id == *(*X).shape.at(0) { + break; + } + let start = image_id * n * C + (group_id * C / group) * n; + let end = image_id * n * C + ((group_id + 1) * C / group) * n; + + let mut i = start; + loop { + if i == end { + break; + } + group_X.append(*(*X).data.at(i)); + + i += 1; + }; + image_id += 1; + }; + + let start = (group_id * C / group) * *(*W).shape.at(1) * kernel_size; + let end = (group_id + 1) * C / group * *(*W).shape.at(1) * kernel_size; + let mut i = 
start; + loop { + if i == end { + break; + } + group_W.append(*(*W).data.at(i)); + i += 1; + }; + + let mut shape_X = ArrayTrait::new(); + shape_X.append(*(*X).shape.at(0)); + shape_X.append(C / group); + + let mut i = 2; + loop { + if i >= (*X).shape.len() { + break; + } + shape_X.append(*(*X).shape.at(i)); + i += 1; + }; + + let mut shape_W = ArrayTrait::new(); + shape_W.append(C / group); + + let mut i = 1; + loop { + if i >= (*W).shape.len() { + break; + } + shape_W.append(*(*W).shape.at(i)); + i += 1; + }; + + // group_X : N x (C / group) x X.shape[2:] + let group_X = TensorTrait::new(shape_X.span(), group_X.span()); + // group_W : (C / group) x *(*W).shape.at(1) x W.shape[2:] + let group_W = TensorTrait::new(shape_W.span(), group_W.span()); + + // group output : N x (num_output_channels / group) x output_shape + let group_output = conv_transpose( + @group_X, + @group_W, + B, + Option::Some(auto_pad), + Option::Some(dilations), + Option::Some(1), + Option::Some(kernel_shape), + Option::Some(output_padding), + Option::Some(output_shape), + Option::Some(pads), + Option::Some(strides) + ); + + output_array.append(group_output.data); + + group_id += 1; + }; + let output_array = output_array.span(); + + // Sorting result per item of the batch + // output size : N (batch size) x num_output_channels x output_shape + let mut image_id = 0; + loop { + if image_id == *(*X).shape.at(0) { + break; + } + let mut group_id = 0; + loop { + if group_id == group { + break; + } + let group_output = *output_array.at(group_id); + let mut i = image_id * output_size * (num_output_channels / group); + + loop { + if i == (image_id + 1) * output_size * (num_output_channels / group) { + break; + } + final.append(*group_output.at(i)); + i += 1; + }; + group_id += 1; + }; + image_id += 1; + }; } let mut shape = array![*(*X).shape.at(0), num_output_channels]; @@ -556,16 +685,6 @@ fn col2im_shape_check, +Copy, +Drop,>( } -fn rec_add_chars(ref arr: Array, str_len: felt252, str: u128) { - if 
str_len == 0 { - return; - } - let (str, char) = DivRem::div_rem(str, 256_u128.try_into().unwrap()); - rec_add_chars(ref arr, str_len - 1, str); - if char != 0 { - arr.append(char); - } -} fn get_indices(index: usize, shape: Span,) -> Array { let mut i = index; @@ -615,22 +734,6 @@ fn is_out(ind: Span, shape: Span,) -> bool { } -fn rec_get_indices(ref arr: Array, mut i: usize, mut k: usize, shape: Span,) { - if k == 0 { - arr.append(i); - return; - } - let m = i % *shape.at(k); - i -= m; - i /= *shape.at(k); - k -= 1; - rec_get_indices(ref arr, i, k, shape); - if k != 0 { - arr.append(m); - } -} - - fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( pA: Span, start: usize ) -> T { diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 5853c3664..61aa152d0 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -943,3 +943,5 @@ mod conv_transpose_attributes; mod conv_transpose_autopad_same; mod conv_transpose_dilations; mod conv_transpose_pads; +mod conv_transpose_group_2; +mod conv_transpose_group_2_image_3; \ No newline at end of file diff --git a/tests/nodes/conv_transpose_group_2.cairo b/tests/nodes/conv_transpose_group_2.cairo new file mode 100644 index 000000000..8c9453996 --- /dev/null +++ b/tests/nodes/conv_transpose_group_2.cairo @@ -0,0 +1,34 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_transpose_group_2() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv_transpose( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::Some(2), + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + ); + + assert_eq(y_0, z_0); +} diff --git 
a/tests/nodes/conv_transpose_group_2/input_0.cairo b/tests/nodes/conv_transpose_group_2/input_0.cairo new file mode 100644 index 000000000..e152fc043 --- /dev/null +++ b/tests/nodes/conv_transpose_group_2/input_0.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_group_2/input_1.cairo b/tests/nodes/conv_transpose_group_2/input_1.cairo new file mode 100644 index 000000000..badf32363 --- /dev/null +++ b/tests/nodes/conv_transpose_group_2/input_1.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_group_2/output_0.cairo b/tests/nodes/conv_transpose_group_2/output_0.cairo new file mode 100644 index 000000000..30561c42f --- /dev/null +++ b/tests/nodes/conv_transpose_group_2/output_0.cairo @@ -0,0 +1,65 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: 
false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + 
data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 5701632, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_group_2_image_3.cairo b/tests/nodes/conv_transpose_group_2_image_3.cairo new file mode 100644 index 000000000..7e4e3e481 --- /dev/null +++ b/tests/nodes/conv_transpose_group_2_image_3.cairo @@ -0,0 +1,34 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_transpose_group_2_image_3() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv_transpose( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::Some(2), + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_transpose_group_2_image_3/input_0.cairo b/tests/nodes/conv_transpose_group_2_image_3/input_0.cairo new file mode 100644 index 000000000..17e944907 --- /dev/null +++ b/tests/nodes/conv_transpose_group_2_image_3/input_0.cairo @@ -0,0 +1,69 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, 
FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + 
data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_group_2_image_3/input_1.cairo b/tests/nodes/conv_transpose_group_2_image_3/input_1.cairo new file mode 100644 index 000000000..badf32363 --- /dev/null +++ b/tests/nodes/conv_transpose_group_2_image_3/input_1.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { 
mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_transpose_group_2_image_3/output_0.cairo b/tests/nodes/conv_transpose_group_2_image_3/output_0.cairo new file mode 100644 index 000000000..f1ecdfb6b --- /dev/null +++ b/tests/nodes/conv_transpose_group_2_image_3/output_0.cairo @@ -0,0 +1,165 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + 
data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 5701632, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + 
data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 2424832, sign: false }); + data.append(FP16x16 { mag: 3735552, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + data.append(FP16x16 { mag: 8060928, sign: false }); + data.append(FP16x16 { mag: 5505024, sign: false }); + data.append(FP16x16 { mag: 2818048, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 8454144, sign: false }); + data.append(FP16x16 { mag: 12976128, sign: false }); + data.append(FP16x16 { mag: 8847360, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 2949120, sign: false }); + data.append(FP16x16 { mag: 6029312, sign: false }); + data.append(FP16x16 { mag: 9240576, sign: false }); + data.append(FP16x16 { mag: 6291456, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + 
data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 5701632, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { 
mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 5701632, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} From 5fcbaf119fff5248fa1a597380befefffc3ebc7e Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Mon, 29 Jan 2024 14:47:07 +0800 Subject: [PATCH 15/46] fix: Fixed errors in handling indexes in code fest: Added support for bool and complex64 types test; Added test cases with different dimensions; --- docs/framework/operators/tensor/README.md | 1 + nodegen/node/reverse_sequence.py | 209 ++++++++++++++---- 
src/operators/tensor/core.cairo | 13 +- .../tensor/implementations/tensor_bool.cairo | 2 +- .../implementations/tensor_complex64.cairo | 2 +- .../manipulation/reverse_sequence.cairo | 85 ++++--- tests/nodes.cairo | 15 ++ ...se_sequence_different_dimensions_1_6.cairo | 20 ++ .../input_0.cairo | 19 ++ .../output_0.cairo | 19 ++ ...se_sequence_different_dimensions_2_4.cairo | 20 ++ .../input_0.cairo | 21 ++ .../output_0.cairo | 21 ++ ...uence_different_dimensions_3x9_batch.cairo | 20 ++ .../input_0.cairo | 40 ++++ .../output_0.cairo | 40 ++++ ...quence_different_dimensions_3x9_time.cairo | 20 ++ .../input_0.cairo | 40 ++++ .../output_0.cairo | 40 ++++ ...se_sequence_different_dimensions_4_5.cairo | 20 ++ .../input_0.cairo | 33 +++ .../output_0.cairo | 33 +++ ...e_sequence_fp16x16_batch_equal_parts.cairo | 21 ++ .../input_0.cairo | 29 +++ .../output_0.cairo | 29 +++ ...se_sequence_fp16x16_time_equal_parts.cairo | 21 ++ .../input_0.cairo | 29 +++ .../output_0.cairo | 29 +++ ...verse_sequence_i32_batch_equal_parts.cairo | 22 ++ .../input_0.cairo | 29 +++ .../output_0.cairo | 29 +++ ...everse_sequence_i32_time_equal_parts.cairo | 22 ++ .../input_0.cairo | 29 +++ .../output_0.cairo | 29 +++ ...everse_sequence_i8_batch_equal_parts.cairo | 21 ++ .../input_0.cairo | 29 +++ .../output_0.cairo | 29 +++ ...reverse_sequence_i8_time_equal_parts.cairo | 22 ++ .../input_0.cairo | 29 +++ .../output_0.cairo | 29 +++ .../reverse_sequence_time_equal_parts.cairo | 20 ++ .../input_0.cairo | 29 +++ .../output_0.cairo | 29 +++ .../reverse_sequence_u32_3x3_batch.cairo | 20 ++ .../input_0.cairo | 22 ++ .../output_0.cairo | 22 ++ .../nodes/reverse_sequence_u32_3x3_time.cairo | 20 ++ .../input_0.cairo | 22 ++ .../output_0.cairo | 22 ++ .../reverse_sequence_u32_4x4_batch.cairo | 20 ++ .../input_0.cairo | 29 +++ .../output_0.cairo | 29 +++ .../nodes/reverse_sequence_u32_4x4_time.cairo | 20 ++ .../input_0.cairo | 29 +++ .../output_0.cairo | 29 +++ 55 files changed, 1481 insertions(+), 91 
deletions(-) create mode 100644 tests/nodes/reverse_sequence_different_dimensions_1_6.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_1_6/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_1_6/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_2_4.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_2_4/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_2_4/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_3x9_batch.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_3x9_batch/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_3x9_batch/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_3x9_time.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_3x9_time/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_3x9_time/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_4_5.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_4_5/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_different_dimensions_4_5/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_fp16x16_batch_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_fp16x16_batch_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_fp16x16_batch_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_fp16x16_time_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_fp16x16_time_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_fp16x16_time_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_i32_batch_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_i32_batch_equal_parts/input_0.cairo create mode 
100644 tests/nodes/reverse_sequence_i32_batch_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_i32_time_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_i32_time_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_i32_time_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_i8_batch_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_i8_batch_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_i8_batch_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_i8_time_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_i8_time_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_i8_time_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_time_equal_parts.cairo create mode 100644 tests/nodes/reverse_sequence_time_equal_parts/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_time_equal_parts/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_3x3_batch.cairo create mode 100644 tests/nodes/reverse_sequence_u32_3x3_batch/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_3x3_batch/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_3x3_time.cairo create mode 100644 tests/nodes/reverse_sequence_u32_3x3_time/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_3x3_time/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_4x4_batch.cairo create mode 100644 tests/nodes/reverse_sequence_u32_4x4_batch/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_4x4_batch/output_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_4x4_time.cairo create mode 100644 tests/nodes/reverse_sequence_u32_4x4_time/input_0.cairo create mode 100644 tests/nodes/reverse_sequence_u32_4x4_time/output_0.cairo diff --git a/docs/framework/operators/tensor/README.md 
b/docs/framework/operators/tensor/README.md index 281135f63..aae6d3252 100644 --- a/docs/framework/operators/tensor/README.md +++ b/docs/framework/operators/tensor/README.md @@ -120,6 +120,7 @@ use orion::operators::tensor::TensorTrait; | [`tensor.erf`](tensor.erf.md) | Computes the error function of the given input tensor element-wise. | | [`tensor.layer_normalization`](tensor.layer\_normalization.md) | computes the layer normalization of the input tensor. | | [`tensor.split`](tensor.split.md) | Split a tensor into a list of tensors, along the specified ‘axis’. | +| [`tensor.reverse_sequence`](tensor.reverse\_sequence.md) | Reverse batch of sequences having different lengths specified by sequence_lens. | ## Arithmetic Operations diff --git a/nodegen/node/reverse_sequence.py b/nodegen/node/reverse_sequence.py index f446268b3..ee8a8bbae 100644 --- a/nodegen/node/reverse_sequence.py +++ b/nodegen/node/reverse_sequence.py @@ -6,89 +6,220 @@ class Reverse_sequence(RunAll): @staticmethod def Reverse_sequence_u32(): - def reverse_sequence_2d_batch(): + def reverse_sequence_u32_4x4_batch(): x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=np.uint32).reshape((4, 4)) y = np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 13, 12], dtype=np.uint32).reshape((4, 4)) _x = Tensor(Dtype.U32, x.shape, x.flatten()) _y = Tensor(Dtype.U32, y.shape, y.flatten()) - name = "reverse_sequence_u32_2d_batch_equal_parts" + name = "reverse_sequence_u32_4x4_batch" - make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", name) + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", + name + ) - def reverse_sequence_2d_time(): + def reverse_sequence_u32_4x4_time(): x = np.array([0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], dtype=np.uint32).reshape((4, 4)) y = np.array([3, 
6, 9, 12, 2, 5, 8, 13, 1, 4, 10, 14, 0, 7, 11, 15], dtype=np.uint32).reshape((4, 4)) _x = Tensor(Dtype.U32, x.shape, x.flatten()) _y = Tensor(Dtype.U32, y.shape, y.flatten()) - name = "reverse_sequence_u32_2d_time_equal_parts" - make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", name) - reverse_sequence_2d_batch() - reverse_sequence_2d_time() + name = "reverse_sequence_u32_4x4_time" + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", + name + ) + def reverse_sequence_u32_3x3_batch(): + x = np.array([0,1,2,3,4,5,6,7,8], dtype=np.uint32).reshape(3,3) + y = np.array([2,1,0,3,4,5,7,6,8], dtype=np.uint32).reshape(3,3) + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = Tensor(Dtype.U32, y.shape, y.flatten()) + name = "reverse_sequence_u32_3x3_batch" + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![3].span(), array![3,1,2].span()), Option::Some(0), Option::Some(1))", + name + ) + def reverse_sequence_u32_3x3_time(): + x = np.array([0,1,2,3,4,5,6,7,8], dtype=np.uint32).reshape(3,3) + y = np.array([0,7,8,3,4,5,6,1,2], dtype=np.uint32).reshape(3,3) + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = Tensor(Dtype.U32, y.shape, y.flatten()) + name = "reverse_sequence_u32_3x3_time" + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![3].span(), array![1,3,3].span()), Option::Some(1), Option::Some(0))", + name + ) + + reverse_sequence_u32_4x4_batch() + reverse_sequence_u32_4x4_time() + reverse_sequence_u32_3x3_batch() + reverse_sequence_u32_3x3_time() + @staticmethod def Reverse_sequence_i32(): - def reverse_sequence_2d_batch(): + def reverse_sequence_i32_batch(): x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=np.int32).reshape((4, 4)) y = np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 
13, 12], dtype=np.int32).reshape((4, 4)) _x = Tensor(Dtype.I32, x.shape, x.flatten()) _y = Tensor(Dtype.I32, y.shape, y.flatten()) - name = "reverse_sequence_i32_2d_batch_equal_parts" + name = "reverse_sequence_i32_batch_equal_parts" - make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", name) + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", + name + ) - def reverse_sequence_2d_time(): + def reverse_sequence_i32_time(): x = np.array([0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], dtype=np.int32).reshape((4, 4)) y = np.array([3, 6, 9, 12, 2, 5, 8, 13, 1, 4, 10, 14, 0, 7, 11, 15], dtype=np.int32).reshape((4, 4)) _x = Tensor(Dtype.I32, x.shape, x.flatten()) _y = Tensor(Dtype.I32, y.shape, y.flatten()) - name = "reverse_sequence_i32_2d_time_equal_parts" - make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", name) - reverse_sequence_2d_batch() - reverse_sequence_2d_time() + name = "reverse_sequence_i32_time_equal_parts" + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", + name + ) + + reverse_sequence_i32_batch() + reverse_sequence_i32_time() @staticmethod def Reverse_sequence_i8(): - def reverse_sequence_2d_batch(): + def reverse_sequence_batch(): x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=np.int8).reshape((4, 4)) y = np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 13, 12], dtype=np.int8).reshape((4, 4)) _x = Tensor(Dtype.I8, x.shape, x.flatten()) _y = Tensor(Dtype.I8, y.shape, y.flatten()) - name = "reverse_sequence_i8_2d_batch_equal_parts" + name = "reverse_sequence_i8_batch_equal_parts" - make_test([_x], _y, 
"input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", name) + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", + name + ) - def reverse_sequence_2d_time(): + def reverse_sequence_time(): x = np.array([0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], dtype=np.uint32).reshape((4, 4)) y = np.array([3, 6, 9, 12, 2, 5, 8, 13, 1, 4, 10, 14, 0, 7, 11, 15], dtype=np.uint32).reshape((4, 4)) _x = Tensor(Dtype.U32, x.shape, x.flatten()) _y = Tensor(Dtype.U32, y.shape, y.flatten()) - name = "reverse_sequence_i8_2d_time_equal_parts" - make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", name) - reverse_sequence_2d_batch() - reverse_sequence_2d_time() + name = "reverse_sequence_i8_time_equal_parts" + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", + name + ) + reverse_sequence_batch() + reverse_sequence_time() def Reverse_sequence_fp16x16(): - def reverse_sequence_2d_batch(): + def reverse_sequence_batch(): x = to_fp(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=np.int64).reshape(4, 4), FixedImpl.FP16x16) y = to_fp(np.array([0, 1, 2, 3, 5, 4, 6, 7, 10, 9, 8, 11, 15, 14, 13, 12], dtype=np.int64).reshape(4, 4), FixedImpl.FP16x16) _x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) _y = Tensor(Dtype.FP16x16, y.shape, y.flatten()) - name = "reverse_sequence_fp16x16_2d_batch_equal_parts" - make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", name) - def reverse_sequence_2d_time(): + name = "reverse_sequence_fp16x16_batch_equal_parts" + make_test( + [_x], + _y, 
"input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1))", + name + ) + def reverse_sequence_time(): x = to_fp(np.array([0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15], dtype=np.int64).reshape(4, 4), FixedImpl.FP16x16) y = to_fp(np.array([3, 6, 9, 12, 2, 5, 8, 13, 1, 4, 10, 14, 0, 7, 11, 15], dtype=np.int64).reshape(4, 4), FixedImpl.FP16x16) _x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) _y = Tensor(Dtype.FP16x16, y.shape, y.flatten()) - name = "reverse_sequence_fp16x16_2d_time_equal_parts" - make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", name) - reverse_sequence_2d_batch() - reverse_sequence_2d_time() + name = "reverse_sequence_fp16x16_time_equal_parts" + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0))", + name + ) + reverse_sequence_batch() + reverse_sequence_time() - def reverse_sequence_zero_size(): - x = np.array([]).astype(np.uint32) - y = np.array([]).astype(np.uint32) - _x = Tensor(Dtype.U32, x.shape, y.flatten()) - _y = Tensor(Dtype.U32, x.shape, y.flatten()) - name = "reverse_sequence_u32_zero_size" - make_test([_x], _y, "input_0.reverse_sequence(TensorTrait::::new(array![].span(), array![].span()), Option::Some(1), Option::Some(0))", name) \ No newline at end of file + def reverse_sequence_different_dimensions(): + def reverse_sequence_different_dimensions_4_5(): + x = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20], dtype=np.uint32).reshape(4,5) + y = np.array([5,4,3,2,1,9,8,7,6,10,13,12,11,14,15,17,16,18,19,20], dtype=np.uint32).reshape(4,5) + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = Tensor(Dtype.U32, y.shape, y.flatten()) + name = "reverse_sequence_different_dimensions_4_5" + make_test( + [_x], + _y, + 
"input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![5,4,3,2].span()), Option::Some(0), Option::Some(1))", + name + ) + + def reverse_sequence_different_dimensions_2_4(): + x = np.array([1,2,3,4,5,6,7,8], dtype=np.uint32).reshape(2,4) + y = np.array([5,6,7,8,1,2,3,4], dtype=np.uint32).reshape(2,4) + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = Tensor(Dtype.U32, y.shape, y.flatten()) + name = "reverse_sequence_different_dimensions_2_4" + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![2,2,2,2].span()), Option::Some(1), Option::Some(0))", + name + ) + def reverse_sequence_different_dimensions_1_6(): + x = np.array([0,1,2,3,4,5], dtype=np.uint32).reshape(1,6) + y = np.array([4,3,2,1,0,5], dtype=np.uint32).reshape(1,6) + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = Tensor(Dtype.U32, y.shape, y.flatten()) + name = "reverse_sequence_different_dimensions_1_6" + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![1].span(), array![5].span()), Option::Some(0), Option::Some(1))", + name + ) + + def reverse_sequence_different_dimensions_3x9_batch(): + x = np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26], dtype=np.uint32).reshape(3,9) + y = np.array([6,5,4,3,2,1,0,7,8,16,15,14,13,12,11,10,9,17,26,25,24,23,22,21,20,19,18], dtype=np.uint32).reshape(3,9) + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = Tensor(Dtype.U32, y.shape, y.flatten()) + name = "reverse_sequence_different_dimensions_3x9_batch" + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![3].span(), array![7,8,9].span()), Option::Some(0), Option::Some(1))", + name + ) + def reverse_sequence_different_dimensions_3x9_time(): + x = np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26], dtype=np.uint32).reshape(3,9) + y = np.array([18,10,20,12,22,14,24,16,8,9,1,11,3,13,5,15,7,17,0,19,2,21,4,23,6,25,26], 
dtype=np.uint32).reshape(3,9) + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = Tensor(Dtype.U32, y.shape, y.flatten()) + name = "reverse_sequence_different_dimensions_3x9_time" + make_test( + [_x], + _y, + "input_0.reverse_sequence(TensorTrait::::new(array![9].span(), array![3,2,3,2,3,2,3,2,1].span()), Option::Some(1), Option::Some(0))", + name + ) + + reverse_sequence_different_dimensions_4_5() + reverse_sequence_different_dimensions_2_4() + reverse_sequence_different_dimensions_1_6() + reverse_sequence_different_dimensions_3x9_batch() + reverse_sequence_different_dimensions_3x9_time() + \ No newline at end of file diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index b959db427..9e2d5e3c8 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -5168,8 +5168,8 @@ trait TensorTrait { /// # tensor.reverse_sequence /// /// ```rust - /// fn reverse_sequence(self: @Array>, sequence_lens: @Tensor, batch_axis: Option, time_axis: Option) -> - /// Array>; + /// fn reverse_sequence(self: @Tensor, sequence_lens: @Tensor, batch_axis: Option, time_axis: Option) -> + /// Tensor; /// ``` /// /// Reverse batch of sequences having different lengths specified by sequence_lens. @@ -5201,13 +5201,18 @@ trait TensorTrait { /// 0, 1, 2, 3, 4, 5, 6, 7,8,9,10,11,12,13,14,15,16 /// ].span(), /// ); - /// let sequence_lens = TensorTrait::::new(array![4,4].span(), array![1,2,3,4].span()); + /// let sequence_lens = TensorTrait::::new(array![4].span(), array![1,2,3,4].span()); /// let batch_axis = Option::Some(0); /// let time_axis = Option::Some(1); /// // We can call `split` function as follows. 
/// return tensor.reverse_sequence(sequence_lens, batch_axis, time_axis); /// } - /// >>> [0,1,2,3,5,4,6,7,10,9,8,11,15,14,13,12] + /// >>> [ + /// [0,1,2,3], + /// [5,4,6,7], + /// [10,9,8,11], + /// [15,14,13,12] + /// ] /// ``` /// fn reverse_sequence( diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 0ab62f3bb..78ee94085 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -488,7 +488,7 @@ impl BoolTensor of TensorTrait { fn reverse_sequence( self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option ) -> Tensor { - panic(array!['not supported!']) + manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 77cdca826..5d4dd6dc7 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -501,7 +501,7 @@ impl Complex64Tensor of TensorTrait { fn reverse_sequence( self: @Tensor, sequence_lens: Tensor, batch_axis: Option, time_axis: Option ) -> Tensor { - panic(array!['not supported!']) + manipulation::reverse_sequence::reverse_sequence(self, sequence_lens, batch_axis, time_axis) } fn resize( diff --git a/src/operators/tensor/manipulation/reverse_sequence.cairo b/src/operators/tensor/manipulation/reverse_sequence.cairo index 5bfde0d60..e59485ffd 100644 --- a/src/operators/tensor/manipulation/reverse_sequence.cairo +++ b/src/operators/tensor/manipulation/reverse_sequence.cairo @@ -15,46 +15,41 @@ fn reverse_sequence< ) -> Tensor{ let shape = *self.shape; let mut data: Array = array![]; - if (*self.data).len() == 0 { - data = ArrayTrait::::new(); - } else { - let has_batch_axis: usize = match batch_axis { + + let has_batch_axis: usize = 
match batch_axis { + Option::Some(value) => { + assert!((value != 0) || (value != 1), "batch_axis must be one of 1 or 0."); + value + }, + Option::None => 0, + }; + let has_time_axis: usize = match time_axis { Option::Some(value) => { - assert!((value != 0) || (value != 1), "batch_axis must be one of 1 or 0."); + assert!((value != 0) || (value != 1), "time_axis must be one of 1 or 0."); value }, - Option::None => 0, - }; - let has_time_axis: usize = match time_axis { - Option::Some(value) => { - assert!((value != 0) || (value != 1), "time_axis must be one of 1 or 0."); - value - }, - Option::None => 1, - }; - assert!(has_batch_axis != has_time_axis, "batch_axis and time_axis cannot be equal"); - - let control: bool = if has_batch_axis == 0 && has_time_axis == 1 { - true - } else { - false - }; + Option::None => 1, + }; + assert!(has_batch_axis != has_time_axis, "batch_axis and time_axis cannot be equal"); + assert!((*self.data).len() >= 2, "Tensor of rank r >= 2"); + let control: bool = if has_batch_axis == 0 && has_time_axis == 1 { + true + } else { + false + }; - let mut index: Array = reverse_index(*self.shape, sequence_lens, control); - // let shape = self.shape; - // let mut data = ArrayTrait::::new(); - loop { - match index.pop_front() { - Option::Some(ele) => { - data.append(*((*self).data).at(ele)); - }, - Option::None(_) => { - break; - } + let mut index: Array = reverse_index(*self.shape, sequence_lens, control); + loop { + match index.pop_front() { + Option::Some(ele) => { + data.append(*((*self).data).at(ele)); + }, + Option::None(_) => { + break; } - }; - } - + } + }; + TensorTrait::::new(shape, data.span()) } @@ -62,12 +57,12 @@ fn reverse_sequence< fn reverse_index( shape: Span, sequence_lens: Tensor, control: bool ) -> Array { - let mut result = ArrayTrait::::new(); let x: usize = *shape.at(0); let y: usize = *shape.at(1); + let mut result = ArrayTrait::::new(); if control { - //[i, slice] + // [i, slice] assert!(sequence_lens.data.len() <= x,"The 
length of sequence_lens cannot exceed batch_axis"); let mut i: usize = 0; loop { @@ -75,7 +70,7 @@ fn reverse_index( break; } - let reverse: usize = *(sequence_lens.data).at(i); + let reverse: usize = (*sequence_lens.data.at(i)); assert!(reverse <= y && reverse >= 1, "sequence_lens must be greater than one and less than batch_size"); let mut j: usize = reverse - 1; loop { @@ -87,7 +82,6 @@ fn reverse_index( result.append(i * y + j); j -= 1; }; - let current_index_len: usize = (i + 1) * y - 1; let mut j: usize = result.len(); loop { @@ -105,10 +99,10 @@ fn reverse_index( let mut tmp = ArrayTrait::::new(); let mut i: usize = 0; loop { - if i >= y { + if i > y - 1 { break; } - let reverse: usize = *(sequence_lens.data).at(i); + let reverse: usize = *sequence_lens.data.at(i); assert!(reverse <= x && reverse >= 1, "sequence_lens must be greater than one and less than batch_size"); let mut j: usize = reverse - 1; @@ -120,7 +114,6 @@ fn reverse_index( tmp.append(j * y + i); j -= 1; }; - let mut j: usize = reverse; loop { if j > x - 1 { @@ -134,19 +127,19 @@ fn reverse_index( let tmp = tmp.span(); let mut i : usize = 0; loop { - if i >= x { + if i > x - 1 { break; } let mut j: usize = 0; loop { - if j >= y { + if j > y - 1 { break; } - result.append(*tmp.at(j * y + i)); + result.append((*tmp.at(j * x + i))); j += 1; }; i += 1; - } + }; } result } \ No newline at end of file diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 6c70b42cb..8c5745a92 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -936,3 +936,18 @@ mod split_fp16x16_2d_variable_parts; mod split_fp16x16_zero_size; mod split_fp16x16_1d_uneven; mod split_fp16x16_2d_uneven; +mod reverse_sequence_fp16x16_batch_equal_parts; +mod reverse_sequence_fp16x16_time_equal_parts; +mod reverse_sequence_i32_batch_equal_parts; +mod reverse_sequence_i32_time_equal_parts; +mod reverse_sequence_i8_batch_equal_parts; +mod reverse_sequence_i8_time_equal_parts; +mod reverse_sequence_u32_4x4_batch; +mod 
reverse_sequence_u32_4x4_time; +mod reverse_sequence_u32_3x3_batch; +mod reverse_sequence_u32_3x3_time; +mod reverse_sequence_different_dimensions_4_5; +mod reverse_sequence_different_dimensions_2_4; +mod reverse_sequence_different_dimensions_1_6; +mod reverse_sequence_different_dimensions_3x9_batch; +mod reverse_sequence_different_dimensions_3x9_time; diff --git a/tests/nodes/reverse_sequence_different_dimensions_1_6.cairo b/tests/nodes/reverse_sequence_different_dimensions_1_6.cairo new file mode 100644 index 000000000..1ae01e30b --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_1_6.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_different_dimensions_1_6() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![1].span(), array![5].span()), Option::Some(0), Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_1_6/input_0.cairo b/tests/nodes/reverse_sequence_different_dimensions_1_6/input_0.cairo new file mode 100644 index 000000000..4a105c04a --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_1_6/input_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + 
TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_1_6/output_0.cairo b/tests/nodes/reverse_sequence_different_dimensions_1_6/output_0.cairo new file mode 100644 index 000000000..ad5eaa48f --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_1_6/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(4); + data.append(3); + data.append(2); + data.append(1); + data.append(0); + data.append(5); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_2_4.cairo b/tests/nodes/reverse_sequence_different_dimensions_2_4.cairo new file mode 100644 index 000000000..3fc2a4d28 --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_2_4.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_different_dimensions_2_4() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![2,2,2,2].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_2_4/input_0.cairo b/tests/nodes/reverse_sequence_different_dimensions_2_4/input_0.cairo new file mode 100644 index 000000000..05ef8a7a4 --- /dev/null +++ 
b/tests/nodes/reverse_sequence_different_dimensions_2_4/input_0.cairo @@ -0,0 +1,21 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_2_4/output_0.cairo b/tests/nodes/reverse_sequence_different_dimensions_2_4/output_0.cairo new file mode 100644 index 000000000..75097ac3b --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_2_4/output_0.cairo @@ -0,0 +1,21 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_3x9_batch.cairo b/tests/nodes/reverse_sequence_different_dimensions_3x9_batch.cairo new file mode 100644 index 000000000..254ade4de --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_3x9_batch.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use 
orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_different_dimensions_3x9_batch() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![3].span(), array![7,8,9].span()), Option::Some(0), Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_3x9_batch/input_0.cairo b/tests/nodes/reverse_sequence_different_dimensions_3x9_batch/input_0.cairo new file mode 100644 index 000000000..cebccf12f --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_3x9_batch/input_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(9); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + data.append(13); + data.append(14); + data.append(15); + data.append(16); + data.append(17); + data.append(18); + data.append(19); + data.append(20); + data.append(21); + data.append(22); + data.append(23); + data.append(24); + data.append(25); + data.append(26); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_3x9_batch/output_0.cairo b/tests/nodes/reverse_sequence_different_dimensions_3x9_batch/output_0.cairo new file mode 100644 index 000000000..afce94d7e --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_3x9_batch/output_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use 
orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(9); + + let mut data = ArrayTrait::new(); + data.append(6); + data.append(5); + data.append(4); + data.append(3); + data.append(2); + data.append(1); + data.append(0); + data.append(7); + data.append(8); + data.append(16); + data.append(15); + data.append(14); + data.append(13); + data.append(12); + data.append(11); + data.append(10); + data.append(9); + data.append(17); + data.append(26); + data.append(25); + data.append(24); + data.append(23); + data.append(22); + data.append(21); + data.append(20); + data.append(19); + data.append(18); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_3x9_time.cairo b/tests/nodes/reverse_sequence_different_dimensions_3x9_time.cairo new file mode 100644 index 000000000..aa8667bdd --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_3x9_time.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_different_dimensions_3x9_time() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![9].span(), array![3,2,3,2,3,2,3,2,1].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_3x9_time/input_0.cairo b/tests/nodes/reverse_sequence_different_dimensions_3x9_time/input_0.cairo new file mode 100644 index 000000000..cebccf12f --- /dev/null +++ 
b/tests/nodes/reverse_sequence_different_dimensions_3x9_time/input_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(9); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + data.append(13); + data.append(14); + data.append(15); + data.append(16); + data.append(17); + data.append(18); + data.append(19); + data.append(20); + data.append(21); + data.append(22); + data.append(23); + data.append(24); + data.append(25); + data.append(26); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_3x9_time/output_0.cairo b/tests/nodes/reverse_sequence_different_dimensions_3x9_time/output_0.cairo new file mode 100644 index 000000000..9a5d7bbc6 --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_3x9_time/output_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(9); + + let mut data = ArrayTrait::new(); + data.append(18); + data.append(10); + data.append(20); + data.append(12); + data.append(22); + data.append(14); + data.append(24); + data.append(16); + data.append(8); + data.append(9); + data.append(1); + data.append(11); + data.append(3); + data.append(13); + data.append(5); + data.append(15); + data.append(7); + data.append(17); + data.append(0); + 
data.append(19); + data.append(2); + data.append(21); + data.append(4); + data.append(23); + data.append(6); + data.append(25); + data.append(26); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_4_5.cairo b/tests/nodes/reverse_sequence_different_dimensions_4_5.cairo new file mode 100644 index 000000000..053f187e1 --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_4_5.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_different_dimensions_4_5() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![5,4,3,2].span()), Option::Some(0), Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_4_5/input_0.cairo b/tests/nodes/reverse_sequence_different_dimensions_4_5/input_0.cairo new file mode 100644 index 000000000..51a2a6f1b --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_4_5/input_0.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + data.append(13); + data.append(14); + 
data.append(15); + data.append(16); + data.append(17); + data.append(18); + data.append(19); + data.append(20); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_different_dimensions_4_5/output_0.cairo b/tests/nodes/reverse_sequence_different_dimensions_4_5/output_0.cairo new file mode 100644 index 000000000..677e80193 --- /dev/null +++ b/tests/nodes/reverse_sequence_different_dimensions_4_5/output_0.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(5); + data.append(4); + data.append(3); + data.append(2); + data.append(1); + data.append(9); + data.append(8); + data.append(7); + data.append(6); + data.append(10); + data.append(13); + data.append(12); + data.append(11); + data.append(14); + data.append(15); + data.append(17); + data.append(16); + data.append(18); + data.append(19); + data.append(20); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts.cairo new file mode 100644 index 000000000..c1bcdbc6a --- /dev/null +++ b/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_fp16x16_batch_equal_parts() { + let input_0 = input_0::input_0(); + let 
z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts/input_0.cairo new file mode 100644 index 000000000..ec0ca8b8b --- /dev/null +++ b/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts/output_0.cairo new file mode 100644 index 000000000..f5f2cb955 --- /dev/null +++ 
b/tests/nodes/reverse_sequence_fp16x16_batch_equal_parts/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_fp16x16_time_equal_parts.cairo b/tests/nodes/reverse_sequence_fp16x16_time_equal_parts.cairo new file mode 100644 index 000000000..c2f27748e --- /dev/null +++ b/tests/nodes/reverse_sequence_fp16x16_time_equal_parts.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; + +#[test] 
+#[available_gas(2000000000)] +fn test_reverse_sequence_fp16x16_time_equal_parts() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_fp16x16_time_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_fp16x16_time_equal_parts/input_0.cairo new file mode 100644 index 000000000..086b8c60d --- /dev/null +++ b/tests/nodes/reverse_sequence_fp16x16_time_equal_parts/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_fp16x16_time_equal_parts/output_0.cairo 
b/tests/nodes/reverse_sequence_fp16x16_time_equal_parts/output_0.cairo new file mode 100644 index 000000000..c37a284e8 --- /dev/null +++ b/tests/nodes/reverse_sequence_fp16x16_time_equal_parts/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i32_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_i32_batch_equal_parts.cairo new file mode 100644 index 000000000..86f1855e4 --- /dev/null +++ b/tests/nodes/reverse_sequence_i32_batch_equal_parts.cairo @@ -0,0 +1,22 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; + + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_i32_batch_equal_parts() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_i32_batch_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_i32_batch_equal_parts/input_0.cairo new file mode 100644 index 000000000..7b06d05fb --- /dev/null +++ b/tests/nodes/reverse_sequence_i32_batch_equal_parts/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + data.append(13); + data.append(14); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i32_batch_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_i32_batch_equal_parts/output_0.cairo new file mode 100644 index 000000000..079e647fe --- /dev/null +++ b/tests/nodes/reverse_sequence_i32_batch_equal_parts/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + 
shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(5); + data.append(4); + data.append(6); + data.append(7); + data.append(10); + data.append(9); + data.append(8); + data.append(11); + data.append(15); + data.append(14); + data.append(13); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i32_time_equal_parts.cairo b/tests/nodes/reverse_sequence_i32_time_equal_parts.cairo new file mode 100644 index 000000000..28c9d0a84 --- /dev/null +++ b/tests/nodes/reverse_sequence_i32_time_equal_parts.cairo @@ -0,0 +1,22 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; + + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_i32_time_equal_parts() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_i32_time_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_i32_time_equal_parts/input_0.cairo new file mode 100644 index 000000000..70982f3c2 --- /dev/null +++ b/tests/nodes/reverse_sequence_i32_time_equal_parts/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(4); + 
data.append(8); + data.append(12); + data.append(1); + data.append(5); + data.append(9); + data.append(13); + data.append(2); + data.append(6); + data.append(10); + data.append(14); + data.append(3); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i32_time_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_i32_time_equal_parts/output_0.cairo new file mode 100644 index 000000000..cd044452e --- /dev/null +++ b/tests/nodes/reverse_sequence_i32_time_equal_parts/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(3); + data.append(6); + data.append(9); + data.append(12); + data.append(2); + data.append(5); + data.append(8); + data.append(13); + data.append(1); + data.append(4); + data.append(10); + data.append(14); + data.append(0); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i8_batch_equal_parts.cairo b/tests/nodes/reverse_sequence_i8_batch_equal_parts.cairo new file mode 100644 index 000000000..9b5afdecf --- /dev/null +++ b/tests/nodes/reverse_sequence_i8_batch_equal_parts.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::I8TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_i8_batch_equal_parts() { + let input_0 = 
input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_i8_batch_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_i8_batch_equal_parts/input_0.cairo new file mode 100644 index 000000000..692195bec --- /dev/null +++ b/tests/nodes/reverse_sequence_i8_batch_equal_parts/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + data.append(13); + data.append(14); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i8_batch_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_i8_batch_equal_parts/output_0.cairo new file mode 100644 index 000000000..d33c71d65 --- /dev/null +++ b/tests/nodes/reverse_sequence_i8_batch_equal_parts/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(5); + data.append(4); + data.append(6); + data.append(7); + data.append(10); + data.append(9); + 
data.append(8); + data.append(11); + data.append(15); + data.append(14); + data.append(13); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i8_time_equal_parts.cairo b/tests/nodes/reverse_sequence_i8_time_equal_parts.cairo new file mode 100644 index 000000000..a803ef02e --- /dev/null +++ b/tests/nodes/reverse_sequence_i8_time_equal_parts.cairo @@ -0,0 +1,22 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; + + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_i8_time_equal_parts() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_i8_time_equal_parts/input_0.cairo b/tests/nodes/reverse_sequence_i8_time_equal_parts/input_0.cairo new file mode 100644 index 000000000..a8d22c5de --- /dev/null +++ b/tests/nodes/reverse_sequence_i8_time_equal_parts/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(4); + data.append(8); + data.append(12); + data.append(1); + data.append(5); + data.append(9); + data.append(13); + data.append(2); + data.append(6); + data.append(10); + data.append(14); + data.append(3); + data.append(7); + data.append(11); + 
data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_i8_time_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_i8_time_equal_parts/output_0.cairo new file mode 100644 index 000000000..eea151ef2 --- /dev/null +++ b/tests/nodes/reverse_sequence_i8_time_equal_parts/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(3); + data.append(6); + data.append(9); + data.append(12); + data.append(2); + data.append(5); + data.append(8); + data.append(13); + data.append(1); + data.append(4); + data.append(10); + data.append(14); + data.append(0); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_time_equal_parts.cairo b/tests/nodes/reverse_sequence_time_equal_parts.cairo new file mode 100644 index 000000000..a79efe4af --- /dev/null +++ b/tests/nodes/reverse_sequence_time_equal_parts.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_time_equal_parts() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_time_equal_parts/input_0.cairo 
b/tests/nodes/reverse_sequence_time_equal_parts/input_0.cairo new file mode 100644 index 000000000..785e8463d --- /dev/null +++ b/tests/nodes/reverse_sequence_time_equal_parts/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(4); + data.append(8); + data.append(12); + data.append(1); + data.append(5); + data.append(9); + data.append(13); + data.append(2); + data.append(6); + data.append(10); + data.append(14); + data.append(3); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_time_equal_parts/output_0.cairo b/tests/nodes/reverse_sequence_time_equal_parts/output_0.cairo new file mode 100644 index 000000000..c08ed0421 --- /dev/null +++ b/tests/nodes/reverse_sequence_time_equal_parts/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(3); + data.append(6); + data.append(9); + data.append(12); + data.append(2); + data.append(5); + data.append(8); + data.append(13); + data.append(1); + data.append(4); + data.append(10); + data.append(14); + data.append(0); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_3x3_batch.cairo b/tests/nodes/reverse_sequence_u32_3x3_batch.cairo new file mode 100644 index 
000000000..e8ff9ca9d --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_3x3_batch.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_u32_3x3_batch() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![3].span(), array![3,1,2].span()), Option::Some(0), Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_u32_3x3_batch/input_0.cairo b/tests/nodes/reverse_sequence_u32_3x3_batch/input_0.cairo new file mode 100644 index 000000000..c8ef4315d --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_3x3_batch/input_0.cairo @@ -0,0 +1,22 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_3x3_batch/output_0.cairo b/tests/nodes/reverse_sequence_u32_3x3_batch/output_0.cairo new file mode 100644 index 000000000..3f51b2c76 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_3x3_batch/output_0.cairo @@ -0,0 +1,22 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn 
output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(2); + data.append(1); + data.append(0); + data.append(3); + data.append(4); + data.append(5); + data.append(7); + data.append(6); + data.append(8); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_3x3_time.cairo b/tests/nodes/reverse_sequence_u32_3x3_time.cairo new file mode 100644 index 000000000..ac2b62361 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_3x3_time.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_u32_3x3_time() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![3].span(), array![1,3,3].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_u32_3x3_time/input_0.cairo b/tests/nodes/reverse_sequence_u32_3x3_time/input_0.cairo new file mode 100644 index 000000000..c8ef4315d --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_3x3_time/input_0.cairo @@ -0,0 +1,22 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + TensorTrait::new(shape.span(), 
data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_3x3_time/output_0.cairo b/tests/nodes/reverse_sequence_u32_3x3_time/output_0.cairo new file mode 100644 index 000000000..8da82b624 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_3x3_time/output_0.cairo @@ -0,0 +1,22 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(7); + data.append(8); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(1); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_4x4_batch.cairo b/tests/nodes/reverse_sequence_u32_4x4_batch.cairo new file mode 100644 index 000000000..ce124a89e --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_4x4_batch.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_u32_4x4_batch() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![1,2,3,4].span()), Option::Some(0), Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_u32_4x4_batch/input_0.cairo b/tests/nodes/reverse_sequence_u32_4x4_batch/input_0.cairo new file mode 100644 index 000000000..eec3be47b --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_4x4_batch/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; 
+use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + data.append(13); + data.append(14); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_4x4_batch/output_0.cairo b/tests/nodes/reverse_sequence_u32_4x4_batch/output_0.cairo new file mode 100644 index 000000000..da45dba14 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_4x4_batch/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + data.append(5); + data.append(4); + data.append(6); + data.append(7); + data.append(10); + data.append(9); + data.append(8); + data.append(11); + data.append(15); + data.append(14); + data.append(13); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_4x4_time.cairo b/tests/nodes/reverse_sequence_u32_4x4_time.cairo new file mode 100644 index 000000000..a0d9ca889 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_4x4_time.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, 
assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reverse_sequence_u32_4x4_time() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reverse_sequence(TensorTrait::::new(array![4].span(), array![4,3,2,1].span()), Option::Some(1), Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reverse_sequence_u32_4x4_time/input_0.cairo b/tests/nodes/reverse_sequence_u32_4x4_time/input_0.cairo new file mode 100644 index 000000000..785e8463d --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_4x4_time/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(4); + data.append(8); + data.append(12); + data.append(1); + data.append(5); + data.append(9); + data.append(13); + data.append(2); + data.append(6); + data.append(10); + data.append(14); + data.append(3); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reverse_sequence_u32_4x4_time/output_0.cairo b/tests/nodes/reverse_sequence_u32_4x4_time/output_0.cairo new file mode 100644 index 000000000..c08ed0421 --- /dev/null +++ b/tests/nodes/reverse_sequence_u32_4x4_time/output_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + 
data.append(3); + data.append(6); + data.append(9); + data.append(12); + data.append(2); + data.append(5); + data.append(8); + data.append(13); + data.append(1); + data.append(4); + data.append(10); + data.append(14); + data.append(0); + data.append(7); + data.append(11); + data.append(15); + TensorTrait::new(shape.span(), data.span()) +} From f3ff905d3dad14b8cd4f7014f98e438758ebedc4 Mon Sep 17 00:00:00 2001 From: tekkac Date: Mon, 29 Jan 2024 20:43:07 +0100 Subject: [PATCH 16/46] =?UTF-8?q?=E2=9C=8F=EF=B8=8F=20fix=20typos?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 2 +- .../tutorials/verifiable-linear-regression-model-in-orion.md | 4 ++-- .../tutorials/verifiable-principal-components-analysis.md | 2 +- docs/academy/tutorials/verifiable-support-vector-machine.md | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 1f48201d3..d9b231393 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,7 @@ Join the community and help build a safer and transparent AI in our [Discord](ht ## 🚀 Orion Usage - For an insightful overview of impressive proof of concepts, models, and tutorials created by our community, please visit [Orion Usage](https://github.com/gizatechxyz/orion/blob/main/orion-usage.md). -- Discover a currated list of tutorials and models developed using Orion in [Orion-Hub](https://github.com/gizatechxyz/Orion-Hub). +- Discover a curated list of tutorials and models developed using Orion in [Orion-Hub](https://github.com/gizatechxyz/Orion-Hub). 
## ✍️ Authors & contributors diff --git a/docs/academy/tutorials/verifiable-linear-regression-model-in-orion.md b/docs/academy/tutorials/verifiable-linear-regression-model-in-orion.md index d5bb802ea..ecf81e4c4 100644 --- a/docs/academy/tutorials/verifiable-linear-regression-model-in-orion.md +++ b/docs/academy/tutorials/verifiable-linear-regression-model-in-orion.md @@ -24,7 +24,7 @@ $$ #### Generating the dataset -In the following [notebook](https://github.com/gizatechxyz/orion\_tutorials/tree/main/verifiable\_linear\_regression\_model), we will create a synthetic dataset that will serve as the backbone throughout our tutorial. +In the following [notebook](https://github.com/gizatechxyz/orion\_tutorials/tree/main/basic/verifiable\_linear\_regression\_model), we will create a synthetic dataset that will serve as the backbone throughout our tutorial. ```python import numpy as np @@ -152,7 +152,7 @@ test = "scarb cairo-test -f linear_regression_test" ``` -#### Gerating the dataset in Cairo +#### Generating the dataset in Cairo Now let’s generate the files required to begin our transition to Cairo. In our Jupyter Notebook, we will execute the code required to turn our synthetic dataset to fixed point values and represent our X and y values as Fixedpoint Tensors in Orion. diff --git a/docs/academy/tutorials/verifiable-principal-components-analysis.md b/docs/academy/tutorials/verifiable-principal-components-analysis.md index 683b4ee3e..5a692686f 100644 --- a/docs/academy/tutorials/verifiable-principal-components-analysis.md +++ b/docs/academy/tutorials/verifiable-principal-components-analysis.md @@ -302,7 +302,7 @@ version = "0.1.0" orion = { git = "https://github.com/gizatechxyz/orion.git", rev = "v0.1.7" } ``` -#### Gerating the dataset in Cairo +#### Generating the dataset in Cairo Now let's generate the necessary files to begin our transition to Cairo. 
In our Jupyter Notebook, we will run the necessary code to convert our iris dataset obtained from sklearn.datasets into fixed point values and represent our X, and y values as fixed point tensors in Orion. For the purposes of the tutorial, we will work directly with the Xstd data obtained from python, so we will also convert these to fixed point values. diff --git a/docs/academy/tutorials/verifiable-support-vector-machine.md b/docs/academy/tutorials/verifiable-support-vector-machine.md index 6986539f1..faffedec0 100644 --- a/docs/academy/tutorials/verifiable-support-vector-machine.md +++ b/docs/academy/tutorials/verifiable-support-vector-machine.md @@ -3,7 +3,7 @@
{% hint style="info" %} -Repository and Notebooks can be found [here](https://github.com/gizatechxyz/orion\_tutorials/tree/main/verifiable\_support\_vector\_machine). +Repository and Notebooks can be found [here](https://github.com/gizatechxyz/orion_tutorials/tree/main/basic/verifiable_support_vector_machine). {% endhint %} The Support Vector Machines (SVM) model is a supervised learning technique used for classification and regression. It is employed to solve binary classification problems where it identifies the hyperplane that best divides a data set into classes. This hyperplane results from maximizing the margin between the two classes. By determining this optimal hyperplane, predictions can be made for new data points and understand how the input attributes influence classification. @@ -177,7 +177,7 @@ version = "0.1.0" orion = { git = "https://github.com/gizatechxyz/orion.git", rev = "v0.1.0" } ``` -### Gerating the dataset in Cairo +### Generating the dataset in Cairo Now let's generate the necessary files to begin our transition to Cairo. In our Jupyter Notebook, we'll run the necessary code to convert our dataset obtained with make\_blobs from Scikit-learn into fixed-point values and represent our X\_train, y\_train, X\_test, and y\_test values as fixed-point tensors in Orion. 
From cf8921fb6d273ac093da88f2f6b3fdcabf177356 Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Thu, 1 Feb 2024 17:22:09 +0800 Subject: [PATCH 17/46] =?UTF-8?q?feat:=20Implement=20SplitToSequence=20ope?= =?UTF-8?q?rator=EF=BC=9B=20fixed=20split=20docs,=20add=20split=20in=20ten?= =?UTF-8?q?sor=20and=20tensor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../operators/tensor/tensor.split.md | 2 +- .../tensor/tensor.split_to_sequence.md | 50 +++ nodegen/node/split_to_sequence.py | 348 ++++++++++++++++++ src/operators/tensor/core.cairo | 57 ++- .../tensor/implementations/tensor_bool.cairo | 8 +- .../implementations/tensor_complex64.cairo | 8 +- .../implementations/tensor_fp16x16.cairo | 6 + .../implementations/tensor_fp16x16wide.cairo | 6 + .../implementations/tensor_fp32x32.cairo | 6 + .../implementations/tensor_fp64x64.cairo | 6 + .../implementations/tensor_fp8x23.cairo | 9 + .../implementations/tensor_fp8x23wide.cairo | 6 + .../tensor/implementations/tensor_i32.cairo | 6 + .../tensor/implementations/tensor_i8.cairo | 6 + .../tensor/implementations/tensor_u32.cairo | 6 + src/operators/tensor/manipulation.cairo | 1 + src/operators/tensor/manipulation/split.cairo | 8 +- .../manipulation/split_to_sequence.cairo | 231 ++++++++++++ tests/nodes.cairo | 16 + .../split_to_sequence_1d_nokeepdims.cairo | 20 + .../input_0.cairo | 20 + .../output_0.cairo | 74 ++++ .../split_to_sequence_2d_nokeepdims.cairo | 20 + .../input_0.cairo | 29 ++ .../output_0.cairo | 90 +++++ tests/nodes/split_to_sequence_2d_scalar.cairo | 20 + .../split_to_sequence_2d_scalar/input_0.cairo | 29 ++ .../output_0.cairo | 90 +++++ ...t_to_sequence_fp16x16_1d_equal_parts.cairo | 20 + .../input_0.cairo | 18 + .../output_0.cairo | 37 ++ .../split_to_sequence_fp16x16_1d_uneven.cairo | 20 + .../input_0.cairo | 19 + .../output_0.cairo | 45 +++ ...o_sequence_fp16x16_1d_variable_parts.cairo | 20 + .../input_0.cairo | 18 + .../output_0.cairo | 30 ++ 
...t_to_sequence_fp16x16_2d_equal_parts.cairo | 20 + .../input_0.cairo | 25 ++ .../output_0.cairo | 38 ++ .../split_to_sequence_fp16x16_2d_uneven.cairo | 20 + .../input_0.cairo | 29 ++ .../output_0.cairo | 50 +++ ...o_sequence_fp16x16_2d_variable_parts.cairo | 20 + .../input_0.cairo | 25 ++ .../output_0.cairo | 38 ++ .../split_to_sequence_fp16x16_zero_size.cairo | 20 + .../input_0.cairo | 12 + .../output_0.cairo | 31 ++ ...split_to_sequence_u32_1d_equal_parts.cairo | 20 + .../input_0.cairo | 18 + .../output_0.cairo | 37 ++ .../split_to_sequence_u32_1d_uneven.cairo | 20 + .../input_0.cairo | 19 + .../output_0.cairo | 45 +++ ...it_to_sequence_u32_1d_variable_parts.cairo | 20 + .../input_0.cairo | 18 + .../output_0.cairo | 30 ++ ...split_to_sequence_u32_2d_equal_parts.cairo | 20 + .../input_0.cairo | 25 ++ .../output_0.cairo | 38 ++ .../split_to_sequence_u32_2d_uneven.cairo | 20 + .../input_0.cairo | 29 ++ .../output_0.cairo | 50 +++ ...it_to_sequence_u32_2d_variable_parts.cairo | 20 + .../input_0.cairo | 25 ++ .../output_0.cairo | 38 ++ .../split_to_sequence_u32_zero_size.cairo | 20 + .../input_0.cairo | 12 + .../output_0.cairo | 31 ++ 70 files changed, 2278 insertions(+), 10 deletions(-) create mode 100644 docs/framework/operators/tensor/tensor.split_to_sequence.md create mode 100644 nodegen/node/split_to_sequence.py create mode 100644 src/operators/tensor/manipulation/split_to_sequence.cairo create mode 100644 tests/nodes/split_to_sequence_1d_nokeepdims.cairo create mode 100644 tests/nodes/split_to_sequence_1d_nokeepdims/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_1d_nokeepdims/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_2d_nokeepdims.cairo create mode 100644 tests/nodes/split_to_sequence_2d_nokeepdims/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_2d_nokeepdims/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_2d_scalar.cairo create mode 100644 
tests/nodes/split_to_sequence_2d_scalar/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_2d_scalar/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_1d_equal_parts.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_1d_equal_parts/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_1d_equal_parts/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_1d_uneven.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_1d_uneven/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_1d_uneven/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_1d_variable_parts.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_1d_variable_parts/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_1d_variable_parts/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_2d_equal_parts.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_2d_equal_parts/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_2d_equal_parts/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_2d_uneven.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_2d_uneven/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_2d_uneven/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_2d_variable_parts.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_2d_variable_parts/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_2d_variable_parts/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_zero_size.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_zero_size/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_fp16x16_zero_size/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_1d_equal_parts.cairo create mode 100644 
tests/nodes/split_to_sequence_u32_1d_equal_parts/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_1d_equal_parts/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_1d_uneven.cairo create mode 100644 tests/nodes/split_to_sequence_u32_1d_uneven/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_1d_uneven/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_1d_variable_parts.cairo create mode 100644 tests/nodes/split_to_sequence_u32_1d_variable_parts/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_1d_variable_parts/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_2d_equal_parts.cairo create mode 100644 tests/nodes/split_to_sequence_u32_2d_equal_parts/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_2d_equal_parts/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_2d_uneven.cairo create mode 100644 tests/nodes/split_to_sequence_u32_2d_uneven/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_2d_uneven/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_2d_variable_parts.cairo create mode 100644 tests/nodes/split_to_sequence_u32_2d_variable_parts/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_2d_variable_parts/output_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_zero_size.cairo create mode 100644 tests/nodes/split_to_sequence_u32_zero_size/input_0.cairo create mode 100644 tests/nodes/split_to_sequence_u32_zero_size/output_0.cairo diff --git a/docs/framework/operators/tensor/tensor.split.md b/docs/framework/operators/tensor/tensor.split.md index 26b4a546f..0bb7c87af 100644 --- a/docs/framework/operators/tensor/tensor.split.md +++ b/docs/framework/operators/tensor/tensor.split.md @@ -40,7 +40,7 @@ fn split_tensor_example() -> Array> { // split = Option::Some(array![1, 1].span()); let split_num: Option> = Option::None(()); // We can call `split` function as follows. 
- return tensor.split(0, num_outputs, split_num); + return tensor.split(1, num_outputs, split_num); } >>> [[0,1],[4,5]] [[2,3],[6,7]] diff --git a/docs/framework/operators/tensor/tensor.split_to_sequence.md b/docs/framework/operators/tensor/tensor.split_to_sequence.md new file mode 100644 index 000000000..110039f5b --- /dev/null +++ b/docs/framework/operators/tensor/tensor.split_to_sequence.md @@ -0,0 +1,50 @@ +# tensor.split_to_sequence + +```rust + fn split_to_sequence( + self: @Tensor, axis: usize, keepdims: usize, split: Option> + ) -> Array>; +``` + +Split a tensor into a sequence of tensors, along the specified ‘axis’ + + +## Args +* `self`(`@Tensor`) - The input tensor to split. +* `axis`(`usize`) - The axis along which to split on. +* `keepdims `(`usize`) - Keep the split dimension or not. If input ‘split’ is specified, this attribute is ignored. +* `split `(`Option>`) - Length of each output. It can be either a scalar(tensor of empty shape), or a 1-D tensor. All values must be >= 0. + +## Panics + +* Panics if the 'axis' accepted range is not [-rank, rank-1] where r = rank(input). +* Panics if the 'split' is not either a scalar (tensor of empty shape), or a 1-D tensor. + +## Returns + +One or more outputs forming a sequence of tensors after splitting. + +## Examples + +```rust +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; +use core::option::OptionTrait; +fn split_to_sequence_example() -> Array> { + let tensor: Tensor = TensorTrait::::new( + shape: array![2,4].span(), + data: array![ + 0, 1, 2, 3, 4, 5, 6, 7 + ].span(), + ); + let num_outputs = Option::Some(2); + // let split = Option::Some(TensorTrait::new(array![1].span(), array![2].span())); + let split: Option> = Option::Some(TensorTrait::new(array![2].span(), array![2, 2].span())); + // We can call `split_to_sequence` function as follows. 
+ return tensor.split_to_sequence(1, 1, split); +} +>>> [ + [[0,1],[4,5]], + [[2,3],[6,7]] + ] +``` diff --git a/nodegen/node/split_to_sequence.py b/nodegen/node/split_to_sequence.py new file mode 100644 index 000000000..73abc0222 --- /dev/null +++ b/nodegen/node/split_to_sequence.py @@ -0,0 +1,348 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl + + +class Split_to_sequence(RunAll): + @staticmethod + def split_to_sequence_u32(): + def split_to_sequence_1D(): + x = np.random.randint(0, 255, 6).astype(np.uint32) + y = [ + np.array(x[0:2]).astype(np.uint32), + np.array(x[2:4]).astype(np.uint32), + np.array(x[4:6]).astype(np.uint32), + ] + + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.U32, y[0].shape, y[0].flatten()), + Tensor(Dtype.U32, y[1].shape, y[1].flatten()), + Tensor(Dtype.U32, y[2].shape, y[2].flatten()), + ] + + name = "split_to_sequence_u32_1d_equal_parts" + make_test( + [_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),)))", name) + y = [ + np.array(x[0:2]).astype(np.uint32), + np.array(x[2:6]).astype(np.uint32), + ] + _y = [ + Tensor(Dtype.U32, y[0].shape, y[0].flatten()), + Tensor(Dtype.U32, y[1].shape, y[1].flatten()), + ] + name = "split_to_sequence_u32_1d_variable_parts" + make_test( + [_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),)))", name) + def split_to_sequence_2D(): + x = np.random.randint(0, 255, (2, 6)).astype(np.uint32) + y = [ + np.array(x[0:2, 0:3]).astype(np.uint32), + np.array(x[0:2, 3:6]).astype(np.uint32), + ] + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.U32, y[0].shape, y[0].flatten()), + Tensor(Dtype.U32, y[1].shape, y[1].flatten()), + ] + name = "split_to_sequence_u32_2d_equal_parts" + make_test( + [_x], _y, "input_0.split_to_sequence(1, 1, 
Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![2].span(),)))", name) + + y = [ + np.array(x[0:2, 0:2]).astype(np.uint32), + np.array(x[0:2, 2:6]).astype(np.uint32) + ] + _y = [ + Tensor(Dtype.U32, y[0].shape, y[0].flatten()), + Tensor(Dtype.U32, y[1].shape, y[1].flatten()), + ] + name = "split_to_sequence_u32_2d_variable_parts" + make_test( + [_x], _y, "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),)))", name) + + def split_to_sequence_zero_size(): + # 1-dimensional tensor with dimension_size=0 + x = np.array([]).astype(np.uint32) + y = [ + np.array([]).astype(np.uint32), + np.array([]).astype(np.uint32), + np.array([]).astype(np.uint32), + ] + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.U32, y[0].shape, y[0].flatten()), + Tensor(Dtype.U32, y[1].shape, y[1].flatten()), + Tensor(Dtype.U32, y[2].shape, y[2].flatten()), + ] + # Split emtpy tensor to tensors of size zero + name = "split_to_sequence_u32_zero_size" + make_test( + [_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![3].span(), data: array![0, 0, 0].span(),)))", name) + + + def split_to_sequence_1d_uneven(): + x = np.random.randint(0, 255, 7).astype(np.uint32) + y = [ + np.array(x[0:2]).astype(np.uint32), + np.array(x[2:4]).astype(np.uint32), + np.array(x[4:6]).astype(np.uint32), + np.array(x[6:7]).astype(np.uint32), + ] + + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.U32, y[0].shape, y[0].flatten()), + Tensor(Dtype.U32, y[1].shape, y[1].flatten()), + Tensor(Dtype.U32, y[2].shape, y[2].flatten()), + Tensor(Dtype.U32, y[3].shape, y[3].flatten()), + ] + + name = "split_to_sequence_u32_1d_uneven" + make_test( + [_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![4].span(),)))", name) + + + def split_to_sequence_2d_uneven(): + x = np.random.randint(0, 255, (2, 
8)).astype(np.uint32) + y = [ + np.array(x[0:2, 0:3]).astype(np.uint32), + np.array(x[0:2, 3:6]).astype(np.uint32), + np.array(x[0:2, 6:8]).astype(np.uint32) + ] + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.U32, y[0].shape, y[0].flatten()), + Tensor(Dtype.U32, y[1].shape, y[1].flatten()), + Tensor(Dtype.U32, y[2].shape, y[2].flatten()), + ] + + name = "split_to_sequence_u32_2d_uneven" + make_test( + [_x], _y, "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),)))", name) + + def split_to_sequence_2d_scalar(): + x = np.random.randint(0, 255, (2, 8)).astype(np.uint32) + y = [ + np.array(x[0:2, 0:1]).astype(np.uint32), + np.array(x[0:2, 1:2]).astype(np.uint32), + np.array(x[0:2, 2:3]).astype(np.uint32), + np.array(x[0:2, 3:4]).astype(np.uint32), + np.array(x[0:2, 4:5]).astype(np.uint32), + np.array(x[0:2, 5:6]).astype(np.uint32), + np.array(x[0:2, 6:7]).astype(np.uint32), + np.array(x[0:2, 7:8]).astype(np.uint32) + ] + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.U32, y[0].shape, y[0].flatten()), + Tensor(Dtype.U32, y[1].shape, y[1].flatten()), + Tensor(Dtype.U32, y[2].shape, y[2].flatten()), + Tensor(Dtype.U32, y[3].shape, y[3].flatten()), + Tensor(Dtype.U32, y[4].shape, y[4].flatten()), + Tensor(Dtype.U32, y[5].shape, y[5].flatten()), + Tensor(Dtype.U32, y[6].shape, y[6].flatten()), + Tensor(Dtype.U32, y[7].shape, y[7].flatten()), + ] + + name = "split_to_sequence_2d_scalar" + make_test( + [_x], _y, "input_0.split_to_sequence(1, 1, Option::None(()))", name) + + def split_to_sequence_2d_nokeepdims(): + x = np.random.randint(0, 255, (2, 8)).astype(np.uint32) + y = [ + np.array(x[0:2, 0:1]).astype(np.uint32), + np.array(x[0:2, 1:2]).astype(np.uint32), + np.array(x[0:2, 2:3]).astype(np.uint32), + np.array(x[0:2, 3:4]).astype(np.uint32), + np.array(x[0:2, 4:5]).astype(np.uint32), + np.array(x[0:2, 5:6]).astype(np.uint32), + np.array(x[0:2, 
6:7]).astype(np.uint32), + np.array(x[0:2, 7:8]).astype(np.uint32) + ] + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.U32, y[0].shape, y[0].flatten()), + Tensor(Dtype.U32, y[1].shape, y[1].flatten()), + Tensor(Dtype.U32, y[2].shape, y[2].flatten()), + Tensor(Dtype.U32, y[3].shape, y[3].flatten()), + Tensor(Dtype.U32, y[4].shape, y[4].flatten()), + Tensor(Dtype.U32, y[5].shape, y[5].flatten()), + Tensor(Dtype.U32, y[6].shape, y[6].flatten()), + Tensor(Dtype.U32, y[7].shape, y[7].flatten()), + ] + + name = "split_to_sequence_2d_nokeepdims" + make_test( + [_x], _y, "input_0.split_to_sequence(1, 0, Option::None(()))", name) + + def split_to_sequence_1d_nokeepdims(): + x = np.random.randint(0, 255, 8).astype(np.uint32) + y = [ + np.array(x[0:1]).astype(np.uint32), + np.array(x[1:2]).astype(np.uint32), + np.array(x[2:3]).astype(np.uint32), + np.array(x[3:4]).astype(np.uint32), + np.array(x[4:5]).astype(np.uint32), + np.array(x[5:6]).astype(np.uint32), + np.array(x[6:7]).astype(np.uint32), + np.array(x[7:8]).astype(np.uint32) + ] + _x = Tensor(Dtype.U32, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.U32, y[0].shape, y[0].flatten()), + Tensor(Dtype.U32, y[1].shape, y[1].flatten()), + Tensor(Dtype.U32, y[2].shape, y[2].flatten()), + Tensor(Dtype.U32, y[3].shape, y[3].flatten()), + Tensor(Dtype.U32, y[4].shape, y[4].flatten()), + Tensor(Dtype.U32, y[5].shape, y[5].flatten()), + Tensor(Dtype.U32, y[6].shape, y[6].flatten()), + Tensor(Dtype.U32, y[7].shape, y[7].flatten()), + ] + + name = "split_to_sequence_1d_nokeepdims" + make_test( + [_x], _y, "input_0.split_to_sequence(0, 0, Option::None(()))", name) + + + split_to_sequence_1D() + split_to_sequence_2D() + split_to_sequence_zero_size() + split_to_sequence_1d_uneven() + split_to_sequence_2d_uneven() + split_to_sequence_2d_scalar() + split_to_sequence_1d_nokeepdims() + split_to_sequence_2d_nokeepdims() + + @staticmethod + def split_to_sequence_fp16x16(): + def split_to_sequence_1D(): + x = 
to_fp(np.random.randint(-127, 127, 6 + ).astype(np.int64), FixedImpl.FP16x16) + y = [ + np.array(x[0:2]).astype(np.int64), + np.array(x[2:4]).astype(np.int64), + np.array(x[4:6]).astype(np.int64), + ] + + _x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()), + Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()), + Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()), + ] + + name = "split_to_sequence_fp16x16_1d_equal_parts" + make_test( + [_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),)))", name) + y = [ + np.array(x[0:2]).astype(np.int64), + np.array(x[2:6]).astype(np.int64), + ] + _y = [ + Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()), + Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()), + ] + name = "split_to_sequence_fp16x16_1d_variable_parts" + make_test( + [_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),)))", name) + def split_to_sequence_2D(): + x = to_fp(np.random.randint(-127, 127, (2, 6) + ).astype(np.int64), FixedImpl.FP16x16) + y = [ + np.array(x[0:2, 0:3]).astype(np.int64), + np.array(x[0:2, 3:6]).astype(np.int64), + ] + _x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()), + Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()), + ] + name = "split_to_sequence_fp16x16_2d_equal_parts" + make_test( + [_x], _y, "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![2].span(),)))", name) + + y = [ + np.array(x[0:2, 0:2]).astype(np.int64), + np.array(x[0:2, 2:6]).astype(np.int64) + ] + _y = [ + Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()), + Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()), + ] + name = "split_to_sequence_fp16x16_2d_variable_parts" + make_test( + [_x], _y, "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: 
array![2].span(), data: array![2, 4].span(),)))", name) + + def split_to_sequence_zero_size(): + # 1-dimensional tensor with dimension_size=0 + x = to_fp(np.array([]).astype(np.int64 + ).astype(np.int64), FixedImpl.FP16x16) + y = [ + np.array([]).astype(np.int64), + np.array([]).astype(np.int64), + np.array([]).astype(np.int64), + ] + _x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()), + Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()), + Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()), + ] + # Split emtpy tensor to tensors of size zero + name = "split_to_sequence_fp16x16_zero_size" + make_test( + [_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![3].span(), data: array![0, 0, 0].span(),)))", name) + + + def split_to_sequence_1d_uneven(): + x = to_fp(np.random.randint(-127, 127, 7 + ).astype(np.int64), FixedImpl.FP16x16) + y = [ + np.array(x[0:2]).astype(np.int64), + np.array(x[2:4]).astype(np.int64), + np.array(x[4:6]).astype(np.int64), + np.array(x[6:7]).astype(np.int64), + ] + + _x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()), + Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()), + Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()), + Tensor(Dtype.FP16x16, y[3].shape, y[3].flatten()), + ] + + name = "split_to_sequence_fp16x16_1d_uneven" + make_test( + [_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![4].span())))", name) + + + def split_to_sequence_2d_uneven(): + x = to_fp(np.random.randint(-127, 127, (2, 8) + ).astype(np.int64), FixedImpl.FP16x16) + y = [ + np.array(x[0:2, 0:3]).astype(np.int64), + np.array(x[0:2, 3:6]).astype(np.int64), + np.array(x[0:2, 6:8]).astype(np.int64) + ] + _x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) + _y = [ + Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()), + Tensor(Dtype.FP16x16, y[1].shape, 
y[1].flatten()), + Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()), + ] + + name = "split_to_sequence_fp16x16_2d_uneven" + make_test( + [_x], _y, "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),)))", name) + + + split_to_sequence_1D() + split_to_sequence_2D() + split_to_sequence_zero_size() + split_to_sequence_1d_uneven() + split_to_sequence_2d_uneven() + \ No newline at end of file diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 70344eb97..0f631e39c 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -5153,7 +5153,7 @@ trait TensorTrait { /// // split = Option::Some(array![1, 1].span()); /// let split_num: Option> = Option::None(()); /// // We can call `split` function as follows. - /// return tensor.split(0, num_outputs, split_num); + /// return tensor.split(1, num_outputs, split_num); /// } /// >>> [[0,1],[4,5]] /// [[2,3],[6,7]] @@ -5162,6 +5162,61 @@ trait TensorTrait { fn split( self: @Tensor, axis: usize, num_outputs: Option, spl: Option> ) -> Array>; + + /// # tensor.split_to_sequence + /// + /// ```rust + /// fn split_to_sequence( + /// self: @Tensor, axis: usize, keepdims: usize, split: Option> + /// ) -> Array>; + /// ``` + /// + /// Split a tensor into a sequence of tensors, along the specified ‘axis’ + /// + /// + /// ## Args + /// * `self`(`@Tensor`) - The input tensor to split. + /// * `axis`(`usize`) - The axis along which to split on. + /// * `keepdims `(`usize`) - Keep the split dimension or not. If input ‘split’ is specified, this attribute is ignored. + /// * `split `(`Option>`) - Length of each output. It can be either a scalar(tensor of empty shape), or a 1-D tensor. All values must be >= 0. + /// + /// ## Panics + /// + /// * Panics if the 'axis' accepted range is not [-rank, rank-1] where r = rank(input). + /// * Panics if the 'split' is not either a scalar (tensor of empty shape), or a 1-D tensor. 
+ /// + /// ## Returns + /// + /// One or more outputs forming a sequence of tensors after splitting. + /// + /// ## Examples + /// + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; + /// use core::option::OptionTrait; + /// fn split_to_sequence_example() -> Array> { + /// let tensor: Tensor = TensorTrait::::new( + /// shape: array![2,4].span(), + /// data: array![ + /// 0, 1, 2, 3, 4, 5, 6, 7 + /// ].span(), + /// ); + /// let num_outputs = Option::Some(2); + /// // let split = Option::Some(TensorTrait::new(array![1].span(), array![2].span())); + /// let split: Option> = Option::Some(TensorTrait::new(array![2].span(), array![2, 2].span())); + /// // We can call `split_to_sequence` function as follows. + /// return tensor.split_to_sequence(1, 1, split); + /// } + /// >>> [ + /// [[0,1],[4,5]], + /// [[2,3],[6,7]] + /// ] + /// ``` + /// + fn split_to_sequence( + self: @Tensor, axis: usize, keepdims: usize, split: Option> + ) -> Array>; } /// Cf: TensorTrait::new docstring diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 3da518ec8..18187cd36 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -482,7 +482,13 @@ impl BoolTensor of TensorTrait { fn split( self: @Tensor, axis: usize, num_outputs: Option, spl: Option> ) -> Array> { - panic(array!['not supported!']) + manipulation::split::split(self, axis, num_outputs, spl) + } + + fn split_to_sequence( + self: @Tensor, axis: usize, keepdims: usize, split: Option> + ) -> Array> { + manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) } } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 74acba5c6..e71060b4a 100644 --- 
a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -495,7 +495,7 @@ impl Complex64Tensor of TensorTrait { num_outputs: Option, spl: Option> ) -> Array> { - panic(array!['not supported!']) + manipulation::split::split(self, axis, num_outputs, spl) } fn resize( @@ -515,6 +515,12 @@ impl Complex64Tensor of TensorTrait { ) -> Tensor { panic(array!['not supported!']) } + + fn split_to_sequence( + self: @Tensor, axis: usize, keepdims: usize, split: Option> + ) -> Array> { + manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index cdc50bc4f..36fdb2e5d 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -560,6 +560,12 @@ impl FP16x16Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn split_to_sequence( + self: @Tensor, axis: usize, keepdims: usize, split: Option> + ) -> Array> { + manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index b0dc2d858..c7feb675b 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -512,6 +512,12 @@ impl FP16x16WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn split_to_sequence( + self: @Tensor, axis: usize, keepdims: usize, split: Option> + ) -> Array> { + manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 4f862fd0e..e64372fd0 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -561,6 +561,12 @@ impl FP32x32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn split_to_sequence( + self: @Tensor, axis: usize, keepdims: usize, split: Option> + ) -> Array> { + manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 1fe5591fc..84cdb64d7 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -561,6 +561,12 @@ impl FP64x64Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn split_to_sequence( + self: @Tensor, axis: usize, keepdims: usize, split: Option> + ) -> Array> { + manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 77d183c21..8fde81335 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -559,6 +559,15 @@ impl FP8x23Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn split_to_sequence( + self: @Tensor, + axis: usize, + keepdims: usize, + split: Option> + ) -> Array> { + manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index ff6069087..c2a9c9ece 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -498,6 +498,12 @@ impl FP8x23WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn split_to_sequence( + self: @Tensor, axis: usize, keepdims: usize, split: Option> + ) -> Array> { + manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 50383d2df..51e626b30 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -541,6 +541,12 @@ impl I32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn split_to_sequence( + self: @Tensor, axis: usize, keepdims: usize, split: Option> + ) -> Array> { + manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 7e81d90eb..406a70e88 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -539,6 +539,12 @@ impl I8Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn split_to_sequence( + self: @Tensor, axis: usize, keepdims: usize, split: Option> + ) -> Array> { + manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 5a926a538..0ba971c75 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -482,6 +482,12 @@ impl U32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn split_to_sequence( + self: @Tensor, axis: usize, keepdims: usize, split: Option> + ) -> Array> { + manipulation::split_to_sequence::split_to_sequence(self, axis, keepdims, split) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/manipulation.cairo b/src/operators/tensor/manipulation.cairo index 584eae027..ea387b7f3 100644 --- a/src/operators/tensor/manipulation.cairo +++ b/src/operators/tensor/manipulation.cairo @@ -1,2 +1,3 @@ mod unique; mod split; +mod split_to_sequence; diff --git a/src/operators/tensor/manipulation/split.cairo b/src/operators/tensor/manipulation/split.cairo index bf0274aec..6ebbe59ef 100644 --- a/src/operators/tensor/manipulation/split.cairo +++ b/src/operators/tensor/manipulation/split.cairo @@ -9,10 +9,6 @@ fn split< +Copy, +Drop, +TensorTrait, - +PartialOrd, - +PartialEq, - +PartialEq>, - +PartialOrd> >( self: @Tensor, axis: usize, num_outputs: Option, split: Option> ) -> Array> { @@ -43,7 +39,7 @@ fn split< /// Subfunction split for tensors (wth num_outputs). /// Cf: TensorTrait::split docstring -fn split_num_outputs, +Drop, +TensorTrait, +PartialOrd, +PartialEq,>( +fn split_num_outputs, +Drop, +TensorTrait,>( t: @Tensor, mut axis: usize, num_outputs: usize ) -> Array> { let mut splited_t: Array> = array![]; @@ -130,7 +126,7 @@ fn split_num_outputs, +Drop, +TensorTrait, +PartialOrd, +Pa /// Subfunction split for tensors (wth split). 
/// Cf: TensorTrait::split docstring -fn split_has_split, +Drop, +TensorTrait, +PartialOrd, +PartialEq,>( +fn split_has_split, +Drop, +TensorTrait,>( t: @Tensor, axis: usize, split: Tensor ) -> Array> { let mut splited_t: Array> = array![]; diff --git a/src/operators/tensor/manipulation/split_to_sequence.cairo b/src/operators/tensor/manipulation/split_to_sequence.cairo new file mode 100644 index 000000000..53f0f07bc --- /dev/null +++ b/src/operators/tensor/manipulation/split_to_sequence.cairo @@ -0,0 +1,231 @@ +use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use core::option::OptionTrait; +use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl}; + +/// Cf: NNTrait::split docstring +fn split_to_sequence< + T, + +Copy, + +Drop, + +TensorTrait, +>( + self: @Tensor, axis: usize, keepdims: usize, split: Option> +) -> Array> { + let has_split = match split { + Option::Some(value) => { true }, + Option::None => false, + }; + let mut has_num_outputs = false; + let mut split_unwrap: Tensor = TensorTrait::new(array![1].span(), array![1].span()); + + if (!has_split){ + let split_length = *(*self.shape).at(axis); + let mut split_data: Array = array![]; + let mut i = 0; + loop{ + if (i >= split_length) { + break; + } + split_data.append(1); + i += 1; + }; + split_unwrap = TensorTrait::new(array![split_length].span(), split_data.span()); + }else if (split.unwrap().data.len() == 1 && *(split.unwrap().shape.at(0)) == 1) { + // A scalar + has_num_outputs = true; + split_unwrap = split.unwrap(); + }else{ + split_unwrap = split.unwrap(); + } + + + let mut splited_t: Array> = array![]; + + let rank = (*self).shape.len(); + // assert(axis < rank && axis > -rank, 'axis out of dimensions'); + assert(axis < rank, 'axis out of dimensions'); + + if (has_num_outputs) { + splited_t = split_num_outputs(self, axis, *(split_unwrap.data).at(0)); + } else { + splited_t = split_has_split(self, axis, split_unwrap); + } + + 
if (keepdims==0 && has_split==false) { + let mut splited_t_temp: Array> = array![]; + let mut i = 0; + loop{ + if (i >= splited_t.len()) { + break; + } + let mut shape: Array = array![]; + let mut j = 0; + let shape_in_splited: Span = *splited_t.at(i).shape; + loop{ + if ( j >= shape_in_splited.len()) { + break; + } + if (j!=axis) { + shape.append(*shape_in_splited.at(j)) + } + j += 1; + }; + splited_t_temp.append(splited_t[i].reshape(shape.span())); + i += 1; + }; + return splited_t_temp; + } + splited_t +} + + +/// Subfunction split for tensors (wth num_outputs). +/// Cf: TensorTrait::split docstring +fn split_num_outputs< + T, + +Copy, + +Drop, + +TensorTrait, +>( + t: @Tensor, mut axis: usize, num_outputs: usize +) -> Array> { + let mut splited_t: Array> = array![]; + let mut div: usize = 0; + // consturct split array + let mut split: Array = array![]; + // if axis==0 { + // axis = 1; + // } + if (*(*t).shape.at(axis) % num_outputs == 0) { + div = *(*t).shape.at(axis) / num_outputs; + let mut i = 0; + loop { + if (i >= num_outputs) { + break; + } + split.append(div); + i += 1; + }; + } else { + div = *(*t).shape.at(axis) / num_outputs + 1; + let mut i = 0; + loop { + if (i >= num_outputs) { + break; + } + split.append(div); + i += 1; + }; + match split.pop_front() { + Option::Some(split_last_one) => { + split.append(split_last_one + *(*t).shape.at(axis) - div * (num_outputs - 1)); + }, + Option::None(_) => { assert(false, 'split is none array'); } + } + } + + let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); + let mut pos: usize = 0; + let mut i = 0; + loop { + if (i >= (*t).shape.len()) { + break; + } + let s: usize = *(*t).shape.at(i); + sli.set(i, 0, 0); + sli.set(i, 1, s); + i += 1; + }; + let mut i: usize = 0; + loop { + if (i >= split.len()) { + break; + } + let spl = *split.at(i); + sli.set(axis, 0, pos); + pos += spl; + sli.set(axis, 1, pos); + + let end_ele_0 = match sli.get(axis, 0) { + Option::Some(res) => { res }, + Option::None(_) 
=> { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let end_ele_1 = match sli.get(axis, 1) { + Option::Some(res) => { res }, + Option::None(_) => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); + let ends: Span = array![sli.get(0, 1).unwrap(), end_ele_1].span(); + let axes: Option> = Option::None(()); + let steps: Option> = Option::None(()); + let sub_t: Tensor = t.slice(starts, ends, axes, steps); + splited_t.append(sub_t); + i += 1; + }; + splited_t +} + +/// Subfunction split for tensors (wth split). +/// Cf: TensorTrait::split docstring +fn split_has_split< + T, + +Copy, + +Drop, + +TensorTrait, +>( + t: @Tensor, axis: usize, split: Tensor +) -> Array> { + let mut splited_t: Array> = array![]; + let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); + let mut pos: usize = 0; + let mut i = 0; + loop { + if (i >= (*t).shape.len()) { + break; + } + let s: usize = *(*t).shape.at(i); + sli.set(i, 0, 0); + sli.set(i, 1, s); + i += 1; + }; + let mut i: usize = 0; + loop { + if (i >= split.data.len()) { + break; + } + let spl: usize = split.at(indices: array![i].span()); + sli.set(axis, 0, pos); + pos += spl; + sli.set(axis, 1, pos); + + let end_ele_0 = match sli.get(axis, 0) { + Option::Some(res) => { res }, + Option::None(_) => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let end_ele_1 = match sli.get(axis, 1) { + Option::Some(res) => { res }, + Option::None(_) => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); + let ends: Span = array![sli.get(0, 1).unwrap(), end_ele_1].span(); + let axes: Option> = Option::None(()); + let steps: Option> = Option::None(()); + let sub_t: Tensor = t.slice(starts, ends, axes, steps); + splited_t.append(sub_t); + i += 1; + }; + splited_t +} diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 6c70b42cb..5ed0c17b7 100644 --- 
a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -936,3 +936,19 @@ mod split_fp16x16_2d_variable_parts; mod split_fp16x16_zero_size; mod split_fp16x16_1d_uneven; mod split_fp16x16_2d_uneven; +mod split_to_sequence_fp16x16_1d_equal_parts; +mod split_to_sequence_fp16x16_1d_variable_parts; +mod split_to_sequence_fp16x16_2d_equal_parts; +mod split_to_sequence_fp16x16_2d_variable_parts; +mod split_to_sequence_fp16x16_zero_size; +mod split_to_sequence_fp16x16_1d_uneven; +mod split_to_sequence_fp16x16_2d_uneven; +mod split_to_sequence_u32_1d_equal_parts; +mod split_to_sequence_u32_1d_variable_parts; +mod split_to_sequence_u32_2d_equal_parts; +mod split_to_sequence_u32_2d_variable_parts; +mod split_to_sequence_u32_zero_size; +mod split_to_sequence_u32_1d_uneven; +mod split_to_sequence_u32_2d_uneven; +mod split_to_sequence_2d_scalar; +mod split_to_sequence_2d_nokeepdims; diff --git a/tests/nodes/split_to_sequence_1d_nokeepdims.cairo b/tests/nodes/split_to_sequence_1d_nokeepdims.cairo new file mode 100644 index 000000000..2892e3a11 --- /dev/null +++ b/tests/nodes/split_to_sequence_1d_nokeepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_1d_nokeepdims() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(0, 0, Option::None(())); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_1d_nokeepdims/input_0.cairo b/tests/nodes/split_to_sequence_1d_nokeepdims/input_0.cairo new file mode 100644 index 000000000..ad0c50e32 --- /dev/null +++ b/tests/nodes/split_to_sequence_1d_nokeepdims/input_0.cairo @@ -0,0 +1,20 @@ +use core::array::{ArrayTrait, SpanTrait}; +use 
orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(67); + data.append(177); + data.append(5); + data.append(93); + data.append(183); + data.append(173); + data.append(207); + data.append(194); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_1d_nokeepdims/output_0.cairo b/tests/nodes/split_to_sequence_1d_nokeepdims/output_0.cairo new file mode 100644 index 000000000..b367d6e9b --- /dev/null +++ b/tests/nodes/split_to_sequence_1d_nokeepdims/output_0.cairo @@ -0,0 +1,74 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(67); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(177); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(5); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(93); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(183); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = 
ArrayTrait::new(); + data.append(173); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(207); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(194); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_2d_nokeepdims.cairo b/tests/nodes/split_to_sequence_2d_nokeepdims.cairo new file mode 100644 index 000000000..54085b7cf --- /dev/null +++ b/tests/nodes/split_to_sequence_2d_nokeepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_2d_nokeepdims() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(1, 0, Option::None(())); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_2d_nokeepdims/input_0.cairo b/tests/nodes/split_to_sequence_2d_nokeepdims/input_0.cairo new file mode 100644 index 000000000..0870c7675 --- /dev/null +++ b/tests/nodes/split_to_sequence_2d_nokeepdims/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(32); + data.append(67); + data.append(110); + data.append(16); + data.append(154); + data.append(139); + 
data.append(43); + data.append(0); + data.append(104); + data.append(246); + data.append(70); + data.append(120); + data.append(221); + data.append(191); + data.append(140); + data.append(118); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_2d_nokeepdims/output_0.cairo b/tests/nodes/split_to_sequence_2d_nokeepdims/output_0.cairo new file mode 100644 index 000000000..b0352312d --- /dev/null +++ b/tests/nodes/split_to_sequence_2d_nokeepdims/output_0.cairo @@ -0,0 +1,90 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(32); + data.append(104); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(67); + data.append(246); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(110); + data.append(70); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(16); + data.append(120); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(154); + data.append(221); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = 
ArrayTrait::new(); + data.append(139); + data.append(191); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(43); + data.append(140); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(118); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_2d_scalar.cairo b/tests/nodes/split_to_sequence_2d_scalar.cairo new file mode 100644 index 000000000..455d35126 --- /dev/null +++ b/tests/nodes/split_to_sequence_2d_scalar.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_2d_scalar() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(1, 1, Option::None(())); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_2d_scalar/input_0.cairo b/tests/nodes/split_to_sequence_2d_scalar/input_0.cairo new file mode 100644 index 000000000..545ca2f9a --- /dev/null +++ b/tests/nodes/split_to_sequence_2d_scalar/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(230); + data.append(131); + 
data.append(175); + data.append(106); + data.append(106); + data.append(44); + data.append(254); + data.append(157); + data.append(131); + data.append(251); + data.append(38); + data.append(14); + data.append(0); + data.append(116); + data.append(225); + data.append(107); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_2d_scalar/output_0.cairo b/tests/nodes/split_to_sequence_2d_scalar/output_0.cairo new file mode 100644 index 000000000..3ce7c1b64 --- /dev/null +++ b/tests/nodes/split_to_sequence_2d_scalar/output_0.cairo @@ -0,0 +1,90 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(230); + data.append(131); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(131); + data.append(251); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(175); + data.append(38); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(106); + data.append(14); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(106); + data.append(0); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + 
shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(44); + data.append(116); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(254); + data.append(225); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(157); + data.append(107); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts.cairo new file mode 100644 index 000000000..a1ac7a9ec --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_fp16x16_1d_equal_parts() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts/input_0.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts/input_0.cairo new file mode 100644 index 000000000..3db759292 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts/input_0.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2752512, sign: true }); + data.append(FP16x16 { mag: 6750208, sign: false }); + data.append(FP16x16 { mag: 4849664, sign: true }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 7733248, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts/output_0.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts/output_0.cairo new file mode 100644 index 000000000..6e5bdc7c3 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_1d_equal_parts/output_0.cairo @@ -0,0 +1,37 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2752512, sign: true }); + data.append(FP16x16 { mag: 6750208, sign: false }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 4849664, sign: true }); + data.append(FP16x16 { mag: 1966080, sign: false }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 7733248, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + 
sequence +} diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_uneven.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_uneven.cairo new file mode 100644 index 000000000..42f0dc900 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_1d_uneven.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_fp16x16_1d_uneven() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![4].span()))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_uneven/input_0.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_uneven/input_0.cairo new file mode 100644 index 000000000..b0fd17b2a --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_1d_uneven/input_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(7); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 6815744, sign: true }); + data.append(FP16x16 { mag: 458752, sign: true }); + data.append(FP16x16 { mag: 5439488, sign: true }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 5373952, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git 
a/tests/nodes/split_to_sequence_fp16x16_1d_uneven/output_0.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_uneven/output_0.cairo new file mode 100644 index 000000000..78f9008e8 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_1d_uneven/output_0.cairo @@ -0,0 +1,45 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 6815744, sign: true }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 458752, sign: true }); + data.append(FP16x16 { mag: 5439488, sign: true }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 5373952, sign: false }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts.cairo new file mode 100644 index 000000000..1c3faf614 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_fp16x16_1d_variable_parts() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts/input_0.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts/input_0.cairo new file mode 100644 index 000000000..3db759292 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts/input_0.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2752512, sign: true }); + data.append(FP16x16 { mag: 6750208, sign: false }); + data.append(FP16x16 { mag: 4849664, sign: true }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 7733248, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts/output_0.cairo b/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts/output_0.cairo new file mode 100644 index 000000000..9f31677f4 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_1d_variable_parts/output_0.cairo @@ -0,0 +1,30 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, 
Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2752512, sign: true }); + data.append(FP16x16 { mag: 6750208, sign: false }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 4849664, sign: true }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 7733248, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts.cairo new file mode 100644 index 000000000..96e743399 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_fp16x16_2d_equal_parts() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![2].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts/input_0.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts/input_0.cairo new file mode 100644 index 
000000000..09790603c --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts/input_0.cairo @@ -0,0 +1,25 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2752512, sign: true }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 5636096, sign: true }); + data.append(FP16x16 { mag: 4718592, sign: true }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 3080192, sign: true }); + data.append(FP16x16 { mag: 6684672, sign: true }); + data.append(FP16x16 { mag: 4718592, sign: true }); + data.append(FP16x16 { mag: 4063232, sign: true }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 786432, sign: true }); + data.append(FP16x16 { mag: 2686976, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts/output_0.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts/output_0.cairo new file mode 100644 index 000000000..a1f8cec88 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_2d_equal_parts/output_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2752512, sign: true }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 
5636096, sign: true }); + data.append(FP16x16 { mag: 6684672, sign: true }); + data.append(FP16x16 { mag: 4718592, sign: true }); + data.append(FP16x16 { mag: 4063232, sign: true }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 4718592, sign: true }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 3080192, sign: true }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 786432, sign: true }); + data.append(FP16x16 { mag: 2686976, sign: true }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_uneven.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_uneven.cairo new file mode 100644 index 000000000..bfd1f8cec --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_2d_uneven.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_fp16x16_2d_uneven() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_uneven/input_0.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_uneven/input_0.cairo new file mode 100644 index 000000000..1c5d23fb3 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_2d_uneven/input_0.cairo @@ -0,0 +1,29 @@ +use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 7995392, sign: true }); + data.append(FP16x16 { mag: 5898240, sign: true }); + data.append(FP16x16 { mag: 5767168, sign: true }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: true }); + data.append(FP16x16 { mag: 589824, sign: true }); + data.append(FP16x16 { mag: 5373952, sign: false }); + data.append(FP16x16 { mag: 786432, sign: true }); + data.append(FP16x16 { mag: 7536640, sign: true }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 5111808, sign: false }); + data.append(FP16x16 { mag: 5898240, sign: true }); + data.append(FP16x16 { mag: 6160384, sign: true }); + data.append(FP16x16 { mag: 2490368, sign: true }); + data.append(FP16x16 { mag: 7208960, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_uneven/output_0.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_uneven/output_0.cairo new file mode 100644 index 000000000..54fc9eab0 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_2d_uneven/output_0.cairo @@ -0,0 +1,50 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 7995392, sign: true }); + 
data.append(FP16x16 { mag: 5898240, sign: true }); + data.append(FP16x16 { mag: 5767168, sign: true }); + data.append(FP16x16 { mag: 786432, sign: true }); + data.append(FP16x16 { mag: 7536640, sign: true }); + data.append(FP16x16 { mag: 327680, sign: false }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: true }); + data.append(FP16x16 { mag: 5111808, sign: false }); + data.append(FP16x16 { mag: 5898240, sign: true }); + data.append(FP16x16 { mag: 6160384, sign: true }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 589824, sign: true }); + data.append(FP16x16 { mag: 5373952, sign: false }); + data.append(FP16x16 { mag: 2490368, sign: true }); + data.append(FP16x16 { mag: 7208960, sign: false }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts.cairo new file mode 100644 index 000000000..5cd4e1845 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_fp16x16_2d_variable_parts() { + let input_0 = 
input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts/input_0.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts/input_0.cairo new file mode 100644 index 000000000..09790603c --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts/input_0.cairo @@ -0,0 +1,25 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2752512, sign: true }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 5636096, sign: true }); + data.append(FP16x16 { mag: 4718592, sign: true }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 3080192, sign: true }); + data.append(FP16x16 { mag: 6684672, sign: true }); + data.append(FP16x16 { mag: 4718592, sign: true }); + data.append(FP16x16 { mag: 4063232, sign: true }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 786432, sign: true }); + data.append(FP16x16 { mag: 2686976, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts/output_0.cairo b/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts/output_0.cairo new file mode 100644 index 000000000..9c37e69d5 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_2d_variable_parts/output_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2752512, sign: true }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 6684672, sign: true }); + data.append(FP16x16 { mag: 4718592, sign: true }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 5636096, sign: true }); + data.append(FP16x16 { mag: 4718592, sign: true }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 3080192, sign: true }); + data.append(FP16x16 { mag: 4063232, sign: true }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 786432, sign: true }); + data.append(FP16x16 { mag: 2686976, sign: true }); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_fp16x16_zero_size.cairo b/tests/nodes/split_to_sequence_fp16x16_zero_size.cairo new file mode 100644 index 000000000..e8ecfba30 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_zero_size.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_fp16x16_zero_size() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = 
input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![3].span(), data: array![0, 0, 0].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_fp16x16_zero_size/input_0.cairo b/tests/nodes/split_to_sequence_fp16x16_zero_size/input_0.cairo new file mode 100644 index 000000000..6bd7081a8 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_zero_size/input_0.cairo @@ -0,0 +1,12 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(0); + + let mut data = ArrayTrait::new(); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_fp16x16_zero_size/output_0.cairo b/tests/nodes/split_to_sequence_fp16x16_zero_size/output_0.cairo new file mode 100644 index 000000000..0f621d682 --- /dev/null +++ b/tests/nodes/split_to_sequence_fp16x16_zero_size/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(0); + + let mut data = ArrayTrait::new(); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(0); + + let mut data = ArrayTrait::new(); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(0); + + let mut data = ArrayTrait::new(); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_u32_1d_equal_parts.cairo 
b/tests/nodes/split_to_sequence_u32_1d_equal_parts.cairo new file mode 100644 index 000000000..9c14470b5 --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_1d_equal_parts.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_u32_1d_equal_parts() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_u32_1d_equal_parts/input_0.cairo b/tests/nodes/split_to_sequence_u32_1d_equal_parts/input_0.cairo new file mode 100644 index 000000000..ff9a4c48f --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_1d_equal_parts/input_0.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(28); + data.append(113); + data.append(47); + data.append(203); + data.append(45); + data.append(94); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_u32_1d_equal_parts/output_0.cairo b/tests/nodes/split_to_sequence_u32_1d_equal_parts/output_0.cairo new file mode 100644 index 000000000..b681c6a9b --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_1d_equal_parts/output_0.cairo @@ -0,0 +1,37 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, 
U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(28); + data.append(113); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(47); + data.append(203); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(45); + data.append(94); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_u32_1d_uneven.cairo b/tests/nodes/split_to_sequence_u32_1d_uneven.cairo new file mode 100644 index 000000000..0dfb5547f --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_1d_uneven.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_u32_1d_uneven() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![4].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_u32_1d_uneven/input_0.cairo b/tests/nodes/split_to_sequence_u32_1d_uneven/input_0.cairo new file mode 100644 index 000000000..b2b3cd1b6 --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_1d_uneven/input_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(7); + + let mut data = ArrayTrait::new(); + data.append(72); + data.append(209); + data.append(27); + data.append(147); + data.append(22); + data.append(98); + data.append(135); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_u32_1d_uneven/output_0.cairo b/tests/nodes/split_to_sequence_u32_1d_uneven/output_0.cairo new file mode 100644 index 000000000..b1f04d0d5 --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_1d_uneven/output_0.cairo @@ -0,0 +1,45 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(72); + data.append(209); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(27); + data.append(147); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(22); + data.append(98); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(135); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_u32_1d_variable_parts.cairo b/tests/nodes/split_to_sequence_u32_1d_variable_parts.cairo new file mode 100644 index 000000000..4df4fbee7 --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_1d_variable_parts.cairo @@ 
-0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_u32_1d_variable_parts() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_u32_1d_variable_parts/input_0.cairo b/tests/nodes/split_to_sequence_u32_1d_variable_parts/input_0.cairo new file mode 100644 index 000000000..ff9a4c48f --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_1d_variable_parts/input_0.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(28); + data.append(113); + data.append(47); + data.append(203); + data.append(45); + data.append(94); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_u32_1d_variable_parts/output_0.cairo b/tests/nodes/split_to_sequence_u32_1d_variable_parts/output_0.cairo new file mode 100644 index 000000000..65e0db938 --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_1d_variable_parts/output_0.cairo @@ -0,0 +1,30 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + 
shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(28); + data.append(113); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(47); + data.append(203); + data.append(45); + data.append(94); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_u32_2d_equal_parts.cairo b/tests/nodes/split_to_sequence_u32_2d_equal_parts.cairo new file mode 100644 index 000000000..24c06c857 --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_2d_equal_parts.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_u32_2d_equal_parts() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![2].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_u32_2d_equal_parts/input_0.cairo b/tests/nodes/split_to_sequence_u32_2d_equal_parts/input_0.cairo new file mode 100644 index 000000000..10422c64f --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_2d_equal_parts/input_0.cairo @@ -0,0 +1,25 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(24); + data.append(113); + data.append(214); + data.append(210); + 
data.append(195); + data.append(92); + data.append(187); + data.append(1); + data.append(10); + data.append(135); + data.append(216); + data.append(113); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_u32_2d_equal_parts/output_0.cairo b/tests/nodes/split_to_sequence_u32_2d_equal_parts/output_0.cairo new file mode 100644 index 000000000..75d425856 --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_2d_equal_parts/output_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(24); + data.append(113); + data.append(214); + data.append(187); + data.append(1); + data.append(10); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(210); + data.append(195); + data.append(92); + data.append(135); + data.append(216); + data.append(113); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_u32_2d_uneven.cairo b/tests/nodes/split_to_sequence_u32_2d_uneven.cairo new file mode 100644 index 000000000..7ab6604be --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_2d_uneven.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_u32_2d_uneven() { + let 
input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![1].span(), data: array![3].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_u32_2d_uneven/input_0.cairo b/tests/nodes/split_to_sequence_u32_2d_uneven/input_0.cairo new file mode 100644 index 000000000..2b516056a --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_2d_uneven/input_0.cairo @@ -0,0 +1,29 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(181); + data.append(95); + data.append(164); + data.append(86); + data.append(6); + data.append(169); + data.append(184); + data.append(122); + data.append(132); + data.append(59); + data.append(125); + data.append(118); + data.append(247); + data.append(59); + data.append(17); + data.append(130); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_u32_2d_uneven/output_0.cairo b/tests/nodes/split_to_sequence_u32_2d_uneven/output_0.cairo new file mode 100644 index 000000000..e566857e6 --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_2d_uneven/output_0.cairo @@ -0,0 +1,50 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(181); + data.append(95); + data.append(164); + data.append(132); + data.append(59); + data.append(125); + + 
sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(86); + data.append(6); + data.append(169); + data.append(118); + data.append(247); + data.append(59); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(184); + data.append(122); + data.append(17); + data.append(130); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_u32_2d_variable_parts.cairo b/tests/nodes/split_to_sequence_u32_2d_variable_parts.cairo new file mode 100644 index 000000000..dc81b4325 --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_2d_variable_parts.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_u32_2d_variable_parts() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_u32_2d_variable_parts/input_0.cairo b/tests/nodes/split_to_sequence_u32_2d_variable_parts/input_0.cairo new file mode 100644 index 000000000..10422c64f --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_2d_variable_parts/input_0.cairo @@ -0,0 +1,25 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use 
orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(24); + data.append(113); + data.append(214); + data.append(210); + data.append(195); + data.append(92); + data.append(187); + data.append(1); + data.append(10); + data.append(135); + data.append(216); + data.append(113); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_u32_2d_variable_parts/output_0.cairo b/tests/nodes/split_to_sequence_u32_2d_variable_parts/output_0.cairo new file mode 100644 index 000000000..43a897518 --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_2d_variable_parts/output_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(24); + data.append(113); + data.append(187); + data.append(1); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(214); + data.append(210); + data.append(195); + data.append(92); + data.append(10); + data.append(135); + data.append(216); + data.append(113); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} diff --git a/tests/nodes/split_to_sequence_u32_zero_size.cairo b/tests/nodes/split_to_sequence_u32_zero_size.cairo new file mode 100644 index 000000000..815ba7d4e --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_zero_size.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::U32TensorPartialEq; +use 
orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_split_to_sequence_u32_zero_size() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::::new(shape: array![3].span(), data: array![0, 0, 0].span(),))); + + assert_seq_eq(y, z); +} diff --git a/tests/nodes/split_to_sequence_u32_zero_size/input_0.cairo b/tests/nodes/split_to_sequence_u32_zero_size/input_0.cairo new file mode 100644 index 000000000..0d9d86004 --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_zero_size/input_0.cairo @@ -0,0 +1,12 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(0); + + let mut data = ArrayTrait::new(); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/split_to_sequence_u32_zero_size/output_0.cairo b/tests/nodes/split_to_sequence_u32_zero_size/output_0.cairo new file mode 100644 index 000000000..5ed21a395 --- /dev/null +++ b/tests/nodes/split_to_sequence_u32_zero_size/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Array> { + let mut sequence = ArrayTrait::new(); + + let mut shape = ArrayTrait::::new(); + shape.append(0); + + let mut data = ArrayTrait::new(); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(0); + + let mut data = ArrayTrait::new(); + + sequence.append(TensorTrait::new(shape.span(), 
data.span())); + + let mut shape = ArrayTrait::::new(); + shape.append(0); + + let mut data = ArrayTrait::new(); + + sequence.append(TensorTrait::new(shape.span(), data.span())); + + sequence +} From 7e4b843a08503459ad1c3d90b43338b4e9cf8544 Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Thu, 1 Feb 2024 17:59:44 +0800 Subject: [PATCH 18/46] feat: Implement Window correlation operators: Added `Range` operator; Added `HannWindow` operator; Added `Hamming_Window` operator; --- docs/framework/operators/tensor/README.md | 3 + .../operators/tensor/tensor.hamming_window.md | 32 +++++ .../operators/tensor/tensor.hann_window.md | 32 +++++ .../operators/tensor/tensor.range.md | 33 +++++ nodegen/node/hamming_window.py | 132 ++++++++++++++++++ nodegen/node/hann_window.py | 130 +++++++++++++++++ nodegen/node/range.py | 108 ++++++++++++++ src/operators/tensor/core.cairo | 106 ++++++++++++++ .../tensor/implementations/tensor_bool.cairo | 12 ++ .../implementations/tensor_complex64.cairo | 12 ++ .../implementations/tensor_fp16x16.cairo | 13 ++ .../implementations/tensor_fp16x16wide.cairo | 13 ++ .../implementations/tensor_fp32x32.cairo | 12 ++ .../implementations/tensor_fp64x64.cairo | 12 ++ .../implementations/tensor_fp8x23.cairo | 13 ++ .../implementations/tensor_fp8x23wide.cairo | 13 ++ .../tensor/implementations/tensor_i32.cairo | 12 ++ .../tensor/implementations/tensor_i8.cairo | 12 ++ .../tensor/implementations/tensor_u32.cairo | 12 ++ src/operators/tensor/math.cairo | 3 + .../tensor/math/hamming_window.cairo | 72 ++++++++++ src/operators/tensor/math/hann_window.cairo | 65 +++++++++ src/operators/tensor/math/range.cairo | 39 ++++++ tests/nodes.cairo | 9 ++ tests/nodes/hamming_window_fp16x16.cairo | 19 +++ .../hamming_window_fp16x16/output_0.cairo | 22 +++ tests/nodes/hamming_window_fp8x23.cairo | 19 +++ .../hamming_window_fp8x23/output_0.cairo | 16 +++ tests/nodes/hann_window_fp16x16.cairo | 19 +++ .../nodes/hann_window_fp16x16/output_0.cairo | 22 +++ 
tests/nodes/hann_window_fp8x23.cairo | 19 +++ tests/nodes/hann_window_fp8x23/output_0.cairo | 16 +++ tests/nodes/range_fp16x16.cairo | 19 +++ tests/nodes/range_fp16x16/output_0.cairo | 20 +++ tests/nodes/range_fp8x23.cairo | 19 +++ tests/nodes/range_fp8x23/output_0.cairo | 26 ++++ tests/nodes/range_i32.cairo | 19 +++ tests/nodes/range_i32/output_0.cairo | 19 +++ tests/nodes/range_i8.cairo | 19 +++ tests/nodes/range_i8/output_0.cairo | 21 +++ tests/nodes/range_u32.cairo | 19 +++ tests/nodes/range_u32/output_0.cairo | 20 +++ 42 files changed, 1253 insertions(+) create mode 100644 docs/framework/operators/tensor/tensor.hamming_window.md create mode 100644 docs/framework/operators/tensor/tensor.hann_window.md create mode 100644 docs/framework/operators/tensor/tensor.range.md create mode 100644 nodegen/node/hamming_window.py create mode 100644 nodegen/node/hann_window.py create mode 100644 nodegen/node/range.py create mode 100644 src/operators/tensor/math/hamming_window.cairo create mode 100644 src/operators/tensor/math/hann_window.cairo create mode 100644 src/operators/tensor/math/range.cairo create mode 100644 tests/nodes/hamming_window_fp16x16.cairo create mode 100644 tests/nodes/hamming_window_fp16x16/output_0.cairo create mode 100644 tests/nodes/hamming_window_fp8x23.cairo create mode 100644 tests/nodes/hamming_window_fp8x23/output_0.cairo create mode 100644 tests/nodes/hann_window_fp16x16.cairo create mode 100644 tests/nodes/hann_window_fp16x16/output_0.cairo create mode 100644 tests/nodes/hann_window_fp8x23.cairo create mode 100644 tests/nodes/hann_window_fp8x23/output_0.cairo create mode 100644 tests/nodes/range_fp16x16.cairo create mode 100644 tests/nodes/range_fp16x16/output_0.cairo create mode 100644 tests/nodes/range_fp8x23.cairo create mode 100644 tests/nodes/range_fp8x23/output_0.cairo create mode 100644 tests/nodes/range_i32.cairo create mode 100644 tests/nodes/range_i32/output_0.cairo create mode 100644 tests/nodes/range_i8.cairo create mode 100644 
tests/nodes/range_i8/output_0.cairo create mode 100644 tests/nodes/range_u32.cairo create mode 100644 tests/nodes/range_u32/output_0.cairo diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md index 281135f63..5febdda29 100644 --- a/docs/framework/operators/tensor/README.md +++ b/docs/framework/operators/tensor/README.md @@ -120,6 +120,9 @@ use orion::operators::tensor::TensorTrait; | [`tensor.erf`](tensor.erf.md) | Computes the error function of the given input tensor element-wise. | | [`tensor.layer_normalization`](tensor.layer\_normalization.md) | computes the layer normalization of the input tensor. | | [`tensor.split`](tensor.split.md) | Split a tensor into a list of tensors, along the specified ‘axis’. | +| [`tensor.range`](tensor.range.md) | Generate a tensor containing a sequence of numbers that begin at start and extends by increments of delta up to limit (exclusive). | +| [`tensor.hann_window`](tensor.hann\_window.md) | Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106. | +| [`tensor.hamming_window`](tensor.hamming\_window.md) | Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106. | ## Arithmetic Operations diff --git a/docs/framework/operators/tensor/tensor.hamming_window.md b/docs/framework/operators/tensor/tensor.hamming_window.md new file mode 100644 index 000000000..b4ab0cc5f --- /dev/null +++ b/docs/framework/operators/tensor/tensor.hamming_window.md @@ -0,0 +1,32 @@ +# tensor.hamming_window + +```rust + fn hamming_window(size: T, periodic: Option) -> Tensor; +``` + +Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106. + + +* `size`(`T`) - A scalar value indicating the length of the window. +* `periodic`(Option) - If 1, returns a window to be used as periodic function. If 0, return a symmetric window. 
When 'periodic' is specified, hamming computes a window of length size + 1 and returns the first size points. The default value is 1. + +## Returns + +A Hamming window with length: size. The output has the shape: [size]. + +## Examples + +```rust +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::{FixedTrait, FP8x23}; + + +fn hamming_window_example() -> Tensor { + return TensorTrait::hamming_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); // size: 4 +} +>>> [729444 6473817 6473817 729444] +``` diff --git a/docs/framework/operators/tensor/tensor.hann_window.md b/docs/framework/operators/tensor/tensor.hann_window.md new file mode 100644 index 000000000..c3119342c --- /dev/null +++ b/docs/framework/operators/tensor/tensor.hann_window.md @@ -0,0 +1,32 @@ +# tensor.hann_window + +```rust + fn hann_window(size: T, periodic: Option) -> Tensor; +``` + +Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106. + + +* `size`(`T`) - A scalar value indicating the length of the window. +* `periodic`(Option) - If 1, returns a window to be used as periodic function. If 0, return a symmetric window. When 'periodic' is specified, hann computes a window of length size + 1 and returns the first size points. The default value is 1. + +## Returns + +A Hann window with length: size. The output has the shape: [size].
+ +## Examples + +```rust +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::{FixedTrait, FP8x23}; + + +fn hann_window_example() -> Tensor { + return TensorTrait::hann_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); // size: 4 +} +>>> [0 6291455 6291456 0] +``` diff --git a/docs/framework/operators/tensor/tensor.range.md b/docs/framework/operators/tensor/tensor.range.md new file mode 100644 index 000000000..90530dedc --- /dev/null +++ b/docs/framework/operators/tensor/tensor.range.md @@ -0,0 +1,33 @@ +# tensor.range + +```rust + fn range(start: T, end: T, step: T) -> Tensor; +``` + +Generate a tensor containing a sequence of numbers that begin at start and extends by increments of delta up to limit (exclusive). + + +* `start`(`T`) - First entry for the range of output values. +* `end`(`T`) - Exclusive upper limit for the range of output values. +* `step `(`T`) - Value to step by. + +## Returns + +A 1-D tensor with same type as the inputs containing generated range of values. 
+ +## Examples + +```rust +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::NumberTrait; + + +fn range_example() -> Tensor { + return TensorTrait::range(21,2,-3); +} +>>> [21 18 15 12 9 6 3] +``` diff --git a/nodegen/node/hamming_window.py b/nodegen/node/hamming_window.py new file mode 100644 index 000000000..e7c0f109b --- /dev/null +++ b/nodegen/node/hamming_window.py @@ -0,0 +1,132 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement + +def hamming_window(size, output_datatype=None, periodic=None) -> np.ndarray: # type: ignore + if periodic == 1: + N_1 = size + else: + N_1 = size - 1 + ni = np.arange(size, dtype=output_datatype) + alpha = 25.0 / 46.0 + beta = 1 - alpha + res = alpha - np.cos(ni * np.float64(np.pi).astype(output_datatype) * 2 / N_1).astype(output_datatype) * beta + return res.astype(output_datatype) + +class Hamming_window(RunAll): + + @staticmethod + # We test here with fp8x23 implementation. + def fp8x23(): + print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)) + args = [4] + # x = np.float64(4) + args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23) + y = hamming_window(*args, np.float64) + print(y) + + # Convert the floats values in `y` to fixed points with `to_fp` method: + y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) + + # Define the name of the generated folder. + name = "hamming_window_fp8x23" + # Invoke `make_test` method to generate corresponding Cairo tests: + make_test( + [], # List of input tensors. + y, # The expected output result. 
+ f"TensorTrait::hamming_window({','.join(args_str)}, Option::Some(0))", # The code signature. + name # The name of the generated folder. + ) + + @staticmethod + # We test here with fp16x16 implementation. + def fp16x16(): + print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)) + args = [10] + # x = np.float64(4) + args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16) + y = hamming_window(*args, np.float16) + print(y) + + # Convert the floats values in `y` to fixed points with `to_fp` method: + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + # Define the name of the generated folder. + name = "hamming_window_fp16x16" + # Invoke `make_test` method to generate corresponding Cairo tests: + make_test( + [], # List of input tensors. + y, # The expected output result. + f"TensorTrait::hamming_window({','.join(args_str)}, Option::Some(0))", # The code signature. + name # The name of the generated folder. + ) + + # @staticmethod + # # We test here with i8 implementation. + # def i8(): + # print(get_data_statement(np.array([np.pi]).flatten(), Dtype.I8)) + # args = [5] + # # x = np.float64(4) + # args_str = get_data_statement(np.array(args).flatten(), Dtype.I8) + # y = hamming_window(*args, np.int8) + # print(y) + + # # Convert the floats values in `y` to fixed points with `to_fp` method: + # y = Tensor(Dtype.I8, y.shape, y.flatten()) + + # # Define the name of the generated folder. + # name = "hamming_window_i8" + # # Invoke `make_test` method to generate corresponding Cairo tests: + # make_test( + # [], # List of input tensors. + # y, # The expected output result. + # f"TensorTrait::hamming_window({','.join(args_str)}, Option::Some(1))", # The code signature. + # name # The name of the generated folder. + # ) + + # @staticmethod + # # We test here with i32 implementation. 
+ # def i32(): + # print(get_data_statement(np.array([np.pi]).flatten(), Dtype.I32)) + # args = [4] + # # x = np.float64(4) + # args_str = get_data_statement(np.array(args).flatten(), Dtype.I32) + # y = hamming_window(*args, np.int32) + # print(y) + + # # Convert the floats values in `y` to fixed points with `to_fp` method: + # y = Tensor(Dtype.I32, y.shape, y.flatten()) + + # # Define the name of the generated folder. + # name = "hamming_window_i32" + # # Invoke `make_test` method to generate corresponding Cairo tests: + # make_test( + # [], # List of input tensors. + # y, # The expected output result. + # f"TensorTrait::hamming_window({','.join(args_str)}, Option::Some(0))", # The code signature. + # name # The name of the generated folder. + # ) + + # @staticmethod + # # We test here with u32 implementation. + # def u32(): + # print(get_data_statement(np.array([np.pi]).flatten(), Dtype.U32)) + # args = [4] + # # x = np.float64(4) + # args_str = get_data_statement(np.array(args).flatten(), Dtype.U32) + # y = hamming_window(*args, np.uint32) + # print(y) + + # # Convert the floats values in `y` to fixed points with `to_fp` method: + # y = Tensor(Dtype.U32, y.shape, y.flatten()) + + # # Define the name of the generated folder. + # name = "hamming_window_u32" + # # Invoke `make_test` method to generate corresponding Cairo tests: + # make_test( + # [], # List of input tensors. + # y, # The expected output result. + # f"TensorTrait::hamming_window({','.join(args_str)}, Option::Some(0))", # The code signature. + # name # The name of the generated folder. 
+ # ) + \ No newline at end of file diff --git a/nodegen/node/hann_window.py b/nodegen/node/hann_window.py new file mode 100644 index 000000000..b8d94025b --- /dev/null +++ b/nodegen/node/hann_window.py @@ -0,0 +1,130 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement + +def hann_window(size, output_datatype=None, periodic=None) -> np.ndarray: # type: ignore + if periodic == 1: + N_1 = size + else: + N_1 = size - 1 + ni = np.arange(size, dtype=output_datatype) + res = np.sin((ni * np.float64(np.pi).astype(output_datatype) / N_1).astype(output_datatype)) ** 2 + return res.astype(output_datatype) + +class Hann_window(RunAll): + + @staticmethod + # We test here with fp8x23 implementation. + def fp8x23(): + print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)) + args = [4] + # x = np.float64(4) + args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23) + y = hann_window(*args, np.float64) + print(y) + + # Convert the floats values in `y` to fixed points with `to_fp` method: + y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) + + # Define the name of the generated folder. + name = "hann_window_fp8x23" + # Invoke `make_test` method to generate corresponding Cairo tests: + make_test( + [], # List of input tensors. + y, # The expected output result. + f"TensorTrait::hann_window({','.join(args_str)}, Option::Some(0))", # The code signature. + name # The name of the generated folder. + ) + + @staticmethod + # We test here with fp16x16 implementation. 
+ def fp16x16(): + print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)) + args = [10] + # x = np.float64(4) + args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16) + y = hann_window(*args, np.float16) + print(y) + + # Convert the floats values in `y` to fixed points with `to_fp` method: + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + # Define the name of the generated folder. + name = "hann_window_fp16x16" + # Invoke `make_test` method to generate corresponding Cairo tests: + make_test( + [], # List of input tensors. + y, # The expected output result. + f"TensorTrait::hann_window({','.join(args_str)}, Option::Some(0))", # The code signature. + name # The name of the generated folder. + ) + + # @staticmethod + # # We test here with i8 implementation. + # def i8(): + # print(get_data_statement(np.array([np.pi]).flatten(), Dtype.I8)) + # args = [5] + # # x = np.float64(4) + # args_str = get_data_statement(np.array(args).flatten(), Dtype.I8) + # y = hann_window(*args, np.int8) + # print(y) + + # # Convert the floats values in `y` to fixed points with `to_fp` method: + # y = Tensor(Dtype.I8, y.shape, y.flatten()) + + # # Define the name of the generated folder. + # name = "hann_window_i8" + # # Invoke `make_test` method to generate corresponding Cairo tests: + # make_test( + # [], # List of input tensors. + # y, # The expected output result. + # f"TensorTrait::hann_window({','.join(args_str)}, Option::Some(1))", # The code signature. + # name # The name of the generated folder. + # ) + + # @staticmethod + # # We test here with i32 implementation. 
+ # def i32(): + # print(get_data_statement(np.array([np.pi]).flatten(), Dtype.I32)) + # args = [4] + # # x = np.float64(4) + # args_str = get_data_statement(np.array(args).flatten(), Dtype.I32) + # y = hann_window(*args, np.int32) + # print(y) + + # # Convert the floats values in `y` to fixed points with `to_fp` method: + # y = Tensor(Dtype.I32, y.shape, y.flatten()) + + # # Define the name of the generated folder. + # name = "hann_window_i32" + # # Invoke `make_test` method to generate corresponding Cairo tests: + # make_test( + # [], # List of input tensors. + # y, # The expected output result. + # f"TensorTrait::hann_window({','.join(args_str)}, Option::Some(0))", # The code signature. + # name # The name of the generated folder. + # ) + + # @staticmethod + # # We test here with u32 implementation. + # def u32(): + # print(get_data_statement(np.array([np.pi]).flatten(), Dtype.U32)) + # args = [4] + # # x = np.float64(4) + # args_str = get_data_statement(np.array(args).flatten(), Dtype.U32) + # y = hann_window(*args, np.uint32) + # print(y) + + # # Convert the floats values in `y` to fixed points with `to_fp` method: + # y = Tensor(Dtype.U32, y.shape, y.flatten()) + + # # Define the name of the generated folder. + # name = "hann_window_u32" + # # Invoke `make_test` method to generate corresponding Cairo tests: + # make_test( + # [], # List of input tensors. + # y, # The expected output result. + # f"TensorTrait::hann_window({','.join(args_str)}, Option::Some(0))", # The code signature. + # name # The name of the generated folder. + # ) + \ No newline at end of file diff --git a/nodegen/node/range.py b/nodegen/node/range.py new file mode 100644 index 000000000..6eb7751bd --- /dev/null +++ b/nodegen/node/range.py @@ -0,0 +1,108 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement + + + +class Range(RunAll): + + @staticmethod + # We test here with fp8x23 implementation. 
+ def fp8x23(): + args = [1, 5, 0.3] + args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23) + y = np.arange(*args) + print(y) + # Convert the floats values in `y` to fixed points with `to_fp` method: + y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) + + # Define the name of the generated folder. + name = "range_fp8x23" + # Invoke `make_test` method to generate corresponding Cairo tests: + make_test( + [], # List of input tensors. + y, # The expected output result. + f"TensorTrait::range({','.join(args_str)})", # The code signature. + name, # The name of the generated folder. + ) + + @staticmethod + # We test here with fp16x16 implementation. + def fp16x16(): + args = [1, 25, 3] + args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16) + y = np.arange(*args) + print(y) + # Convert the floats values in `y` to fixed points with `to_fp` method: + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + # Define the name of the generated folder. + name = "range_fp16x16" + # Invoke `make_test` method to generate corresponding Cairo tests: + make_test( + [], # List of input tensors. + y, # The expected output result. + f"TensorTrait::range({','.join(args_str)})", # The code signature. + name, # The name of the generated folder. + ) + + @staticmethod + # We test here with i8 implementation. + def i8(): + args = [-1, 25, 3] + args_str = get_data_statement(np.array(args).flatten(), Dtype.I8) + y = np.arange(*args) + print(y) + # Convert the floats values in `y` to fixed points with `to_fp` method: + y = Tensor(Dtype.I8, y.shape, y.flatten()) + + # Define the name of the generated folder. + name = "range_i8" + # Invoke `make_test` method to generate corresponding Cairo tests: + make_test( + [], # List of input tensors. + y, # The expected output result. + f"TensorTrait::range({','.join(args_str)})", # The code signature. 
+ name, # The name of the generated folder. + ) + + @staticmethod + # We test here with i32 implementation. + def i32(): + args = [21, 2, -3] + args_str = get_data_statement(np.array(args).flatten(), Dtype.I32) + y = np.arange(*args) + print(y) + # Convert the floats values in `y` to fixed points with `to_fp` method: + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + # Define the name of the generated folder. + name = "range_i32" + # Invoke `make_test` method to generate corresponding Cairo tests: + make_test( + [], # List of input tensors. + y, # The expected output result. + f"TensorTrait::range({','.join(args_str)})", # The code signature. + name, # The name of the generated folder. + ) + + @staticmethod + # We test here with u32 implementation. + def u32(): + args = [1, 25, 3] + args_str = get_data_statement(np.array(args).flatten(), Dtype.U32) + y = np.arange(*args) + print(y) + # Convert the floats values in `y` to fixed points with `to_fp` method: + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + # Define the name of the generated folder. + name = "range_u32" + # Invoke `make_test` method to generate corresponding Cairo tests: + make_test( + [], # List of input tensors. + y, # The expected output result. + f"TensorTrait::range({','.join(args_str)})", # The code signature. + name, # The name of the generated folder. 
+ ) + \ No newline at end of file diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 70344eb97..1fcb47c20 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -118,6 +118,9 @@ impl TensorSerde, impl TDrop: Drop> of Serde { /// # tensor.new /// @@ -5162,6 +5165,109 @@ trait TensorTrait { fn split( self: @Tensor, axis: usize, num_outputs: Option, spl: Option> ) -> Array>; + /// # tensor.range + /// + /// ```rust + /// fn range(start: T, end: T, step: T) -> Tensor; + /// ``` + /// + /// Generate a tensor containing a sequence of numbers that begin at start and extends by increments of delta up to limit (exclusive). + /// + /// + /// * `start`(`T`) - First entry for the range of output values. + /// * `end`(`T`) - Exclusive upper limit for the range of output values. + /// * `step `(`T`) - Value to step by. + /// + /// ## Returns + /// + /// A 1-D tensor with same type as the inputs containing generated range of values. + /// + /// ## Examples + /// + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::I32TensorPartialEq; + /// use orion::operators::tensor::{TensorTrait, Tensor}; + /// use orion::operators::tensor::{I32Tensor, I32TensorAdd}; + /// use orion::utils::{assert_eq, assert_seq_eq}; + /// use orion::numbers::NumberTrait; + /// + /// + /// fn range_example() -> Tensor { + /// return TensorTrait::range(21,2,-3); + /// } + /// >>> [21 18 15 12 9 6 3] + /// ``` + /// + fn range(start: T, end: T, step: T) -> Tensor; + /// # tensor.hann_window + /// + /// ```rust + /// fn hann_window(size: T, periodic: Option) -> Tensor; + /// ``` + /// + /// Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106. + /// + /// + /// * `size`(`T`) - A scalar value indicating the length of the window. + /// * `periodic`(Option) - If 1, returns a window to be used as periodic function. If 0, return a symmetric window. 
+ When 'periodic' is specified, hann computes a window of length size + 1 and returns the first size points. The default value is 1. + /// + /// ## Returns + /// + /// A Hann window with length: size. The output has the shape: [size]. + /// + /// ## Examples + /// + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::FP8x23TensorPartialEq; + /// use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; + /// use orion::operators::tensor::{TensorTrait, Tensor}; + /// use orion::utils::{assert_eq, assert_seq_eq}; + /// use orion::numbers::{FixedTrait, FP8x23}; + /// + /// + /// fn hann_window_example() -> Tensor { + /// return TensorTrait::hann_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); // size: 4 + /// } + /// >>> [0 6291455 6291456 0] + /// ``` + /// + fn hann_window(size: T, periodic: Option) -> Tensor; + /// # tensor.hamming_window + /// + /// ```rust + /// fn hamming_window(size: T, periodic: Option) -> Tensor; + /// ``` + /// + /// Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106. + /// + /// + /// * `size`(`T`) - A scalar value indicating the length of the window. + /// * `periodic`(Option) - If 1, returns a window to be used as periodic function. If 0, return a symmetric window. When 'periodic' is specified, hamming computes a window of length size + 1 and returns the first size points. The default value is 1. + /// + /// ## Returns + /// + /// A Hamming window with length: size. The output has the shape: [size]. 
+ /// + /// ## Examples + /// + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::FP8x23TensorPartialEq; + /// use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; + /// use orion::operators::tensor::{TensorTrait, Tensor}; + /// use orion::utils::{assert_eq, assert_seq_eq}; + /// use orion::numbers::{FixedTrait, FP8x23}; + /// + /// + /// fn hamming_window_example() -> Tensor { + /// return TensorTrait::hamming_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); // size: 4 + /// } + /// >>> [729444 6473817 6473817 729444] + /// ``` + /// + fn hamming_window(size: T, periodic: Option) -> Tensor; } /// Cf: TensorTrait::new docstring diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 3da518ec8..b2cd72fb3 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -484,6 +484,18 @@ impl BoolTensor of TensorTrait { ) -> Array> { panic(array!['not supported!']) } + + fn range(start: bool, end: bool, step: bool) -> Tensor { + panic(array!['not supported!']) + } + + fn hann_window(size: bool, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } + + fn hamming_window(size: bool, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements partial equal for two `Tensor` using the `PartialEq` trait. 
diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 74acba5c6..760a9baa1 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -515,6 +515,18 @@ impl Complex64Tensor of TensorTrait { ) -> Tensor { panic(array!['not supported!']) } + + fn range(start: complex64, end: complex64, step: complex64) -> Tensor { + panic(array!['not supported!']) + } + + fn hann_window(size: complex64, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } + + fn hamming_window(size: complex64, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index cdc50bc4f..ffd6b6545 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -14,6 +14,7 @@ use orion::numbers::{NumberTrait, FP16x16, I8IntoFP16x16}; use orion::operators::tensor::implementations::{ tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor }; +use orion::numbers::fixed_point::implementations::fp16x16::math::trig::PI; impl FP16x16Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { @@ -560,6 +561,18 @@ impl FP16x16Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn range(start: FP16x16, end: FP16x16, step: FP16x16) -> Tensor { + math::range::range(start, end, step) + } + + fn hann_window(size: FP16x16, periodic: Option) -> Tensor { + math::hann_window::hann_window(size, FP16x16 { mag: PI, sign: false }, periodic) + } + + fn hamming_window(size: FP16x16, periodic: Option) -> Tensor { + math::hamming_window::hamming_window(size, FP16x16 { mag: PI, sign: false }, periodic) + } } 
/// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index b0dc2d858..8fff3ccd1 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -14,6 +14,7 @@ use orion::numbers::{NumberTrait, FP16x16W}; use orion::operators::tensor::implementations::{ tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor }; +use orion::numbers::fixed_point::implementations::fp16x16wide::math::trig::PI; impl FP16x16WTensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { @@ -512,6 +513,18 @@ impl FP16x16WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn range(start: FP16x16W, end: FP16x16W, step: FP16x16W) -> Tensor { + math::range::range(start, end, step) + } + + fn hann_window(size: FP16x16W, periodic: Option) -> Tensor { + math::hann_window::hann_window(size, FP16x16W { mag: PI, sign: false }, periodic) + } + + fn hamming_window(size: FP16x16W, periodic: Option) -> Tensor { + math::hamming_window::hamming_window(size, FP16x16W { mag: PI, sign: false }, periodic) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 4f862fd0e..051a9e907 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -561,6 +561,18 @@ impl FP32x32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn range(start: FP32x32, end: FP32x32, step: FP32x32) -> Tensor { + math::range::range(start, end, step) + } + + fn hann_window(size: FP32x32, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } + + fn hamming_window(size: FP32x32, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 1fe5591fc..41e44d965 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -561,6 +561,18 @@ impl FP64x64Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn range(start: FP64x64, end: FP64x64, step: FP64x64) -> Tensor { + math::range::range(start, end, step) + } + + fn hann_window(size: FP64x64, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } + + fn hamming_window(size: FP64x64, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 77d183c21..5497515e3 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -14,6 +14,7 @@ use orion::numbers::{NumberTrait, FP8x23, I8IntoFP8x23}; use orion::operators::tensor::implementations::{ tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor }; +use orion::numbers::fixed_point::implementations::fp8x23::math::trig::PI; impl FP8x23Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { @@ -559,6 +560,18 @@ impl FP8x23Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn range(start: FP8x23, end: FP8x23, step: FP8x23) -> Tensor { + math::range::range(start, end, step) + } + + fn hann_window(size: FP8x23, periodic: Option) -> Tensor { + math::hann_window::hann_window(size, FP8x23 { mag: PI, sign: false }, periodic) + } + + fn hamming_window(size: FP8x23, periodic: Option) -> Tensor { + math::hamming_window::hamming_window(size, FP8x23 { mag: PI, sign: false }, periodic) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index ff6069087..f00a47bdd 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -14,6 +14,7 @@ use orion::numbers::{NumberTrait, FP8x23W}; use orion::operators::tensor::implementations::{ tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor }; +use orion::numbers::fixed_point::implementations::fp8x23wide::math::trig::PI; impl FP8x23WTensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { @@ -498,6 +499,18 @@ impl FP8x23WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn range(start: FP8x23W, end: FP8x23W, step: FP8x23W) -> Tensor { + math::range::range(start, end, step) + } + + fn hann_window(size: FP8x23W, periodic: Option) -> Tensor { + math::hann_window::hann_window(size, FP8x23W { mag: PI, sign: false }, periodic) + } + + fn hamming_window(size: FP8x23W, periodic: Option) -> Tensor { + math::hamming_window::hamming_window(size, FP8x23W { mag: PI, sign: false }, periodic) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 50383d2df..8d682b647 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -541,6 +541,18 @@ impl I32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn range(start: i32, end: i32, step: i32) -> Tensor { + math::range::range(start, end, step) + } + + fn hann_window(size: i32, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } + + fn hamming_window(size: i32, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 7e81d90eb..f71c7f8dc 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -539,6 +539,18 @@ impl I8Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn range(start: i8, end: i8, step: i8) -> Tensor { + math::range::range(start, end, step) + } + + fn hann_window(size: i8, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } + + fn hamming_window(size: i8, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 5a926a538..a2310c9c0 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -482,6 +482,18 @@ impl U32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn range(start: u32, end: u32, step: u32) -> Tensor { + math::range::range(start, end, step) + } + + fn hann_window(size: u32, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } + + fn hamming_window(size: u32, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/math.cairo b/src/operators/tensor/math.cairo index 13c2ca49a..0ba680a8d 100644 --- a/src/operators/tensor/math.cairo +++ b/src/operators/tensor/math.cairo @@ -61,3 +61,6 @@ mod erf; mod layer_normalization; mod resize; mod compress; +mod range; +mod hann_window; +mod hamming_window; diff --git a/src/operators/tensor/math/hamming_window.cairo b/src/operators/tensor/math/hamming_window.cairo new file mode 100644 index 000000000..216590f09 --- /dev/null +++ b/src/operators/tensor/math/hamming_window.cairo @@ -0,0 +1,72 @@ +use core::traits::Into; +use core::traits::TryInto; +use orion::operators::tensor::core::{Tensor, TensorTrait}; +use core::array::{ArrayTrait, SpanTrait}; +use core::option::OptionTrait; + +use orion::numbers::fixed_point::core::FixedTrait; +use orion::numbers::NumberTrait; + +use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; +use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; + + +fn hamming_window< + T, + MAG, + impl TTensor: TensorTrait, + impl TNumber: NumberTrait, + impl TAdd: Add, + impl TSub: Sub, + impl TMul: Mul, + impl TDiv: Div, + impl TTensorAdd: Add>, + impl 
TPartialOrd: PartialOrd, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop, +>(size: T, PI: T, periodic: Option) -> Tensor { + let start: T = NumberTrait::zero(); + let one_step: T = NumberTrait::one(); + let two: T = one_step + one_step; + let three: T = two + one_step; + let n25: T = three.pow(three) - two; + let n46: T = n25 * two - two * two; + let alpha: T = n25 / n46; + + let beta: T = one_step - alpha; + let ni = TensorTrait::range(start, size, one_step); + assert!((ni.shape).len() == 1, "Unexpected shape 1."); + let mut N_1 = size; + if periodic != Option::Some(1) { + N_1 = N_1 - one_step; + }; + let len = *(ni.shape).at(0); + let mut arr: Array = ArrayTrait::::new(); + let mut i: usize = 0; + loop { + let v = *(ni.data).at(i); + let r = v * PI * two / N_1; + arr.append(r); + i += 1; + if i >= len { + break (); + }; + }; + let window = TensorTrait::::new(ni.shape, arr.span()); + let window_cos = window.cos(); + let len2 = *(ni.shape).at(0); + let mut arr2: Array = ArrayTrait::::new(); + let mut j: usize = 0; + loop { + let v = *(window_cos.data).at(j); + let v_2 = alpha - v * beta; + arr2.append(v_2); + j += 1; + if j >= len2 { + break (); + }; + }; + let window_cos_2 = TensorTrait::::new(ni.shape, arr2.span()); + return window_cos_2; +} diff --git a/src/operators/tensor/math/hann_window.cairo b/src/operators/tensor/math/hann_window.cairo new file mode 100644 index 000000000..05aa3b923 --- /dev/null +++ b/src/operators/tensor/math/hann_window.cairo @@ -0,0 +1,65 @@ +use core::traits::Into; +use core::traits::TryInto; +use orion::operators::tensor::core::{Tensor, TensorTrait}; +use core::array::{ArrayTrait, SpanTrait}; +use core::option::OptionTrait; + +use orion::numbers::fixed_point::core::FixedTrait; +use orion::numbers::NumberTrait; + +use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; +use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; + + +fn hann_window< + 
T, + MAG, + impl TTensor: TensorTrait, + impl TNumber: NumberTrait, + impl TAdd: Add, + impl TSub: Sub, + impl TMul: Mul, + impl TDiv: Div, + impl TTensorAdd: Add>, + impl TPartialOrd: PartialOrd, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop, +>(size: T, PI: T, periodic: Option) -> Tensor { + let start: T = NumberTrait::zero(); + let one_step: T = NumberTrait::one(); + let ni = TensorTrait::range(start, size, one_step); + assert!((ni.shape).len() == 1, "Unexpected shape 1."); + let mut N_1 = size; + if periodic != Option::Some(1) { + N_1 = N_1 - one_step; + }; + let len = *(ni.shape).at(0); + let mut arr: Array = ArrayTrait::::new(); + let mut i: usize = 0; + loop { + let v = *(ni.data).at(i); + let r = v * PI / N_1; + arr.append(r); + i += 1; + if i >= len { + break (); + }; + }; + let window = TensorTrait::::new(ni.shape, arr.span()); + let window_sin = window.sin(); + let len2 = *(ni.shape).at(0); + let mut arr2: Array = ArrayTrait::::new(); + let mut j: usize = 0; + loop { + let v = *(window_sin.data).at(j); + let v_2 = v * v; + arr2.append(v_2); + j += 1; + if j >= len2 { + break (); + }; + }; + let window_sin_2 = TensorTrait::::new(ni.shape, arr2.span()); + return window_sin_2; +} diff --git a/src/operators/tensor/math/range.cairo b/src/operators/tensor/math/range.cairo new file mode 100644 index 000000000..a21f7f2b0 --- /dev/null +++ b/src/operators/tensor/math/range.cairo @@ -0,0 +1,39 @@ +use core::traits::Into; +use core::traits::TryInto; +use orion::operators::tensor::core::{Tensor, TensorTrait}; +use core::array::{ArrayTrait, SpanTrait}; +use core::option::OptionTrait; + +use orion::numbers::fixed_point::core::FixedTrait; +use orion::numbers::NumberTrait; + +use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; +use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; + + +fn range< + T, + MAG, + impl TTensor: TensorTrait, + impl TNumber: NumberTrait, + impl 
TAdd: Add, + impl TSub: Sub, + impl TMul: Mul, + impl TPartialOrd: PartialOrd, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop, +>(mut start: T, end: T, step: T) -> Tensor { + let mut result: Array = ArrayTrait::::new(); + let zero: T = NumberTrait::zero(); + loop { + if (step >= zero && start >= end) || (step <= zero && start <= end) { + break (); + }; + let v = start; + result.append(v); + start += step; + }; + let shape = array![result.len()]; + return TensorTrait::::new(shape.span(), result.span()); +} diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 6c70b42cb..cec69c81a 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -936,3 +936,12 @@ mod split_fp16x16_2d_variable_parts; mod split_fp16x16_zero_size; mod split_fp16x16_1d_uneven; mod split_fp16x16_2d_uneven; +mod range_fp8x23; +mod range_fp16x16; +mod range_i32; +mod range_i8; +mod range_u32; +mod hann_window_fp8x23; +mod hann_window_fp16x16; +mod hamming_window_fp16x16; +mod hamming_window_fp8x23; diff --git a/tests/nodes/hamming_window_fp16x16.cairo b/tests/nodes/hamming_window_fp16x16.cairo new file mode 100644 index 000000000..841e3ea6d --- /dev/null +++ b/tests/nodes/hamming_window_fp16x16.cairo @@ -0,0 +1,19 @@ +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::numbers::{FixedTrait, FP16x16}; + +#[test] +#[available_gas(2000000000)] +fn test_hamming_window_fp16x16() { + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::hamming_window(FP16x16 { mag: 655360, sign: false }, Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/hamming_window_fp16x16/output_0.cairo b/tests/nodes/hamming_window_fp16x16/output_0.cairo new file mode 100644 index 000000000..13c592aa1 --- /dev/null +++ 
b/tests/nodes/hamming_window_fp16x16/output_0.cairo @@ -0,0 +1,22 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(10); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 5696, sign: false }); + data.append(FP16x16 { mag: 12688, sign: false }); + data.append(FP16x16 { mag: 30400, sign: false }); + data.append(FP16x16 { mag: 50560, sign: false }); + data.append(FP16x16 { mag: 63712, sign: false }); + data.append(FP16x16 { mag: 63744, sign: false }); + data.append(FP16x16 { mag: 50624, sign: false }); + data.append(FP16x16 { mag: 30432, sign: false }); + data.append(FP16x16 { mag: 12752, sign: false }); + data.append(FP16x16 { mag: 5696, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/hamming_window_fp8x23.cairo b/tests/nodes/hamming_window_fp8x23.cairo new file mode 100644 index 000000000..2f85d8ef3 --- /dev/null +++ b/tests/nodes/hamming_window_fp8x23.cairo @@ -0,0 +1,19 @@ +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::numbers::{FixedTrait, FP8x23}; + +#[test] +#[available_gas(2000000000)] +fn test_hamming_window_fp8x23() { + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::hamming_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/hamming_window_fp8x23/output_0.cairo b/tests/nodes/hamming_window_fp8x23/output_0.cairo new file mode 100644 index 000000000..911c8fa49 --- /dev/null +++ b/tests/nodes/hamming_window_fp8x23/output_0.cairo @@ -0,0 +1,16 @@ 
+use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 729444, sign: false }); + data.append(FP8x23 { mag: 6473817, sign: false }); + data.append(FP8x23 { mag: 6473817, sign: false }); + data.append(FP8x23 { mag: 729444, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/hann_window_fp16x16.cairo b/tests/nodes/hann_window_fp16x16.cairo new file mode 100644 index 000000000..0f6d6f049 --- /dev/null +++ b/tests/nodes/hann_window_fp16x16.cairo @@ -0,0 +1,19 @@ +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::numbers::{FixedTrait, FP16x16}; + +#[test] +#[available_gas(2000000000)] +fn test_hann_window_fp16x16() { + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::hann_window(FP16x16 { mag: 655360, sign: false }, Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/hann_window_fp16x16/output_0.cairo b/tests/nodes/hann_window_fp16x16/output_0.cairo new file mode 100644 index 000000000..8a1e9f04b --- /dev/null +++ b/tests/nodes/hann_window_fp16x16/output_0.cairo @@ -0,0 +1,22 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(10); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + 
data.append(FP16x16 { mag: 7656, sign: false }); + data.append(FP16x16 { mag: 27056, sign: false }); + data.append(FP16x16 { mag: 49120, sign: false }); + data.append(FP16x16 { mag: 63552, sign: false }); + data.append(FP16x16 { mag: 63552, sign: false }); + data.append(FP16x16 { mag: 49184, sign: false }); + data.append(FP16x16 { mag: 27104, sign: false }); + data.append(FP16x16 { mag: 7732, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/hann_window_fp8x23.cairo b/tests/nodes/hann_window_fp8x23.cairo new file mode 100644 index 000000000..fc0f7a40f --- /dev/null +++ b/tests/nodes/hann_window_fp8x23.cairo @@ -0,0 +1,19 @@ +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::{FixedTrait, FP8x23}; + +#[test] +#[available_gas(2000000000)] +fn test_hann_window_fp8x23() { + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::hann_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/hann_window_fp8x23/output_0.cairo b/tests/nodes/hann_window_fp8x23/output_0.cairo new file mode 100644 index 000000000..1051f1586 --- /dev/null +++ b/tests/nodes/hann_window_fp8x23/output_0.cairo @@ -0,0 +1,16 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 6291455, sign: false }); + data.append(FP8x23 { mag: 6291456, sign: false }); + 
data.append(FP8x23 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/range_fp16x16.cairo b/tests/nodes/range_fp16x16.cairo new file mode 100644 index 000000000..efacd031c --- /dev/null +++ b/tests/nodes/range_fp16x16.cairo @@ -0,0 +1,19 @@ +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +#[test] +#[available_gas(2000000000)] +fn test_range_fp16x16() { + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::range(FP16x16 { mag: 65536, sign: false },FP16x16 { mag: 1638400, sign: false },FP16x16 { mag: 196608, sign: false }); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/range_fp16x16/output_0.cairo b/tests/nodes/range_fp16x16/output_0.cairo new file mode 100644 index 000000000..dc9f58b13 --- /dev/null +++ b/tests/nodes/range_fp16x16/output_0.cairo @@ -0,0 +1,20 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/range_fp8x23.cairo 
b/tests/nodes/range_fp8x23.cairo new file mode 100644 index 000000000..c299a96ab --- /dev/null +++ b/tests/nodes/range_fp8x23.cairo @@ -0,0 +1,19 @@ +mod output_0; + + +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::numbers::{FixedTrait, FP8x23}; + +#[test] +#[available_gas(2000000000)] +fn test_range_fp8x23() { + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::range(FP8x23 { mag: 8388608, sign: false },FP8x23 { mag: 41943040, sign: false },FP8x23 { mag: 2516582, sign: false }); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/range_fp8x23/output_0.cairo b/tests/nodes/range_fp8x23/output_0.cairo new file mode 100644 index 000000000..9e3cdacdb --- /dev/null +++ b/tests/nodes/range_fp8x23/output_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(14); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 10905190, sign: false }); + data.append(FP8x23 { mag: 13421772, sign: false }); + data.append(FP8x23 { mag: 15938355, sign: false }); + data.append(FP8x23 { mag: 18454937, sign: false }); + data.append(FP8x23 { mag: 20971520, sign: false }); + data.append(FP8x23 { mag: 23488102, sign: false }); + data.append(FP8x23 { mag: 26004684, sign: false }); + data.append(FP8x23 { mag: 28521267, sign: false }); + data.append(FP8x23 { mag: 31037849, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(FP8x23 { mag: 36071014, sign: false }); + data.append(FP8x23 { mag: 38587596, sign: false }); 
+ data.append(FP8x23 { mag: 41104179, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/range_i32.cairo b/tests/nodes/range_i32.cairo new file mode 100644 index 000000000..786094089 --- /dev/null +++ b/tests/nodes/range_i32.cairo @@ -0,0 +1,19 @@ +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::NumberTrait; + +#[test] +#[available_gas(2000000000)] +fn test_range_i32() { + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::range(21,2,-3); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/range_i32/output_0.cairo b/tests/nodes/range_i32/output_0.cairo new file mode 100644 index 000000000..a2dcbdaaf --- /dev/null +++ b/tests/nodes/range_i32/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(7); + + let mut data = ArrayTrait::new(); + data.append(21); + data.append(18); + data.append(15); + data.append(12); + data.append(9); + data.append(6); + data.append(3); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/range_i8.cairo b/tests/nodes/range_i8.cairo new file mode 100644 index 000000000..90c9917cd --- /dev/null +++ b/tests/nodes/range_i8.cairo @@ -0,0 +1,19 @@ +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I8TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::NumberTrait; + +#[test] +#[available_gas(2000000000)] +fn 
test_range_i8() { + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::range(-1,25,3); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/range_i8/output_0.cairo b/tests/nodes/range_i8/output_0.cairo new file mode 100644 index 000000000..f6b8e1055 --- /dev/null +++ b/tests/nodes/range_i8/output_0.cairo @@ -0,0 +1,21 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(9); + + let mut data = ArrayTrait::new(); + data.append(-1); + data.append(2); + data.append(5); + data.append(8); + data.append(11); + data.append(14); + data.append(17); + data.append(20); + data.append(23); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/range_u32.cairo b/tests/nodes/range_u32.cairo new file mode 100644 index 000000000..fcaa30ca0 --- /dev/null +++ b/tests/nodes/range_u32.cairo @@ -0,0 +1,19 @@ +mod output_0; + + +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::numbers::NumberTrait; + +#[test] +#[available_gas(2000000000)] +fn test_range_u32() { + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::range(1,25,3); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/range_u32/output_0.cairo b/tests/nodes/range_u32/output_0.cairo new file mode 100644 index 000000000..04d145f9a --- /dev/null +++ b/tests/nodes/range_u32/output_0.cairo @@ -0,0 +1,20 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + 
shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(4); + data.append(7); + data.append(10); + data.append(13); + data.append(16); + data.append(19); + data.append(22); + TensorTrait::new(shape.span(), data.span()) +} From 7109bf02df68e14ea04ec624ef9435a4f322f0c9 Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Fri, 2 Feb 2024 10:33:05 +0800 Subject: [PATCH 19/46] tests: Add mod in tests/node.cairo --- tests/nodes.cairo | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 5ed0c17b7..69ed72cf6 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -952,3 +952,4 @@ mod split_to_sequence_u32_1d_uneven; mod split_to_sequence_u32_2d_uneven; mod split_to_sequence_2d_scalar; mod split_to_sequence_2d_nokeepdims; +mod split_to_sequence_1d_nokeepdims; From f16d643ed49ce759937f3877a6e192c814b802c7 Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Fri, 2 Feb 2024 18:29:27 +0800 Subject: [PATCH 20/46] feat: Added blackman_window operator --- docs/framework/operators/tensor/README.md | 1 + .../tensor/tensor.blackman_window.md | 32 +++++ .../operators/tensor/tensor.hamming_window.md | 4 +- nodegen/node/blackman_window.py | 130 ++++++++++++++++++ nodegen/node/hamming_window.py | 4 - src/operators/tensor/core.cairo | 39 +++++- .../tensor/implementations/tensor_bool.cairo | 4 + .../implementations/tensor_complex64.cairo | 4 + .../implementations/tensor_fp16x16.cairo | 4 + .../implementations/tensor_fp16x16wide.cairo | 4 + .../implementations/tensor_fp32x32.cairo | 4 + .../implementations/tensor_fp64x64.cairo | 4 + .../implementations/tensor_fp8x23.cairo | 4 + .../implementations/tensor_fp8x23wide.cairo | 4 + .../tensor/implementations/tensor_i32.cairo | 4 + .../tensor/implementations/tensor_i8.cairo | 4 + .../tensor/implementations/tensor_u32.cairo | 4 + src/operators/tensor/math.cairo | 1 + .../tensor/math/blackman_window.cairo | 109 +++++++++++++++ tests/nodes.cairo | 2 + 
tests/nodes/blackman_window_fp16x16.cairo | 19 +++ .../blackman_window_fp16x16/output_0.cairo | 15 ++ tests/nodes/blackman_window_fp8x23.cairo | 19 +++ .../blackman_window_fp8x23/output_0.cairo | 15 ++ 24 files changed, 426 insertions(+), 8 deletions(-) create mode 100644 docs/framework/operators/tensor/tensor.blackman_window.md create mode 100644 nodegen/node/blackman_window.py create mode 100644 src/operators/tensor/math/blackman_window.cairo create mode 100644 tests/nodes/blackman_window_fp16x16.cairo create mode 100644 tests/nodes/blackman_window_fp16x16/output_0.cairo create mode 100644 tests/nodes/blackman_window_fp8x23.cairo create mode 100644 tests/nodes/blackman_window_fp8x23/output_0.cairo diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md index 5febdda29..c5d395777 100644 --- a/docs/framework/operators/tensor/README.md +++ b/docs/framework/operators/tensor/README.md @@ -123,6 +123,7 @@ use orion::operators::tensor::TensorTrait; | [`tensor.range`](tensor.range.md) | Generate a tensor containing a sequence of numbers that begin at start and extends by increments of delta up to limit (exclusive). | | [`tensor.hann_window`](tensor.hann\_window.md) | Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106. | | [`tensor.hamming_window`](tensor.hamming\_window.md) | Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106. | +| [`tensor.blackman_window`](tensor.blackman\_window.md) | Generates a Blackman window as described in the paper https://ieeexplore.ieee.org/document/1455106. 
| ## Arithmetic Operations diff --git a/docs/framework/operators/tensor/tensor.blackman_window.md b/docs/framework/operators/tensor/tensor.blackman_window.md new file mode 100644 index 000000000..9233e71eb --- /dev/null +++ b/docs/framework/operators/tensor/tensor.blackman_window.md @@ -0,0 +1,32 @@ +# tensor.blackman_window + +```rust + fn blackman_window(size: T, periodic: Option) -> Tensor; +``` + +Generates a Blackman window as described in the paper https://ieeexplore.ieee.org/document/1455106. + + +* `size`(`T`) - A scalar value indicating the length of the window. +* `periodic`(Option) - If 1, returns a window to be used as periodic function. If 0, return a symmetric window. When 'periodic' is specified, hann computes a window of length size + 1 and returns the first size points. The default value is 1. + +## Returns + +A Blackman window with length: size. The output has the shape: [size]. + +## Examples + +```rust +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::numbers::{FixedTrait, FP8x23}; + + +fn blackman_window_example() -> Tensor { + return TensorTrait::blackman_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); // size: 4 +} +>>> [0 0.36 0.36 0] +``` diff --git a/docs/framework/operators/tensor/tensor.hamming_window.md b/docs/framework/operators/tensor/tensor.hamming_window.md index b4ab0cc5f..1ce1a25f1 100644 --- a/docs/framework/operators/tensor/tensor.hamming_window.md +++ b/docs/framework/operators/tensor/tensor.hamming_window.md @@ -12,7 +12,7 @@ Generates a Hamming window as described in the paper https://ieeexplore.ieee.org ## Returns -A Hann window with length: size. The output has the shape: [size]. +A Hamming window with length: size. The output has the shape: [size]. 
## Examples @@ -25,7 +25,7 @@ use orion::utils::{assert_eq, assert_seq_eq}; use orion::numbers::{FixedTrait, FP8x23}; -fn hann_window_example() -> Tensor { +fn hamming_window_example() -> Tensor { return TensorTrait::hamming_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); // size: 4 } >>> [729444 6473817 6473817 729444] diff --git a/nodegen/node/blackman_window.py b/nodegen/node/blackman_window.py new file mode 100644 index 000000000..fdbacc615 --- /dev/null +++ b/nodegen/node/blackman_window.py @@ -0,0 +1,130 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement + +def blackman_window(size, output_datatype=None, periodic=None) -> np.ndarray: # type: ignore + if periodic == 1: + N_1 = size + else: + N_1 = size - 1 + ni = np.arange(size, dtype=output_datatype) + alpha = 0.42 + beta = 0.08 + y = np.cos((ni * (np.float64(np.pi).astype(output_datatype) * 2)) / N_1).astype(output_datatype) * (-0.5) + y += np.cos((ni * (np.float64(np.pi).astype(output_datatype) * 4)) / N_1) * beta + y += alpha + return y.astype(output_datatype) + +class Blackman_window(RunAll): + + @staticmethod + # We test here with fp8x23 implementation. + def fp8x23(): + args = [3] + # x = np.float64(4) + args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23) + y = blackman_window(*args, np.float64) + + # Convert the floats values in `y` to fixed points with `to_fp` method: + y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) + + # Define the name of the generated folder. + name = "blackman_window_fp8x23" + # Invoke `make_test` method to generate corresponding Cairo tests: + make_test( + [], # List of input tensors. + y, # The expected output result. + f"TensorTrait::blackman_window({','.join(args_str)}, Option::Some(0))", # The code signature. + name # The name of the generated folder. 
+ ) + + @staticmethod + # We test here with fp16x16 implementation. + def fp16x16(): + print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)) + args = [3] + # x = np.float64(4) + args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16) + y = blackman_window(*args, np.float16, 1) + # Convert the floats values in `y` to fixed points with `to_fp` method: + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + # Define the name of the generated folder. + name = "blackman_window_fp16x16" + # Invoke `make_test` method to generate corresponding Cairo tests: + make_test( + [], # List of input tensors. + y, # The expected output result. + f"TensorTrait::blackman_window({','.join(args_str)}, Option::Some(1))", # The code signature. + name # The name of the generated folder. + ) + + # @staticmethod + # # We test here with i8 implementation. + # def i8(): + # print(get_data_statement(np.array([np.pi]).flatten(), Dtype.I8)) + # args = [5] + # # x = np.float64(4) + # args_str = get_data_statement(np.array(args).flatten(), Dtype.I8) + # y = blackman_window(*args, np.int8) + # print(y) + + # # Convert the floats values in `y` to fixed points with `to_fp` method: + # y = Tensor(Dtype.I8, y.shape, y.flatten()) + + # # Define the name of the generated folder. + # name = "blackman_window_i8" + # # Invoke `make_test` method to generate corresponding Cairo tests: + # make_test( + # [], # List of input tensors. + # y, # The expected output result. + # f"TensorTrait::blackman_window({','.join(args_str)}, Option::Some(1))", # The code signature. + # name # The name of the generated folder. + # ) + + # @staticmethod + # # We test here with i32 implementation. 
+ # def i32(): + # print(get_data_statement(np.array([np.pi]).flatten(), Dtype.I32)) + # args = [4] + # # x = np.float64(4) + # args_str = get_data_statement(np.array(args).flatten(), Dtype.I32) + # y = blackman_window(*args, np.int32) + # print(y) + + # # Convert the floats values in `y` to fixed points with `to_fp` method: + # y = Tensor(Dtype.I32, y.shape, y.flatten()) + + # # Define the name of the generated folder. + # name = "blackman_window_i32" + # # Invoke `make_test` method to generate corresponding Cairo tests: + # make_test( + # [], # List of input tensors. + # y, # The expected output result. + # f"TensorTrait::blackman_window({','.join(args_str)}, Option::Some(0))", # The code signature. + # name # The name of the generated folder. + # ) + + # @staticmethod + # # We test here with u32 implementation. + # def u32(): + # print(get_data_statement(np.array([np.pi]).flatten(), Dtype.U32)) + # args = [4] + # # x = np.float64(4) + # args_str = get_data_statement(np.array(args).flatten(), Dtype.U32) + # y = blackman_window(*args, np.uint32) + # print(y) + + # # Convert the floats values in `y` to fixed points with `to_fp` method: + # y = Tensor(Dtype.U32, y.shape, y.flatten()) + + # # Define the name of the generated folder. + # name = "blackman_window_u32" + # # Invoke `make_test` method to generate corresponding Cairo tests: + # make_test( + # [], # List of input tensors. + # y, # The expected output result. + # f"TensorTrait::blackman_window({','.join(args_str)}, Option::Some(0))", # The code signature. + # name # The name of the generated folder. + # ) + \ No newline at end of file diff --git a/nodegen/node/hamming_window.py b/nodegen/node/hamming_window.py index e7c0f109b..295db4fad 100644 --- a/nodegen/node/hamming_window.py +++ b/nodegen/node/hamming_window.py @@ -18,12 +18,10 @@ class Hamming_window(RunAll): @staticmethod # We test here with fp8x23 implementation. 
def fp8x23(): - print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)) args = [4] # x = np.float64(4) args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23) y = hamming_window(*args, np.float64) - print(y) # Convert the floats values in `y` to fixed points with `to_fp` method: y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) @@ -41,12 +39,10 @@ def fp8x23(): @staticmethod # We test here with fp16x16 implementation. def fp16x16(): - print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)) args = [10] # x = np.float64(4) args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16) y = hamming_window(*args, np.float16) - print(y) # Convert the floats values in `y` to fixed points with `to_fp` method: y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 1fcb47c20..4794249dc 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -121,6 +121,7 @@ impl TensorSerde, impl TDrop: Drop> of Serde { /// # tensor.new /// @@ -5248,7 +5249,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A Hann window with length: size. The output has the shape: [size]. + /// A Hamming window with length: size. The output has the shape: [size]. 
/// /// ## Examples /// /// ```rust /// use core::array::{ArrayTrait, SpanTrait}; /// use orion::operators::tensor::FP8x23TensorPartialEq; /// use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; /// use orion::operators::tensor::{TensorTrait, Tensor}; /// use orion::utils::{assert_eq, assert_seq_eq}; /// use orion::numbers::{FixedTrait, FP8x23}; /// /// - /// fn hann_window_example() -> Tensor { + /// fn hamming_window_example() -> Tensor { /// return TensorTrait::hamming_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); // size: 4 /// } /// >>> [729444 6473817 6473817 729444] /// ``` /// fn hamming_window(size: T, periodic: Option) -> Tensor; + /// # tensor.blackman_window + /// + /// ```rust + /// fn blackman_window(size: T, periodic: Option) -> Tensor; + /// ``` + /// + /// Generates a Blackman window as described in the paper https://ieeexplore.ieee.org/document/1455106. + /// + /// + /// * `size`(`T`) - A scalar value indicating the length of the window. + /// * `periodic`(Option) - If 1, returns a window to be used as periodic function. If 0, return a symmetric window. When 'periodic' is specified, blackman computes a window of length size + 1 and returns the first size points. The default value is 1. + /// + /// ## Returns + /// + /// A Blackman window with length: size. The output has the shape: [size].
+ /// + /// ## Examples + /// + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::FP8x23TensorPartialEq; + /// use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; + /// use orion::operators::tensor::{TensorTrait, Tensor}; + /// use orion::utils::{assert_eq, assert_seq_eq}; + /// use orion::numbers::{FixedTrait, FP8x23}; + /// + /// + /// fn blackman_window_example() -> Tensor { + /// return TensorTrait::blackman_window(FP8x23 { mag: 33554432, sign: false }, Option::Some(0)); // size: 4 + /// } + /// >>> [0 0.63 0.63 0] + /// ``` + /// + fn blackman_window(size: T, periodic: Option) -> Tensor; } /// Cf: TensorTrait::new docstring diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index b2cd72fb3..8c112cef7 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -496,6 +496,10 @@ impl BoolTensor of TensorTrait { fn hamming_window(size: bool, periodic: Option) -> Tensor { panic(array!['not supported!']) } + + fn blackman_window(size: bool, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements partial equal for two `Tensor` using the `PartialEq` trait. diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 760a9baa1..a08dba543 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -527,6 +527,10 @@ impl Complex64Tensor of TensorTrait { fn hamming_window(size: complex64, periodic: Option) -> Tensor { panic(array!['not supported!']) } + + fn blackman_window(size: complex64, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait.
diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index ffd6b6545..091e75cfc 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -573,6 +573,10 @@ impl FP16x16Tensor of TensorTrait { fn hamming_window(size: FP16x16, periodic: Option) -> Tensor { math::hamming_window::hamming_window(size, FP16x16 { mag: PI, sign: false }, periodic) } + + fn blackman_window(size: FP16x16, periodic: Option) -> Tensor { + math::blackman_window::blackman_window(size, FP16x16 { mag: PI, sign: false }, periodic) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 8fff3ccd1..3fa226117 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -525,6 +525,10 @@ impl FP16x16WTensor of TensorTrait { fn hamming_window(size: FP16x16W, periodic: Option) -> Tensor { math::hamming_window::hamming_window(size, FP16x16W { mag: PI, sign: false }, periodic) } + + fn blackman_window(size: FP16x16W, periodic: Option) -> Tensor { + math::blackman_window::blackman_window(size, FP16x16W { mag: PI, sign: false }, periodic) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 051a9e907..a78d5af35 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -573,6 +573,10 @@ impl FP32x32Tensor of TensorTrait { fn hamming_window(size: FP32x32, periodic: Option) -> Tensor { panic(array!['not supported!']) } + + fn blackman_window(size: FP32x32, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 41e44d965..106a4375c 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -573,6 +573,10 @@ impl FP64x64Tensor of TensorTrait { fn hamming_window(size: FP64x64, periodic: Option) -> Tensor { panic(array!['not supported!']) } + + fn blackman_window(size: FP64x64, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 5497515e3..f3a472f23 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -572,6 +572,10 @@ impl FP8x23Tensor of TensorTrait { fn hamming_window(size: FP8x23, periodic: Option) -> Tensor { math::hamming_window::hamming_window(size, FP8x23 { mag: PI, sign: false }, periodic) } + + fn blackman_window(size: FP8x23, periodic: Option) -> Tensor { + math::blackman_window::blackman_window(size, FP8x23 { mag: PI, sign: false }, periodic) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index f00a47bdd..d322f72a6 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -511,6 +511,10 @@ impl FP8x23WTensor of TensorTrait { fn hamming_window(size: FP8x23W, periodic: Option) -> Tensor { math::hamming_window::hamming_window(size, FP8x23W { mag: PI, sign: false }, periodic) } + + fn blackman_window(size: FP8x23W, periodic: Option) -> Tensor { + math::blackman_window::blackman_window(size, FP8x23W { mag: PI, sign: false }, periodic) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 8d682b647..4b60d913e 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -553,6 +553,10 @@ impl I32Tensor of TensorTrait { fn hamming_window(size: i32, periodic: Option) -> Tensor { panic(array!['not supported!']) } + + fn blackman_window(size: i32, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index f71c7f8dc..b78faf0bd 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -551,6 +551,10 @@ impl I8Tensor of TensorTrait { fn hamming_window(size: i8, periodic: Option) -> Tensor { panic(array!['not supported!']) } + + fn blackman_window(size: i8, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index a2310c9c0..6851192b8 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -494,6 +494,10 @@ impl U32Tensor of TensorTrait { fn hamming_window(size: u32, periodic: Option) -> Tensor { panic(array!['not supported!']) } + + fn blackman_window(size: u32, periodic: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/math.cairo b/src/operators/tensor/math.cairo index 0ba680a8d..aca8c8f0e 100644 --- a/src/operators/tensor/math.cairo +++ b/src/operators/tensor/math.cairo @@ -64,3 +64,4 @@ mod compress; mod range; mod hann_window; mod hamming_window; +mod blackman_window; diff --git a/src/operators/tensor/math/blackman_window.cairo b/src/operators/tensor/math/blackman_window.cairo new file mode 100644 index 000000000..29f4d2903 --- /dev/null +++ b/src/operators/tensor/math/blackman_window.cairo @@ -0,0 +1,109 @@ +use core::traits::Into; +use core::traits::TryInto; +use orion::operators::tensor::core::{Tensor, TensorTrait}; +use core::array::{ArrayTrait, SpanTrait}; +use core::option::OptionTrait; + +use orion::numbers::fixed_point::core::FixedTrait; +use orion::numbers::NumberTrait; + +use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; +use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; + + +fn blackman_window< + T, + MAG, + impl TTensor: TensorTrait, + impl TNumber: NumberTrait, + impl TAdd: Add, + impl TSub: Sub, + impl TMul: Mul, + impl TDiv: Div, + impl TTensorAdd: Add>, + impl TPartialOrd: PartialOrd, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop, +>(size: T, PI: T, periodic: Option) -> Tensor { + let start: T = NumberTrait::zero(); + let one_step: T = 
NumberTrait::one(); + let two: T = one_step + one_step; + let three: T = two + one_step; + let n25: T = three.pow(three) - two; + let alpha: T = (n25 - two * two) / (n25 * two); + let beta: T = two / n25; + let n_0_5: T = (one_step - two) / two; + + let ni = TensorTrait::range(start, size, one_step); + assert!((ni.shape).len() == 1, "Unexpected shape 1."); + let mut N_1 = size; + if periodic != Option::Some(1) { + N_1 = N_1 - one_step; + }; + let len = *(ni.shape).at(0); + let mut arr1: Array = ArrayTrait::::new(); + let mut i: usize = 0; + loop { + let v = *(ni.data).at(i); + let r = (v * (PI * two)) / N_1; + arr1.append(r); + i += 1; + if i >= len { + break (); + }; + }; + let window_cos = TensorTrait::::new(ni.shape, arr1.span()).cos(); + i = 0; + let mut a1: Array = ArrayTrait::::new(); + loop { + let v = *(window_cos.data).at(i); + let r = v * n_0_5; + a1.append(r); + i += 1; + if i >= len { + break (); + }; + }; + let window1 = TensorTrait::::new(ni.shape, a1.span()); + + let mut arr2: Array = ArrayTrait::::new(); + i = 0; + loop { + let v = *(ni.data).at(i); + let r = v * (PI * two * two) / N_1; + arr2.append(r); + i += 1; + if i >= len { + break (); + }; + }; + let window_cos_2 = TensorTrait::::new(ni.shape, arr2.span()).cos(); + + let mut a2: Array = ArrayTrait::::new(); + i = 0; + loop { + let v = *(window_cos_2.data).at(i); + let r = v * beta + alpha; + a2.append(r); + i += 1; + if i >= len { + break (); + }; + }; + let window2 = TensorTrait::::new(ni.shape, a2.span()); + + let mut arr: Array = ArrayTrait::::new(); + i = 0; + loop { + let v1 = *(window1.data).at(i); + let v2 = *(window2.data).at(i); + let r = v1 + v2; + arr.append(r); + i += 1; + if i >= len { + break (); + }; + }; + return TensorTrait::::new(ni.shape, arr.span()); +} diff --git a/tests/nodes.cairo b/tests/nodes.cairo index cec69c81a..3967ad954 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -945,3 +945,5 @@ mod hann_window_fp8x23; mod hann_window_fp16x16; mod 
hamming_window_fp16x16; mod hamming_window_fp8x23; +mod blackman_window_fp16x16; +mod blackman_window_fp8x23; diff --git a/tests/nodes/blackman_window_fp16x16.cairo b/tests/nodes/blackman_window_fp16x16.cairo new file mode 100644 index 000000000..a1ca518c0 --- /dev/null +++ b/tests/nodes/blackman_window_fp16x16.cairo @@ -0,0 +1,19 @@ +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::numbers::{FixedTrait, FP16x16}; + +#[test] +#[available_gas(2000000000)] +fn test_blackman_window_fp16x16() { + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::blackman_window(FP16x16 { mag: 196608, sign: false }, Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/blackman_window_fp16x16/output_0.cairo b/tests/nodes/blackman_window_fp16x16/output_0.cairo new file mode 100644 index 000000000..c14ec30b4 --- /dev/null +++ b/tests/nodes/blackman_window_fp16x16/output_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 1, sign: true }); + data.append(FP16x16 { mag: 41288, sign: false }); + data.append(FP16x16 { mag: 41288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/blackman_window_fp8x23.cairo b/tests/nodes/blackman_window_fp8x23.cairo new file mode 100644 index 000000000..1189aafa8 --- /dev/null +++ b/tests/nodes/blackman_window_fp8x23.cairo @@ -0,0 +1,19 @@ +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use 
orion::operators::tensor::FP8x23TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::numbers::{FixedTrait, FP8x23}; + +#[test] +#[available_gas(2000000000)] +fn test_blackman_window_fp8x23() { + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::blackman_window(FP8x23 { mag: 25165824, sign: false }, Option::Some(0)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/blackman_window_fp8x23/output_0.cairo b/tests/nodes/blackman_window_fp8x23/output_0.cairo new file mode 100644 index 000000000..ec37b061f --- /dev/null +++ b/tests/nodes/blackman_window_fp8x23/output_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 1, sign: true }); + data.append(FP8x23 { mag: 8388605, sign: false }); + data.append(FP8x23 { mag: 1, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} From 39d79b0812f6eb4a48116b9b125e8af286159a6d Mon Sep 17 00:00:00 2001 From: Kazeem Hakeem Date: Sun, 4 Feb 2024 17:42:02 +0100 Subject: [PATCH 21/46] Update gather_nd_u32_default.cairo --- tests/nodes/gather_nd_u32_default.cairo | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/nodes/gather_nd_u32_default.cairo b/tests/nodes/gather_nd_u32_default.cairo index 7b87263e8..5893b5017 100644 --- a/tests/nodes/gather_nd_u32_default.cairo +++ b/tests/nodes/gather_nd_u32_default.cairo @@ -7,7 +7,6 @@ use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; use core::array::{ArrayTrait, SpanTrait}; -use 
orion::operators::tensor::U32Tensor; use orion::operators::tensor::U32TensorPartialEq; #[test] From 9998426380ecbd453d319276ed3ef210e9fdf747 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Mon, 5 Feb 2024 05:21:53 +0100 Subject: [PATCH 22/46] feat: conv --- docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 1 + .../operators/neural-network/README.md | 1 + .../operators/neural-network/nn.conv.md | 124 ++ .../operators/neural-network/nn.gemm.md | 11 +- nodegen/node/conv.py | 1079 +++++++++++ src/numbers.cairo | 69 +- .../implementations/fp16x16/core.cairo | 3 +- .../implementations/fp16x16wide/core.cairo | 2 +- .../implementations/fp32x32/core.cairo | 3 +- .../implementations/fp64x64/core.cairo | 3 +- .../implementations/fp8x23/core.cairo | 4 +- .../implementations/fp8x23wide/core.cairo | 2 +- src/operators/nn/core.cairo | 137 ++ src/operators/nn/functional.cairo | 1 + src/operators/nn/functional/conv.cairo | 1593 +++++++++++++++++ .../nn/implementations/nn_fp16x16.cairo | 14 + .../nn/implementations/nn_fp32x32.cairo | 14 + .../nn/implementations/nn_fp64x64.cairo | 14 + .../nn/implementations/nn_fp8x23.cairo | 14 + src/operators/nn/implementations/nn_i32.cairo | 14 + src/operators/nn/implementations/nn_i8.cairo | 14 + src/operators/nn/implementations/nn_u32.cairo | 14 + .../sequence/functional/sequence_at.cairo | 4 +- .../sequence/functional/sequence_erase.cairo | 3 +- .../sequence/functional/sequence_insert.cairo | 4 +- src/operators/tensor/core.cairo | 8 +- src/operators/tensor/helpers.cairo | 2 +- .../tensor/implementations/tensor_i32.cairo | 10 +- .../tensor/implementations/tensor_i8.cairo | 2 +- .../tensor/math/layer_normalization.cairo | 3 +- src/test_helper/tensor/i32.cairo | 3 +- src/test_helper/tensor/i8.cairo | 3 +- tests/nodes.cairo | 11 + tests/nodes/clip_fp16x16_2d.cairo | 6 +- tests/nodes/clip_fp16x16_3d.cairo | 6 +- tests/nodes/clip_fp8x23_2d.cairo | 6 +- tests/nodes/clip_fp8x23_3d.cairo | 6 +- tests/nodes/compress_fp16x16_3d_axis1.cairo | 2 +- 
tests/nodes/compress_fp16x16_3d_axis2.cairo | 2 +- tests/nodes/compress_fp16x16_3d_axis3.cairo | 2 +- tests/nodes/compress_fp16x16_3d_default.cairo | 2 +- tests/nodes/compress_fp16x16_3d_noaxis.cairo | 2 +- tests/nodes/compress_fp8x23_3d_axis1.cairo | 2 +- tests/nodes/compress_fp8x23_3d_axis2.cairo | 2 +- tests/nodes/compress_fp8x23_3d_default.cairo | 2 +- tests/nodes/compress_i32_3d_axis1.cairo | 2 +- tests/nodes/compress_i32_3d_axis2.cairo | 2 +- tests/nodes/compress_i32_3d_default.cairo | 2 +- tests/nodes/compress_i8_3d_axis1.cairo | 2 +- tests/nodes/compress_i8_3d_axis2.cairo | 2 +- tests/nodes/compress_i8_3d_default.cairo | 2 +- tests/nodes/compress_u32_3d_axis1.cairo | 2 +- tests/nodes/compress_u32_3d_axis2.cairo | 2 +- tests/nodes/compress_u32_3d_axis2_2.cairo | 2 +- tests/nodes/compress_u32_3d_axis3.cairo | 2 +- tests/nodes/compress_u32_3d_default.cairo | 2 +- tests/nodes/conv_1D_no_padding.cairo | 32 + tests/nodes/conv_1D_no_padding/input_0.cairo | 19 + tests/nodes/conv_1D_no_padding/input_1.cairo | 17 + tests/nodes/conv_1D_no_padding/output_0.cairo | 17 + tests/nodes/conv_1D_with_padding.cairo | 32 + .../nodes/conv_1D_with_padding/input_0.cairo | 19 + .../nodes/conv_1D_with_padding/input_1.cairo | 17 + .../nodes/conv_1D_with_padding/output_0.cairo | 19 + tests/nodes/conv_2D_with_2_groups.cairo | 32 + .../nodes/conv_2D_with_2_groups/input_0.cairo | 33 + .../nodes/conv_2D_with_2_groups/input_1.cairo | 33 + .../conv_2D_with_2_groups/output_0.cairo | 17 + tests/nodes/conv_2D_with_autopad_same.cairo | 34 + .../conv_2D_with_autopad_same/input_0.cairo | 40 + .../conv_2D_with_autopad_same/input_1.cairo | 24 + .../conv_2D_with_autopad_same/output_0.cairo | 24 + tests/nodes/conv_2D_with_padding.cairo | 32 + .../nodes/conv_2D_with_padding/input_0.cairo | 40 + .../nodes/conv_2D_with_padding/input_1.cairo | 24 + .../nodes/conv_2D_with_padding/output_0.cairo | 24 + ...v_2D_with_strides_asymmetric_padding.cairo | 32 + .../input_0.cairo | 50 + .../input_1.cairo | 24 + 
.../output_0.cairo | 23 + .../conv_2D_with_strides_with_padding.cairo | 32 + .../input_0.cairo | 50 + .../input_1.cairo | 24 + .../output_0.cairo | 27 + tests/nodes/conv_3D_no_padding.cairo | 32 + tests/nodes/conv_3D_no_padding/input_0.cairo | 141 ++ tests/nodes/conv_3D_no_padding/input_1.cairo | 43 + tests/nodes/conv_3D_no_padding/output_0.cairo | 43 + tests/nodes/conv_3D_with_padding.cairo | 32 + .../nodes/conv_3D_with_padding/input_0.cairo | 141 ++ .../nodes/conv_3D_with_padding/input_1.cairo | 43 + .../nodes/conv_3D_with_padding/output_0.cairo | 141 ++ tests/nodes/conv_4D_no_padding.cairo | 32 + tests/nodes/conv_4D_no_padding/input_0.cairo | 98 + tests/nodes/conv_4D_no_padding/input_1.cairo | 33 + tests/nodes/conv_4D_no_padding/output_0.cairo | 33 + tests/nodes/conv_4D_with_padding.cairo | 32 + .../nodes/conv_4D_with_padding/input_0.cairo | 98 + .../nodes/conv_4D_with_padding/input_1.cairo | 33 + .../nodes/conv_4D_with_padding/output_0.cairo | 273 +++ tests/nodes/gather_fp16x16_3d_axis1.cairo | 2 +- tests/nodes/gather_fp16x16_3d_axis2.cairo | 2 +- tests/nodes/gather_fp16x16_3d_default.cairo | 2 +- tests/nodes/gather_fp8x23_3d_axis1.cairo | 2 +- tests/nodes/gather_fp8x23_3d_axis2.cairo | 2 +- tests/nodes/gather_fp8x23_3d_default.cairo | 2 +- tests/nodes/gather_i32_3d_axis1.cairo | 2 +- tests/nodes/gather_i32_3d_axis2.cairo | 2 +- tests/nodes/gather_i32_3d_default.cairo | 2 +- tests/nodes/gather_i8_3d_axis1.cairo | 2 +- tests/nodes/gather_i8_3d_axis2.cairo | 2 +- tests/nodes/gather_i8_3d_default.cairo | 2 +- .../gather_nd_fp16x16_3d_batch_dims1.cairo | 2 +- .../gather_nd_fp16x16_3d_batch_dims2.cairo | 2 +- .../nodes/gather_nd_fp16x16_3d_default.cairo | 2 +- .../gather_nd_fp8x23_3d_batch_dims1.cairo | 2 +- .../gather_nd_fp8x23_3d_batch_dims2.cairo | 2 +- tests/nodes/gather_nd_fp8x23_3d_default.cairo | 2 +- .../nodes/gather_nd_i32_3d_batch_dims1.cairo | 2 +- .../nodes/gather_nd_i32_3d_batch_dims2.cairo | 2 +- tests/nodes/gather_nd_i32_3d_default.cairo | 2 +- 
tests/nodes/gather_nd_i8_3d_batch_dims1.cairo | 2 +- tests/nodes/gather_nd_i8_3d_default.cairo | 2 +- tests/nodes/gather_nd_u32_batch_dims1.cairo | 2 +- tests/nodes/gather_nd_u32_batch_dims2.cairo | 2 +- tests/nodes/gather_nd_u32_default.cairo | 2 +- tests/nodes/gather_u32_3d_axis1.cairo | 2 +- tests/nodes/gather_u32_3d_axis2.cairo | 2 +- tests/nodes/gather_u32_3d_default.cairo | 2 +- tests/nodes/gemm_all_attributes.cairo | 10 +- tests/nodes/gemm_alpha.cairo | 10 +- tests/nodes/gemm_beta.cairo | 10 +- tests/nodes/gemm_default_matrix_bias.cairo | 4 +- tests/nodes/gemm_default_no_bias.cairo | 4 +- tests/nodes/gemm_default_vector_bias.cairo | 4 +- tests/nodes/gemm_transposeA.cairo | 4 +- tests/nodes/gemm_transposeB.cairo | 4 +- tests/nodes/hard_sigmoid_fp16x16.cairo | 4 +- tests/nodes/hard_sigmoid_fp8x23.cairo | 4 +- tests/nodes/is_nan_fp16x16/input_0.cairo | 2 +- ...layer_normalization_3d_axis0_epsilon.cairo | 9 +- ...layer_normalization_3d_axis1_epsilon.cairo | 9 +- ...layer_normalization_3d_axis2_epsilon.cairo | 9 +- ...alization_3d_axis_negative_1_epsilon.cairo | 9 +- ...alization_3d_axis_negative_2_epsilon.cairo | 9 +- ...alization_3d_axis_negative_3_epsilon.cairo | 9 +- .../nodes/layer_normalization_4d_axis0.cairo | 5 +- .../nodes/layer_normalization_4d_axis1.cairo | 5 +- .../nodes/layer_normalization_4d_axis2.cairo | 5 +- .../nodes/layer_normalization_4d_axis3.cairo | 5 +- ...yer_normalization_4d_axis_negative_1.cairo | 5 +- ...yer_normalization_4d_axis_negative_2.cairo | 5 +- ...yer_normalization_4d_axis_negative_3.cairo | 5 +- ...yer_normalization_4d_axis_negative_4.cairo | 5 +- .../layer_normalization_default_axis.cairo | 5 +- tests/nodes/layer_normalization_test.cairo | 5 +- tests/nodes/scatter_fp16x16_3d_axis1.cairo | 8 +- .../nodes/scatter_fp16x16_3d_axis1_add.cairo | 8 +- tests/nodes/scatter_fp16x16_3d_default.cairo | 8 +- tests/nodes/scatter_fp8x23_axis1.cairo | 8 +- tests/nodes/scatter_fp8x23_default.cairo | 8 +- tests/nodes/scatter_fp8x23_mul.cairo | 
8 +- tests/nodes/scatter_i8_axis1.cairo | 8 +- tests/nodes/scatter_i8_axis1_max.cairo | 8 +- tests/nodes/scatter_i8_default.cairo | 8 +- tests/nodes/scatter_u32_add.cairo | 8 +- tests/nodes/scatter_u32_axis1.cairo | 8 +- tests/nodes/scatter_u32_default.cairo | 8 +- tests/nodes/sequence_insert_fp16x16.cairo | 2 +- tests/nodes/sequence_insert_fp8x23.cairo | 2 +- tests/nodes/sequence_insert_i32.cairo | 2 +- tests/nodes/sequence_insert_i8.cairo | 2 +- tests/nodes/sequence_insert_u32.cairo | 2 +- tests/nodes/sequence_length_fp16x16.cairo | 4 +- tests/nodes/shrink_hard_fp16x16.cairo | 4 +- tests/nodes/shrink_hard_fp8x23.cairo | 4 +- tests/nodes/shrink_soft_fp16x16.cairo | 6 +- tests/nodes/shrink_soft_fp8x23.cairo | 6 +- tests/nodes/slice_fp16x16_2d.cairo | 8 +- tests/nodes/slice_fp16x16_3d.cairo | 8 +- tests/nodes/slice_fp8x23_2d.cairo | 8 +- tests/nodes/slice_fp8x23_3d.cairo | 8 +- tests/nodes/slice_i32_2d.cairo | 8 +- tests/nodes/slice_i32_3d.cairo | 8 +- tests/nodes/slice_i8_2d.cairo | 8 +- tests/nodes/slice_i8_3d.cairo | 8 +- tests/nodes/slice_u32_2d.cairo | 8 +- tests/nodes/slice_u32_3d.cairo | 8 +- tests/nodes/where_fp16x16.cairo | 2 +- tests/nodes/where_fp16x16_broadcast.cairo | 2 +- tests/nodes/where_fp8x23.cairo | 2 +- tests/nodes/where_fp8x23_broadcast.cairo | 2 +- tests/nodes/where_i32.cairo | 2 +- tests/nodes/where_i32_broadcast.cairo | 2 +- tests/nodes/where_i8.cairo | 2 +- tests/nodes/where_i8_broadcast.cairo | 2 +- tests/nodes/where_u32.cairo | 2 +- tests/nodes/where_u32_broadcast.cairo | 2 +- tests/operators/qlinear_add_test.cairo | 70 +- tests/operators/qlinear_concat_test.cairo | 77 +- tests/operators/qlinear_leakyrelu_test.cairo | 10 +- tests/operators/qlinear_matmul_test.cairo | 76 +- tests/operators/qlinear_mul_test.cairo | 74 +- 204 files changed, 5570 insertions(+), 475 deletions(-) create mode 100644 docs/framework/operators/neural-network/nn.conv.md create mode 100644 nodegen/node/conv.py create mode 100644 src/operators/nn/functional/conv.cairo 
create mode 100644 tests/nodes/conv_1D_no_padding.cairo create mode 100644 tests/nodes/conv_1D_no_padding/input_0.cairo create mode 100644 tests/nodes/conv_1D_no_padding/input_1.cairo create mode 100644 tests/nodes/conv_1D_no_padding/output_0.cairo create mode 100644 tests/nodes/conv_1D_with_padding.cairo create mode 100644 tests/nodes/conv_1D_with_padding/input_0.cairo create mode 100644 tests/nodes/conv_1D_with_padding/input_1.cairo create mode 100644 tests/nodes/conv_1D_with_padding/output_0.cairo create mode 100644 tests/nodes/conv_2D_with_2_groups.cairo create mode 100644 tests/nodes/conv_2D_with_2_groups/input_0.cairo create mode 100644 tests/nodes/conv_2D_with_2_groups/input_1.cairo create mode 100644 tests/nodes/conv_2D_with_2_groups/output_0.cairo create mode 100644 tests/nodes/conv_2D_with_autopad_same.cairo create mode 100644 tests/nodes/conv_2D_with_autopad_same/input_0.cairo create mode 100644 tests/nodes/conv_2D_with_autopad_same/input_1.cairo create mode 100644 tests/nodes/conv_2D_with_autopad_same/output_0.cairo create mode 100644 tests/nodes/conv_2D_with_padding.cairo create mode 100644 tests/nodes/conv_2D_with_padding/input_0.cairo create mode 100644 tests/nodes/conv_2D_with_padding/input_1.cairo create mode 100644 tests/nodes/conv_2D_with_padding/output_0.cairo create mode 100644 tests/nodes/conv_2D_with_strides_asymmetric_padding.cairo create mode 100644 tests/nodes/conv_2D_with_strides_asymmetric_padding/input_0.cairo create mode 100644 tests/nodes/conv_2D_with_strides_asymmetric_padding/input_1.cairo create mode 100644 tests/nodes/conv_2D_with_strides_asymmetric_padding/output_0.cairo create mode 100644 tests/nodes/conv_2D_with_strides_with_padding.cairo create mode 100644 tests/nodes/conv_2D_with_strides_with_padding/input_0.cairo create mode 100644 tests/nodes/conv_2D_with_strides_with_padding/input_1.cairo create mode 100644 tests/nodes/conv_2D_with_strides_with_padding/output_0.cairo create mode 100644 tests/nodes/conv_3D_no_padding.cairo 
create mode 100644 tests/nodes/conv_3D_no_padding/input_0.cairo create mode 100644 tests/nodes/conv_3D_no_padding/input_1.cairo create mode 100644 tests/nodes/conv_3D_no_padding/output_0.cairo create mode 100644 tests/nodes/conv_3D_with_padding.cairo create mode 100644 tests/nodes/conv_3D_with_padding/input_0.cairo create mode 100644 tests/nodes/conv_3D_with_padding/input_1.cairo create mode 100644 tests/nodes/conv_3D_with_padding/output_0.cairo create mode 100644 tests/nodes/conv_4D_no_padding.cairo create mode 100644 tests/nodes/conv_4D_no_padding/input_0.cairo create mode 100644 tests/nodes/conv_4D_no_padding/input_1.cairo create mode 100644 tests/nodes/conv_4D_no_padding/output_0.cairo create mode 100644 tests/nodes/conv_4D_with_padding.cairo create mode 100644 tests/nodes/conv_4D_with_padding/input_0.cairo create mode 100644 tests/nodes/conv_4D_with_padding/input_1.cairo create mode 100644 tests/nodes/conv_4D_with_padding/output_0.cairo diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 649e411f9..b14884ffd 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -160,6 +160,7 @@ * [nn.hard\_sigmoid](framework/operators/neural-network/nn.hard\_sigmoid.md) * [nn.thresholded\_relu](framework/operators/neural-network/nn.thresholded\_relu.md) * [nn.gemm](framework/operators/neural-network/nn.gemm.md) + * [nn.conv](framework/operators/neural-network/nn.conv.md) * [Machine Learning](framework/operators/machine-learning/README.md) * [Tree Ensemble Classifier](framework/operators/machine-learning/tree-ensemble-classifier/README.md) * [tree\_ensemble\_classifier.predict](framework/operators/machine-learning/tree-ensemble-classifier/tree\_ensemble\_classifier.predict.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index 0e0e5be17..2a3adb9ee 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -108,5 +108,6 @@ You can see below the list of current supported ONNX Operators: | 
[Erf](operators/tensor/tensor.erf.md) | :white\_check\_mark: | | [Compress](operators/tensor/tensor.compress.md) | :white\_check\_mark: | | [Layer_normalization](operators/tensor/tensor.layer_normalization.md) | :white\_check\_mark: | +| [Conv](operators/tensor/tensor.conv.md) | :white\_check\_mark: | Current Operators support: **97/156 (62%)** diff --git a/docs/framework/operators/neural-network/README.md b/docs/framework/operators/neural-network/README.md index 8343d0c90..175c76f44 100644 --- a/docs/framework/operators/neural-network/README.md +++ b/docs/framework/operators/neural-network/README.md @@ -35,4 +35,5 @@ Orion supports currently these `NN` types. | [`nn.hard_sigmoid`](nn.hard\_sigmoid.md) | Applies the Hard Sigmoid function to an n-dimensional input tensor. | | [`nn.thresholded_relu`](nn.thresholded\_relu.md) | Performs the thresholded relu activation function element-wise. | | [`nn.gemm`](nn.gemm.md) | Performs General Matrix multiplication. | +| [`nn.conv`](nn.conv.md) | Performs the convolution of the input data tensor and weight tensor. | diff --git a/docs/framework/operators/neural-network/nn.conv.md b/docs/framework/operators/neural-network/nn.conv.md new file mode 100644 index 000000000..91d05c369 --- /dev/null +++ b/docs/framework/operators/neural-network/nn.conv.md @@ -0,0 +1,124 @@ + +# NNTrait::conv + +```rust + conv( + X: @Tensor, + W: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor +``` + +The convolution operator consumes an input tensor and a filter (input weight tensor), and computes the output. + +## Args + +* `X`(`@Tensor`) - Input data tensor, has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W if 2D, otherwise the size is (N x C x D1 x D2 ... x Dn).
+* `W`(`@Tensor`) - The weight tensor, has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps if 2D, for more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x ... x kn). +* `B`(`Option<@Tensor>`) - Optional 1D bias to be added to the convolution, has size of M. +* `auto_pad`(`Option`) - Default is NOTSET, auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. +* `dilations`(`Option>`) - Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis. +* `group`(`Option`) - Default is 1, number of groups input channels and output channels are divided into. +* `kernel_shape`(`Option>`) - The shape of the convolution kernel. If not present, should be inferred from input W. +* `pads`(`Option>`) - Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis. +* `strides`(`Option>`) - Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. + +## Returns + +A `Tensor` that contains the result of the convolution. 
+ +## Examples + +```rust +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::nn::FP16x16NN; +use orion::numbers::FP16x16; +use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; + + +fn example_conv_transpose() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + let W = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false 
}); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + let mut X = TensorTrait::new(shape.span(), data.span()); + + return NNTrait::conv( + @X, + @W, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![3, 3].span()), + Option::Some(array![1, 1, 1, 1].span()), + Option::None, + ); +} + +>>> [ + [ + [ + [12.0, 21.0, 27.0, 33.0, 24.0], + [33.0, 54.0, 63.0, 72.0, 51.0], + [63.0, 99.0, 108.0, 117.0, 81.0], + [93.0, 144.0, 153.0, 162.0, 111.0], + [72.0, 111.0, 117.0, 123.0, 84.0], + ] + ] + ] + +```` diff --git a/docs/framework/operators/neural-network/nn.gemm.md b/docs/framework/operators/neural-network/nn.gemm.md index 4ac734d73..b89d884fc 100644 --- a/docs/framework/operators/neural-network/nn.gemm.md +++ b/docs/framework/operators/neural-network/nn.gemm.md @@ -1,4 +1,4 @@ -# nn.gemm +# NNTrait::gemm ```rust fn gemm( @@ -12,18 +12,19 @@ ) -> Tensor; ``` -Performs General Matrix multiplication: [https://en.wikipedia.org/wiki/Basic\_Linear\_Algebra\_Subprograms#Level\_3](https://en.wikipedia.org/wiki/Basic\_Linear\_Algebra\_Subprograms#Level\_3) +Performs General Matrix multiplication: https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 * A' = transpose(A) if transA else A * B' = transpose(B) if transB else B -Compute `Y = alpha * A' * B' + beta * C`, where input tensor A has shape (M, K) or (K, M), input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N). `A` will be transposed before doing the computation if attribute `transA` is `true`, same for `B` and `transB`. 
+Compute `Y = alpha * A' * B' + beta * C`, where input tensor A has shape (M, K) or (K, M), input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N). +`A` will be transposed before doing the computation if attribute `transA` is `true`, same for `B` and `transB`. ## Args * `A`(`Tensor`) - Input tensor A. The shape of `A` should be (M, K) if `transA` is `false`, or (K, M) if `transA` is `true`. * `B`(`Tensor`) - Input tensor B. The shape of `B` should be (K, N) if `transB` is `false`, or (N, K) if `transB` is `true`. -* `C`(`Option>`) - Optional input tensor C. The shape of C should be unidirectional broadcastable to (M, N). +* `C`(`Option>`) - Optional input tensor C. The shape of C should be unidirectional broadcastable to (M, N). * `alpha`(`Option`) - Optional scalar multiplier for the product of input tensors `A * B`. * `beta`(`Option`) - Optional scalar multiplier for input tensor `C`. * `transA`(`bool`) - Whether `A` should be transposed. @@ -63,4 +64,4 @@ A `Tensor` of shape (M, N). 
return y; } >>> tensor of shape [3;5] -``` +```` diff --git a/nodegen/node/conv.py b/nodegen/node/conv.py new file mode 100644 index 000000000..3b05b621d --- /dev/null +++ b/nodegen/node/conv.py @@ -0,0 +1,1079 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait + +import numpy as np + +def r_index_check(r_index, shape_out): + for i in range(len(r_index)): + if r_index[i] >= shape_out[i]: + return False + return True + +def stride(arr): + stride = np.zeros(len(arr)) + acc = 1 + for i in range(len(arr)): + stride[i] = acc + acc *= arr[-(i + 1)] + return np.flip(stride) + +def conv( + X, + W, + B=None, + auto_pad=None, + dilations=None, + group=None, + kernel_shape=None, + pads=None, + strides=None, +): + if dilations is None: + dilations = [1 for s in X.shape[2:]] + if kernel_shape is None: + kernel_shape = W.shape[2:] + if pads is None: + pads = [0 for s in X.shape[2:]] * 2 + if strides is None: + strides = [1 for s in X.shape[2:]] + + if X.shape[1] != W.shape[1] * group or W.shape[0] % group != 0: + raise ValueError( + f"Shape inconsistencies, X.shape={X.shape}, W.shape={W.shape}, group={group}, " + f"W should be {(W.shape[0], X.shape[1] // group, np.prod(W.shape[1:]) // X.shape[1] * group)}." + ) + if group > 1: + res = [] + td = 0 + mg = W.shape[0] // group + dw = W.shape[1] + + for b in range(X.shape[0]): + for g in range(group): + gx = X[b : b + 1, g * dw : (g + 1) * dw] + gw = W[g * mg : (g + 1) * mg] + try: + cv = conv( + gx, + gw, + None, + auto_pad, + dilations, + 1, + kernel_shape, + pads, + strides, + ) + except (ValueError, RuntimeError) as e: + raise ValueError( + f"Shape inconsistencies, X.shape={X.shape}, W.shape={W.shape}, group={g}/{group}, " + f"gx.shape={gx.shape}, gw.shape={gw.shape}, auto_pad={auto_pad}, " + f"dilations={dilations}, kernel_shape={kernel_shape}, pads={pads}, " + f"strides={strides}." 
+ ) from e + if b == 0: + td += cv.shape[1] + res.append((b, cv)) + + new_shape = [X.shape[0], *list(res[0][1].shape[1:])] + + new_shape[1] = td + final = np.zeros(tuple(new_shape), dtype=res[0][1].dtype) + p = 0 + for b, cv in res: + final[b : b + 1, p : p + cv.shape[1]] = cv + p += cv.shape[1] + if p >= final.shape[1]: + p = 0 + if B is not None: + new_shape = [1 for s in final.shape] + new_shape[1] = B.shape[0] + b = B.reshape(tuple(new_shape)) + final += b + return final + + if dilations[0] != 1 or min(dilations) != max(dilations): + # Let's compute the dilated kernel. + nd = len(dilations) + new_kernel_shape = [] + new_shape = list(W.shape[:-nd]) + for i, d in enumerate(dilations): + di = len(W.shape) - nd + i + new_shape.append(W.shape[di] + (W.shape[di] - 1) * (d - 1)) + new_kernel_shape.append(kernel_shape[i] + (kernel_shape[i] - 1) * (d - 1)) + new_w = np.zeros(tuple(new_shape), dtype=W.dtype) + indices = [slice(0, new_w.shape[0]), slice(0, new_w.shape[1])] + for i, d in enumerate(dilations): + di = len(W.shape) - nd + i + indices.append(slice(0, new_w.shape[di], d)) + new_w[tuple(indices)] = W + W = new_w + kernel_shape = new_kernel_shape + + if auto_pad in {"SAME_LOWER", "SAME_UPPER", "VALID"}: + head = [] + tail = [] + for i in range(len(X.shape) - 2): + d = X.shape[i] + target_size = (d + strides[i] - 1) // strides[i] + pad_needed = (target_size - 1) * strides[i] + kernel_shape[i] - d + if auto_pad == "SAME_LOWER": + pad_head = (pad_needed + 1) // 2 + else: + pad_head = pad_needed // 2 + pad_tail = pad_needed - pad_head + head.append(pad_head) + tail.append(pad_tail) + pads = head + tail + + if len(X.shape) == 3: + sN, sC, sH = X.shape + # M, C_group, kH, kW = W.shape + (kh,) = kernel_shape + (sth,) = strides + + h_out = int(((sH - kh + pads[0] + pads[1]) / sth) + 1) + + h0 = pads[0] + oh = -1 * (kh % 2) + bh = -h0 + eh = h_out * sth + res = np.zeros((X.shape[0], W.shape[0], h_out)) # type: ignore[assignment] + if B is not None: + res[:, :, :] += 
B.reshape((1, -1, 1)) # type: ignore + + for n in range(0, sN): + for nw in range(W.shape[0]): + for c in range(0, sC): + w = W[nw : nw + 1, c : c + 1] + for io in range(bh, eh, sth): + hr = (io - bh) // sth + if hr >= h_out: + continue + i = io + kh % 2 + ih1, ih2 = max(0, i + oh), min(i + oh + kh, sH) + img = X[n : n + 1, c : c + 1, ih1:ih2] + if img.shape != w.shape: + jh1, jh2 = max(-oh - i, 0), min(kh, kh + sH - (i + oh + kh)) + w_ = w[:1, :1, jh1:jh2] + + if img.shape != w_.shape: + raise RuntimeError( + f"Unexpected shape {img.shape} != {w_.shape}, oh={oh}, " + f"i={i}, kh={kh}, sH={sH}, sth={sth}." + ) + s = np.dot(img.reshape((1, -1)), w_.reshape((-1, 1)))[ + 0, 0 + ] # (img * w_).sum() + else: + s = np.dot(img.reshape((1, -1)), w.reshape((-1, 1)))[ + 0, 0 + ] # (img * w).sum() + res[n, nw, hr] += s # type: ignore + + return res + + if len(X.shape) == 4: + sN, sC, sH, sW = X.shape + # M, C_group, kH, kW = W.shape + kh, kw = kernel_shape + sth, stw = strides + + h_out = int(((sH - kh + pads[0] + pads[2]) / sth) + 1) + w_out = int(((sW - kw + pads[1] + pads[3]) / stw) + 1) + + h0, w0 = pads[0], pads[1] + oh, ow = -1 * (kh % 2), -1 * (kw % 2) + bh, bw = -h0, -w0 + eh, ew = h_out * sth, w_out * stw + res = np.zeros((X.shape[0], W.shape[0], h_out, w_out)) # type: ignore[assignment] + if B is not None: + res[:, :, :, :] = B.reshape((1, -1, 1, 1)) # type: ignore + + for n in range(0, sN): + for nw in range(W.shape[0]): + for c in range(0, sC): + w = W[nw : nw + 1, c : c + 1] + for io in range(bh, eh, sth): + hr = (io - bh) // sth + if hr >= h_out: + continue + i = io + kh % 2 + ih1, ih2 = max(0, i + oh), min(i + oh + kh, sH) + for jo in range(bw, ew, stw): + wr = (jo - bw) // stw + if wr >= w_out: + continue + + j = jo + kw % 2 + iw1, iw2 = max(0, j + ow), min(j + ow + kw, sW) + img = X[n : n + 1, c : c + 1, ih1:ih2, iw1:iw2] + + if img.shape != w.shape: + jh1, jh2 = max(-oh - i, 0), min( + kh, kh + sH - (i + oh + kh) + ) + jw1, jw2 = max(-ow - j, 0), min( + kw, 
kw + sW - (j + ow + kw) + ) + w_ = w[:1, :1, jh1:jh2, jw1:jw2] + if img.shape != w_.shape: + raise RuntimeError( + f"Unexpected shape {img.shape} != {w_.shape}, oh={oh}, ow={ow}, " + f"i={i}, j={j}, kh={kh}, kw={kw}, sH={sH}, sW={sW}, sth={sth}, stw={stw}." + ) + s = np.dot(img.reshape((1, -1)), w_.reshape((-1, 1)))[ + 0, 0 + ] # (img * w_).sum() + else: + s = np.dot(img.reshape((1, -1)), w.reshape((-1, 1)))[ + 0, 0 + ] # (img * w).sum() + res[n, nw, hr, wr] += s # type: ignore + + return res + + if len(X.shape) == 5: + sN, sC, sH, sW, sZ = X.shape + kh, kw, kz = kernel_shape + sth, stw, stz = strides + + h_out = int(((sH - kh + pads[0] + pads[3]) / sth) + 1) + w_out = int(((sW - kw + pads[1] + pads[4]) / stw) + 1) + z_out = int(((sZ - kz + pads[2] + pads[5]) / stz) + 1) + + h0, w0, z0 = pads[0], pads[1], pads[2] + oh, ow, oz = -1 * (kh % 2), -1 * (kw % 2), -1 * (kz % 2) + bh, bw, bz = -h0, -w0, -z0 + eh, ew, ez = h_out * sth, w_out * stw, z_out * stz + res = np.zeros((X.shape[0], W.shape[0], h_out, w_out, z_out)) # type: ignore[assignment] + if B is not None: + res[:, :, :, :, :] = B.reshape((1, -1, 1, 1, 1)) # type: ignore + + for n in range(0, sN): + for nw in range(W.shape[0]): + for c in range(0, sC): + w = W[nw : nw + 1, c : c + 1] + for io in range(bh, eh, sth): + hr = (io - bh) // sth + if hr >= h_out: + continue + i = io + kh % 2 + ih1, ih2 = max(0, i + oh), min(i + oh + kh, sH) + for jo in range(bw, ew, stw): + wr = (jo - bw) // stw + if wr >= w_out: + continue + j = jo + kw % 2 + iw1, iw2 = max(0, j + ow), min(j + ow + kw, sW) + for zo in range(bz, ez, stz): + zr = (zo - bz) // stz + if zr >= z_out: + continue + z = zo + kz % 2 + iz1, iz2 = max(0, z + oz), min(z + oz + kz, sZ) + img = X[n : n + 1, c : c + 1, ih1:ih2, iw1:iw2, iz1:iz2] + + ### ICI + if img.shape != w.shape: + jh1, jh2 = max(-oh - i, 0), min( + kh, kh + sH - (i + oh + kh) + ) + jw1, jw2 = max(-ow - j, 0), min( + kw, kw + sW - (j + ow + kw) + ) + jz1, jz2 = max(-oz - z, 0), min( + kz, kz + 
sZ - (z + oz + kz) + ) + w_ = w[:1, :1, jh1:jh2, jw1:jw2, jz1:jz2] + if img.shape != w_.shape: + raise RuntimeError( + f"Unexpected shape {img.shape} != {w_.shape}, oh={oh}, ow={ow}, oz={oz}, " + f"i={i}, j={j}, z={z}, kh={kh}, kw={kw}, kz={kz}, " + f"sH={sH}, sW={sW}, sZ={sZ}, sth={sth}, stw={stw}, stz={stz}." + ) + + s = np.dot( + img.reshape((1, -1)), w_.reshape((-1, 1)) + )[ + 0, 0 + ] + else: + + s = np.dot( + img.reshape((1, -1)), w.reshape((-1, 1)) + )[ + 0, 0 + ] + res[n, nw, hr, wr, zr] += s # type: ignore + + return res + + else: + nd = len(X.shape[2:]) + sN, sC = X.shape[:2] + + x_stride = stride(X.shape) + w_stride = stride(W.shape) + x_flatten = X.reshape(int(x_stride[0] * X.shape[0])) + + + shape_out = [int(((X.shape[2+i] - kernel_shape[i] + pads[i] + pads[i + nd]) / strides[i]) + 1) for i in range(nd)] + o_index = [-1 * (kernel_shape[i] % 2) for i in range(nd)] + b_index = [-pads[i] for i in range(nd)] + e_index = [shape_out[i] * strides[i] for i in range(nd)] + + + range_len = [e_index[i] - b_index[i] / strides[i] for i in range(nd)] + range_stride = stride(range_len) + + res_shape = [X.shape[0], W.shape[0]] + shape_out + res = np.zeros(res_shape) + + res_strides = stride(res_shape) + if B is not None: + res[:, :, :, :, :] = B.reshape((1, -1, 1, 1, 1)) # type: ignore + + for n in range(0, sN): + for nw in range(W.shape[0]): + for c in range(0, sC): + w = W[nw : nw + 1, c : c + 1] + for i in range(int(range_len[0] * range_stride[0])): + flatten_index = i + io_index = np.zeros(nd) + r_index = np.zeros(nd) + for nx in range(nd): + n_index, rem = divmod(flatten_index, range_stride[nx]) + flatten_index = rem + io_index[nx] = n_index * strides[nx] + b_index[nx] + r_index[nx] = n_index + if r_index_check(r_index, shape_out): + indices = [io_index[nx] + (kernel_shape[nx] % 2) for nx in range(nd)] + i1_index = [max(0, indices[nx] + o_index[nx]) for nx in range(nd)] + i2_index = [min(X.shape[2 + nx], indices[nx] + o_index[nx] + kernel_shape[nx]) for nx in 
range(nd)] + idiff_index = [int(i2_index[nx] - i1_index[nx]) for nx in range(nd - 1)] + + i_stride = stride(idiff_index) + img = [] + for ii in range(int(i_stride[0] * idiff_index[0])): + flatten_index = ii + start = n * x_stride[0] + c * x_stride[1] + for nx in range(nd - 1): + ii_index, rem = divmod(flatten_index, i_stride[nx]) + flatten_index = rem + start += (i1_index[nx] + ii_index) * x_stride[2 + nx] + start += i1_index[nd-1] + end = start + (i2_index[nd-1] - i1_index[nd-1]) + img.append(x_flatten[int(start):int(end)]) + img_shape = [1, 1] + idiff_index + w = w.reshape(np.prod(kernel_shape)) + if len(img) != len(w): + j1_index = [max(0, -indices[nx] - o_index[nx]) for nx in range(nd)] + j2_index = [min(X.shape[2 + nx] - indices[nx] - o_index[nx], kernel_shape[nx]) for nx in range(nd)] + jdiff_index = [j2_index[nx] - j1_index[nx] for nx in range(nd - 1)] + + + w_ = [] + + j_stride = stride(jdiff_index) + + for jj in range(int(j_stride[0] * jdiff_index[0])): + flatten_index = jj + start = 0 + for nx in range(nd): + jj_index, rem = divmod(flatten_index, range_stride[nx]) + flatten_index = rem + start += (j1_index[nx] + jj_index) * kernel_shape[nx] + w_.append(w[int(start + j1_index[-1]):int(start + j1_index[-1] + j2_index[nd-1] - j1_index[nd-1])]) + + + img = np.array(img) + s = np.dot( + np.array(img).reshape((1, -1)), np.array(w_).reshape((-1, 1)) + )[ + 0, 0 + ] + else: + img = np.array(img) + s = np.dot( + np.array(img).reshape((1, -1)), np.array(w_).reshape((-1, 1)) + )[ + 0, 0 + ] + + res_index = [] + for nx in range(nd): + res_index.append(int(r_index[nx])) + + index = tuple([n, nw]) + tuple(res_index) + res[index] += s # type: ignore + return res + + + +class Conv(RunAll): + + @staticmethod + def export_conv_1D_no_padding() -> None: + x = np.array( + [ + [ + [ + 0.0, 1.0, 2.0, 3.0, 4.0 + ] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [ + 1.0, 1.0, 1.0 + ] + ] + ] + ).astype(np.float32) + + + y = conv(x, w, group = 1) + + x = 
Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_1D_no_padding" + func_sig = "NNTrait::conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + @staticmethod + def export_conv_1D_with_padding() -> None: + x = np.array( + [ + [ + [ + 0.0, 1.0, 2.0, 3.0, 4.0 + ] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [ + 1.0, 1.0, 1.0 + ] + ] + ] + ).astype(np.float32) + + + y = conv(x, w, group = 1, pads=[1, 1]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_1D_with_padding" + func_sig = "NNTrait::conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![1, 1].span())," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + @staticmethod + def export_conv_2D_no_padding() -> None: + x = np.array( + [ + [ + [ + [0.0, 1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0, 9.0], + [10.0, 11.0, 12.0, 13.0, 14.0], + [15.0, 16.0, 17.0, 18.0, 19.0], + [20.0, 21.0, 22.0, 23.0, 24.0], + ] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [ + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ] + ] + ] + ).astype(np.float32) + + + y = conv(x, w, group = 1, kernel_shape=[3, 3],pads=[0, 0, 0, 0],) + x = 
Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_2D_with_padding" + func_sig = "NNTrait::conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + @staticmethod + def export_con_2D_with_padding() -> None: + x = np.array( + [ + [ + [ + [0.0, 1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0, 9.0], + [10.0, 11.0, 12.0, 13.0, 14.0], + [15.0, 16.0, 17.0, 18.0, 19.0], + [20.0, 21.0, 22.0, 23.0, 24.0], + ] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [ + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ] + ] + ] + ).astype(np.float32) + + y = conv(x, w, group = 1, kernel_shape=[3, 3],pads=[1, 1, 1, 1],) + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_2D_with_padding" + func_sig = "NNTrait::conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![1, 1, 1, 1].span())," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + + @staticmethod + def export_conv_3D_no_padding() -> None: + x = np.array( + [ + [ + [ + [ + [ 0, 1, 2, 3, 4],[ 5, 6, 7, 8, 9],[ 10, 11, 12, 13, 14],[ 15, 16, 17, 18, 19],[ 20, 21, 22, 23, 24] + ], + [ + [ 25, 26, 27, 28, 29],[ 30, 31, 32, 33, 34],[ 35, 36, 37, 38, 39],[ 40, 41, 42, 43, 
44],[ 45, 46, 47, 48, 49] + ], + [ + [ 50, 51, 52, 53, 54],[ 55, 56, 57, 58, 59],[ 60, 61, 62, 63, 64],[ 65, 66, 67, 68, 69],[ 70, 71, 72, 73, 74] + ], + [ + [ 75, 76, 77, 78, 79],[ 80, 81, 82, 83, 84],[ 85, 86, 87, 88, 89],[ 90, 91, 92, 93, 94],[ 95, 96, 97, 98, 99] + ], + [ + [100, 101, 102, 103, 104],[105, 106, 107, 108, 109],[110, 111, 112, 113, 114],[115, 116, 117, 118, 119],[120, 121, 122, 123, 124] + ] + ] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [ + [ + [1., 1., 1.],[1., 1., 1.],[1., 1., 1.] + ], + [ + [1., 1., 1.],[1., 1., 1.],[1., 1., 1.] + ], + [ + [1., 1., 1.],[1., 1., 1.],[1., 1., 1.] + ] + ] + ] + ] + ).astype(np.float32) + + y = conv(x, w, group = 1) + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_3D_no_padding" + func_sig = "NNTrait::conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + @staticmethod + def export_conv_3D_with_padding() -> None: + x = np.array( + [ + [ + [ + [ + [ 0, 1, 2, 3, 4],[ 5, 6, 7, 8, 9],[ 10, 11, 12, 13, 14],[ 15, 16, 17, 18, 19],[ 20, 21, 22, 23, 24] + ], + [ + [ 25, 26, 27, 28, 29],[ 30, 31, 32, 33, 34],[ 35, 36, 37, 38, 39],[ 40, 41, 42, 43, 44],[ 45, 46, 47, 48, 49] + ], + [ + [ 50, 51, 52, 53, 54],[ 55, 56, 57, 58, 59],[ 60, 61, 62, 63, 64],[ 65, 66, 67, 68, 69],[ 70, 71, 72, 73, 74] + ], + [ + [ 75, 76, 77, 78, 79],[ 80, 81, 82, 83, 84],[ 85, 86, 87, 88, 89],[ 90, 91, 92, 93, 94],[ 95, 96, 97, 98, 99] + ], + [ + [100, 101, 102, 103, 104],[105, 106, 107, 108, 109],[110, 111, 112, 113, 114],[115, 116, 117, 118, 119],[120, 121, 122, 123, 124] + ] 
+ ] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [ + [ + [1., 1., 1.],[1., 1., 1.],[1., 1., 1.] + ], + [ + [1., 1., 1.],[1., 1., 1.],[1., 1., 1.] + ], + [ + [1., 1., 1.],[1., 1., 1.],[1., 1., 1.] + ] + ] + ] + ] + ).astype(np.float32) + + y = conv(x, w, group = 1, pads=[1, 1, 1, 1, 1, 1]) + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_3D_with_padding" + func_sig = "NNTrait::conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![1, 1, 1, 1, 1, 1].span())," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + @staticmethod + def export_conv_4D_no_padding() -> None: + x = np.array( + [ + [ + [ + [ + [ + [ 0, 1, 2],[ 3, 4, 5],[ 6, 7, 8] + ], + [ + [ 9, 10, 11],[12, 13, 14],[15, 16, 17] + ], + [ + [18, 19, 20],[21, 22, 23],[24, 25, 26] + ] + ], + [ + [ + [27, 28, 29],[30, 31, 32],[33, 34, 35] + ], + [ + [36, 37, 38],[39, 40, 41],[42, 43, 44] + ], + [ + [45, 46, 47],[48, 49, 50],[51, 52, 53] + ] + ], + [ + [ + [54, 55, 56],[57, 58, 59],[60, 61, 62] + ], + [ + [63, 64, 65],[66, 67, 68],[69, 70, 71] + ], + [ + [72, 73, 74],[75, 76, 77],[78, 79, 80] + ] + ] + ] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [ + [ + [ + [1., 1.],[1., 1.] + ], + [ + [1., 1.],[1., 1.] + ] + ], + [ + [ + [1., 1.],[1., 1.] + ], + [ + [1., 1.],[1., 1.] 
+ ] + ] + ] + ] + ] + ).astype(np.float32) + + y = conv(x, w, group = 1) + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_4D_no_padding" + func_sig = "NNTrait::conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + @staticmethod + def export_conv_4D_with_padding() -> None: + x = np.array( + [ + [ + [ + [ + [ + [ 0, 1, 2],[ 3, 4, 5],[ 6, 7, 8] + ], + [ + [ 9, 10, 11],[12, 13, 14],[15, 16, 17] + ], + [ + [18, 19, 20],[21, 22, 23],[24, 25, 26] + ] + ], + [ + [ + [27, 28, 29],[30, 31, 32],[33, 34, 35] + ], + [ + [36, 37, 38],[39, 40, 41],[42, 43, 44] + ], + [ + [45, 46, 47],[48, 49, 50],[51, 52, 53] + ] + ], + [ + [ + [54, 55, 56],[57, 58, 59],[60, 61, 62] + ], + [ + [63, 64, 65],[66, 67, 68],[69, 70, 71] + ], + [ + [72, 73, 74],[75, 76, 77],[78, 79, 80] + ] + ] + ] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [ + [ + [ + [1., 1.],[1., 1.] + ], + [ + [1., 1.],[1., 1.] + ] + ], + [ + [ + [1., 1.],[1., 1.] + ], + [ + [1., 1.],[1., 1.] 
+ ] + ] + ] + ] + ] + ).astype(np.float32) + + y = conv(x, w, group = 1, pads=[1, 1, 1, 1, 1, 1, 1, 1]) + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) # + + name = "conv_4D_with_padding" + func_sig = "NNTrait::conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![1, 1, 1, 1, 1, 1, 1, 1].span())," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + + + @staticmethod + def export_conv_with_autopad_same() -> None: + x = np.array( + [ + [ + [ + [0.0, 1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0, 9.0], + [10.0, 11.0, 12.0, 13.0, 14.0], + [15.0, 16.0, 17.0, 18.0, 19.0], + [20.0, 21.0, 22.0, 23.0, 24.0], + ] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [ + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ] + ] + ] + ).astype(np.float32) + + y = conv(x, w, group = 1, kernel_shape=[3, 3],auto_pad="SAME_LOWER",strides = [2, 2]) + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_2D_with_autopad_same" + func_sig = "NNTrait::conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::Some(AUTO_PAD::SAME_LOWER)," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![3, 3].span())," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span()))" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + @staticmethod + def export_conv_with_strides_asymmetric_padding() 
-> None: + x = np.array( + [ + [ + [ + [0.0, 1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0, 9.0], + [10.0, 11.0, 12.0, 13.0, 14.0], + [15.0, 16.0, 17.0, 18.0, 19.0], + [20.0, 21.0, 22.0, 23.0, 24.0], + [25.0, 26.0, 27.0, 28.0, 29.0], + [30.0, 31.0, 32.0, 33.0, 34.0], + ] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [ + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ] + ] + ] + ).astype(np.float32) + + y = conv(x, w, group = 1, kernel_shape=[3, 3],pads=[1, 0, 1, 0],strides = [2, 2]) + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_2D_with_strides_asymmetric_padding" + func_sig = "NNTrait::conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![3, 3].span())," + func_sig += "Option::Some(array![1, 0, 1, 0].span())," + func_sig += "Option::Some(array![2, 2].span()))" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + @staticmethod + def export_conv_with_strides_with_padding() -> None: + x = np.array( + [ + [ + [ + [0.0, 1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0, 9.0], + [10.0, 11.0, 12.0, 13.0, 14.0], + [15.0, 16.0, 17.0, 18.0, 19.0], + [20.0, 21.0, 22.0, 23.0, 24.0], + [25.0, 26.0, 27.0, 28.0, 29.0], + [30.0, 31.0, 32.0, 33.0, 34.0], + ] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [ + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ] + ] + ] + ).astype(np.float32) + + y = conv(x, w, group = 1, kernel_shape=[3, 3],pads=[1, 1, 1, 1],strides = [2, 2]) + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + 
+ name = "conv_2D_with_strides_with_padding" + func_sig = "NNTrait::conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![3, 3].span())," + func_sig += "Option::Some(array![1, 1, 1, 1].span())," + func_sig += "Option::Some(array![2, 2].span()))" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + @staticmethod + def export_conv_with_2_groups() -> None: + x = np.array( + [ + [ + [ + [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], + [ + [9.0, 10.0, 11.0], [12.0, 13.0, 14.0], [15.0, 16.0, 17.0]] + ] + ] + ).astype(np.float32) + w = np.array( + [ + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ], + [ + [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], + ] + ] + ).astype(np.float32) + y = conv(x, w, group = 2) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "conv_2D_with_2_groups" + func_sig = "NNTrait::conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(2)," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w], y, func_sig, name, Trait.NN) + + \ No newline at end of file diff --git a/src/numbers.cairo b/src/numbers.cairo index 936c128e1..1ce8a803d 100644 --- a/src/numbers.cairo +++ b/src/numbers.cairo @@ -2,10 +2,10 @@ mod fixed_point; mod complex_number; use orion::numbers::fixed_point::core::FixedTrait; -use orion::numbers::fixed_point::implementations::fp8x23::core::{ONE as ONE_fp8x23 }; -use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE as ONE_fp16x16 }; -use 
orion::numbers::fixed_point::implementations::fp64x64::core::{ONE as ONE_fp64x64 }; -use orion::numbers::fixed_point::implementations::fp32x32::core::{ONE as ONE_fp32x32 }; +use orion::numbers::fixed_point::implementations::fp8x23::core::{ONE as ONE_fp8x23}; +use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE as ONE_fp16x16}; +use orion::numbers::fixed_point::implementations::fp64x64::core::{ONE as ONE_fp64x64}; +use orion::numbers::fixed_point::implementations::fp32x32::core::{ONE as ONE_fp32x32}; // Common methods from Fixed Point and Signed Integers. trait NumberTrait { @@ -1535,7 +1535,7 @@ impl I8Number of NumberTrait { 0 } fn is_zero(self: i8) -> bool { - self == 0 + self == 0 } fn half() -> i8 { @@ -1571,7 +1571,7 @@ impl I8Number of NumberTrait { } fn max_value() -> i8 { - 127 + 127 } fn min(self: i8, other: i8) -> i8 { @@ -1661,7 +1661,7 @@ impl I8Number of NumberTrait { } fn is_neg_inf(self: i8) -> bool { - self == -127 + self == -127 } fn bitwise_and(lhs: i8, rhs: i8) -> i8 { @@ -1702,7 +1702,7 @@ impl I8Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i8 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -1729,7 +1729,7 @@ impl I8IntoFP8x23 of Into { } let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); - FP8x23 {mag: number_u32 * ONE_fp8x23, sign: number_sign} + FP8x23 { mag: number_u32 * ONE_fp8x23, sign: number_sign } } } @@ -1742,7 +1742,7 @@ impl I8IntoFP16x16 of Into { } let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); - FP16x16 {mag: number_u32 * ONE_fp16x16, sign: number_sign} + FP16x16 { mag: number_u32 * ONE_fp16x16, sign: number_sign } } } @@ -1755,7 +1755,7 @@ 
impl I8IntoFP64x64 of Into { } let number_felt: felt252 = self_positive.into(); let number_u128: u128 = number_felt.try_into().unwrap(); - FP64x64 {mag: number_u128 * ONE_fp64x64, sign: number_sign} + FP64x64 { mag: number_u128 * ONE_fp64x64, sign: number_sign } } } @@ -1768,7 +1768,7 @@ impl I8IntoFP32x32 of Into { } let number_felt: felt252 = self_positive.into(); let number_u128: u64 = number_felt.try_into().unwrap(); - FP32x32 {mag: number_u128 * ONE_fp32x32, sign: number_sign} + FP32x32 { mag: number_u128 * ONE_fp32x32, sign: number_sign } } } @@ -1877,7 +1877,7 @@ impl I16Number of NumberTrait { 0 } fn is_zero(self: i16) -> bool { - self == 0 + self == 0 } fn half() -> i16 { @@ -2003,7 +2003,7 @@ impl I16Number of NumberTrait { } fn is_neg_inf(self: i16) -> bool { - self == -32767 + self == -32767 } fn bitwise_and(lhs: i16, rhs: i16) -> i16 { @@ -2044,7 +2044,7 @@ impl I16Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i16 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -2167,7 +2167,7 @@ impl I32Number of NumberTrait { 0 } fn is_zero(self: i32) -> bool { - self == 0 + self == 0 } fn half() -> i32 { @@ -2203,7 +2203,7 @@ impl I32Number of NumberTrait { } fn max_value() -> i32 { - 2147483647 + 2147483647 } fn min(self: i32, other: i32) -> i32 { @@ -2281,7 +2281,7 @@ impl I32Number of NumberTrait { } fn INF() -> i32 { - 2147483647 + 2147483647 } fn is_inf(self: i32) -> bool { @@ -2289,11 +2289,11 @@ impl I32Number of NumberTrait { } fn is_pos_inf(self: i32) -> bool { - self == 2147483647 + self == 2147483647 } fn is_neg_inf(self: i32) -> bool { - self == -2147483647 + self == -2147483647 } fn bitwise_and(lhs: i32, rhs: i32) -> i32 { @@ -2334,7 +2334,7 @@ impl I32Div of Div { let rhs_felt: felt252 = 
rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i32 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -2470,7 +2470,7 @@ impl I64Number of NumberTrait { 0 } fn is_zero(self: i64) -> bool { - self == 0 + self == 0 } fn half() -> i64 { @@ -2506,7 +2506,7 @@ impl I64Number of NumberTrait { } fn max_value() -> i64 { - 9223372036854775807 + 9223372036854775807 } fn min(self: i64, other: i64) -> i64 { @@ -2584,7 +2584,7 @@ impl I64Number of NumberTrait { } fn INF() -> i64 { - 9223372036854775807 + 9223372036854775807 } fn is_inf(self: i64) -> bool { @@ -2592,11 +2592,11 @@ impl I64Number of NumberTrait { } fn is_pos_inf(self: i64) -> bool { - self == 9223372036854775807 + self == 9223372036854775807 } fn is_neg_inf(self: i64) -> bool { - self == -9223372036854775807 + self == -9223372036854775807 } fn bitwise_and(lhs: i64, rhs: i64) -> i64 { @@ -2637,7 +2637,7 @@ impl I64Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i64 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -2760,7 +2760,7 @@ impl I128Number of NumberTrait { 0 } fn is_zero(self: i128) -> bool { - self == 0 + self == 0 } fn half() -> i128 { @@ -2796,7 +2796,7 @@ impl I128Number of NumberTrait { } fn max_value() -> i128 { - 170141183460469231731687303715884105727 + 170141183460469231731687303715884105727 } fn min(self: i128, other: i128) -> i128 { @@ -2874,19 +2874,20 @@ impl I128Number of NumberTrait { } fn INF() -> i128 { - 170141183460469231731687303715884105727 + 170141183460469231731687303715884105727 } fn is_inf(self: 
i128) -> bool { - (self == 170141183460469231731687303715884105727 || self == -170141183460469231731687303715884105727) + (self == 170141183460469231731687303715884105727 + || self == -170141183460469231731687303715884105727) } fn is_pos_inf(self: i128) -> bool { - self == 170141183460469231731687303715884105727 + self == 170141183460469231731687303715884105727 } fn is_neg_inf(self: i128) -> bool { - self == -170141183460469231731687303715884105727 + self == -170141183460469231731687303715884105727 } fn bitwise_and(lhs: i128, rhs: i128) -> i128 { @@ -2927,7 +2928,7 @@ impl I128Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i128 = felt_result.try_into().unwrap(); // assigning the sign and returning diff --git a/src/numbers/fixed_point/implementations/fp16x16/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/core.cairo index cff7996af..a260d886f 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/core.cairo @@ -436,9 +436,8 @@ fn _i8_try_from_fp(x: FP16x16) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, - Option::None(_) => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo index b3fe4d39b..176c1a115 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo @@ -451,7 +451,7 @@ fn _i8_try_from_fp(x: FP16x16W) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, Option::None(_) => Option::None(()) } 
diff --git a/src/numbers/fixed_point/implementations/fp32x32/core.cairo b/src/numbers/fixed_point/implementations/fp32x32/core.cairo index 9fa722e8e..34b06bc44 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/core.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/core.cairo @@ -402,9 +402,8 @@ fn _i8_try_from_fp(x: FP32x32) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, - Option::None(_) => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp64x64/core.cairo b/src/numbers/fixed_point/implementations/fp64x64/core.cairo index c98cb7c57..d35cb9cfa 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/core.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/core.cairo @@ -402,9 +402,8 @@ fn _i8_try_from_fp(x: FP64x64) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, - Option::None(_) => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp8x23/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/core.cairo index b1ab1b6ac..6db9a5a43 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/core.cairo @@ -425,7 +425,7 @@ fn _i32_into_fp(x: FP8x23) -> i32 { fn _i8_try_from_fp(x: FP8x23) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); -// Option::Some(i8 { mag: unscaled_mag.unwrap(), sign: x.sign }) + // Option::Some(i8 { mag: unscaled_mag.unwrap(), sign: x.sign }) match unscaled_mag { Option::Some(val) => { let number_felt: felt252 = unscaled_mag.unwrap().into(); @@ -433,7 +433,7 @@ fn _i8_try_from_fp(x: FP8x23) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, Option::None(_) => Option::None(()) } diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo 
b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo index c4b49c798..9d9b985de 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo @@ -439,7 +439,7 @@ fn _i8_try_from_fp(x: FP8x23W) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, Option::None(_) => Option::None(()) } diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 3c99f4733..a8dac8369 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -14,6 +14,7 @@ use orion::operators::tensor::core::Tensor; /// hard_sigmoid - Applies the Hard Sigmoid function to an n-dimensional input tensor. /// thresholded_relu - Performs the thresholded relu activation function element-wise. /// gemm - Performs General Matrix multiplication. +/// conv_transpose - Performs the convolution of the input data tensor and weight tensor. trait NNTrait { /// # NNTrait::relu /// @@ -694,4 +695,140 @@ trait NNTrait { transA: bool, transB: bool ) -> Tensor; + /// + /// # NNTrait::conv_transpose + /// + /// ```rust + /// conv( + /// X: @Tensor, + /// W: @Tensor, + /// B: Option>, + /// auto_pad: Option, + /// dilations: Option>, + /// group: Option, + /// kernel_shape: Option>, + /// pads: Option>, + /// strides: Option>, + /// ) -> Tensor + /// ``` + /// + /// The convolution operator consumes an input tensor and a filter (input weight tensor), and computes the output. + /// + /// ## Args + /// + /// * `X`(`@Tensor`) - Input data tensor, has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W if 2D, otherwise the size is (N x C x D1 x D2 ... x Dn). 
+ /// * `W`(`@Tensor`) - The weight tensor, has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps if 2D, for more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x ... x kn). + /// * `B`(`Option<@Tensor>`) - Optional 1D bias to be added to the convolution, has size of M. + /// * `auto_pad`(`Option`) - Default is NOTSET, auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. + /// * `dilations`(`Option>`) - Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis. + /// * `group`(`Option`) - Default is 1, number of groups input channels and output channels are divided into. + /// * `kernel_shape`(`Option>`) - The shape of the convolution kernel. If not present, should be inferred from input W. + /// * `pads`(`Option>`) - Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis. + /// * `strides`(`Option>`) - Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. + /// + /// ## Returns + /// + /// A `Tensor` that contains the result of the convolution. 
+ /// + /// ## Examples + /// + /// ```rust + /// use orion::operators::nn::NNTrait; + /// use orion::numbers::FixedTrait; + /// use orion::operators::nn::FP16x16NN; + /// use orion::numbers::FP16x16; + /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; + /// + /// + /// fn example_conv_transpose() -> Tensor { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(1); + /// shape.append(3); + /// shape.append(3); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// let W = TensorTrait::new(shape.span(), data.span()); + /// + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(1); + /// shape.append(5); + /// shape.append(5); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 131072, sign: false }); + /// data.append(FP16x16 { mag: 196608, sign: false }); + /// data.append(FP16x16 { mag: 262144, sign: false }); + /// data.append(FP16x16 { mag: 327680, sign: false }); + /// data.append(FP16x16 { mag: 393216, sign: false }); + /// data.append(FP16x16 { mag: 458752, sign: false }); + /// data.append(FP16x16 { mag: 524288, sign: false }); + /// data.append(FP16x16 { mag: 589824, sign: false }); + /// data.append(FP16x16 { mag: 655360, sign: false }); + /// data.append(FP16x16 { mag: 720896, sign: false }); + /// data.append(FP16x16 { mag: 786432, sign: false }); 
+ /// data.append(FP16x16 { mag: 851968, sign: false }); + /// data.append(FP16x16 { mag: 917504, sign: false }); + /// data.append(FP16x16 { mag: 983040, sign: false }); + /// data.append(FP16x16 { mag: 1048576, sign: false }); + /// data.append(FP16x16 { mag: 1114112, sign: false }); + /// data.append(FP16x16 { mag: 1179648, sign: false }); + /// data.append(FP16x16 { mag: 1245184, sign: false }); + /// data.append(FP16x16 { mag: 1310720, sign: false }); + /// data.append(FP16x16 { mag: 1376256, sign: false }); + /// data.append(FP16x16 { mag: 1441792, sign: false }); + /// data.append(FP16x16 { mag: 1507328, sign: false }); + /// data.append(FP16x16 { mag: 1572864, sign: false }); + /// let mut X = TensorTrait::new(shape.span(), data.span()); + /// + /// return NNTrait::conv( + /// @X, + /// @W, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::Some(array![3, 3].span()), + /// Option::Some(array![1, 1, 1, 1].span()), + /// Option::None, + /// ); + /// } + /// + /// >>> [ + /// [ + /// [ + /// [12.0, 21.0, 27.0, 33.0, 24.0], + /// [33.0, 54.0, 63.0, 72.0, 51.0], + /// [63.0, 99.0, 108.0, 117.0, 81.0], + /// [93.0, 144.0, 153.0, 162.0, 111.0], + /// [72.0, 111.0, 117.0, 123.0, 84.0], + /// ] + /// ] + /// ] + /// + /// ```` + /// + fn conv( + X: @Tensor, + W: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor; } diff --git a/src/operators/nn/functional.cairo b/src/operators/nn/functional.cairo index a0fd96cc8..c2d34e8f3 100644 --- a/src/operators/nn/functional.cairo +++ b/src/operators/nn/functional.cairo @@ -10,3 +10,4 @@ mod logsoftmax; mod thresholded_relu; mod hard_sigmoid; mod gemm; +mod conv; diff --git a/src/operators/nn/functional/conv.cairo b/src/operators/nn/functional/conv.cairo new file mode 100644 index 000000000..e34cb0b3b --- /dev/null +++ b/src/operators/nn/functional/conv.cairo @@ -0,0 +1,1593 @@ 
+use core::traits::Into; +use core::traits::IndexView; +use core::array::ArrayTrait; +use orion::numbers::NumberTrait; +use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number}; +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; +use orion::operators::vec::{NullableVec, NullableVecImpl}; +use orion::operators::tensor::core::{stride}; + +use core::debug::PrintTrait; + +#[derive(Copy, Drop)] +enum AUTO_PAD { + NOTSET, + SAME_UPPER, + SAME_LOWER, + VALID +} + +fn conv< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +Add, + +Mul, + +AddEq, + +PrintTrait, +>( + X: @Tensor, + W: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, +) -> Tensor { + assert((*X).shape.len() >= 3, 'X must have at least 3 dim'); + let dilations = match dilations { + Option::Some(dilations) => dilations, + Option::None => { + let mut dilations = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*X).shape.len() { + break; + } + dilations.append(1); + i += 1; + }; + dilations.span() + }, + }; + let kernel_shape = match kernel_shape { + Option::Some(kernel_shape) => kernel_shape, + Option::None => { + let mut kernel_shape = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*W).shape.len() { + break; + } + kernel_shape.append(*(*W).shape.at(i)); + i += 1; + }; + kernel_shape.span() + }, + }; + let pads = match pads { + Option::Some(pads) => pads, + Option::None => { + let mut pads = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*X).shape.len() { + break; + } + pads.append(0); + pads.append(0); + i += 1; + }; + pads.span() + }, + }; + let strides = match strides { + Option::Some(strides) => strides, + Option::None => { + let mut strides = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*X).shape.len() { + break; + } + strides.append(1); + i += 1; + }; + strides.span() + }, + }; + + let group = match group { + Option::Some(group) => group, + 
Option::None => { 1 }, + }; + let auto_pad = match auto_pad { + Option::Some(auto_pad) => auto_pad, + Option::None => { AUTO_PAD::NOTSET }, + }; + + if group > 1 { + let mut res_b = ArrayTrait::new(); + let mut res_cv = ArrayTrait::new(); + let mut td = 0; + let mg = *(*W).shape.at(0) / group; + let dw = *(*W).shape.at(1); + + let X_stride = stride((*X).shape); + let mut gx_shape = array![1, dw]; + let mut i = 2; + loop { + if i >= (*X).shape.len() { + break; + } + gx_shape.append(*(*X).shape.at(i)); + i += 1; + }; + let gx_shape = gx_shape.span(); + + let W_stride = stride((*W).shape); + let mut gw_shape = array![mg]; + let mut i = 1; + loop { + if i >= (*W).shape.len() { + break; + } + gw_shape.append(*(*W).shape.at(i)); + i += 1; + }; + let gw_shape = gw_shape.span(); + + let mut b = 0; + loop { + if b == *(*X).shape.at(0) { + break; + } + let mut g = 0; + loop { + if g == group { + break; + } + let gx = TensorTrait::new( + gx_shape, + SpanTrait::slice( + (*X).data, + b * *X_stride.at(0) + (g * dw) * *X_stride.at(1), + *X_stride.at(1) * dw + ) + ); + let gw = TensorTrait::new( + gw_shape, + SpanTrait::slice((*W).data, (g * mg) * *W_stride.at(0), *W_stride.at(0) * mg) + ); + let cv = conv( + @gx, + @gw, + Option::None, + Option::Some(auto_pad), + Option::Some(dilations), + Option::Some(1), + Option::Some(kernel_shape), + Option::Some(pads), + Option::Some(strides) + ); + if b == 0 { + td += *cv.shape.at(1); + } + res_b.append(b); + res_cv.append(cv); + g += 1; + }; + b += 1; + }; + + let res_b = res_b.span(); + let res_cv = res_cv.span(); + + let mut final_shape = array![*(*X).shape.at(0), td]; + + let mut cv = *res_cv.at(0); + + let mut i = 2; + loop { + if i == cv.shape.len() { + break; + } + final_shape.append(*cv.shape.at(i)); + i += 1; + }; + let final_shape = final_shape.span(); + + let mut final = ArrayTrait::new(); + + let mut p = 0; + let mut i = 0; + + loop { + if i == res_b.len() { + break; + } + let b = *res_b.at(i); + let cv = *res_cv.at(i); + + let 
mut n = 0; + loop { + if n == cv.data.len() { + break; + } + final.append(*cv.data.at(n)); + n += 1; + }; + p += *cv.shape.at(1); + if p >= td { + p = 0; + } + i += 1; + }; + let final = final.span(); + + let final = match B { + Option::Some(B) => { + let mut final_b = ArrayTrait::new(); + let final_stride = stride(final_shape); + let mut i = 0; + loop { + if i == *final_shape.at(0) { + break; + } + let mut j = 0; + loop { + if j == B.len() { + break; + } + let mut k = 0; + loop { + if k == *final_stride.at(1) { + break; + } + final_b + .append( + *final.at(i * *final_stride.at(0) + j * *final_stride.at(1) + k) + + *B.at(j) + ); + k += 1; + }; + j += 1; + }; + i += 1; + }; + final_b.span() + }, + Option::None => { final }, + }; + + return TensorTrait::new(final_shape, final); + } + + // group == 1 + if *dilations.at(0) != 1 || min(dilations) != max(dilations) { + // computation of the dilated kernel + let nd = dilations.len(); + let mut new_kernel_shape = ArrayTrait::new(); + let mut new_shape = ArrayTrait::new(); + new_shape.append_span(SpanTrait::slice((*W).shape, 0, (*W).shape.len() - nd)); + + let mut i = 0; + loop { + if i == dilations.len() { + break; + } + let d = *dilations.at(i); + let di = (*W).shape.len() - nd + i; + new_shape.append(*(*W).shape.at(di) + (*(*W).shape.at(di) - 1) * (d - 1)); + new_kernel_shape.append(*kernel_shape.at(i) + (*kernel_shape.at(i) - 1) * (d - 1)); + i += 1; + }; + let new_shape = new_shape.span(); + let new_w_strides = stride(new_shape); + let w_strides = stride((*W).shape); + + let mut new_w = NullableVecImpl::new(); + new_w.set(*new_shape.at(0) * *new_w_strides.at(0) - 1, NumberTrait::zero()); + + let mut indices = ArrayTrait::new(); + //let mut indices_W = ArrayTrait::new(); + + indices.append(arange(0, *new_shape.at(0), 1)); + indices.append(arange(0, *new_shape.at(1), 1)); + + //indices_W.append(arange(0, *(*W).shape.at(0), 1)); + //indices_W.append(arange(0, *(*W).shape.at(1), 1)); + + let mut i = 0; + loop { + if i == 
dilations.len() { + break; + } + let d = *dilations.at(i); + let di = (*W).shape.len() - nd + i; + indices.append(arange(0, *new_shape.at(di), d)); + //indices_W.append(arange(0, *(*W).shape.at(di), 1)); + i += 1; + }; + + let set_of_all_indices = cartesian(indices.span()); + //let set_of_all_indices_W = cartesian(indices_W.span()); + + let mut new_w_arr = ArrayTrait::new(); + + let mut i = 0; + let mut prev = 0; + loop { + if i == (*W).data.len() { + break; + } + let nd_index = *set_of_all_indices.at(i); + let mut flatten_index = 0; + let mut j = 0; + loop { + if j == nd_index.len() { + break; + } + flatten_index += *nd_index.at(j) * *new_w_strides.at(j); + j += 1; + }; + + if flatten_index > prev + 1 { + let mut j = prev + 1; + loop { + if j == flatten_index { + break; + } + new_w_arr.append(NumberTrait::zero()); + }; + j += 1; + } + new_w_arr.append(*(*W).data.at(i)); + new_w.set(flatten_index, *(*W).data.at(i)); + prev = flatten_index; + i += 1; + }; + let W = @TensorTrait::new(new_shape, new_w_arr.span()); + let kernel_shape = new_kernel_shape; + } + + let pads = match auto_pad { + AUTO_PAD::NOTSET => { pads }, + AUTO_PAD::SAME_UPPER => { + let mut head = ArrayTrait::new(); + let mut tail = ArrayTrait::new(); + let mut i = 0; + loop { + if i == (*X).shape.len() - 2 { + break; + } + let d = *(*X).shape.at(i); + let target_size = (d + *strides.at(i) - 1) / *strides.at(i); + let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d; + let pad_head = pad_needed / 2; + let pad_tail = pad_needed - pad_head; + head.append(pad_head); + tail.append(pad_tail); + i += 1; + }; + head.append_span(tail.span()); + let pads = head.span(); + pads + }, + AUTO_PAD::SAME_LOWER => { + let mut head = ArrayTrait::new(); + let mut tail = ArrayTrait::new(); + let mut i = 0; + loop { + if i == (*X).shape.len() - 2 { + break; + } + let d = *(*X).shape.at(i); + let target_size = (d + *strides.at(i) - 1) / *strides.at(i); + let pad_needed = (target_size - 1) * 
*strides.at(i) + *kernel_shape.at(i) - d; + let pad_head = (pad_needed + 1) / 2; + let pad_tail = pad_needed - pad_head; + head.append(pad_head); + tail.append(pad_tail); + i += 1; + }; + head.append_span(tail.span()); + let pads = head.span(); + pads + }, + AUTO_PAD::VALID => { + let mut head = ArrayTrait::new(); + let mut tail = ArrayTrait::new(); + let mut i = 0; + loop { + if i == (*X).shape.len() - 2 { + break; + } + let d = *(*X).shape.at(i); + let target_size = (d + *strides.at(i) - 1) / *strides.at(i); + let pad_needed = (target_size - 1) * *strides.at(i) + *kernel_shape.at(i) - d; + let pad_head = pad_needed / 2; + let pad_tail = pad_needed - pad_head; + head.append(pad_head); + tail.append(pad_tail); + i += 1; + }; + head.append_span(tail.span()); + let pads = head.span(); + pads + }, + }; + + if (*X).shape.len() == 3 { + let sN = *(*X).shape.at(0); + let sC = *(*X).shape.at(1); + let sH = *(*X).shape.at(2); + + let sM = *(*W).shape.at(0); + + let kh = *kernel_shape.at(0); + let sth = *strides.at(0); + + let h_out = ((sH - kh + *pads.at(0) + *pads.at(1)) / sth) + 1; + + let h0 = *pads.at(0); + let oh: i32 = -1 * (kh % 2).into(); + let bh: i32 = -h0.into(); + let eh = h_out * sth; + let mut res = NullableVecImpl::new(); + + let res_shape = array![sN, sM, h_out].span(); + let res_strides = stride(res_shape); + res.set(sN * *res_strides.at(0) - 1, NumberTrait::zero()); + + match B { + Option::Some(B) => { + let mut i = 0; + loop { + if i == sN { + break; + } + let mut j = 0; + loop { + if j == sM { + break; + } + let b_j = *B.at(j); + let mut k = 0; + loop { + if k == h_out { + break; + } + res.set(i * *res_strides.at(0) + j * *res_strides.at(1) + k, b_j); + k += 1; + }; + j += 1; + }; + i += 1; + }; + }, + Option::None => {}, + } + + let mut n = 0; + loop { + if n == sN { + break; + } + let mut nw = 0; + loop { + if nw == sM { + break; + } + let mut c = 0; + loop { + if c == sC { + break; + } + let w = SpanTrait::slice((*W).data, nw * sC * kh + c * kh, kh); 
+ + let mut io = bh; + loop { + if io >= eh.into() { + break; + } + let hr = (io - bh) / sth.into(); + if hr < h_out.into() { + let i = io + (kh % 2).into(); + + let ih1 = I32Number::max(0, i + oh).into(); + let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into(); + let img = SpanTrait::slice((*X).data, n * sN + c * sC + ih1, ih2 - ih1); + + let s = if w.len() != img.len() { + let jh1 = I32Number::max(0, -i - oh).into(); + let jh2 = I32Number::min(sH.into() - (i + oh), kh.into()).into(); + + let w_ = SpanTrait::slice(w, jh1, jh2 - jh1); + assert(w_.len() == img.len(), 'unexpected w and img len'); + dot(img, w_) + } else { + dot(img, w) + }; + let hr = if hr < 0 { + *res_strides.at(1) - hr.into() + } else { + hr.into() + }; + res + .set( + n * *res_strides.at(0) + nw * *res_strides.at(1) + hr, + res.at(n * *res_strides.at(0) + nw * *res_strides.at(1) + hr) + + s + ); + } + io += sth.into(); + }; + c += 1; + }; + nw += 1; + }; + n += 1; + }; + let mut res_data = ArrayTrait::new(); + let mut i = 0; + loop { + if i == res.len() { + break; + } + res_data.append(res.at(i)); + i += 1; + }; + return TensorTrait::new(res_shape, res_data.span()); + } + + if (*X).shape.len() == 4 { + let sN = *(*X).shape.at(0); + let sC = *(*X).shape.at(1); + let sH = *(*X).shape.at(2); + let sW = *(*X).shape.at(3); + + let sM = *(*W).shape.at(0); + + let kh = *kernel_shape.at(0); + let kw = *kernel_shape.at(1); + + let sth = *strides.at(0); + let stw = *strides.at(1); + + let h_out = ((sH - kh + *pads.at(0) + *pads.at(2)) / sth) + 1; + let w_out = ((sW - kw + *pads.at(1) + *pads.at(3)) / stw) + 1; + + let h0 = *pads.at(0); + let w0 = *pads.at(1); + + let oh: i32 = -1 * (kh % 2).into(); + let ow: i32 = -1 * (kw % 2).into(); + let bh: i32 = -h0.into(); + let bw: i32 = -w0.into(); + let eh = h_out * sth; + let ew = w_out * stw; + + let mut res = NullableVecImpl::new(); + let res_shape = array![sN, sM, h_out, w_out].span(); + let res_strides = stride(res_shape); + res.set(sN * 
*res_strides.at(0) - 1, NumberTrait::zero()); + + match B { + Option::Some(B) => { + let mut i = 0; + loop { + if i == sN { + break; + } + let mut j = 0; + loop { + if j == sM { + break; + } + let b_j = *B.at(j); + let mut k = 0; + loop { + if k == h_out { + break; + } + let mut l = 0; + loop { + if l == w_out { + break; + } + res + .set( + i * *res_strides.at(0) + + j * *res_strides.at(1) + + k * *res_strides.at(2) + + l, + b_j + ); + l += 1; + }; + k += 1; + }; + j += 1; + }; + i += 1; + }; + }, + Option::None => {}, + } + + let mut n = 0; + loop { + if n == sN { + break; + } + let mut nw = 0; + loop { + if nw == sM { + break; + } + let mut c = 0; + loop { + if c == sC { + break; + } + let w = SpanTrait::slice( + (*W).data, nw * (sC * kh * kw) + c * (kh * kw), kh * kw + ); + + let mut io = bh; + loop { + if io >= eh.into() { + break; + } + let hr = (io - bh) / sth.into(); + if hr < h_out.into() { + let i = io + (kh % 2).into(); + let ih1 = I32Number::max(0, i + oh).into(); + let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into(); + + let mut jo = bw; + loop { + if jo >= ew.into() { + break; + } + let wr = (jo - bw) / stw.into(); + if wr < w_out.into() { + let j = jo + (kw % 2).into(); + let iw1 = I32Number::max(0, j + ow).into(); + let iw2 = I32Number::min(j + ow + kw.into(), sW.into()).into(); + + let mut img = ArrayTrait::new(); + let mut ihi = ih1; + loop { + if ihi == ih2 { + break; + } + img + .append_span( + SpanTrait::slice( + (*X).data, + n * (sC * sH * sW) + + c * (sH * sW) + + ihi * sW + + iw1, + iw2 - iw1 + ) + ); + ihi += 1; + }; + let img = img.span(); + + let s = if w.len() != img.len() { + let jh1 = I32Number::max(0, -i - oh).into(); + let jh2 = I32Number::min(sH.into() - (i + oh), kh.into()) + .into(); + + let jw1 = I32Number::max(0, -j - ow).into(); + let jw2 = I32Number::min(sW.into() - (j + ow), kw.into()) + .into(); + + let mut w_ = ArrayTrait::new(); + let mut jhj = jh1; + loop { + if jhj == jh2 { + break; + } + w_ + .append_span( + 
SpanTrait::slice(w, jhj * kw + jw1, jw2 - jw1) + ); + jhj += 1; + }; + let w_ = w_.span(); + + assert(w_.len() == img.len(), 'unexpected w and img len'); + dot(img, w_) + } else { + dot(img, w) + }; + + let hr = if hr < 0 { + h_out - hr.into() + } else { + hr.into() + }; + + let wr = if wr < 0 { + w_out - wr.into() + } else { + wr.into() + }; + + res + .set( + n * *res_strides.at(0) + + nw * *res_strides.at(1) + + hr * *res_strides.at(2) + + wr, + res + .at( + n * *res_strides.at(0) + + nw * *res_strides.at(1) + + hr * *res_strides.at(2) + + wr + ) + + s + ); + } + + jo += stw.into(); + }; + } + io += sth.into(); + }; + c += 1; + }; + nw += 1; + }; + n += 1; + }; + + let mut res_data = ArrayTrait::new(); + let mut i = 0; + loop { + if i == res.len() { + break; + } + res_data.append(res.at(i)); + i += 1; + }; + return TensorTrait::new(res_shape, res_data.span()); + } + + if (*X).shape.len() == 5 { + let sN = *(*X).shape.at(0); + let sC = *(*X).shape.at(1); + let sH = *(*X).shape.at(2); + let sW = *(*X).shape.at(3); + let sZ = *(*X).shape.at(4); + + let sM = *(*W).shape.at(0); + + let kh = *kernel_shape.at(0); + let kw = *kernel_shape.at(1); + let kz = *kernel_shape.at(2); + + let sth = *strides.at(0); + let stw = *strides.at(1); + let stz = *strides.at(2); + + let h_out = ((sH - kh + *pads.at(0) + *pads.at(3)) / sth) + 1; + let w_out = ((sW - kw + *pads.at(1) + *pads.at(4)) / stw) + 1; + let z_out = ((sZ - kz + *pads.at(2) + *pads.at(5)) / stz) + 1; + + let h0 = *pads.at(0); + let w0 = *pads.at(1); + let z0 = *pads.at(2); + + let oh: i32 = -1 * (kh % 2).into(); + let ow: i32 = -1 * (kw % 2).into(); + let oz: i32 = -1 * (kz % 2).into(); + + let bh: i32 = -h0.into(); + let bw: i32 = -w0.into(); + let bz: i32 = -z0.into(); + + let eh = h_out * sth; + let ew = w_out * stw; + let ez = z_out * stz; + + let mut res = NullableVecImpl::new(); + let res_shape = array![sN, sM, h_out, w_out, z_out].span(); + let res_strides = stride(res_shape); + res.set(sN * *res_strides.at(0) 
- 1, NumberTrait::zero()); + + match B { + Option::Some(B) => { + let mut i = 0; + loop { + if i == sN { + break; + } + let mut j = 0; + loop { + if j == sM { + break; + } + let b_j = *B.at(j); + let mut k = 0; + loop { + if k == h_out { + break; + } + let mut l = 0; + loop { + if l == w_out { + break; + } + let mut m = 0; + loop { + if m == z_out { + break; + } + res + .set( + i * *res_strides.at(0) + + j * *res_strides.at(1) + + k * *res_strides.at(2) + + l * *res_strides.at(3) + + m, + b_j + ); + m += 1; + }; + l += 1; + }; + k += 1; + }; + j += 1; + }; + i += 1; + }; + }, + Option::None => {}, + } + + let mut n = 0; + loop { + if n == sN { + break; + } + let mut nw = 0; + loop { + if nw == sM { + break; + } + let mut c = 0; + loop { + if c == sC { + break; + } + let w = SpanTrait::slice( + (*W).data, nw * (sC * kh * kw * kz) + c * (kh * kw * kz), kh * kw * kz + ); + + let mut io = bh; + loop { + if io >= eh.into() { + break; + } + let hr = (io - bh) / sth.into(); + if hr < h_out.into() { + let i = io + (kh % 2).into(); + let ih1 = I32Number::max(0, i + oh).into(); + let ih2 = I32Number::min(i + oh + kh.into(), sH.into()).into(); + + let mut jo = bw; + loop { + if jo >= ew.into() { + break; + } + let wr = (jo - bw) / stw.into(); + if wr < w_out.into() { + let j = jo + (kw % 2).into(); + let iw1 = I32Number::max(0, j + ow).into(); + let iw2 = I32Number::min(j + ow + kw.into(), sW.into()).into(); + + let mut zo = bz; + loop { + if zo >= ez.into() { + break; + } + let zr = (zo - bz) / stz.into(); + if zr < z_out.into() { + let z = zo + (kz % 2).into(); + let iz1 = I32Number::max(0, z + oz).into(); + let iz2 = I32Number::min(z + oz + kz.into(), sW.into()) + .into(); + + let mut img = ArrayTrait::new(); + let mut ihi = ih1; + loop { + if ihi == ih2 { + break; + } + let mut iwi = iw1; + loop { + if iwi == iw2 { + break; + } + img + .append_span( + SpanTrait::slice( + (*X).data, + n * (sC * sH * sW * sZ) + + c * (sH * sW * sZ) + + ihi * (sW * sZ) + + iwi * sZ + + iz1, 
+ iz2 - iz1 + ) + ); + iwi += 1; + }; + ihi += 1; + }; + let img = img.span(); + + let s = if w.len() != img.len() { + let jh1 = I32Number::max(0, -i - oh).into(); + let jh2 = I32Number::min( + sH.into() - (i + oh), kh.into() + ) + .into(); + + let jw1 = I32Number::max(0, -j - ow).into(); + let jw2 = I32Number::min( + sW.into() - (j + ow), kw.into() + ) + .into(); + + let jz1 = I32Number::max(0, -z - oz).into(); + let jz2 = I32Number::min( + sZ.into() - (z + oz), kz.into() + ) + .into(); + + let mut w_ = ArrayTrait::new(); + let mut jhj = jh1; + loop { + if jhj == jh2 { + break; + } + let mut jwj = jw1; + loop { + if jwj == jw2 { + break; + } + w_ + .append_span( + SpanTrait::slice( + w, + jhj * kw * kz + jwj * kz + jz1, + jz2 - jz1 + ) + ); + jwj += 1; + }; + jhj += 1; + }; + let w_ = w_.span(); + + assert( + w_.len() == img.len(), + 'unexpected w and img len' + ); + dot(img, w_) + } else { + dot(img, w) + }; + + let hr = if hr < 0 { + h_out - hr.into() + } else { + hr.into() + }; + + let wr = if wr < 0 { + w_out - wr.into() + } else { + wr.into() + }; + + let zr = if zr < 0 { + z_out - zr.into() + } else { + zr.into() + }; + + res + .set( + n * *res_strides.at(0) + + nw * *res_strides.at(1) + + hr * *res_strides.at(2) + + wr * *res_strides.at(3) + + zr, + res + .at( + n * *res_strides.at(0) + + nw * *res_strides.at(1) + + hr * *res_strides.at(2) + + wr * *res_strides.at(3) + + zr + ) + + s + ); + } + zo += stz.into(); + }; + } + + jo += stw.into(); + }; + } + io += sth.into(); + }; + c += 1; + }; + nw += 1; + }; + n += 1; + }; + + let mut res_data = ArrayTrait::new(); + let mut i = 0; + loop { + if i == res.len() { + break; + } + res_data.append(res.at(i)); + i += 1; + }; + return TensorTrait::new(res_shape, res_data.span()); + } + + // if (*X).shape.len() > 5 + let nd = (*X).shape.len() - 2; + + let sN = *(*X).shape.at(0); + let sC = *(*X).shape.at(1); + + let sM = *(*W).shape.at(0); + + let w_stride = stride((*W).shape); + let x_stride = stride((*X).shape); + + 
let mut shape_out = ArrayTrait::new(); + let mut o_index = ArrayTrait::::new(); + let mut b_index = ArrayTrait::::new(); + let mut e_index = ArrayTrait::new(); + + let mut range_len = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == nd { + break; + } + shape_out + .append( + ((*(*X).shape.at(2 + i) - *kernel_shape.at(i) + *pads.at(i) + *pads.at(i + nd)) + / *strides.at(i)) + + 1 + ); + let k = *kernel_shape.at(i); + o_index.append(-1 * (k % 2).into()); + b_index.append(-(*pads.at(i)).into()); + e_index.append(*shape_out.at(i) * *strides.at(i)); + range_len.append((((*e_index.at(i)).into() - *b_index.at(i)).into()) / *strides.at(i)); + i += 1; + }; + + let o_index = o_index.span(); + let b_index = b_index.span(); + + let shape_out = shape_out.span(); + + let range_len = range_len.span(); + let range_stride = stride(range_len); + + let mut res_shape = array![sN, sM]; + res_shape.append_span(shape_out); + let res_shape = res_shape.span(); + + let res_strides = stride(res_shape); + + let mut res = NullableVecImpl::new(); + res.set(sN * *res_strides.at(0) - 1, NumberTrait::zero()); + + match B { + Option::Some(B) => { + let mut i = 0; + loop { + if i == sN { + break; + } + let mut j = 0; + loop { + if j == sM { + break; + } + let b_j = *B.at(j); + let mut k = 0; + loop { + if k == *res_strides.at(1) { + break; + } + res.set(i * *res_strides.at(0) + j * *res_strides.at(1) + k, b_j); + k += 1; + }; + j += 1; + }; + i += 1; + }; + }, + Option::None => {}, + } + + let mut n = 0; + loop { + if n == sN { + break; + } + let mut nw = 0; + loop { + if nw == sM { + break; + } + let mut c = 0; + loop { + if c == sC { + break; + } + let w = SpanTrait::slice( + (*W).data, nw * *w_stride.at(0) + c * *w_stride.at(1), *w_stride.at(1) + ); + let mut i = 0; + loop { + if i == *range_len.at(0) * *range_stride.at(0) { + break; + } + let mut io_index = ArrayTrait::::new(); + let mut r_index = ArrayTrait::::new(); + let mut flatten_index = i; + + let mut nx = 0; + loop { + if nx == 
nd { + break; + } + let (n_index, rem) = DivRem::div_rem( + flatten_index, (*range_stride.at(nx)).try_into().unwrap() + ); + + flatten_index = rem; + io_index + .append(n_index.into() * (*strides.at(nx)).into() + *b_index.at(nx)); + r_index.append(n_index.into()); + nx += 1; + }; + + if r_index_check(r_index.span(), shape_out) { + let mut indices = ArrayTrait::::new(); + let mut i1_index = ArrayTrait::new(); + let mut i2_index = ArrayTrait::new(); + let mut idiff_index = ArrayTrait::new(); + + let mut nx = 0; + loop { + if nx == nd { + break; + } + indices.append(*io_index.at(nx) + (*kernel_shape.at(nx) % 2).into()); + i1_index + .append( + I32Number::max(0, *indices.at(nx) + *o_index.at(nx)).into() + ); + i2_index + .append( + I32Number::min( + (*(*X).shape.at(nx + 2)).into(), + *indices.at(nx) + + *o_index.at(nx) + + (*kernel_shape.at(nx)).into() + ) + .into() + ); + + if nx != nd - 1 { + idiff_index.append(*i2_index.at(nx) - *i1_index.at(nx)); + } + nx += 1; + }; + let i1_index = i1_index.span(); + let mut img = ArrayTrait::new(); + + let img = if nx == 1 { + let img = SpanTrait::slice( + (*X).data, + n * sN + c * sC + *i1_index.at(nd - 1), + *i2_index.at(nd - 1) - *i1_index.at(nd - 1) + ); + img + } else { + let i_stride = stride(idiff_index.span()); + + let mut ii = 0; + loop { + if ii == *i_stride.at(0) * *idiff_index.at(0) { + break; + } + let mut flatten_index = ii; + let mut start = n * *x_stride.at(0) + c * *x_stride.at(1); + + let mut nx = 0; + loop { + if nx == nd - 1 { + break; + } + let (ii_index, rem) = DivRem::div_rem( + flatten_index, (*i_stride.at(nx)).try_into().unwrap() + ); + flatten_index = rem; + + start += (*i1_index.at(nx) + ii_index) * *x_stride.at(2 + nx); + nx += 1; + }; + img + .append_span( + SpanTrait::slice( + (*X).data, + start + *i1_index.at(nd - 1), + *i2_index.at(nd - 1) - *i1_index.at(nd - 1) + ) + ); + ii += 1; + }; + img.span() + }; + + let s = if w.len() != img.len() { + let mut j1_index = ArrayTrait::new(); + let mut 
j2_index = ArrayTrait::new(); + let mut jdiff_index = ArrayTrait::new(); + + let mut nx = 0; + loop { + if nx == nd { + break; + } + j1_index + .append( + I32Number::max(0, -*indices.at(nx) - *o_index.at(nx)).into() + ); + j2_index + .append( + I32Number::min( + (*(*X).shape.at(nx + 2)).into() + - *indices.at(nx) + - *o_index.at(nx), + (*kernel_shape.at(nx)).into() + ) + .into() + ); + if nx != nd - 1 { + jdiff_index.append(*j2_index.at(nx) - *j1_index.at(nx)); + } + nx += 1; + }; + let j1_index = j1_index.span(); + + let mut w_ = ArrayTrait::new(); + + let w_ = if nx == 1 { + let w_ = SpanTrait::slice( + w, + *j1_index.at(nd - 1), + *j2_index.at(nd - 1) - *j1_index.at(nd - 1) + ); + w_ + } else { + let j_stride = stride(jdiff_index.span()); + + let mut jj = 0; + loop { + if jj == *j_stride.at(0) * *jdiff_index.at(0) { + break; + } + let mut flatten_index = jj; + let mut start = 0; + + let mut nx = 0; + loop { + if nx == nd - 1 { + break; + } + let (jj_index, rem) = DivRem::div_rem( + flatten_index, (*j_stride.at(nx)).try_into().unwrap() + ); + flatten_index = rem; + start += (*j1_index.at(nx) + jj_index) + * *kernel_shape.at(nx); + nx += 1; + }; + w_ + .append_span( + SpanTrait::slice( + w, + start + *j1_index.at(nd - 1), + *j2_index.at(nd - 1) - *j1_index.at(nd - 1) + ) + ); + jj += 1; + }; + w_.span() + }; + dot(img, w_) + } else { + dot(img, w) + }; + + let mut res_index = n * *res_strides.at(0) + nw * *res_strides.at(1); + + let mut nx = 0; + loop { + if nx == nd { + break; + } + res_index += (*r_index.at(nx)).into() * *res_strides.at(2 + nx); + nx += 1; + }; + + res.set(res_index, res.at(res_index) + s); + }; + i += 1 + }; + c += 1; + }; + nw += 1; + }; + n += 1; + }; + + let mut res_data = ArrayTrait::new(); + let mut i = 0; + loop { + if i == res.len() { + break; + } + res_data.append(res.at(i)); + i += 1; + }; + return TensorTrait::new(res_shape, res_data.span()); +} + + +fn r_index_check(r_index: Span, shape_out: Span) -> bool { + let mut i = 0; + let 
flag = loop { + if i == r_index.len() { + break true; + } + if *r_index.at(i) >= (*shape_out.at(i)).into() { + break false; + } + i += 1; + }; + return flag; +} + + +fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( + pA: Span, start: usize +) -> T { + let mut i = start; + let mut prod = NumberTrait::one(); + loop { + if i == pA.len() { + break; + } + prod = prod * (*pA.at(i)); + i += 1; + }; + return prod; +} + + +fn min(a: Span) -> usize { + assert(a.len() > 0, 'span cannot be empty'); + + let mut min = *a.at(0); + let mut i = 0; + loop { + if i == a.len() { + break; + } + let item = *a.at(i); + if item < min { + min = item; + } + i += 1; + }; + return min; +} + + +fn max(a: Span) -> usize { + assert(a.len() > 0, 'span cannot be empty'); + + let mut max = *a.at(0); + let mut i = 0; + loop { + if i == a.len() { + break; + } + let item = *a.at(i); + if item > max { + max = item; + } + i += 1; + }; + return max; +} + +fn arange(start: usize, end: usize, step: usize) -> Span { + assert((end - start) % step == 0, 'incompatible step value'); + + let mut arr = ArrayTrait::new(); + let mut i = start; + loop { + if i >= end { + break; + } + arr.append(i); + i += step; + }; + return arr.span(); +} + + +fn cartesian(mut arrays: Span>,) -> Span> { + let mut n = 1; + let mut i = arrays.len() - 1; + loop { + n = n * (*(arrays.at(i))).len(); + if i == 0 { + break; + } + i -= 1; + }; + + let mut i = 0; + let mut size_arrays = ArrayTrait::new(); + let mut m = n; + loop { + if i == arrays.len() { + break; + } + size_arrays.append((*(arrays.at(i))).len()); + + i += 1; + }; + let size_arrays = size_arrays.span(); + let mut output_arrays = ArrayTrait::>::new(); + let mut m = n; + + let mut i = 0; + loop { + if i == arrays.len() { + break; + } + m = m / (*(arrays.at(i))).len(); + let mut out = repeat(*(arrays.at(i)), m); + out = repeat_2(out, size_arrays, i); + + output_arrays.append(out); + i += 1; + }; + let output_arrays = output_arrays.span(); + + let mut i = 0; + let mut ret = 
ArrayTrait::new(); + loop { + if i == n { + break; + } + let mut j = 0; + let mut x = ArrayTrait::new(); + loop { + if j == arrays.len() { + break; + } + + x.append(*(output_arrays.at(j)).at(i)); + j += 1; + }; + ret.append(x.span()); + i += 1; + }; + + return ret.span(); +} + +fn repeat_2(mut array: Array, size_array: Span, index: usize) -> Array { + let mut size = array.len(); + let mut i = 0; + loop { + if i == index { + break; + } + let mut j = 1; + loop { + if j == *size_array.at(index - 1 - i) { + break; + } + let mut k = 0; + loop { + if k == size { + break; + } + array.append(*array.at(k)); + k += 1; + }; + j += 1; + }; + size = size * *size_array.at(index - 1 - i); + i += 1; + }; + array +} + +fn repeat(array: Span, m: usize,) -> Array { + let mut out = ArrayTrait::new(); + let mut j = 0; + loop { + if j == array.len() { + break; + } + let mut k = 0; + loop { + if k == m { + break; + } + out.append(*array.at(j)); + k += 1; + }; + j += 1; + }; + + out +} + +fn dot< + T, MAG, +Drop, +Copy, +NumberTrait, +Add, +TensorTrait, +AddEq, +Mul, +>( + a: Span, b: Span +) -> T { + let mut i = 0; + let mut sum = NumberTrait::zero(); + loop { + if i == a.len() { + break; + } + sum = sum + *a.at(i) * *b.at(i); + i += 1; + }; + + return sum; +} diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 785d3c9fa..82e3d4d13 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -72,4 +72,18 @@ impl FP16x16NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv( + X: @Tensor, + W: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) + } } diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo 
b/src/operators/nn/implementations/nn_fp32x32.cairo index 0427ea5f7..496485766 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -66,4 +66,18 @@ impl FP32x32NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv( + X: @Tensor, + W: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) + } } diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index fec810679..5d674d71d 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -66,4 +66,18 @@ impl FP64x64NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv( + X: @Tensor, + W: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) + } } diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index 9f5416121..129c298f9 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -70,4 +70,18 @@ impl FP8x23NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv( + X: @Tensor, + W: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) + } } diff --git 
a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index 1db66a1c6..ff9be81a7 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -61,4 +61,18 @@ impl I32NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv( + X: @Tensor, + W: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) + } } diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index e67bb7504..e0c13e5e9 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -61,4 +61,18 @@ impl I8NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv( + X: @Tensor, + W: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) + } } diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 370880e8d..564ad289d 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -61,4 +61,18 @@ impl U32NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn conv( + X: @Tensor, + W: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) + } } diff --git 
a/src/operators/sequence/functional/sequence_at.cairo b/src/operators/sequence/functional/sequence_at.cairo index 7953abb9d..4a4aa9203 100644 --- a/src/operators/sequence/functional/sequence_at.cairo +++ b/src/operators/sequence/functional/sequence_at.cairo @@ -8,7 +8,9 @@ use orion::numbers::{NumberTrait, I32IntoU32, U32IntoI32}; fn sequence_at, impl TCopy: Copy, impl TDrop: Drop>( sequence: Array>, position: Tensor ) -> Tensor { - assert(position.shape.len() == 0 && position.data.len().into() == 1, 'Position must be a scalar'); + assert( + position.shape.len() == 0 && position.data.len().into() == 1, 'Position must be a scalar' + ); let position_value_i32: i32 = *position.data.at(0); let is_negative: bool = position_value_i32 < 0; diff --git a/src/operators/sequence/functional/sequence_erase.cairo b/src/operators/sequence/functional/sequence_erase.cairo index dd2a2aad6..573087b1f 100644 --- a/src/operators/sequence/functional/sequence_erase.cairo +++ b/src/operators/sequence/functional/sequence_erase.cairo @@ -3,7 +3,7 @@ use core::option::OptionTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::I32Tensor; -use orion::numbers::{ NumberTrait, I32IntoU32}; +use orion::numbers::{NumberTrait, I32IntoU32}; /// Cf: SequenceTrait::sequence_erase docstring fn sequence_erase, impl TCopy: Copy, impl TDrop: Drop>( @@ -56,4 +56,3 @@ fn sequence_erase, impl TCopy: Copy, impl TDr return output_sequence; } - diff --git a/src/operators/sequence/functional/sequence_insert.cairo b/src/operators/sequence/functional/sequence_insert.cairo index 256a1b91c..412fc6c4b 100644 --- a/src/operators/sequence/functional/sequence_insert.cairo +++ b/src/operators/sequence/functional/sequence_insert.cairo @@ -3,7 +3,7 @@ use core::option::OptionTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::I32Tensor; -use orion::numbers::{ NumberTrait, I32IntoU32}; +use orion::numbers::{NumberTrait, I32IntoU32}; /// 
Cf: SequenceTrait::sequence_insert docstring fn sequence_insert, impl TCopy: Copy, impl TDrop: Drop>( @@ -55,4 +55,4 @@ fn sequence_insert, impl TCopy: Copy, impl TD }; return new_sequence; -} \ No newline at end of file +} diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 70344eb97..4245b418f 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -5559,10 +5559,14 @@ fn squeeze(self: @Tensor, axes: Option>) -> Tensor { let mut reshape: Array = ArrayTrait::new(); let mut index = 0_i32; let axis = if *axis < 0 { - assert(*axis <= (*self.shape).len().into(), 'axis out of accepted range'); + assert( + *axis <= (*self.shape).len().into(), 'axis out of accepted range' + ); (*self.shape).len().into() - *axis } else { - assert(*axis < (*self.shape).len().into(), 'axis out of accepted range'); + assert( + *axis < (*self.shape).len().into(), 'axis out of accepted range' + ); *axis }; diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo index 8c7e2b359..894dfc8d4 100644 --- a/src/operators/tensor/helpers.cairo +++ b/src/operators/tensor/helpers.cairo @@ -496,4 +496,4 @@ impl SpanPartialOrd, +Copy, +PartialEq, +PartialOrd> of Par fn lt(lhs: Span, rhs: Span) -> bool { span_cmp(lhs, rhs) < 0 } -} \ No newline at end of file +} diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 50383d2df..890a2d3b2 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -3,7 +3,7 @@ use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::{TryInto, Into}; -use orion::numbers::{ I32Div, I32DivEq }; +use orion::numbers::{I32Div, I32DivEq}; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -221,13 +221,7 @@ impl I32Tensor of TensorTrait { 
fn quantize_linear( self: @Tensor, y_scale: @Tensor, y_zero_point: @Tensor ) -> Tensor:: { - quantization::quantize_linear::quantize_linear( - self, - y_scale, - y_zero_point, - -127, - 127 - ) + quantization::quantize_linear::quantize_linear(self, y_scale, y_zero_point, -127, 127) } fn dequantize_linear( diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 7e81d90eb..9366a0347 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -3,7 +3,7 @@ use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::{TryInto, Into}; -use orion::numbers::{ I8Div, I8DivEq }; +use orion::numbers::{I8Div, I8DivEq}; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo index 372d5b1c2..bb0d9579b 100644 --- a/src/operators/tensor/math/layer_normalization.cairo +++ b/src/operators/tensor/math/layer_normalization.cairo @@ -3,7 +3,7 @@ use core::array::ArrayTrait; use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::Into; -use orion::numbers::{ NumberTrait, I32IntoU32}; +use orion::numbers::{NumberTrait, I32IntoU32}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; @@ -51,7 +51,6 @@ fn layer_normalization< Option::None => 1, }; - let axis = if axis < 0 { X_rank - axis.into() } else { diff --git a/src/test_helper/tensor/i32.cairo b/src/test_helper/tensor/i32.cairo index 0451fa442..89979eef0 100644 --- a/src/test_helper/tensor/i32.cairo +++ b/src/test_helper/tensor/i32.cairo @@ -93,7 +93,7 @@ fn i32_tensor_3x3_neg_helper() -> Tensor { sizes.append(3); let mut data = ArrayTrait::new(); - + data.append(0_i32); data.append(-1_i32); 
data.append(-2_i32); @@ -338,7 +338,6 @@ fn i32_tensor_3x3x3_helper() -> Tensor { data.append(24_i32); data.append(25_i32); data.append(26_i32); - let tensor = TensorTrait::new(sizes.span(), data.span()); diff --git a/src/test_helper/tensor/i8.cairo b/src/test_helper/tensor/i8.cairo index e492ad913..6d85e4b3e 100644 --- a/src/test_helper/tensor/i8.cairo +++ b/src/test_helper/tensor/i8.cairo @@ -93,7 +93,7 @@ fn i8_tensor_3x3_neg_helper() -> Tensor { sizes.append(3); let mut data = ArrayTrait::new(); - + data.append(0_i8); data.append(-1_i8); data.append(-2_i8); @@ -338,7 +338,6 @@ fn i8_tensor_3x3x3_helper() -> Tensor { data.append(24_i8); data.append(25_i8); data.append(26_i8); - let tensor = TensorTrait::new(sizes.span(), data.span()); diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 6c70b42cb..c81ceb8cf 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -936,3 +936,14 @@ mod split_fp16x16_2d_variable_parts; mod split_fp16x16_zero_size; mod split_fp16x16_1d_uneven; mod split_fp16x16_2d_uneven; +mod conv_2D_with_padding; +mod conv_1D_no_padding; +mod conv_1D_with_padding; +mod conv_3D_no_padding; +mod conv_3D_with_padding; +mod conv_4D_no_padding; +mod conv_2D_with_2_groups; +mod conv_2D_with_autopad_same; +mod conv_2D_with_strides_asymmetric_padding; +mod conv_2D_with_strides_with_padding; +mod conv_4D_with_padding; diff --git a/tests/nodes/clip_fp16x16_2d.cairo b/tests/nodes/clip_fp16x16_2d.cairo index d779d2790..b576203eb 100644 --- a/tests/nodes/clip_fp16x16_2d.cairo +++ b/tests/nodes/clip_fp16x16_2d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp16x16_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP16x16 { mag: 655360, sign: true }), Option::Some(FP16x16 { mag: 1310720, sign: false })); + let y = input_0 + .clip( + Option::Some(FP16x16 { mag: 655360, sign: true }), + Option::Some(FP16x16 { mag: 1310720, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/clip_fp16x16_3d.cairo 
b/tests/nodes/clip_fp16x16_3d.cairo index d82de09dc..98bed1a61 100644 --- a/tests/nodes/clip_fp16x16_3d.cairo +++ b/tests/nodes/clip_fp16x16_3d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp16x16_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP16x16 { mag: 655360, sign: true }), Option::Some(FP16x16 { mag: 1310720, sign: false })); + let y = input_0 + .clip( + Option::Some(FP16x16 { mag: 655360, sign: true }), + Option::Some(FP16x16 { mag: 1310720, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/clip_fp8x23_2d.cairo b/tests/nodes/clip_fp8x23_2d.cairo index 64f1792a1..60b38b565 100644 --- a/tests/nodes/clip_fp8x23_2d.cairo +++ b/tests/nodes/clip_fp8x23_2d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp8x23_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP8x23 { mag: 83886080, sign: true }), Option::Some(FP8x23 { mag: 167772160, sign: false })); + let y = input_0 + .clip( + Option::Some(FP8x23 { mag: 83886080, sign: true }), + Option::Some(FP8x23 { mag: 167772160, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/clip_fp8x23_3d.cairo b/tests/nodes/clip_fp8x23_3d.cairo index 511b33859..cc80a61d7 100644 --- a/tests/nodes/clip_fp8x23_3d.cairo +++ b/tests/nodes/clip_fp8x23_3d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp8x23_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP8x23 { mag: 83886080, sign: true }), Option::Some(FP8x23 { mag: 167772160, sign: false })); + let y = input_0 + .clip( + Option::Some(FP8x23 { mag: 83886080, sign: true }), + Option::Some(FP8x23 { mag: 167772160, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/compress_fp16x16_3d_axis1.cairo b/tests/nodes/compress_fp16x16_3d_axis1.cairo index 2463dfa93..4189bd1e9 100644 --- a/tests/nodes/compress_fp16x16_3d_axis1.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis1.cairo @@ -18,7 +18,7 @@ fn 
test_compress_fp16x16_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_axis2.cairo b/tests/nodes/compress_fp16x16_3d_axis2.cairo index a425e0988..e17e6bed4 100644 --- a/tests/nodes/compress_fp16x16_3d_axis2.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_axis3.cairo b/tests/nodes/compress_fp16x16_3d_axis3.cairo index 3ad15cc97..fa9efb511 100644 --- a/tests/nodes/compress_fp16x16_3d_axis3.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis3.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis3() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(3)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(3)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_default.cairo b/tests/nodes/compress_fp16x16_3d_default.cairo index 4bff29c09..0a8b68bf2 100644 --- a/tests/nodes/compress_fp16x16_3d_default.cairo +++ b/tests/nodes/compress_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_noaxis.cairo b/tests/nodes/compress_fp16x16_3d_noaxis.cairo index e637f47c8..4e1b1620e 100644 --- 
a/tests/nodes/compress_fp16x16_3d_noaxis.cairo +++ b/tests/nodes/compress_fp16x16_3d_noaxis.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_noaxis() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::None(())); + let y_0 = input_0.compress(condition: input_1, axis: Option::None(())); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_axis1.cairo b/tests/nodes/compress_fp8x23_3d_axis1.cairo index 24829c58f..03bdc8815 100644 --- a/tests/nodes/compress_fp8x23_3d_axis1.cairo +++ b/tests/nodes/compress_fp8x23_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_axis2.cairo b/tests/nodes/compress_fp8x23_3d_axis2.cairo index c4cf9a814..ca6bc4ec6 100644 --- a/tests/nodes/compress_fp8x23_3d_axis2.cairo +++ b/tests/nodes/compress_fp8x23_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_default.cairo b/tests/nodes/compress_fp8x23_3d_default.cairo index 6f590b622..f9acf8b7b 100644 --- a/tests/nodes/compress_fp8x23_3d_default.cairo +++ b/tests/nodes/compress_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git 
a/tests/nodes/compress_i32_3d_axis1.cairo b/tests/nodes/compress_i32_3d_axis1.cairo index e3d6a8072..6d3142fec 100644 --- a/tests/nodes/compress_i32_3d_axis1.cairo +++ b/tests/nodes/compress_i32_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_axis2.cairo b/tests/nodes/compress_i32_3d_axis2.cairo index 3ae5828c8..242aef0ae 100644 --- a/tests/nodes/compress_i32_3d_axis2.cairo +++ b/tests/nodes/compress_i32_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_default.cairo b/tests/nodes/compress_i32_3d_default.cairo index dde8e15cf..ab19213b0 100644 --- a/tests/nodes/compress_i32_3d_default.cairo +++ b/tests/nodes/compress_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_axis1.cairo b/tests/nodes/compress_i8_3d_axis1.cairo index 8fd8bb267..4ab02896a 100644 --- a/tests/nodes/compress_i8_3d_axis1.cairo +++ b/tests/nodes/compress_i8_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); 
assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_axis2.cairo b/tests/nodes/compress_i8_3d_axis2.cairo index 220210744..f0dbaef06 100644 --- a/tests/nodes/compress_i8_3d_axis2.cairo +++ b/tests/nodes/compress_i8_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_default.cairo b/tests/nodes/compress_i8_3d_default.cairo index b802e589c..e4ad1fbc8 100644 --- a/tests/nodes/compress_i8_3d_default.cairo +++ b/tests/nodes/compress_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis1.cairo b/tests/nodes/compress_u32_3d_axis1.cairo index 136f8b8ce..41a2adc63 100644 --- a/tests/nodes/compress_u32_3d_axis1.cairo +++ b/tests/nodes/compress_u32_3d_axis1.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis2.cairo b/tests/nodes/compress_u32_3d_axis2.cairo index 347e36676..801886380 100644 --- a/tests/nodes/compress_u32_3d_axis2.cairo +++ b/tests/nodes/compress_u32_3d_axis2.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, 
axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis2_2.cairo b/tests/nodes/compress_u32_3d_axis2_2.cairo index abc515486..c5a20dbc2 100644 --- a/tests/nodes/compress_u32_3d_axis2_2.cairo +++ b/tests/nodes/compress_u32_3d_axis2_2.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis2_2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis3.cairo b/tests/nodes/compress_u32_3d_axis3.cairo index 10e1e507e..4edd5c8dc 100644 --- a/tests/nodes/compress_u32_3d_axis3.cairo +++ b/tests/nodes/compress_u32_3d_axis3.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis3() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(3)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(3)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_default.cairo b/tests/nodes/compress_u32_3d_default.cairo index ce12adac8..32068f9b7 100644 --- a/tests/nodes/compress_u32_3d_default.cairo +++ b/tests/nodes/compress_u32_3d_default.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/conv_1D_no_padding.cairo b/tests/nodes/conv_1D_no_padding.cairo new file mode 100644 index 000000000..4634af9a4 --- /dev/null +++ b/tests/nodes/conv_1D_no_padding.cairo @@ -0,0 +1,32 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use 
orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_1D_no_padding() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_1D_no_padding/input_0.cairo b/tests/nodes/conv_1D_no_padding/input_0.cairo new file mode 100644 index 000000000..f186424cb --- /dev/null +++ b/tests/nodes/conv_1D_no_padding/input_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_1D_no_padding/input_1.cairo b/tests/nodes/conv_1D_no_padding/input_1.cairo new file mode 100644 index 000000000..1d1e2760f --- /dev/null +++ b/tests/nodes/conv_1D_no_padding/input_1.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + + let mut data = ArrayTrait::new(); + 
data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_1D_no_padding/output_0.cairo b/tests/nodes/conv_1D_no_padding/output_0.cairo new file mode 100644 index 000000000..24ed1a5cb --- /dev/null +++ b/tests/nodes/conv_1D_no_padding/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_1D_with_padding.cairo b/tests/nodes/conv_1D_with_padding.cairo new file mode 100644 index 000000000..3dacddc9f --- /dev/null +++ b/tests/nodes/conv_1D_with_padding.cairo @@ -0,0 +1,32 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_1D_with_padding() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![1, 1].span()), + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_1D_with_padding/input_0.cairo b/tests/nodes/conv_1D_with_padding/input_0.cairo 
new file mode 100644 index 000000000..f186424cb --- /dev/null +++ b/tests/nodes/conv_1D_with_padding/input_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_1D_with_padding/input_1.cairo b/tests/nodes/conv_1D_with_padding/input_1.cairo new file mode 100644 index 000000000..1d1e2760f --- /dev/null +++ b/tests/nodes/conv_1D_with_padding/input_1.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_1D_with_padding/output_0.cairo b/tests/nodes/conv_1D_with_padding/output_0.cairo new file mode 100644 index 000000000..3552d322f --- /dev/null +++ b/tests/nodes/conv_1D_with_padding/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_2_groups.cairo b/tests/nodes/conv_2D_with_2_groups.cairo new file mode 100644 index 000000000..b3022dc06 --- /dev/null +++ b/tests/nodes/conv_2D_with_2_groups.cairo @@ -0,0 +1,32 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_2D_with_2_groups() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::Some(2), + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_2D_with_2_groups/input_0.cairo b/tests/nodes/conv_2D_with_2_groups/input_0.cairo new file mode 100644 index 000000000..e152fc043 --- /dev/null +++ b/tests/nodes/conv_2D_with_2_groups/input_0.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + 
shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_2_groups/input_1.cairo b/tests/nodes/conv_2D_with_2_groups/input_1.cairo new file mode 100644 index 000000000..badf32363 --- /dev/null +++ b/tests/nodes/conv_2D_with_2_groups/input_1.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 
65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_2_groups/output_0.cairo b/tests/nodes/conv_2D_with_2_groups/output_0.cairo new file mode 100644 index 000000000..0f6aa2d56 --- /dev/null +++ b/tests/nodes/conv_2D_with_2_groups/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(1); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_autopad_same.cairo b/tests/nodes/conv_2D_with_autopad_same.cairo new file mode 100644 index 000000000..b3c88bdf4 --- /dev/null +++ b/tests/nodes/conv_2D_with_autopad_same.cairo @@ -0,0 +1,34 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +use 
orion::operators::nn::functional::conv::AUTO_PAD; + +#[test] +#[available_gas(2000000000)] +fn test_conv_2D_with_autopad_same() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv( + @input_0, + @input_1, + Option::None, + Option::Some(AUTO_PAD::SAME_LOWER), + Option::None, + Option::None, + Option::Some(array![3, 3].span()), + Option::None, + Option::Some(array![2, 2].span()) + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_2D_with_autopad_same/input_0.cairo b/tests/nodes/conv_2D_with_autopad_same/input_0.cairo new file mode 100644 index 000000000..5b8aee80b --- /dev/null +++ b/tests/nodes/conv_2D_with_autopad_same/input_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + 
data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_autopad_same/input_1.cairo b/tests/nodes/conv_2D_with_autopad_same/input_1.cairo new file mode 100644 index 000000000..d40faf085 --- /dev/null +++ b/tests/nodes/conv_2D_with_autopad_same/input_1.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_autopad_same/output_0.cairo b/tests/nodes/conv_2D_with_autopad_same/output_0.cairo new file mode 100644 index 000000000..a5e7c8a8b --- /dev/null +++ b/tests/nodes/conv_2D_with_autopad_same/output_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; 
+use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 7077888, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 5505024, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_padding.cairo b/tests/nodes/conv_2D_with_padding.cairo new file mode 100644 index 000000000..a48e2dae1 --- /dev/null +++ b/tests/nodes/conv_2D_with_padding.cairo @@ -0,0 +1,32 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_2D_with_padding() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_2D_with_padding/input_0.cairo b/tests/nodes/conv_2D_with_padding/input_0.cairo new file mode 100644 index 000000000..5b8aee80b --- /dev/null +++ b/tests/nodes/conv_2D_with_padding/input_0.cairo @@ -0,0 +1,40 @@ +use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_padding/input_1.cairo b/tests/nodes/conv_2D_with_padding/input_1.cairo new file mode 100644 index 000000000..d40faf085 --- /dev/null +++ b/tests/nodes/conv_2D_with_padding/input_1.cairo @@ -0,0 +1,24 
@@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_padding/output_0.cairo b/tests/nodes/conv_2D_with_padding/output_0.cairo new file mode 100644 index 000000000..83a3190f8 --- /dev/null +++ b/tests/nodes/conv_2D_with_padding/output_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 6488064, sign: false }); + data.append(FP16x16 { mag: 7077888, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 9437184, sign: false }); + data.append(FP16x16 { mag: 10027008, sign: false }); + data.append(FP16x16 { 
mag: 10616832, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_strides_asymmetric_padding.cairo b/tests/nodes/conv_2D_with_strides_asymmetric_padding.cairo new file mode 100644 index 000000000..06781ef93 --- /dev/null +++ b/tests/nodes/conv_2D_with_strides_asymmetric_padding.cairo @@ -0,0 +1,32 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_2D_with_strides_asymmetric_padding() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![3, 3].span()), + Option::Some(array![1, 0, 1, 0].span()), + Option::Some(array![2, 2].span()) + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_2D_with_strides_asymmetric_padding/input_0.cairo b/tests/nodes/conv_2D_with_strides_asymmetric_padding/input_0.cairo new file mode 100644 index 000000000..3b7ad252a --- /dev/null +++ b/tests/nodes/conv_2D_with_strides_asymmetric_padding/input_0.cairo @@ -0,0 +1,50 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(7); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 
262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + data.append(FP16x16 { mag: 1900544, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 2228224, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_strides_asymmetric_padding/input_1.cairo b/tests/nodes/conv_2D_with_strides_asymmetric_padding/input_1.cairo new file mode 100644 index 000000000..d40faf085 --- /dev/null +++ b/tests/nodes/conv_2D_with_strides_asymmetric_padding/input_1.cairo @@ -0,0 +1,24 @@ +use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_strides_asymmetric_padding/output_0.cairo b/tests/nodes/conv_2D_with_strides_asymmetric_padding/output_0.cairo new file mode 100644 index 000000000..05e0403dc --- /dev/null +++ b/tests/nodes/conv_2D_with_strides_asymmetric_padding/output_0.cairo @@ -0,0 +1,23 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 6488064, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 12386304, sign: false }); + data.append(FP16x16 { mag: 13565952, sign: false }); + data.append(FP16x16 { mag: 11206656, sign: false }); + data.append(FP16x16 { mag: 
11993088, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_strides_with_padding.cairo b/tests/nodes/conv_2D_with_strides_with_padding.cairo new file mode 100644 index 000000000..1c9fcdca5 --- /dev/null +++ b/tests/nodes/conv_2D_with_strides_with_padding.cairo @@ -0,0 +1,32 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_2D_with_strides_with_padding() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![3, 3].span()), + Option::Some(array![1, 1, 1, 1].span()), + Option::Some(array![2, 2].span()) + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_2D_with_strides_with_padding/input_0.cairo b/tests/nodes/conv_2D_with_strides_with_padding/input_0.cairo new file mode 100644 index 000000000..3b7ad252a --- /dev/null +++ b/tests/nodes/conv_2D_with_strides_with_padding/input_0.cairo @@ -0,0 +1,50 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(7); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { 
mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + data.append(FP16x16 { mag: 1900544, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 2228224, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_strides_with_padding/input_1.cairo b/tests/nodes/conv_2D_with_strides_with_padding/input_1.cairo new file mode 100644 index 000000000..d40faf085 --- /dev/null +++ b/tests/nodes/conv_2D_with_strides_with_padding/input_1.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use 
orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_2D_with_strides_with_padding/output_0.cairo b/tests/nodes/conv_2D_with_strides_with_padding/output_0.cairo new file mode 100644 index 000000000..8a57d253e --- /dev/null +++ b/tests/nodes/conv_2D_with_strides_with_padding/output_0.cairo @@ -0,0 +1,27 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 7077888, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 8060928, sign: false }); + data.append(FP16x16 { mag: 12976128, sign: false }); + data.append(FP16x16 { mag: 9240576, 
sign: false }); + data.append(FP16x16 { mag: 7340032, sign: false }); + data.append(FP16x16 { mag: 11599872, sign: false }); + data.append(FP16x16 { mag: 8126464, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_3D_no_padding.cairo b/tests/nodes/conv_3D_no_padding.cairo new file mode 100644 index 000000000..399af5e4c --- /dev/null +++ b/tests/nodes/conv_3D_no_padding.cairo @@ -0,0 +1,32 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_3D_no_padding() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_3D_no_padding/input_0.cairo b/tests/nodes/conv_3D_no_padding/input_0.cairo new file mode 100644 index 000000000..7bdd026fd --- /dev/null +++ b/tests/nodes/conv_3D_no_padding/input_0.cairo @@ -0,0 +1,141 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { 
mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + data.append(FP16x16 { mag: 1900544, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 2228224, sign: false }); + data.append(FP16x16 { mag: 2293760, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 2424832, sign: false }); + data.append(FP16x16 { mag: 2490368, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 2621440, sign: false }); + data.append(FP16x16 { mag: 2686976, sign: false }); + data.append(FP16x16 { mag: 
2752512, sign: false }); + data.append(FP16x16 { mag: 2818048, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 2949120, sign: false }); + data.append(FP16x16 { mag: 3014656, sign: false }); + data.append(FP16x16 { mag: 3080192, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 3276800, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 3407872, sign: false }); + data.append(FP16x16 { mag: 3473408, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 3604480, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 3735552, sign: false }); + data.append(FP16x16 { mag: 3801088, sign: false }); + data.append(FP16x16 { mag: 3866624, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 3997696, sign: false }); + data.append(FP16x16 { mag: 4063232, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 4194304, sign: false }); + data.append(FP16x16 { mag: 4259840, sign: false }); + data.append(FP16x16 { mag: 4325376, sign: false }); + data.append(FP16x16 { mag: 4390912, sign: false }); + data.append(FP16x16 { mag: 4456448, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 4587520, sign: false }); + data.append(FP16x16 { mag: 4653056, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 4784128, sign: false }); + data.append(FP16x16 { mag: 4849664, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + data.append(FP16x16 { mag: 4980736, sign: false }); + data.append(FP16x16 { mag: 5046272, sign: false }); + data.append(FP16x16 { mag: 5111808, sign: false }); + data.append(FP16x16 { mag: 
5177344, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 5373952, sign: false }); + data.append(FP16x16 { mag: 5439488, sign: false }); + data.append(FP16x16 { mag: 5505024, sign: false }); + data.append(FP16x16 { mag: 5570560, sign: false }); + data.append(FP16x16 { mag: 5636096, sign: false }); + data.append(FP16x16 { mag: 5701632, sign: false }); + data.append(FP16x16 { mag: 5767168, sign: false }); + data.append(FP16x16 { mag: 5832704, sign: false }); + data.append(FP16x16 { mag: 5898240, sign: false }); + data.append(FP16x16 { mag: 5963776, sign: false }); + data.append(FP16x16 { mag: 6029312, sign: false }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 6160384, sign: false }); + data.append(FP16x16 { mag: 6225920, sign: false }); + data.append(FP16x16 { mag: 6291456, sign: false }); + data.append(FP16x16 { mag: 6356992, sign: false }); + data.append(FP16x16 { mag: 6422528, sign: false }); + data.append(FP16x16 { mag: 6488064, sign: false }); + data.append(FP16x16 { mag: 6553600, sign: false }); + data.append(FP16x16 { mag: 6619136, sign: false }); + data.append(FP16x16 { mag: 6684672, sign: false }); + data.append(FP16x16 { mag: 6750208, sign: false }); + data.append(FP16x16 { mag: 6815744, sign: false }); + data.append(FP16x16 { mag: 6881280, sign: false }); + data.append(FP16x16 { mag: 6946816, sign: false }); + data.append(FP16x16 { mag: 7012352, sign: false }); + data.append(FP16x16 { mag: 7077888, sign: false }); + data.append(FP16x16 { mag: 7143424, sign: false }); + data.append(FP16x16 { mag: 7208960, sign: false }); + data.append(FP16x16 { mag: 7274496, sign: false }); + data.append(FP16x16 { mag: 7340032, sign: false }); + data.append(FP16x16 { mag: 7405568, sign: false }); + data.append(FP16x16 { mag: 7471104, sign: false }); + data.append(FP16x16 { mag: 7536640, sign: false }); + data.append(FP16x16 { mag: 
7602176, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 7733248, sign: false }); + data.append(FP16x16 { mag: 7798784, sign: false }); + data.append(FP16x16 { mag: 7864320, sign: false }); + data.append(FP16x16 { mag: 7929856, sign: false }); + data.append(FP16x16 { mag: 7995392, sign: false }); + data.append(FP16x16 { mag: 8060928, sign: false }); + data.append(FP16x16 { mag: 8126464, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_3D_no_padding/input_1.cairo b/tests/nodes/conv_3D_no_padding/input_1.cairo new file mode 100644 index 000000000..2424a6daa --- /dev/null +++ b/tests/nodes/conv_3D_no_padding/input_1.cairo @@ -0,0 +1,43 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + 
data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_3D_no_padding/output_0.cairo b/tests/nodes/conv_3D_no_padding/output_0.cairo new file mode 100644 index 000000000..bcc148792 --- /dev/null +++ b/tests/nodes/conv_3D_no_padding/output_0.cairo @@ -0,0 +1,43 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 54853632, sign: false }); + data.append(FP16x16 { mag: 56623104, sign: false }); + data.append(FP16x16 { mag: 58392576, sign: false }); + data.append(FP16x16 { mag: 63700992, sign: false }); + data.append(FP16x16 { mag: 65470464, sign: false }); + data.append(FP16x16 { mag: 67239936, sign: false }); + data.append(FP16x16 { mag: 72548352, sign: false }); + data.append(FP16x16 { mag: 74317824, sign: false }); + data.append(FP16x16 { mag: 76087296, sign: false }); + data.append(FP16x16 { mag: 99090432, sign: false }); + data.append(FP16x16 { mag: 100859904, sign: false }); + data.append(FP16x16 { mag: 102629376, sign: false }); + data.append(FP16x16 { mag: 107937792, sign: false }); + 
data.append(FP16x16 { mag: 109707264, sign: false }); + data.append(FP16x16 { mag: 111476736, sign: false }); + data.append(FP16x16 { mag: 116785152, sign: false }); + data.append(FP16x16 { mag: 118554624, sign: false }); + data.append(FP16x16 { mag: 120324096, sign: false }); + data.append(FP16x16 { mag: 143327232, sign: false }); + data.append(FP16x16 { mag: 145096704, sign: false }); + data.append(FP16x16 { mag: 146866176, sign: false }); + data.append(FP16x16 { mag: 152174592, sign: false }); + data.append(FP16x16 { mag: 153944064, sign: false }); + data.append(FP16x16 { mag: 155713536, sign: false }); + data.append(FP16x16 { mag: 161021952, sign: false }); + data.append(FP16x16 { mag: 162791424, sign: false }); + data.append(FP16x16 { mag: 164560896, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_3D_with_padding.cairo b/tests/nodes/conv_3D_with_padding.cairo new file mode 100644 index 000000000..2c023284f --- /dev/null +++ b/tests/nodes/conv_3D_with_padding.cairo @@ -0,0 +1,32 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_3D_with_padding() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![1, 1, 1, 1, 1, 1].span()), + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_3D_with_padding/input_0.cairo b/tests/nodes/conv_3D_with_padding/input_0.cairo new file mode 100644 index 000000000..7bdd026fd --- /dev/null +++ b/tests/nodes/conv_3D_with_padding/input_0.cairo @@ -0,0 +1,141 @@ +use core::array::{ArrayTrait, SpanTrait}; 
+use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + data.append(FP16x16 { mag: 1900544, sign: false }); + data.append(FP16x16 { mag: 1966080, 
sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 2228224, sign: false }); + data.append(FP16x16 { mag: 2293760, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 2424832, sign: false }); + data.append(FP16x16 { mag: 2490368, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 2621440, sign: false }); + data.append(FP16x16 { mag: 2686976, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 2818048, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 2949120, sign: false }); + data.append(FP16x16 { mag: 3014656, sign: false }); + data.append(FP16x16 { mag: 3080192, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 3276800, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 3407872, sign: false }); + data.append(FP16x16 { mag: 3473408, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 3604480, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 3735552, sign: false }); + data.append(FP16x16 { mag: 3801088, sign: false }); + data.append(FP16x16 { mag: 3866624, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 3997696, sign: false }); + data.append(FP16x16 { mag: 4063232, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 4194304, sign: false }); + data.append(FP16x16 { mag: 4259840, sign: false }); + data.append(FP16x16 { mag: 4325376, sign: false }); + data.append(FP16x16 { mag: 4390912, 
sign: false }); + data.append(FP16x16 { mag: 4456448, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 4587520, sign: false }); + data.append(FP16x16 { mag: 4653056, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 4784128, sign: false }); + data.append(FP16x16 { mag: 4849664, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + data.append(FP16x16 { mag: 4980736, sign: false }); + data.append(FP16x16 { mag: 5046272, sign: false }); + data.append(FP16x16 { mag: 5111808, sign: false }); + data.append(FP16x16 { mag: 5177344, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 5373952, sign: false }); + data.append(FP16x16 { mag: 5439488, sign: false }); + data.append(FP16x16 { mag: 5505024, sign: false }); + data.append(FP16x16 { mag: 5570560, sign: false }); + data.append(FP16x16 { mag: 5636096, sign: false }); + data.append(FP16x16 { mag: 5701632, sign: false }); + data.append(FP16x16 { mag: 5767168, sign: false }); + data.append(FP16x16 { mag: 5832704, sign: false }); + data.append(FP16x16 { mag: 5898240, sign: false }); + data.append(FP16x16 { mag: 5963776, sign: false }); + data.append(FP16x16 { mag: 6029312, sign: false }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 6160384, sign: false }); + data.append(FP16x16 { mag: 6225920, sign: false }); + data.append(FP16x16 { mag: 6291456, sign: false }); + data.append(FP16x16 { mag: 6356992, sign: false }); + data.append(FP16x16 { mag: 6422528, sign: false }); + data.append(FP16x16 { mag: 6488064, sign: false }); + data.append(FP16x16 { mag: 6553600, sign: false }); + data.append(FP16x16 { mag: 6619136, sign: false }); + data.append(FP16x16 { mag: 6684672, sign: false }); + data.append(FP16x16 { mag: 6750208, sign: false }); + data.append(FP16x16 { mag: 6815744, 
sign: false }); + data.append(FP16x16 { mag: 6881280, sign: false }); + data.append(FP16x16 { mag: 6946816, sign: false }); + data.append(FP16x16 { mag: 7012352, sign: false }); + data.append(FP16x16 { mag: 7077888, sign: false }); + data.append(FP16x16 { mag: 7143424, sign: false }); + data.append(FP16x16 { mag: 7208960, sign: false }); + data.append(FP16x16 { mag: 7274496, sign: false }); + data.append(FP16x16 { mag: 7340032, sign: false }); + data.append(FP16x16 { mag: 7405568, sign: false }); + data.append(FP16x16 { mag: 7471104, sign: false }); + data.append(FP16x16 { mag: 7536640, sign: false }); + data.append(FP16x16 { mag: 7602176, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 7733248, sign: false }); + data.append(FP16x16 { mag: 7798784, sign: false }); + data.append(FP16x16 { mag: 7864320, sign: false }); + data.append(FP16x16 { mag: 7929856, sign: false }); + data.append(FP16x16 { mag: 7995392, sign: false }); + data.append(FP16x16 { mag: 8060928, sign: false }); + data.append(FP16x16 { mag: 8126464, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_3D_with_padding/input_1.cairo b/tests/nodes/conv_3D_with_padding/input_1.cairo new file mode 100644 index 000000000..2424a6daa --- /dev/null +++ b/tests/nodes/conv_3D_with_padding/input_1.cairo @@ -0,0 +1,43 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false 
}); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_3D_with_padding/output_0.cairo b/tests/nodes/conv_3D_with_padding/output_0.cairo new file mode 100644 index 000000000..f824053b7 --- /dev/null +++ b/tests/nodes/conv_3D_with_padding/output_0.cairo @@ -0,0 +1,141 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 8126464, sign: false }); + data.append(FP16x16 { mag: 12582912, 
sign: false }); + data.append(FP16x16 { mag: 13369344, sign: false }); + data.append(FP16x16 { mag: 14155776, sign: false }); + data.append(FP16x16 { mag: 9699328, sign: false }); + data.append(FP16x16 { mag: 14155776, sign: false }); + data.append(FP16x16 { mag: 21823488, sign: false }); + data.append(FP16x16 { mag: 23003136, sign: false }); + data.append(FP16x16 { mag: 24182784, sign: false }); + data.append(FP16x16 { mag: 16515072, sign: false }); + data.append(FP16x16 { mag: 18087936, sign: false }); + data.append(FP16x16 { mag: 27721728, sign: false }); + data.append(FP16x16 { mag: 28901376, sign: false }); + data.append(FP16x16 { mag: 30081024, sign: false }); + data.append(FP16x16 { mag: 20447232, sign: false }); + data.append(FP16x16 { mag: 22020096, sign: false }); + data.append(FP16x16 { mag: 33619968, sign: false }); + data.append(FP16x16 { mag: 34799616, sign: false }); + data.append(FP16x16 { mag: 35979264, sign: false }); + data.append(FP16x16 { mag: 24379392, sign: false }); + data.append(FP16x16 { mag: 15990784, sign: false }); + data.append(FP16x16 { mag: 24379392, sign: false }); + data.append(FP16x16 { mag: 25165824, sign: false }); + data.append(FP16x16 { mag: 25952256, sign: false }); + data.append(FP16x16 { mag: 17563648, sign: false }); + data.append(FP16x16 { mag: 22020096, sign: false }); + data.append(FP16x16 { mag: 33619968, sign: false }); + data.append(FP16x16 { mag: 34799616, sign: false }); + data.append(FP16x16 { mag: 35979264, sign: false }); + data.append(FP16x16 { mag: 24379392, sign: false }); + data.append(FP16x16 { mag: 35979264, sign: false }); + data.append(FP16x16 { mag: 54853632, sign: false }); + data.append(FP16x16 { mag: 56623104, sign: false }); + data.append(FP16x16 { mag: 58392576, sign: false }); + data.append(FP16x16 { mag: 39518208, sign: false }); + data.append(FP16x16 { mag: 41877504, sign: false }); + data.append(FP16x16 { mag: 63700992, sign: false }); + data.append(FP16x16 { mag: 65470464, sign: false }); + 
data.append(FP16x16 { mag: 67239936, sign: false }); + data.append(FP16x16 { mag: 45416448, sign: false }); + data.append(FP16x16 { mag: 47775744, sign: false }); + data.append(FP16x16 { mag: 72548352, sign: false }); + data.append(FP16x16 { mag: 74317824, sign: false }); + data.append(FP16x16 { mag: 76087296, sign: false }); + data.append(FP16x16 { mag: 51314688, sign: false }); + data.append(FP16x16 { mag: 33816576, sign: false }); + data.append(FP16x16 { mag: 51314688, sign: false }); + data.append(FP16x16 { mag: 52494336, sign: false }); + data.append(FP16x16 { mag: 53673984, sign: false }); + data.append(FP16x16 { mag: 36175872, sign: false }); + data.append(FP16x16 { mag: 41680896, sign: false }); + data.append(FP16x16 { mag: 63111168, sign: false }); + data.append(FP16x16 { mag: 64290816, sign: false }); + data.append(FP16x16 { mag: 65470464, sign: false }); + data.append(FP16x16 { mag: 44040192, sign: false }); + data.append(FP16x16 { mag: 65470464, sign: false }); + data.append(FP16x16 { mag: 99090432, sign: false }); + data.append(FP16x16 { mag: 100859904, sign: false }); + data.append(FP16x16 { mag: 102629376, sign: false }); + data.append(FP16x16 { mag: 69009408, sign: false }); + data.append(FP16x16 { mag: 71368704, sign: false }); + data.append(FP16x16 { mag: 107937792, sign: false }); + data.append(FP16x16 { mag: 109707264, sign: false }); + data.append(FP16x16 { mag: 111476736, sign: false }); + data.append(FP16x16 { mag: 74907648, sign: false }); + data.append(FP16x16 { mag: 77266944, sign: false }); + data.append(FP16x16 { mag: 116785152, sign: false }); + data.append(FP16x16 { mag: 118554624, sign: false }); + data.append(FP16x16 { mag: 120324096, sign: false }); + data.append(FP16x16 { mag: 80805888, sign: false }); + data.append(FP16x16 { mag: 53477376, sign: false }); + data.append(FP16x16 { mag: 80805888, sign: false }); + data.append(FP16x16 { mag: 81985536, sign: false }); + data.append(FP16x16 { mag: 83165184, sign: false }); + 
data.append(FP16x16 { mag: 55836672, sign: false }); + data.append(FP16x16 { mag: 61341696, sign: false }); + data.append(FP16x16 { mag: 92602368, sign: false }); + data.append(FP16x16 { mag: 93782016, sign: false }); + data.append(FP16x16 { mag: 94961664, sign: false }); + data.append(FP16x16 { mag: 63700992, sign: false }); + data.append(FP16x16 { mag: 94961664, sign: false }); + data.append(FP16x16 { mag: 143327232, sign: false }); + data.append(FP16x16 { mag: 145096704, sign: false }); + data.append(FP16x16 { mag: 146866176, sign: false }); + data.append(FP16x16 { mag: 98500608, sign: false }); + data.append(FP16x16 { mag: 100859904, sign: false }); + data.append(FP16x16 { mag: 152174592, sign: false }); + data.append(FP16x16 { mag: 153944064, sign: false }); + data.append(FP16x16 { mag: 155713536, sign: false }); + data.append(FP16x16 { mag: 104398848, sign: false }); + data.append(FP16x16 { mag: 106758144, sign: false }); + data.append(FP16x16 { mag: 161021952, sign: false }); + data.append(FP16x16 { mag: 162791424, sign: false }); + data.append(FP16x16 { mag: 164560896, sign: false }); + data.append(FP16x16 { mag: 110297088, sign: false }); + data.append(FP16x16 { mag: 73138176, sign: false }); + data.append(FP16x16 { mag: 110297088, sign: false }); + data.append(FP16x16 { mag: 111476736, sign: false }); + data.append(FP16x16 { mag: 112656384, sign: false }); + data.append(FP16x16 { mag: 75497472, sign: false }); + data.append(FP16x16 { mag: 47448064, sign: false }); + data.append(FP16x16 { mag: 71565312, sign: false }); + data.append(FP16x16 { mag: 72351744, sign: false }); + data.append(FP16x16 { mag: 73138176, sign: false }); + data.append(FP16x16 { mag: 49020928, sign: false }); + data.append(FP16x16 { mag: 73138176, sign: false }); + data.append(FP16x16 { mag: 110297088, sign: false }); + data.append(FP16x16 { mag: 111476736, sign: false }); + data.append(FP16x16 { mag: 112656384, sign: false }); + data.append(FP16x16 { mag: 75497472, sign: false }); + 
data.append(FP16x16 { mag: 77070336, sign: false }); + data.append(FP16x16 { mag: 116195328, sign: false }); + data.append(FP16x16 { mag: 117374976, sign: false }); + data.append(FP16x16 { mag: 118554624, sign: false }); + data.append(FP16x16 { mag: 79429632, sign: false }); + data.append(FP16x16 { mag: 81002496, sign: false }); + data.append(FP16x16 { mag: 122093568, sign: false }); + data.append(FP16x16 { mag: 123273216, sign: false }); + data.append(FP16x16 { mag: 124452864, sign: false }); + data.append(FP16x16 { mag: 83361792, sign: false }); + data.append(FP16x16 { mag: 55312384, sign: false }); + data.append(FP16x16 { mag: 83361792, sign: false }); + data.append(FP16x16 { mag: 84148224, sign: false }); + data.append(FP16x16 { mag: 84934656, sign: false }); + data.append(FP16x16 { mag: 56885248, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_4D_no_padding.cairo b/tests/nodes/conv_4D_no_padding.cairo new file mode 100644 index 000000000..5f7c2a3bd --- /dev/null +++ b/tests/nodes/conv_4D_no_padding.cairo @@ -0,0 +1,32 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_4D_no_padding() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_4D_no_padding/input_0.cairo b/tests/nodes/conv_4D_no_padding/input_0.cairo new file mode 100644 index 000000000..9aa196216 --- /dev/null +++ b/tests/nodes/conv_4D_no_padding/input_0.cairo @@ -0,0 +1,98 @@ +use core::array::{ArrayTrait, SpanTrait}; 
+use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + data.append(FP16x16 { mag: 1900544, sign: false }); + 
data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 2228224, sign: false }); + data.append(FP16x16 { mag: 2293760, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 2424832, sign: false }); + data.append(FP16x16 { mag: 2490368, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 2621440, sign: false }); + data.append(FP16x16 { mag: 2686976, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 2818048, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 2949120, sign: false }); + data.append(FP16x16 { mag: 3014656, sign: false }); + data.append(FP16x16 { mag: 3080192, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 3276800, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 3407872, sign: false }); + data.append(FP16x16 { mag: 3473408, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 3604480, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 3735552, sign: false }); + data.append(FP16x16 { mag: 3801088, sign: false }); + data.append(FP16x16 { mag: 3866624, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 3997696, sign: false }); + data.append(FP16x16 { mag: 4063232, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 4194304, sign: false }); + data.append(FP16x16 { mag: 4259840, sign: false }); + data.append(FP16x16 { mag: 4325376, sign: false }); + 
data.append(FP16x16 { mag: 4390912, sign: false }); + data.append(FP16x16 { mag: 4456448, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 4587520, sign: false }); + data.append(FP16x16 { mag: 4653056, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 4784128, sign: false }); + data.append(FP16x16 { mag: 4849664, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + data.append(FP16x16 { mag: 4980736, sign: false }); + data.append(FP16x16 { mag: 5046272, sign: false }); + data.append(FP16x16 { mag: 5111808, sign: false }); + data.append(FP16x16 { mag: 5177344, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_4D_no_padding/input_1.cairo b/tests/nodes/conv_4D_no_padding/input_1.cairo new file mode 100644 index 000000000..ca6d6c0a5 --- /dev/null +++ b/tests/nodes/conv_4D_no_padding/input_1.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + 
data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_4D_no_padding/output_0.cairo b/tests/nodes/conv_4D_no_padding/output_0.cairo new file mode 100644 index 000000000..59cd9927a --- /dev/null +++ b/tests/nodes/conv_4D_no_padding/output_0.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 20971520, sign: false }); + data.append(FP16x16 { mag: 22020096, sign: false }); + data.append(FP16x16 { mag: 24117248, sign: false }); + data.append(FP16x16 { mag: 25165824, sign: false }); + data.append(FP16x16 { mag: 30408704, sign: false }); + data.append(FP16x16 { mag: 31457280, sign: false }); + data.append(FP16x16 { mag: 33554432, sign: false }); + data.append(FP16x16 { mag: 34603008, sign: false }); + data.append(FP16x16 { mag: 49283072, sign: false }); + data.append(FP16x16 { mag: 50331648, sign: false }); + data.append(FP16x16 { mag: 52428800, sign: false }); + data.append(FP16x16 { mag: 53477376, sign: false }); + data.append(FP16x16 { mag: 58720256, sign: false }); + data.append(FP16x16 { mag: 59768832, sign: false }); + data.append(FP16x16 { mag: 61865984, sign: false }); + data.append(FP16x16 { mag: 62914560, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git 
a/tests/nodes/conv_4D_with_padding.cairo b/tests/nodes/conv_4D_with_padding.cairo new file mode 100644 index 000000000..8639eee08 --- /dev/null +++ b/tests/nodes/conv_4D_with_padding.cairo @@ -0,0 +1,32 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_conv_4D_with_padding() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::conv( + @input_0, + @input_1, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![1, 1, 1, 1, 1, 1, 1, 1].span()), + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/conv_4D_with_padding/input_0.cairo b/tests/nodes/conv_4D_with_padding/input_0.cairo new file mode 100644 index 000000000..9aa196216 --- /dev/null +++ b/tests/nodes/conv_4D_with_padding/input_0.cairo @@ -0,0 +1,98 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 
524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + data.append(FP16x16 { mag: 1900544, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 2228224, sign: false }); + data.append(FP16x16 { mag: 2293760, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 2424832, sign: false }); + data.append(FP16x16 { mag: 2490368, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 2621440, sign: false }); + data.append(FP16x16 { mag: 2686976, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 2818048, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 2949120, 
sign: false }); + data.append(FP16x16 { mag: 3014656, sign: false }); + data.append(FP16x16 { mag: 3080192, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 3276800, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 3407872, sign: false }); + data.append(FP16x16 { mag: 3473408, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 3604480, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 3735552, sign: false }); + data.append(FP16x16 { mag: 3801088, sign: false }); + data.append(FP16x16 { mag: 3866624, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 3997696, sign: false }); + data.append(FP16x16 { mag: 4063232, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 4194304, sign: false }); + data.append(FP16x16 { mag: 4259840, sign: false }); + data.append(FP16x16 { mag: 4325376, sign: false }); + data.append(FP16x16 { mag: 4390912, sign: false }); + data.append(FP16x16 { mag: 4456448, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 4587520, sign: false }); + data.append(FP16x16 { mag: 4653056, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 4784128, sign: false }); + data.append(FP16x16 { mag: 4849664, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + data.append(FP16x16 { mag: 4980736, sign: false }); + data.append(FP16x16 { mag: 5046272, sign: false }); + data.append(FP16x16 { mag: 5111808, sign: false }); + data.append(FP16x16 { mag: 5177344, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git 
a/tests/nodes/conv_4D_with_padding/input_1.cairo b/tests/nodes/conv_4D_with_padding/input_1.cairo new file mode 100644 index 000000000..ca6d6c0a5 --- /dev/null +++ b/tests/nodes/conv_4D_with_padding/input_1.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/conv_4D_with_padding/output_0.cairo b/tests/nodes/conv_4D_with_padding/output_0.cairo new file mode 100644 index 000000000..2e3c9d3c3 --- /dev/null +++ b/tests/nodes/conv_4D_with_padding/output_0.cairo @@ -0,0 +1,273 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn 
output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 3407872, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 4980736, sign: false }); + data.append(FP16x16 { mag: 5505024, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { 
mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 8126464, sign: false }); + data.append(FP16x16 { mag: 8650752, sign: false }); + data.append(FP16x16 { mag: 4456448, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 9699328, sign: false }); + data.append(FP16x16 { mag: 10223616, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + data.append(FP16x16 { mag: 5505024, sign: false }); + data.append(FP16x16 { mag: 2818048, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 2424832, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + data.append(FP16x16 { mag: 5505024, sign: false }); + data.append(FP16x16 { mag: 2818048, sign: false }); + data.append(FP16x16 { mag: 2949120, sign: false }); + data.append(FP16x16 { mag: 6029312, sign: false }); + data.append(FP16x16 { mag: 6291456, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 8126464, sign: false }); + data.append(FP16x16 { 
mag: 8650752, sign: false }); + data.append(FP16x16 { mag: 4456448, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 9699328, sign: false }); + data.append(FP16x16 { mag: 10223616, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + data.append(FP16x16 { mag: 5505024, sign: false }); + data.append(FP16x16 { mag: 2818048, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 9699328, sign: false }); + data.append(FP16x16 { mag: 10223616, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + data.append(FP16x16 { mag: 10223616, sign: false }); + data.append(FP16x16 { mag: 20971520, sign: false }); + data.append(FP16x16 { mag: 22020096, sign: false }); + data.append(FP16x16 { mag: 11272192, sign: false }); + data.append(FP16x16 { mag: 11796480, sign: false }); + data.append(FP16x16 { mag: 24117248, sign: false }); + data.append(FP16x16 { mag: 25165824, sign: false }); + data.append(FP16x16 { mag: 12845056, sign: false }); + data.append(FP16x16 { mag: 6291456, sign: false }); + data.append(FP16x16 { mag: 12845056, sign: false }); + data.append(FP16x16 { mag: 13369344, sign: false }); + data.append(FP16x16 { mag: 6815744, sign: false }); + data.append(FP16x16 { mag: 7077888, sign: false }); + data.append(FP16x16 { mag: 14417920, sign: false }); + data.append(FP16x16 { mag: 14942208, sign: false }); + data.append(FP16x16 { mag: 7602176, sign: false }); + data.append(FP16x16 { mag: 14942208, sign: false }); + data.append(FP16x16 { mag: 30408704, sign: false }); + data.append(FP16x16 { mag: 31457280, sign: false }); + data.append(FP16x16 { mag: 15990784, sign: false }); + data.append(FP16x16 { mag: 16515072, sign: false }); + data.append(FP16x16 { mag: 33554432, sign: false }); + data.append(FP16x16 { mag: 34603008, sign: false }); + 
data.append(FP16x16 { mag: 17563648, sign: false }); + data.append(FP16x16 { mag: 8650752, sign: false }); + data.append(FP16x16 { mag: 17563648, sign: false }); + data.append(FP16x16 { mag: 18087936, sign: false }); + data.append(FP16x16 { mag: 9175040, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 8388608, sign: false }); + data.append(FP16x16 { mag: 8650752, sign: false }); + data.append(FP16x16 { mag: 4390912, sign: false }); + data.append(FP16x16 { mag: 8650752, sign: false }); + data.append(FP16x16 { mag: 17563648, sign: false }); + data.append(FP16x16 { mag: 18087936, sign: false }); + data.append(FP16x16 { mag: 9175040, sign: false }); + data.append(FP16x16 { mag: 9437184, sign: false }); + data.append(FP16x16 { mag: 19136512, sign: false }); + data.append(FP16x16 { mag: 19660800, sign: false }); + data.append(FP16x16 { mag: 9961472, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + data.append(FP16x16 { mag: 9961472, sign: false }); + data.append(FP16x16 { mag: 10223616, sign: false }); + data.append(FP16x16 { mag: 5177344, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 10747904, sign: false }); + data.append(FP16x16 { mag: 11010048, sign: false }); + data.append(FP16x16 { mag: 5570560, sign: false }); + data.append(FP16x16 { mag: 11010048, sign: false }); + data.append(FP16x16 { mag: 22282240, sign: false }); + data.append(FP16x16 { mag: 22806528, sign: false }); + data.append(FP16x16 { mag: 11534336, sign: false }); + data.append(FP16x16 { mag: 11796480, sign: false }); + data.append(FP16x16 { mag: 23855104, sign: false }); + data.append(FP16x16 { mag: 24379392, sign: false }); + data.append(FP16x16 { mag: 12320768, sign: false }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 12320768, sign: false }); + data.append(FP16x16 { mag: 12582912, sign: false }); + data.append(FP16x16 { mag: 6356992, 
sign: false }); + data.append(FP16x16 { mag: 11796480, sign: false }); + data.append(FP16x16 { mag: 23855104, sign: false }); + data.append(FP16x16 { mag: 24379392, sign: false }); + data.append(FP16x16 { mag: 12320768, sign: false }); + data.append(FP16x16 { mag: 24379392, sign: false }); + data.append(FP16x16 { mag: 49283072, sign: false }); + data.append(FP16x16 { mag: 50331648, sign: false }); + data.append(FP16x16 { mag: 25427968, sign: false }); + data.append(FP16x16 { mag: 25952256, sign: false }); + data.append(FP16x16 { mag: 52428800, sign: false }); + data.append(FP16x16 { mag: 53477376, sign: false }); + data.append(FP16x16 { mag: 27000832, sign: false }); + data.append(FP16x16 { mag: 13369344, sign: false }); + data.append(FP16x16 { mag: 27000832, sign: false }); + data.append(FP16x16 { mag: 27525120, sign: false }); + data.append(FP16x16 { mag: 13893632, sign: false }); + data.append(FP16x16 { mag: 14155776, sign: false }); + data.append(FP16x16 { mag: 28573696, sign: false }); + data.append(FP16x16 { mag: 29097984, sign: false }); + data.append(FP16x16 { mag: 14680064, sign: false }); + data.append(FP16x16 { mag: 29097984, sign: false }); + data.append(FP16x16 { mag: 58720256, sign: false }); + data.append(FP16x16 { mag: 59768832, sign: false }); + data.append(FP16x16 { mag: 30146560, sign: false }); + data.append(FP16x16 { mag: 30670848, sign: false }); + data.append(FP16x16 { mag: 61865984, sign: false }); + data.append(FP16x16 { mag: 62914560, sign: false }); + data.append(FP16x16 { mag: 31719424, sign: false }); + data.append(FP16x16 { mag: 15728640, sign: false }); + data.append(FP16x16 { mag: 31719424, sign: false }); + data.append(FP16x16 { mag: 32243712, sign: false }); + data.append(FP16x16 { mag: 16252928, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 15466496, sign: false }); + data.append(FP16x16 { mag: 15728640, sign: false }); + data.append(FP16x16 { mag: 7929856, sign: false }); + 
data.append(FP16x16 { mag: 15728640, sign: false }); + data.append(FP16x16 { mag: 31719424, sign: false }); + data.append(FP16x16 { mag: 32243712, sign: false }); + data.append(FP16x16 { mag: 16252928, sign: false }); + data.append(FP16x16 { mag: 16515072, sign: false }); + data.append(FP16x16 { mag: 33292288, sign: false }); + data.append(FP16x16 { mag: 33816576, sign: false }); + data.append(FP16x16 { mag: 17039360, sign: false }); + data.append(FP16x16 { mag: 8454144, sign: false }); + data.append(FP16x16 { mag: 17039360, sign: false }); + data.append(FP16x16 { mag: 17301504, sign: false }); + data.append(FP16x16 { mag: 8716288, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 7143424, sign: false }); + data.append(FP16x16 { mag: 7274496, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 7274496, sign: false }); + data.append(FP16x16 { mag: 14680064, sign: false }); + data.append(FP16x16 { mag: 14942208, sign: false }); + data.append(FP16x16 { mag: 7536640, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 15466496, sign: false }); + data.append(FP16x16 { mag: 15728640, sign: false }); + data.append(FP16x16 { mag: 7929856, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 7929856, sign: false }); + data.append(FP16x16 { mag: 8060928, sign: false }); + data.append(FP16x16 { mag: 4063232, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 15466496, sign: false }); + data.append(FP16x16 { mag: 15728640, sign: false }); + data.append(FP16x16 { mag: 7929856, sign: false }); + data.append(FP16x16 { mag: 15728640, sign: false }); + data.append(FP16x16 { mag: 31719424, sign: false }); + data.append(FP16x16 { mag: 32243712, sign: false }); + data.append(FP16x16 { mag: 16252928, sign: false }); + data.append(FP16x16 { mag: 
16515072, sign: false }); + data.append(FP16x16 { mag: 33292288, sign: false }); + data.append(FP16x16 { mag: 33816576, sign: false }); + data.append(FP16x16 { mag: 17039360, sign: false }); + data.append(FP16x16 { mag: 8454144, sign: false }); + data.append(FP16x16 { mag: 17039360, sign: false }); + data.append(FP16x16 { mag: 17301504, sign: false }); + data.append(FP16x16 { mag: 8716288, sign: false }); + data.append(FP16x16 { mag: 8847360, sign: false }); + data.append(FP16x16 { mag: 17825792, sign: false }); + data.append(FP16x16 { mag: 18087936, sign: false }); + data.append(FP16x16 { mag: 9109504, sign: false }); + data.append(FP16x16 { mag: 18087936, sign: false }); + data.append(FP16x16 { mag: 36438016, sign: false }); + data.append(FP16x16 { mag: 36962304, sign: false }); + data.append(FP16x16 { mag: 18612224, sign: false }); + data.append(FP16x16 { mag: 18874368, sign: false }); + data.append(FP16x16 { mag: 38010880, sign: false }); + data.append(FP16x16 { mag: 38535168, sign: false }); + data.append(FP16x16 { mag: 19398656, sign: false }); + data.append(FP16x16 { mag: 9633792, sign: false }); + data.append(FP16x16 { mag: 19398656, sign: false }); + data.append(FP16x16 { mag: 19660800, sign: false }); + data.append(FP16x16 { mag: 9895936, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 9502720, sign: false }); + data.append(FP16x16 { mag: 9633792, sign: false }); + data.append(FP16x16 { mag: 4849664, sign: false }); + data.append(FP16x16 { mag: 9633792, sign: false }); + data.append(FP16x16 { mag: 19398656, sign: false }); + data.append(FP16x16 { mag: 19660800, sign: false }); + data.append(FP16x16 { mag: 9895936, sign: false }); + data.append(FP16x16 { mag: 10027008, sign: false }); + data.append(FP16x16 { mag: 20185088, sign: false }); + data.append(FP16x16 { mag: 20447232, sign: false }); + data.append(FP16x16 { mag: 10289152, sign: false }); + data.append(FP16x16 { mag: 5111808, sign: false }); + 
data.append(FP16x16 { mag: 10289152, sign: false }); + data.append(FP16x16 { mag: 10420224, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/gather_fp16x16_3d_axis1.cairo b/tests/nodes/gather_fp16x16_3d_axis1.cairo index 8c4af9664..429d085d4 100644 --- a/tests/nodes/gather_fp16x16_3d_axis1.cairo +++ b/tests/nodes/gather_fp16x16_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_axis2.cairo b/tests/nodes/gather_fp16x16_3d_axis2.cairo index 0b4f77ed8..cfb8a61d2 100644 --- a/tests/nodes/gather_fp16x16_3d_axis2.cairo +++ b/tests/nodes/gather_fp16x16_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_default.cairo b/tests/nodes/gather_fp16x16_3d_default.cairo index 91c9ebdd4..ee49aac75 100644 --- a/tests/nodes/gather_fp16x16_3d_default.cairo +++ b/tests/nodes/gather_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp8x23_3d_axis1.cairo b/tests/nodes/gather_fp8x23_3d_axis1.cairo index 6a5d1a046..c9c6dcf7f 100644 --- a/tests/nodes/gather_fp8x23_3d_axis1.cairo +++ b/tests/nodes/gather_fp8x23_3d_axis1.cairo @@ -18,7 +18,7 @@ fn 
test_gather_fp8x23_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp8x23_3d_axis2.cairo b/tests/nodes/gather_fp8x23_3d_axis2.cairo index d5a913163..726411dd2 100644 --- a/tests/nodes/gather_fp8x23_3d_axis2.cairo +++ b/tests/nodes/gather_fp8x23_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_fp8x23_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp8x23_3d_default.cairo b/tests/nodes/gather_fp8x23_3d_default.cairo index 7f9492f8d..e844827f9 100644 --- a/tests/nodes/gather_fp8x23_3d_default.cairo +++ b/tests/nodes/gather_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i32_3d_axis1.cairo b/tests/nodes/gather_i32_3d_axis1.cairo index 8b1777d8f..6dbb78c47 100644 --- a/tests/nodes/gather_i32_3d_axis1.cairo +++ b/tests/nodes/gather_i32_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_i32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i32_3d_axis2.cairo b/tests/nodes/gather_i32_3d_axis2.cairo index bdc557d7a..29bd217b3 100644 --- a/tests/nodes/gather_i32_3d_axis2.cairo +++ b/tests/nodes/gather_i32_3d_axis2.cairo @@ -18,7 +18,7 @@ fn 
test_gather_i32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i32_3d_default.cairo b/tests/nodes/gather_i32_3d_default.cairo index 9288c3dab..4c0b9c9bd 100644 --- a/tests/nodes/gather_i32_3d_default.cairo +++ b/tests/nodes/gather_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i8_3d_axis1.cairo b/tests/nodes/gather_i8_3d_axis1.cairo index 10dd5ce6f..140608123 100644 --- a/tests/nodes/gather_i8_3d_axis1.cairo +++ b/tests/nodes/gather_i8_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_i8_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i8_3d_axis2.cairo b/tests/nodes/gather_i8_3d_axis2.cairo index 35f50077a..992cee33e 100644 --- a/tests/nodes/gather_i8_3d_axis2.cairo +++ b/tests/nodes/gather_i8_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_i8_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i8_3d_default.cairo b/tests/nodes/gather_i8_3d_default.cairo index 5bc437a7b..0f8e6dec2 100644 --- a/tests/nodes/gather_i8_3d_default.cairo +++ b/tests/nodes/gather_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_i8_3d_default() { let input_1 = 
input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo b/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo index 86de6e9b9..037d2ad93 100644 --- a/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp16x16_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo b/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo index d2ac3b2ce..3661bb6c5 100644 --- a/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo +++ b/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp16x16_3d_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp16x16_3d_default.cairo b/tests/nodes/gather_nd_fp16x16_3d_default.cairo index 157266adb..60f116c86 100644 --- a/tests/nodes/gather_nd_fp16x16_3d_default.cairo +++ b/tests/nodes/gather_nd_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo 
b/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo index 6da924b6c..c523e0135 100644 --- a/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp8x23_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo b/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo index 251d442ba..edb022910 100644 --- a/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo +++ b/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp8x23_3d_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp8x23_3d_default.cairo b/tests/nodes/gather_nd_fp8x23_3d_default.cairo index 8ce119604..70b25cea1 100644 --- a/tests/nodes/gather_nd_fp8x23_3d_default.cairo +++ b/tests/nodes/gather_nd_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo b/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo index 1d275fb4a..923c7f9ba 100644 --- a/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i32_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 
= input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo b/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo index 6bfa5cf4a..44ed06b2c 100644 --- a/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo +++ b/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i32_3d_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i32_3d_default.cairo b/tests/nodes/gather_nd_i32_3d_default.cairo index 4fa1c55f1..5268e13f4 100644 --- a/tests/nodes/gather_nd_i32_3d_default.cairo +++ b/tests/nodes/gather_nd_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo b/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo index b42d1a430..1d47f72ff 100644 --- a/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i8_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i8_3d_default.cairo b/tests/nodes/gather_nd_i8_3d_default.cairo index 6ee8e0a9e..f9152f412 100644 --- a/tests/nodes/gather_nd_i8_3d_default.cairo +++ 
b/tests/nodes/gather_nd_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i8_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_batch_dims1.cairo b/tests/nodes/gather_nd_u32_batch_dims1.cairo index d1bfb099c..7689359ee 100644 --- a/tests/nodes/gather_nd_u32_batch_dims1.cairo +++ b/tests/nodes/gather_nd_u32_batch_dims1.cairo @@ -16,7 +16,7 @@ fn test_gather_nd_u32_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_batch_dims2.cairo b/tests/nodes/gather_nd_u32_batch_dims2.cairo index 2cd029255..4659cfaa7 100644 --- a/tests/nodes/gather_nd_u32_batch_dims2.cairo +++ b/tests/nodes/gather_nd_u32_batch_dims2.cairo @@ -16,7 +16,7 @@ fn test_gather_nd_u32_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_default.cairo b/tests/nodes/gather_nd_u32_default.cairo index 5893b5017..e226d0eb0 100644 --- a/tests/nodes/gather_nd_u32_default.cairo +++ b/tests/nodes/gather_nd_u32_default.cairo @@ -16,7 +16,7 @@ fn test_gather_nd_u32_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_u32_3d_axis1.cairo 
b/tests/nodes/gather_u32_3d_axis1.cairo index 641d67f80..1a7a56d37 100644 --- a/tests/nodes/gather_u32_3d_axis1.cairo +++ b/tests/nodes/gather_u32_3d_axis1.cairo @@ -16,7 +16,7 @@ fn test_gather_u32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_u32_3d_axis2.cairo b/tests/nodes/gather_u32_3d_axis2.cairo index 94f91a138..30d5f6a61 100644 --- a/tests/nodes/gather_u32_3d_axis2.cairo +++ b/tests/nodes/gather_u32_3d_axis2.cairo @@ -16,7 +16,7 @@ fn test_gather_u32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_u32_3d_default.cairo b/tests/nodes/gather_u32_3d_default.cairo index 7931d3e27..8f223c4af 100644 --- a/tests/nodes/gather_u32_3d_default.cairo +++ b/tests/nodes/gather_u32_3d_default.cairo @@ -16,7 +16,7 @@ fn test_gather_u32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gemm_all_attributes.cairo b/tests/nodes/gemm_all_attributes.cairo index c543ddb3b..2cbd9cab3 100644 --- a/tests/nodes/gemm_all_attributes.cairo +++ b/tests/nodes/gemm_all_attributes.cairo @@ -18,7 +18,15 @@ fn test_gemm_all_attributes() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::Some(FixedTrait::new(16384, false)), Option::Some(FixedTrait::new(22938, false)), true, true); + let y = NNTrait::gemm( + input_0, + input_1, + Option::Some(input_2), + 
Option::Some(FixedTrait::new(16384, false)), + Option::Some(FixedTrait::new(22938, false)), + true, + true + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_alpha.cairo b/tests/nodes/gemm_alpha.cairo index 074392584..dad8187f4 100644 --- a/tests/nodes/gemm_alpha.cairo +++ b/tests/nodes/gemm_alpha.cairo @@ -16,7 +16,15 @@ fn test_gemm_alpha() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::Some(FixedTrait::new(32768, false)), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, + input_1, + Option::None(()), + Option::Some(FixedTrait::new(32768, false)), + Option::None(()), + false, + false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_beta.cairo b/tests/nodes/gemm_beta.cairo index 9ec8fe530..9f417e32a 100644 --- a/tests/nodes/gemm_beta.cairo +++ b/tests/nodes/gemm_beta.cairo @@ -18,7 +18,15 @@ fn test_gemm_beta() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::Some(FixedTrait::new(32768, false)), false, false); + let y = NNTrait::gemm( + input_0, + input_1, + Option::Some(input_2), + Option::None(()), + Option::Some(FixedTrait::new(32768, false)), + false, + false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_default_matrix_bias.cairo b/tests/nodes/gemm_default_matrix_bias.cairo index 76c6fff0c..16d00f933 100644 --- a/tests/nodes/gemm_default_matrix_bias.cairo +++ b/tests/nodes/gemm_default_matrix_bias.cairo @@ -18,7 +18,9 @@ fn test_gemm_default_matrix_bias() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_default_no_bias.cairo 
b/tests/nodes/gemm_default_no_bias.cairo index b702bcfc3..ea43cd0fe 100644 --- a/tests/nodes/gemm_default_no_bias.cairo +++ b/tests/nodes/gemm_default_no_bias.cairo @@ -16,7 +16,9 @@ fn test_gemm_default_no_bias() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_default_vector_bias.cairo b/tests/nodes/gemm_default_vector_bias.cairo index 7f4f2646b..24826f739 100644 --- a/tests/nodes/gemm_default_vector_bias.cairo +++ b/tests/nodes/gemm_default_vector_bias.cairo @@ -18,7 +18,9 @@ fn test_gemm_default_vector_bias() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_transposeA.cairo b/tests/nodes/gemm_transposeA.cairo index c0b49d799..76c4592e4 100644 --- a/tests/nodes/gemm_transposeA.cairo +++ b/tests/nodes/gemm_transposeA.cairo @@ -16,7 +16,9 @@ fn test_gemm_transposeA() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), true, false); + let y = NNTrait::gemm( + input_0, input_1, Option::None(()), Option::None(()), Option::None(()), true, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_transposeB.cairo b/tests/nodes/gemm_transposeB.cairo index 4c7ccbef4..1728fd014 100644 --- a/tests/nodes/gemm_transposeB.cairo +++ b/tests/nodes/gemm_transposeB.cairo @@ -16,7 +16,9 @@ fn test_gemm_transposeB() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - 
let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, true); + let y = NNTrait::gemm( + input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, true + ); assert_eq(y, z); } diff --git a/tests/nodes/hard_sigmoid_fp16x16.cairo b/tests/nodes/hard_sigmoid_fp16x16.cairo index 8a8f8672a..6ad8c8c6c 100644 --- a/tests/nodes/hard_sigmoid_fp16x16.cairo +++ b/tests/nodes/hard_sigmoid_fp16x16.cairo @@ -14,7 +14,9 @@ fn test_hard_sigmoid_fp16x16() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = NNTrait::hard_sigmoid(@input_0, @FixedTrait::new(13107, false), @FixedTrait::new(32768, false)); + let y = NNTrait::hard_sigmoid( + @input_0, @FixedTrait::new(13107, false), @FixedTrait::new(32768, false) + ); assert_eq(y, z); } diff --git a/tests/nodes/hard_sigmoid_fp8x23.cairo b/tests/nodes/hard_sigmoid_fp8x23.cairo index 317c25425..3697b1d7a 100644 --- a/tests/nodes/hard_sigmoid_fp8x23.cairo +++ b/tests/nodes/hard_sigmoid_fp8x23.cairo @@ -14,7 +14,9 @@ fn test_hard_sigmoid_fp8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = NNTrait::hard_sigmoid(@input_0, @FixedTrait::new(1677721, false), @FixedTrait::new(4194304, false)); + let y = NNTrait::hard_sigmoid( + @input_0, @FixedTrait::new(1677721, false), @FixedTrait::new(4194304, false) + ); assert_eq(y, z); } diff --git a/tests/nodes/is_nan_fp16x16/input_0.cairo b/tests/nodes/is_nan_fp16x16/input_0.cairo index 576456503..8c86af4fb 100644 --- a/tests/nodes/is_nan_fp16x16/input_0.cairo +++ b/tests/nodes/is_nan_fp16x16/input_0.cairo @@ -15,4 +15,4 @@ fn input_0() -> Tensor { data.append(FixedTrait::NaN()); data.append(FixedTrait::NaN()); TensorTrait::new(shape.span(), data.span()) -} \ No newline at end of file +} diff --git a/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo index 6931c44ec..93373e675 100644 --- 
a/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis0_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(0),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(0), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo index 1bdb8700d..72d384de1 100644 --- a/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis1_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(1),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(1), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo index 06505280b..44a5f550d 100644 --- a/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis2_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(2),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + 
@input_1, + Option::Some(@input_2), + Option::Some(2), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo index 4c095bf62..0b5b77e17 100644 --- a/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis_negative_1_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-1),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(-1), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo index 0be005ddd..5f632aa6e 100644 --- a/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis_negative_2_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-2),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(-2), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo index e3c602e1f..d08c443f8 100644 --- 
a/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis_negative_3_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-3),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(-3), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis0.cairo b/tests/nodes/layer_normalization_4d_axis0.cairo index 45a825cd5..279acc624 100644 --- a/tests/nodes/layer_normalization_4d_axis0.cairo +++ b/tests/nodes/layer_normalization_4d_axis0.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis0() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(0),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(0), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis1.cairo b/tests/nodes/layer_normalization_4d_axis1.cairo index e7ee8885c..d8e00b332 100644 --- a/tests/nodes/layer_normalization_4d_axis1.cairo +++ b/tests/nodes/layer_normalization_4d_axis1.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis1() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(1),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(1), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git 
a/tests/nodes/layer_normalization_4d_axis2.cairo b/tests/nodes/layer_normalization_4d_axis2.cairo index 3bd45e907..65b738957 100644 --- a/tests/nodes/layer_normalization_4d_axis2.cairo +++ b/tests/nodes/layer_normalization_4d_axis2.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis2() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(2),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(2), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis3.cairo b/tests/nodes/layer_normalization_4d_axis3.cairo index 4b173b4f6..fae5a51c7 100644 --- a/tests/nodes/layer_normalization_4d_axis3.cairo +++ b/tests/nodes/layer_normalization_4d_axis3.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis3() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(3),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(3), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_1.cairo b/tests/nodes/layer_normalization_4d_axis_negative_1.cairo index d7b04e192..2f879f988 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_1.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_1.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_1() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-1),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(-1), Option::None, Option::None + ); 
assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_2.cairo b/tests/nodes/layer_normalization_4d_axis_negative_2.cairo index 5e17a8b52..718c97ad5 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_2.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_2.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_2() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(2),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(2), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_3.cairo b/tests/nodes/layer_normalization_4d_axis_negative_3.cairo index 4188eec6c..b97678d38 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_3.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_3.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_3() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-3),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(-3), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_4.cairo b/tests/nodes/layer_normalization_4d_axis_negative_4.cairo index 5aa5971dc..94be87f32 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_4.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_4.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_4() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-4),Option::None,Option::None); + let 
(y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(-4), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_default_axis.cairo b/tests/nodes/layer_normalization_default_axis.cairo index dd792e731..994ab7106 100644 --- a/tests/nodes/layer_normalization_default_axis.cairo +++ b/tests/nodes/layer_normalization_default_axis.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_default_axis() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::None, Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_test.cairo b/tests/nodes/layer_normalization_test.cairo index 631dc6f46..ad8baa5f2 100644 --- a/tests/nodes/layer_normalization_test.cairo +++ b/tests/nodes/layer_normalization_test.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_test() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::None, Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_fp16x16_3d_axis1.cairo b/tests/nodes/scatter_fp16x16_3d_axis1.cairo index b471e028c..5173d8bd7 100644 --- a/tests/nodes/scatter_fp16x16_3d_axis1.cairo +++ b/tests/nodes/scatter_fp16x16_3d_axis1.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp16x16_3d_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, 
+ axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo b/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo index c6fc48b15..be927416d 100644 --- a/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo +++ b/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp16x16_3d_axis1_add() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('add')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('add') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp16x16_3d_default.cairo b/tests/nodes/scatter_fp16x16_3d_default.cairo index c14bbc0a6..b106de54d 100644 --- a/tests/nodes/scatter_fp16x16_3d_default.cairo +++ b/tests/nodes/scatter_fp16x16_3d_default.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp16x16_3d_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp8x23_axis1.cairo b/tests/nodes/scatter_fp8x23_axis1.cairo index e0008d409..8ff871c7b 100644 --- a/tests/nodes/scatter_fp8x23_axis1.cairo +++ b/tests/nodes/scatter_fp8x23_axis1.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp8x23_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git 
a/tests/nodes/scatter_fp8x23_default.cairo b/tests/nodes/scatter_fp8x23_default.cairo index bdaea6568..157aca0bb 100644 --- a/tests/nodes/scatter_fp8x23_default.cairo +++ b/tests/nodes/scatter_fp8x23_default.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp8x23_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp8x23_mul.cairo b/tests/nodes/scatter_fp8x23_mul.cairo index 4430bf041..5b2305aee 100644 --- a/tests/nodes/scatter_fp8x23_mul.cairo +++ b/tests/nodes/scatter_fp8x23_mul.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp8x23_mul() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('mul')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('mul') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_i8_axis1.cairo b/tests/nodes/scatter_i8_axis1.cairo index e143463f1..c42123f3d 100644 --- a/tests/nodes/scatter_i8_axis1.cairo +++ b/tests/nodes/scatter_i8_axis1.cairo @@ -20,7 +20,13 @@ fn test_scatter_i8_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_i8_axis1_max.cairo b/tests/nodes/scatter_i8_axis1_max.cairo index 53dabbe40..844911a8d 100644 --- a/tests/nodes/scatter_i8_axis1_max.cairo +++ b/tests/nodes/scatter_i8_axis1_max.cairo @@ -20,7 
+20,13 @@ fn test_scatter_i8_axis1_max() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('max')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('max') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_i8_default.cairo b/tests/nodes/scatter_i8_default.cairo index c41b29d7b..f658268ce 100644 --- a/tests/nodes/scatter_i8_default.cairo +++ b/tests/nodes/scatter_i8_default.cairo @@ -20,7 +20,13 @@ fn test_scatter_i8_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_u32_add.cairo b/tests/nodes/scatter_u32_add.cairo index 735b8fb5e..2b14d68d1 100644 --- a/tests/nodes/scatter_u32_add.cairo +++ b/tests/nodes/scatter_u32_add.cairo @@ -18,7 +18,13 @@ fn test_scatter_u32_add() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('add')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('add') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_u32_axis1.cairo b/tests/nodes/scatter_u32_axis1.cairo index e2a96e71b..2c85e2a6c 100644 --- a/tests/nodes/scatter_u32_axis1.cairo +++ b/tests/nodes/scatter_u32_axis1.cairo @@ -18,7 +18,13 @@ fn test_scatter_u32_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + 
.scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_u32_default.cairo b/tests/nodes/scatter_u32_default.cairo index 1ccdac72f..5fb16207c 100644 --- a/tests/nodes/scatter_u32_default.cairo +++ b/tests/nodes/scatter_u32_default.cairo @@ -18,7 +18,13 @@ fn test_scatter_u32_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/sequence_insert_fp16x16.cairo b/tests/nodes/sequence_insert_fp16x16.cairo index d30b0d3e1..70316ebb9 100644 --- a/tests/nodes/sequence_insert_fp16x16.cairo +++ b/tests/nodes/sequence_insert_fp16x16.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_fp16x16() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_fp8x23.cairo b/tests/nodes/sequence_insert_fp8x23.cairo index ad4d12be4..fb474c6d4 100644 --- a/tests/nodes/sequence_insert_fp8x23.cairo +++ b/tests/nodes/sequence_insert_fp8x23.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_fp8x23() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_i32.cairo b/tests/nodes/sequence_insert_i32.cairo index 3a397715d..7bcadba2d 100644 --- a/tests/nodes/sequence_insert_i32.cairo +++ b/tests/nodes/sequence_insert_i32.cairo @@ -18,7 +18,7 @@ fn test_sequence_insert_i32() { 
let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_i8.cairo b/tests/nodes/sequence_insert_i8.cairo index a304ff2c4..ff1be34fe 100644 --- a/tests/nodes/sequence_insert_i8.cairo +++ b/tests/nodes/sequence_insert_i8.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_i8() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_u32.cairo b/tests/nodes/sequence_insert_u32.cairo index dcd905f72..079d6a4a0 100644 --- a/tests/nodes/sequence_insert_u32.cairo +++ b/tests/nodes/sequence_insert_u32.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_u32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_length_fp16x16.cairo b/tests/nodes/sequence_length_fp16x16.cairo index d971d5569..559ec3ff6 100644 --- a/tests/nodes/sequence_length_fp16x16.cairo +++ b/tests/nodes/sequence_length_fp16x16.cairo @@ -13,10 +13,10 @@ use orion::operators::sequence::SequenceTrait; #[test] #[available_gas(2000000000)] fn test_sequence_length_fp16x16() { - let input_0 = input_0::input_0(); + let input_0 = input_0::input_0(); let z = output_0::output_0(); let y = input_0.sequence_length(); assert_eq(y, z); -} +} diff --git a/tests/nodes/shrink_hard_fp16x16.cairo b/tests/nodes/shrink_hard_fp16x16.cairo index 0818844b2..2f5ec5312 100644 --- a/tests/nodes/shrink_hard_fp16x16.cairo +++ b/tests/nodes/shrink_hard_fp16x16.cairo @@ -15,7 +15,9 @@ fn test_shrink_hard_fp16x16() { let input_0 = 
input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::None(()), Option::Some(FixedTrait::new(65536, false))); + let y = TensorTrait::shrink( + input_0, Option::None(()), Option::Some(FixedTrait::new(65536, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/shrink_hard_fp8x23.cairo b/tests/nodes/shrink_hard_fp8x23.cairo index 3c054f433..c76eec1ec 100644 --- a/tests/nodes/shrink_hard_fp8x23.cairo +++ b/tests/nodes/shrink_hard_fp8x23.cairo @@ -15,7 +15,9 @@ fn test_shrink_hard_fp8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::None(()), Option::Some(FixedTrait::new(8388608, false))); + let y = TensorTrait::shrink( + input_0, Option::None(()), Option::Some(FixedTrait::new(8388608, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/shrink_soft_fp16x16.cairo b/tests/nodes/shrink_soft_fp16x16.cairo index 924ecfde5..aa975069c 100644 --- a/tests/nodes/shrink_soft_fp16x16.cairo +++ b/tests/nodes/shrink_soft_fp16x16.cairo @@ -15,7 +15,11 @@ fn test_shrink_soft_fp16x16() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::Some(FixedTrait::new(65536, false)), Option::Some(FixedTrait::new(65536, false))); + let y = TensorTrait::shrink( + input_0, + Option::Some(FixedTrait::new(65536, false)), + Option::Some(FixedTrait::new(65536, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/shrink_soft_fp8x23.cairo b/tests/nodes/shrink_soft_fp8x23.cairo index 01a314e10..8413beccd 100644 --- a/tests/nodes/shrink_soft_fp8x23.cairo +++ b/tests/nodes/shrink_soft_fp8x23.cairo @@ -15,7 +15,11 @@ fn test_shrink_soft_fp8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::Some(FixedTrait::new(8388608, false)), Option::Some(FixedTrait::new(8388608, false))); + let y = TensorTrait::shrink( + input_0, + Option::Some(FixedTrait::new(8388608, 
false)), + Option::Some(FixedTrait::new(8388608, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp16x16_2d.cairo b/tests/nodes/slice_fp16x16_2d.cairo index 5e3d593be..2a95e6e4b 100644 --- a/tests/nodes/slice_fp16x16_2d.cairo +++ b/tests/nodes/slice_fp16x16_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp16x16_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp16x16_3d.cairo b/tests/nodes/slice_fp16x16_3d.cairo index d0b5462c4..a681191ce 100644 --- a/tests/nodes/slice_fp16x16_3d.cairo +++ b/tests/nodes/slice_fp16x16_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp16x16_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp8x23_2d.cairo b/tests/nodes/slice_fp8x23_2d.cairo index 6a80a5422..56fed5a6a 100644 --- a/tests/nodes/slice_fp8x23_2d.cairo +++ b/tests/nodes/slice_fp8x23_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp8x23_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp8x23_3d.cairo 
b/tests/nodes/slice_fp8x23_3d.cairo index 5c2af30b7..fd5e95485 100644 --- a/tests/nodes/slice_fp8x23_3d.cairo +++ b/tests/nodes/slice_fp8x23_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp8x23_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i32_2d.cairo b/tests/nodes/slice_i32_2d.cairo index 082b8f15f..f26a2a809 100644 --- a/tests/nodes/slice_i32_2d.cairo +++ b/tests/nodes/slice_i32_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_i32_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i32_3d.cairo b/tests/nodes/slice_i32_3d.cairo index 1683e6987..16fd3f51b 100644 --- a/tests/nodes/slice_i32_3d.cairo +++ b/tests/nodes/slice_i32_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_i32_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i8_2d.cairo b/tests/nodes/slice_i8_2d.cairo index fc7f35364..2dc5f6ab4 100644 --- a/tests/nodes/slice_i8_2d.cairo +++ b/tests/nodes/slice_i8_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_i8_2d() { 
let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i8_3d.cairo b/tests/nodes/slice_i8_3d.cairo index ec8ea9ffd..a140d8681 100644 --- a/tests/nodes/slice_i8_3d.cairo +++ b/tests/nodes/slice_i8_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_i8_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_u32_2d.cairo b/tests/nodes/slice_u32_2d.cairo index 27678fc0c..c5ad63061 100644 --- a/tests/nodes/slice_u32_2d.cairo +++ b/tests/nodes/slice_u32_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_u32_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_u32_3d.cairo b/tests/nodes/slice_u32_3d.cairo index a3ca0e1bc..08a77cf55 100644 --- a/tests/nodes/slice_u32_3d.cairo +++ b/tests/nodes/slice_u32_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_u32_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 
3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/where_fp16x16.cairo b/tests/nodes/where_fp16x16.cairo index 05467ef51..ae3416d67 100644 --- a/tests/nodes/where_fp16x16.cairo +++ b/tests/nodes/where_fp16x16.cairo @@ -18,7 +18,7 @@ fn test_where_fp16x16() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_fp16x16_broadcast.cairo b/tests/nodes/where_fp16x16_broadcast.cairo index b0d9b9faa..5df239b78 100644 --- a/tests/nodes/where_fp16x16_broadcast.cairo +++ b/tests/nodes/where_fp16x16_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_fp16x16_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_fp8x23.cairo b/tests/nodes/where_fp8x23.cairo index 8661bf163..492db3766 100644 --- a/tests/nodes/where_fp8x23.cairo +++ b/tests/nodes/where_fp8x23.cairo @@ -18,7 +18,7 @@ fn test_where_fp8x23() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_fp8x23_broadcast.cairo b/tests/nodes/where_fp8x23_broadcast.cairo index 771c00bf4..112f9ef74 100644 --- a/tests/nodes/where_fp8x23_broadcast.cairo +++ b/tests/nodes/where_fp8x23_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_fp8x23_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i32.cairo b/tests/nodes/where_i32.cairo index 1662b010d..a455f8ac1 100644 
--- a/tests/nodes/where_i32.cairo +++ b/tests/nodes/where_i32.cairo @@ -18,7 +18,7 @@ fn test_where_i32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i32_broadcast.cairo b/tests/nodes/where_i32_broadcast.cairo index 53aaf91e2..62891b235 100644 --- a/tests/nodes/where_i32_broadcast.cairo +++ b/tests/nodes/where_i32_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_i32_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i8.cairo b/tests/nodes/where_i8.cairo index 0627fd33b..6f54a1271 100644 --- a/tests/nodes/where_i8.cairo +++ b/tests/nodes/where_i8.cairo @@ -18,7 +18,7 @@ fn test_where_i8() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i8_broadcast.cairo b/tests/nodes/where_i8_broadcast.cairo index 69e02821f..4bcb86a3d 100644 --- a/tests/nodes/where_i8_broadcast.cairo +++ b/tests/nodes/where_i8_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_i8_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_u32.cairo b/tests/nodes/where_u32.cairo index a14d685ac..5f8a3119a 100644 --- a/tests/nodes/where_u32.cairo +++ b/tests/nodes/where_u32.cairo @@ -18,7 +18,7 @@ fn test_where_u32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_u32_broadcast.cairo 
b/tests/nodes/where_u32_broadcast.cairo index b810f7143..4aedc56a1 100644 --- a/tests/nodes/where_u32_broadcast.cairo +++ b/tests/nodes/where_u32_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_u32_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/operators/qlinear_add_test.cairo b/tests/operators/qlinear_add_test.cairo index 3163fb8e6..fe7f2af47 100644 --- a/tests/operators/qlinear_add_test.cairo +++ b/tests/operators/qlinear_add_test.cairo @@ -13,33 +13,13 @@ fn qlinearadd_test() { i8 >::new( shape: array![4, 2].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 >::new( shape: array![4, 2].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - 10_i8, - 12_i8, - 14_i8, - 16_i8 - ] - .span(), + data: array![2_i8, 4_i8, 6_i8, 8_i8, 10_i8, 12_i8, 14_i8, 16_i8].span(), ); let a_scale = TensorTrait::< @@ -82,30 +62,11 @@ fn qlinearadd_broadcast_test() { i8 >::new( shape: array![2, 4].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 - >::new( - shape: array![1, 4].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - ] - .span(), - ); + >::new(shape: array![1, 4].span(), data: array![2_i8, 4_i8, 6_i8, 8_i8,].span(),); let a_scale = TensorTrait::< FP16x16 @@ -146,29 +107,10 @@ fn qlinearadd_broadcast_test() { fn test_example_doc() { let a = TensorTrait::< i8 - >::new( - shape: array![2, 3].span(), - data: array![ - 6_i8, - 6_i8, - 6_i8, - 11_i8, - 11_i8, - 11_i8 - ] - .span(), - ); + >::new(shape: array![2, 3].span(), data: array![6_i8, 6_i8, 6_i8, 11_i8, 11_i8, 11_i8].span(),); let b = TensorTrait::< i8 - >::new( 
- shape: array![1, 3].span(), - data: array![ - 40_i8, - 40_i8, - 40_i8 - ] - .span(), - ); + >::new(shape: array![1, 3].span(), data: array![40_i8, 40_i8, 40_i8].span(),); let a_scale = TensorTrait::< FP16x16 diff --git a/tests/operators/qlinear_concat_test.cairo b/tests/operators/qlinear_concat_test.cairo index 101cefaa8..4c86b3ff8 100644 --- a/tests/operators/qlinear_concat_test.cairo +++ b/tests/operators/qlinear_concat_test.cairo @@ -19,28 +19,10 @@ fn print_span(mut span: Span) { fn qlinear_concat_test() { let tensor1 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 10_i8, - 20_i8, - 30_i8, - 40_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![10_i8, 20_i8, 30_i8, 40_i8,].span(),); let tensor2 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 20_i8, - 40_i8, - 60_i8, - 80_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![20_i8, 40_i8, 60_i8, 80_i8,].span(),); let tensors = array![tensor1, tensor2].span(); @@ -90,40 +72,13 @@ fn qlinear_concat_test() { fn qlinear_concat_test_shape() { let tensor1 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 2_i8, - 2_i8, - 2_i8, - 2_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![2_i8, 2_i8, 2_i8, 2_i8,].span(),); let tensor2 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 8_i8, - 8_i8, - 8_i8, - 8_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![8_i8, 8_i8, 8_i8, 8_i8,].span(),); let tensor3 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 10_i8, - 10_i8, - 10_i8, - 10_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![10_i8, 10_i8, 10_i8, 10_i8,].span(),); let tensors = array![tensor1, tensor2, tensor3].span(); @@ -177,28 +132,10 @@ fn qlinear_concat_test_shape() { fn qlinear_concat_example_doc() { let tensor1 = TensorTrait::< i8 - >::new( - shape: array![2, 
2].span(), - data: array![ - 5_i8, - 5_i8, - 5_i8, - 5_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![5_i8, 5_i8, 5_i8, 5_i8,].span(),); let tensor2 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 1_i8, - 1_i8, - 1_i8, - 1_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![1_i8, 1_i8, 1_i8, 1_i8,].span(),); let tensors = array![tensor1, tensor2].span(); diff --git a/tests/operators/qlinear_leakyrelu_test.cairo b/tests/operators/qlinear_leakyrelu_test.cairo index 9e6473d06..e180ab33b 100644 --- a/tests/operators/qlinear_leakyrelu_test.cairo +++ b/tests/operators/qlinear_leakyrelu_test.cairo @@ -12,15 +12,7 @@ fn qlinear_leakyrelu_test() { i8 >::new( shape: array![2, 3].span(), - data: array![ - -10_i8, - -10_i8, - -10_i8, - 10_i8, - 10_i8, - 10_i8 - ] - .span(), + data: array![-10_i8, -10_i8, -10_i8, 10_i8, 10_i8, 10_i8].span(), ); let a_scale = TensorTrait::< diff --git a/tests/operators/qlinear_matmul_test.cairo b/tests/operators/qlinear_matmul_test.cairo index bfbe04714..9d3f8fa4b 100644 --- a/tests/operators/qlinear_matmul_test.cairo +++ b/tests/operators/qlinear_matmul_test.cairo @@ -15,36 +15,13 @@ fn qlinearmatmul_2D_test() { i8 >::new( shape: array![2, 4].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 >::new( shape: array![4, 3].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - 10_i8, - 12_i8, - 14_i8, - 16_i8, - 18_i8, - 20_i8, - 22_i8, - 24_i8 - ] + data: array![2_i8, 4_i8, 6_i8, 8_i8, 10_i8, 12_i8, 14_i8, 16_i8, 18_i8, 20_i8, 22_i8, 24_i8] .span(), ); @@ -90,18 +67,7 @@ fn qlinearmatmul_3D_test() { >::new( shape: array![2, 2, 3].span(), data: array![ - -1_i8, - -2_i8, - -2_i8, - -3_i8, - -4_i8, - -4_i8, - -5_i8, - -6_i8, - -6_i8, - -7_i8, - -8_i8, - -8_i8 + -1_i8, -2_i8, -2_i8, -3_i8, -4_i8, -4_i8, -5_i8, -6_i8, 
-6_i8, -7_i8, -8_i8, -8_i8 ] .span(), ); @@ -110,18 +76,7 @@ fn qlinearmatmul_3D_test() { >::new( shape: array![2, 3, 2].span(), data: array![ - -2_i8, - -4_i8, - -6_i8, - -8_i8, - -10_i8, - -12_i8, - -2_i8, - -4_i8, - -6_i8, - -8_i8, - -10_i8, - -12_i8 + -2_i8, -4_i8, -6_i8, -8_i8, -10_i8, -12_i8, -2_i8, -4_i8, -6_i8, -8_i8, -10_i8, -12_i8 ] .span(), ); @@ -167,29 +122,10 @@ fn qlinearmatmul_3D_test() { fn test_example_doc() { let a = TensorTrait::< i8 - >::new( - shape: array![2, 3].span(), - data: array![ - 3_i8, - 4_i8, - 5_i8, - 2_i8, - 4_i8, - 3_i8 - ] - .span(), - ); + >::new(shape: array![2, 3].span(), data: array![3_i8, 4_i8, 5_i8, 2_i8, 4_i8, 3_i8].span(),); let b = TensorTrait::< i8 - >::new( - shape: array![3, 1].span(), - data: array![ - 4_i8, - 8_i8, - 4_i8 - ] - .span(), - ); + >::new(shape: array![3, 1].span(), data: array![4_i8, 8_i8, 4_i8].span(),); let a_scale = TensorTrait::< FP16x16 diff --git a/tests/operators/qlinear_mul_test.cairo b/tests/operators/qlinear_mul_test.cairo index 6bf292bcc..3babc1800 100644 --- a/tests/operators/qlinear_mul_test.cairo +++ b/tests/operators/qlinear_mul_test.cairo @@ -14,40 +14,14 @@ fn qlinearmul_test() { i8 >::new( shape: array![4, 3].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8, - 9_i8, - 10_i8, - 11_i8, - 12_i8 - ] + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8, 9_i8, 10_i8, 11_i8, 12_i8] .span(), ); let b = TensorTrait::< i8 >::new( shape: array![4, 3].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - 10_i8, - 12_i8, - 14_i8, - 16_i8, - 18_i8, - 20_i8, - 22_i8, - 24_i8 - ] + data: array![2_i8, 4_i8, 6_i8, 8_i8, 10_i8, 12_i8, 14_i8, 16_i8, 18_i8, 20_i8, 22_i8, 24_i8] .span(), ); @@ -96,30 +70,11 @@ fn qlinear_mul_broadcast_test() { i8 >::new( shape: array![2, 4].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = 
TensorTrait::< i8 - >::new( - shape: array![1, 4].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - ] - .span(), - ); + >::new(shape: array![1, 4].span(), data: array![2_i8, 4_i8, 6_i8, 8_i8,].span(),); let a_scale = TensorTrait::< FP16x16 @@ -161,28 +116,11 @@ fn test_example_doc() { let a = TensorTrait::< i8 >::new( - shape: array![2, 3].span(), - data: array![ - 21_i8, - 21_i8, - 21_i8, - 41_i8, - 41_i8, - 41_i8 - ] - .span(), + shape: array![2, 3].span(), data: array![21_i8, 21_i8, 21_i8, 41_i8, 41_i8, 41_i8].span(), ); let b = TensorTrait::< i8 - >::new( - shape: array![1, 3].span(), - data: array![ - 4_i8, - 8_i8, - 12_i8 - ] - .span(), - ); + >::new(shape: array![1, 3].span(), data: array![4_i8, 8_i8, 12_i8].span(),); let a_scale = TensorTrait::< FP16x16 From 2069160c232fdf6f3cd2428812e1a8beb4c6ded8 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Mon, 5 Feb 2024 05:40:05 +0100 Subject: [PATCH 23/46] fix doc --- docs/framework/operators/neural-network/nn.conv_transpose.md | 2 +- src/operators/nn/core.cairo | 2 +- src/operators/nn/functional/conv_transpose.cairo | 2 -- tests/nodes.cairo | 2 +- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/docs/framework/operators/neural-network/nn.conv_transpose.md b/docs/framework/operators/neural-network/nn.conv_transpose.md index 29ce733a0..29b2af6d2 100644 --- a/docs/framework/operators/neural-network/nn.conv_transpose.md +++ b/docs/framework/operators/neural-network/nn.conv_transpose.md @@ -34,7 +34,7 @@ The convolution transpose operator consumes an input tensor and a input weigth t ## Returns -A `Tensor` of shape (M, N). +A `Tensor` that contains the result of the convolution transpose. ## Examples diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 522aa3eb5..82d65f4f6 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -731,7 +731,7 @@ trait NNTrait { /// /// ## Returns /// - /// A `Tensor` of shape (M, N). 
+ /// A `Tensor` that contains the result of the convolution transpose. /// /// ## Examples /// diff --git a/src/operators/nn/functional/conv_transpose.cairo b/src/operators/nn/functional/conv_transpose.cairo index dd62b844f..2b84def04 100644 --- a/src/operators/nn/functional/conv_transpose.cairo +++ b/src/operators/nn/functional/conv_transpose.cairo @@ -393,7 +393,6 @@ fn conv_transpose< image_id += 1; }; } else { - let mut output_array = ArrayTrait::new(); let mut i = 0; @@ -685,7 +684,6 @@ fn col2im_shape_check, +Copy, +Drop,>( } - fn get_indices(index: usize, shape: Span,) -> Array { let mut i = index; let mut res = ArrayTrait::new(); diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 61aa152d0..faf315564 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -944,4 +944,4 @@ mod conv_transpose_autopad_same; mod conv_transpose_dilations; mod conv_transpose_pads; mod conv_transpose_group_2; -mod conv_transpose_group_2_image_3; \ No newline at end of file +mod conv_transpose_group_2_image_3; From 18087f9a43feaed4df64a62d0e2f6c59d908fca0 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Mon, 5 Feb 2024 08:52:18 +0100 Subject: [PATCH 24/46] small fix --- src/operators/nn/functional/conv.cairo | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/operators/nn/functional/conv.cairo b/src/operators/nn/functional/conv.cairo index e34cb0b3b..444bcb6ed 100644 --- a/src/operators/nn/functional/conv.cairo +++ b/src/operators/nn/functional/conv.cairo @@ -295,14 +295,10 @@ fn conv< new_w.set(*new_shape.at(0) * *new_w_strides.at(0) - 1, NumberTrait::zero()); let mut indices = ArrayTrait::new(); - //let mut indices_W = ArrayTrait::new(); indices.append(arange(0, *new_shape.at(0), 1)); indices.append(arange(0, *new_shape.at(1), 1)); - //indices_W.append(arange(0, *(*W).shape.at(0), 1)); - //indices_W.append(arange(0, *(*W).shape.at(1), 1)); - let mut i = 0; loop { if i == dilations.len() { @@ -311,12 +307,10 @@ fn conv< let d = *dilations.at(i); let di = 
(*W).shape.len() - nd + i; indices.append(arange(0, *new_shape.at(di), d)); - //indices_W.append(arange(0, *(*W).shape.at(di), 1)); i += 1; }; let set_of_all_indices = cartesian(indices.span()); - //let set_of_all_indices_W = cartesian(indices_W.span()); let mut new_w_arr = ArrayTrait::new(); From a2f0efb19405fa0fc8a64025e11b2d1c28478219 Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Mon, 5 Feb 2024 19:31:46 +0800 Subject: [PATCH 25/46] feat: random_uniform_like --- .../tensor/tensor.random_uniform_like.md | 61 +++++++++ nodegen/node/random_uniform_like.py | 125 +++++++++++++++++ src/operators/tensor/core.cairo | 64 +++++++++ .../tensor/implementations/tensor_bool.cairo | 4 + .../implementations/tensor_complex64.cairo | 4 + .../implementations/tensor_fp16x16.cairo | 4 + .../implementations/tensor_fp16x16wide.cairo | 4 + .../implementations/tensor_fp32x32.cairo | 4 + .../implementations/tensor_fp64x64.cairo | 4 + .../implementations/tensor_fp8x23.cairo | 4 + .../implementations/tensor_fp8x23wide.cairo | 4 + .../tensor/implementations/tensor_i32.cairo | 4 + .../tensor/implementations/tensor_i8.cairo | 4 + .../tensor/implementations/tensor_u32.cairo | 4 + src/operators/tensor/math.cairo | 1 + .../tensor/math/random_uniform_like.cairo | 126 ++++++++++++++++++ tests/nodes.cairo | 2 + tests/nodes/random_uniform_like_fp16x16.cairo | 21 +++ .../random_uniform_like_fp16x16/input_0.cairo | 31 +++++ .../output_0.cairo | 31 +++++ tests/nodes/random_uniform_like_fp8x23.cairo | 21 +++ .../random_uniform_like_fp8x23/input_0.cairo | 31 +++++ .../random_uniform_like_fp8x23/output_0.cairo | 31 +++++ 23 files changed, 589 insertions(+) create mode 100644 docs/framework/operators/tensor/tensor.random_uniform_like.md create mode 100644 nodegen/node/random_uniform_like.py create mode 100644 src/operators/tensor/math/random_uniform_like.cairo create mode 100644 tests/nodes/random_uniform_like_fp16x16.cairo create mode 100644 tests/nodes/random_uniform_like_fp16x16/input_0.cairo create 
mode 100644 tests/nodes/random_uniform_like_fp16x16/output_0.cairo create mode 100644 tests/nodes/random_uniform_like_fp8x23.cairo create mode 100644 tests/nodes/random_uniform_like_fp8x23/input_0.cairo create mode 100644 tests/nodes/random_uniform_like_fp8x23/output_0.cairo diff --git a/docs/framework/operators/tensor/tensor.random_uniform_like.md b/docs/framework/operators/tensor/tensor.random_uniform_like.md new file mode 100644 index 000000000..06ca8e772 --- /dev/null +++ b/docs/framework/operators/tensor/tensor.random_uniform_like.md @@ -0,0 +1,61 @@ +# TensorTrait::random_uniform_like + +```rust + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor; +``` + +RandomUniformLike generates a tensor with random values using a uniform distribution, matching the shape of the input tensor. + +This operation creates a new tensor with the same shape as the input tensor, where each element is initialized with a random value sampled from a uniform distribution. + +## Args + +* `tensor`(`@Tensor`) - The input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width. +* `high`(Option) - An optional parameter specifying the upper bound (exclusive) of the uniform distribution. If not provided, defaults to 1.0. +* `low`(Option) - An optional parameter specifying the lower bound (inclusive) of the uniform distribution. If not provided, defaults to 0.0. +* `seed`(Option) - An optional parameter specifying the seed for the random number generator. If not provided, a random seed will be used. + +## Returns + +* A `Tensor` with the same shape as the input tensor, filled with random values from a uniform distribution within the specified range. 
+ +## Examples + +```rust +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::numbers::{FixedTrait, FP8x23}; + + +fn example() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(1); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 70016, sign: true }); + data.append(FP8x23 { mag: 57536, sign: false }); + data.append(FP8x23 { mag: 116032, sign: false }); + data.append(FP8x23 { mag: 162944, sign: true }); + data.append(FP8x23 { mag: 43360, sign: false }); + data.append(FP8x23 { mag: 128960, sign: false }); + data.append(FP8x23 { mag: 151808, sign: true }); + data.append(FP8x23 { mag: 28368, sign: false }); + data.append(FP8x23 { mag: 21024, sign: false }); + data.append(FP8x23 { mag: 24992, sign: false }); + data.append(FP8x23 { mag: 125120, sign: true }); + data.append(FP8x23 { mag: 79168, sign: true }); + data.append(FP8x23 { mag: 136960, sign: true }); + data.append(FP8x23 { mag: 10104, sign: true }); + data.append(FP8x23 { mag: 136704, sign: false }); + data.append(FP8x23 { mag: 184960, sign: true }); + let tensor = TensorTrait::new(shape.span(), data.span()); + return TensorTrait::random_uniform_like(@tensor, Option::Some(FP8x23 { mag: 83886080, sign: false }),Option::Some(FP8x23 { mag: 8388608, sign: false }), Option::Some(354145)); +} +>>> [[[[7299130, 4884492]], [[2339070, 1559536]], [[3448557, 984617]], [[5745934, 3670947]], [[4665989, 3079292]], [[3375288, 948254]], [[3749966, 4911069]], [[1358829, 4368105]]]] +``` diff --git a/nodegen/node/random_uniform_like.py b/nodegen/node/random_uniform_like.py new file mode 100644 index 000000000..6669093e2 --- /dev/null +++ b/nodegen/node/random_uniform_like.py @@ -0,0 +1,125 @@ +import numpy as np +from 
nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait + +def random_uniform_like(x: np.ndarray, high: int=1,low: int=0,seed: int=25) ->np.ndarray: + dtype = np.float64 + if seed is None or np.isnan(seed): # type: ignore + state = np.random.RandomState() + else: + state = np.random.RandomState(seed=int(seed)) # type: ignore + res = state.rand(*x.shape).astype(dtype) + res *= high - low # type: ignore + res += low # type: ignore + return (res.astype(dtype),) + +def get_data_statement(data: np.ndarray, dtype: Dtype) -> list[str]: + match dtype: + case Dtype.FP8x23: + return ["Option::Some(FP8x23 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"})" for x in data.flatten()] + case Dtype.FP16x16: + return ["Option::Some(FP16x16 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"})" for x in data.flatten()] + case Dtype.U32: + return [f"Option::Some({int(x)})" for x in data.flatten()] + +class Random_uniform_like(RunAll): + + @staticmethod + def fp8x23(): + x = np.random.uniform(1, 10, (1, 2, 2, 4)).astype(np.float64) + y = random_uniform_like(x) + + args = [10, 1] + args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23) + x = Tensor(Dtype.FP8x23, x.shape, to_fp( + x.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y[0].shape, to_fp( + y[0].flatten(), FixedImpl.FP8x23)) + + name = "random_uniform_like_fp8x23" + make_test( + [x], # List of input tensors. + y, # The expected output result. + f"TensorTrait::random_uniform_like(@input_0, {','.join(args_str)}, Option::Some(354145))", # The code signature. + name # The name of the generated folder. 
+ ) + + @staticmethod + def fp16x16(): + x = np.random.uniform(1, 10, (1, 2, 2, 4)).astype(np.float16) + y = random_uniform_like(x) + + args = [10, 1] + args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y[0].shape, to_fp( + y[0].flatten(), FixedImpl.FP16x16)) + + name = "random_uniform_like_fp16x16" + make_test( + [x], # List of input tensors. + y, # The expected output result. + f"TensorTrait::random_uniform_like(@input_0, {','.join(args_str)}, Option::Some(354145))", # The code signature. + name # The name of the generated folder. + ) + + # @staticmethod + # def fp64x64(): + # x = np.random.uniform(-3, 3, (1, 2, 2, 4)).astype(np.float64) + # y = random_uniform_like(x) + + # x = Tensor(Dtype.FP64x64, x.shape, to_fp( + # x.flatten(), FixedImpl.FP64x64)) + # y = Tensor(Dtype.FP64x64, y[0].shape, to_fp( + # y[0].flatten(), FixedImpl.FP64x64)) + + # name = "random_uniform_like_fp64x64" + # make_test([x], y, "TensorTrait::random_uniform_like(@input_0, 5, 1, 10)", + # name) + + # @staticmethod + # def fpi8(): + # x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.int8) + # y = random_uniform_like(x) + + # x = Tensor(Dtype.I8, x.shape, x.flatten()) + # y = Tensor(Dtype.I8, y[0].shape, y[0].flatten()) + + # name = "random_uniform_like_i8" + # make_test([x], y, "TensorTrait::random_uniform_like(@input_0, 5, 1, 10)", + # name) + + # @staticmethod + # def fpi32(): + # x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.int32) + # y = random_uniform_like(x) + + # x = Tensor(Dtype.I32, x.shape, x.flatten()) + # y = Tensor(Dtype.I32, y[0].shape, y[0].flatten()) + + # name = "random_uniform_like_i32" + # make_test([x], y, "TensorTrait::random_uniform_like(@input_0, 5, 1, 10)", + # name) + + + # @staticmethod + # def fpu32(): + # x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.uint32) + # y = random_uniform_like(x) + 
# args = [5, 1, 10] + # args_str = get_data_statement(np.array(args).flatten(), Dtype.U32) + + + # x = Tensor(Dtype.U32, x.shape, x.flatten()) + # y = Tensor(Dtype.U32, y[0].shape, y[0].flatten()) + + # name = "random_uniform_like_u32" + # make_test( + # [x], # List of input tensors. + # y, # The expected output result. + # f"TensorTrait::random_uniform_like(@input_0, {','.join(args_str)})", # The code signature. + # name # The name of the generated folder. + # ) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 70344eb97..9c8732614 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -118,6 +118,7 @@ impl TensorSerde, impl TDrop: Drop> of Serde { /// # tensor.new /// @@ -5162,6 +5163,69 @@ trait TensorTrait { fn split( self: @Tensor, axis: usize, num_outputs: Option, spl: Option> ) -> Array>; + /// # TensorTrait::random_uniform_like + /// + /// ```rust + /// fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor; + /// ``` + /// + /// RandomUniformLike generates a tensor with random values using a uniform distribution, matching the shape of the input tensor. + /// + /// This operation creates a new tensor with the same shape as the input tensor, where each element is initialized with a random value sampled from a uniform distribution. + /// + /// ## Args + /// + /// * `tensor`(`@Tensor`) - The input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width. + /// * `high`(Option) - An optional parameter specifying the upper bound (exclusive) of the uniform distribution. If not provided, defaults to 1.0. + /// * `low`(Option) - An optional parameter specifying the lower bound (inclusive) of the uniform distribution. If not provided, defaults to 0.0. + /// * `seed`(Option) - An optional parameter specifying the seed for the random number generator. If not provided, a random seed will be used. 
+ /// + /// ## Returns + /// + /// * A `Tensor` with the same shape as the input tensor, filled with random values from a uniform distribution within the specified range. + /// + /// ## Examples + /// + /// ```rust + /// use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::{TensorTrait, Tensor}; + /// use orion::utils::{assert_eq, assert_seq_eq}; + /// use orion::operators::tensor::FP8x23TensorPartialEq; + /// use orion::numbers::{FixedTrait, FP8x23}; + /// + /// + /// fn example() -> Tensor { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(8); + /// shape.append(1); + /// shape.append(2); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP8x23 { mag: 70016, sign: true }); + /// data.append(FP8x23 { mag: 57536, sign: false }); + /// data.append(FP8x23 { mag: 116032, sign: false }); + /// data.append(FP8x23 { mag: 162944, sign: true }); + /// data.append(FP8x23 { mag: 43360, sign: false }); + /// data.append(FP8x23 { mag: 128960, sign: false }); + /// data.append(FP8x23 { mag: 151808, sign: true }); + /// data.append(FP8x23 { mag: 28368, sign: false }); + /// data.append(FP8x23 { mag: 21024, sign: false }); + /// data.append(FP8x23 { mag: 24992, sign: false }); + /// data.append(FP8x23 { mag: 125120, sign: true }); + /// data.append(FP8x23 { mag: 79168, sign: true }); + /// data.append(FP8x23 { mag: 136960, sign: true }); + /// data.append(FP8x23 { mag: 10104, sign: true }); + /// data.append(FP8x23 { mag: 136704, sign: false }); + /// data.append(FP8x23 { mag: 184960, sign: true }); + /// let tensor = TensorTrait::new(shape.span(), data.span()); + /// return TensorTrait::random_uniform_like(@tensor, Option::Some(FP8x23 { mag: 83886080, sign: false }),Option::Some(FP8x23 { mag: 8388608, sign: false }), Option::Some(354145)); + /// } + /// >>> [[[[7299130, 4884492]], [[2339070, 1559536]], [[3448557, 984617]], [[5745934, 
3670947]], [[4665989, 3079292]], [[3375288, 948254]], [[3749966, 4911069]], [[1358829, 4368105]]]] + /// ``` + /// + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor; } /// Cf: TensorTrait::new docstring diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 3da518ec8..6fdb56f8c 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -484,6 +484,10 @@ impl BoolTensor of TensorTrait { ) -> Array> { panic(array!['not supported!']) } + + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements partial equal for two `Tensor` using the `PartialEq` trait. diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 74acba5c6..fa66b9770 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -515,6 +515,10 @@ impl Complex64Tensor of TensorTrait { ) -> Tensor { panic(array!['not supported!']) } + + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index cdc50bc4f..f8e338820 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -560,6 +560,10 @@ impl FP16x16Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index b0dc2d858..326d6c57e 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -512,6 +512,10 @@ impl FP16x16WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 4f862fd0e..e10a84b2e 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -561,6 +561,10 @@ impl FP32x32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 1fe5591fc..d645f73da 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -561,6 +561,10 @@ impl FP64x64Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 77d183c21..e07b4e172 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -559,6 +559,10 @@ impl FP8x23Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index ff6069087..18abe8408 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -498,6 +498,10 @@ impl FP8x23WTensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + math::random_uniform_like::random_uniform_like(*tensor, high, low, seed) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 50383d2df..bcd8bacd1 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -541,6 +541,10 @@ impl I32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 7e81d90eb..d0d8afe2e 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -539,6 +539,10 @@ impl I8Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 5a926a538..e9c002ec5 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -482,6 +482,10 @@ impl U32Tensor of TensorTrait { ) -> Array> { manipulation::split::split(self, axis, num_outputs, spl) } + + fn random_uniform_like(tensor: @Tensor, high: Option, low: Option, seed: Option) -> Tensor { + panic(array!['not supported!']) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/math.cairo b/src/operators/tensor/math.cairo index 13c2ca49a..ed6a89d33 100644 --- a/src/operators/tensor/math.cairo +++ b/src/operators/tensor/math.cairo @@ -61,3 +61,4 @@ mod erf; mod layer_normalization; mod resize; mod compress; +mod random_uniform_like; diff --git a/src/operators/tensor/math/random_uniform_like.cairo b/src/operators/tensor/math/random_uniform_like.cairo new file mode 100644 index 000000000..bf9e5f148 --- /dev/null +++ b/src/operators/tensor/math/random_uniform_like.cairo @@ -0,0 +1,126 @@ +use core::traits::Into; +use core::traits::TryInto; +use orion::operators::tensor::core::{Tensor, TensorTrait}; +use core::option::OptionTrait; + +use orion::numbers::fixed_point::core::FixedTrait; +use orion::numbers::NumberTrait; + +use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; +use orion::operators::tensor::math::{reduce_sum::accumulate_sum, arithmetic::div_downcast}; +use core::traits::PartialEq; +use alexandria_merkle_tree::merkle_tree::{pedersen::PedersenHasherImpl}; +use core::integer::{u128s_from_felt252, U128sFromFelt252Result}; +use core::traits; + +/// Cf: TensorTrait::random_uniform_like docstring +fn random_uniform_like< + T, + MAG, + impl TTensor: TensorTrait, + impl TNumber: NumberTrait, + impl TAdd: Add, + impl TSub: Sub, + impl TMul: Mul, + impl TDiv: Div, + impl TRem: Rem, + impl TTensorAdd: Add>, + impl TPartialOrd: PartialOrd, + impl TPartialEq: PartialEq, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop, +>(tensor: Tensor, high: Option, low: Option, seed:Option) -> Tensor { + + let mut seed: usize = match seed { + Option::Some(seed) => seed, + Option::None(_) => NumberTrait::max_value(), + }; + let mut high = match high { + Option::Some(high) => high, + Option::None(_) => NumberTrait::one(), + }; + let mut low = match low { + Option::Some(low) => low, + Option::None(_) => NumberTrait::zero(), + }; + assert!(high > low, "high must be larger than 
low"); + let res = tensor_get_state(tensor,seed,high,low); + + return res; +} + + +fn tensor_get_state< + T, + MAG, + impl TTensor: TensorTrait, + impl TNumber: NumberTrait, + impl TAdd: Add, + impl TSub: Sub, + impl TMul: Mul, + impl TDiv: Div, + impl TRem: Rem, + impl TTensorAdd: Add>, + impl TPartialOrd: PartialOrd, + impl TPartialEq: PartialEq, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop, +>(tensor: Tensor, mut seed: usize, high: T, low: T) -> Tensor { + let mut data = ArrayTrait::new(); + let mut count = (tensor.data).len(); + let mut i = 0; + let one: T = NumberTrait::one(); + + let half: T = NumberTrait::half(); + let two: T = one + one; + let three: T = two + one; + let max: T = NumberTrait::max_value(); + + loop { + if count == i { + break; + } + let mut v = NumberTrait::one(); + v = hash_random_range(seed, low, high); + let a: u64 = 1664525; + let c: u64 = 1013904223; + let m: u64 = 4294967295; + let s: u64 = (a * seed.try_into().unwrap() + c) % m; + seed = s.try_into().unwrap(); + data.append(v); + i += 1; + }; + return TensorTrait::new(tensor.shape, data.span()); +} + +// High level random in a range +// Only one random number per hash might be inefficient. 
+fn hash_random_range< + T, + MAG, + impl TNumber: NumberTrait, + impl TAdd: Add, + impl TSub: Sub, + impl TMul: Mul, + impl TDiv: Div, + impl TRem: Rem, + impl TPartialOrd: PartialOrd, + impl TPartialEq: PartialEq, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop, +>(seed: usize, min: T, max: T) -> T { + let mut key = PedersenHasherImpl::new(); + let hash: felt252 = key.hash(seed.into(), 1); + let a: u128 = 4294967295; + let b: u128 = match u128s_from_felt252(hash) { + U128sFromFelt252Result::Narrow(x) => x, + U128sFromFelt252Result::Wide((x, _)) => x, + } % a; + let c: felt252 = b.into(); + let rnd: T = NumberTrait::from_felt(c); + let range = max - min + NumberTrait::one(); // + 1 to include max + min + rnd % range +} diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 6c70b42cb..90d979c0d 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -936,3 +936,5 @@ mod split_fp16x16_2d_variable_parts; mod split_fp16x16_zero_size; mod split_fp16x16_1d_uneven; mod split_fp16x16_2d_uneven; +mod random_uniform_like_fp16x16; +mod random_uniform_like_fp8x23; diff --git a/tests/nodes/random_uniform_like_fp16x16.cairo b/tests/nodes/random_uniform_like_fp16x16.cairo new file mode 100644 index 000000000..951a567b8 --- /dev/null +++ b/tests/nodes/random_uniform_like_fp16x16.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::numbers::{FixedTrait, FP16x16}; + +#[test] +#[available_gas(2000000000)] +fn test_random_uniform_like_fp16x16() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::random_uniform_like(@input_0, Option::Some(FP16x16 { mag: 655360, sign: false }),Option::Some(FP16x16 { mag: 65536, sign: false }), 
Option::Some(354145)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/random_uniform_like_fp16x16/input_0.cairo b/tests/nodes/random_uniform_like_fp16x16/input_0.cairo new file mode 100644 index 000000000..eb300d79e --- /dev/null +++ b/tests/nodes/random_uniform_like_fp16x16/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 70016, sign: true }); + data.append(FP16x16 { mag: 57536, sign: false }); + data.append(FP16x16 { mag: 116032, sign: false }); + data.append(FP16x16 { mag: 162944, sign: true }); + data.append(FP16x16 { mag: 43360, sign: false }); + data.append(FP16x16 { mag: 128960, sign: false }); + data.append(FP16x16 { mag: 151808, sign: true }); + data.append(FP16x16 { mag: 28368, sign: false }); + data.append(FP16x16 { mag: 21024, sign: false }); + data.append(FP16x16 { mag: 24992, sign: false }); + data.append(FP16x16 { mag: 125120, sign: true }); + data.append(FP16x16 { mag: 79168, sign: true }); + data.append(FP16x16 { mag: 136960, sign: true }); + data.append(FP16x16 { mag: 10104, sign: true }); + data.append(FP16x16 { mag: 136704, sign: false }); + data.append(FP16x16 { mag: 184960, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/random_uniform_like_fp16x16/output_0.cairo b/tests/nodes/random_uniform_like_fp16x16/output_0.cairo new file mode 100644 index 000000000..e809b8207 --- /dev/null +++ b/tests/nodes/random_uniform_like_fp16x16/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, 
FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 486254, sign: false }); + data.append(FP16x16 { mag: 487133, sign: false }); + data.append(FP16x16 { mag: 112122, sign: false }); + data.append(FP16x16 { mag: 485208, sign: false }); + data.append(FP16x16 { mag: 565927, sign: false }); + data.append(FP16x16 { mag: 590441, sign: false }); + data.append(FP16x16 { mag: 73227, sign: false }); + data.append(FP16x16 { mag: 201392, sign: false }); + data.append(FP16x16 { mag: 342573, sign: false }); + data.append(FP16x16 { mag: 245684, sign: false }); + data.append(FP16x16 { mag: 368847, sign: false }); + data.append(FP16x16 { mag: 134871, sign: false }); + data.append(FP16x16 { mag: 449533, sign: false }); + data.append(FP16x16 { mag: 284826, sign: false }); + data.append(FP16x16 { mag: 234950, sign: false }); + data.append(FP16x16 { mag: 515285, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/random_uniform_like_fp8x23.cairo b/tests/nodes/random_uniform_like_fp8x23.cairo new file mode 100644 index 000000000..06c1ad47d --- /dev/null +++ b/tests/nodes/random_uniform_like_fp8x23.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::FP8x23TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +#[test] +#[available_gas(2000000000)] +fn test_random_uniform_like_fp8x23() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = TensorTrait::random_uniform_like(@input_0, Option::Some(FP8x23 { mag: 83886080, sign: false }),Option::Some(FP8x23 { 
mag: 8388608, sign: false }), Option::Some(354145)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/random_uniform_like_fp8x23/input_0.cairo b/tests/nodes/random_uniform_like_fp8x23/input_0.cairo new file mode 100644 index 000000000..af9a4ce03 --- /dev/null +++ b/tests/nodes/random_uniform_like_fp8x23/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 6985047, sign: true }); + data.append(FP8x23 { mag: 18908887, sign: false }); + data.append(FP8x23 { mag: 24337672, sign: true }); + data.append(FP8x23 { mag: 2420381, sign: false }); + data.append(FP8x23 { mag: 1071211, sign: true }); + data.append(FP8x23 { mag: 20033413, sign: true }); + data.append(FP8x23 { mag: 217485, sign: true }); + data.append(FP8x23 { mag: 4968906, sign: false }); + data.append(FP8x23 { mag: 5503174, sign: false }); + data.append(FP8x23 { mag: 4333577, sign: false }); + data.append(FP8x23 { mag: 16341821, sign: true }); + data.append(FP8x23 { mag: 18925428, sign: true }); + data.append(FP8x23 { mag: 17251664, sign: false }); + data.append(FP8x23 { mag: 23832813, sign: false }); + data.append(FP8x23 { mag: 3968519, sign: false }); + data.append(FP8x23 { mag: 22692691, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/random_uniform_like_fp8x23/output_0.cairo b/tests/nodes/random_uniform_like_fp8x23/output_0.cairo new file mode 100644 index 000000000..40d04f057 --- /dev/null +++ b/tests/nodes/random_uniform_like_fp8x23/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 43543406, sign: false }); + data.append(FP8x23 { mag: 82865885, sign: false }); + data.append(FP8x23 { mag: 16954874, sign: false }); + data.append(FP8x23 { mag: 19949400, sign: false }); + data.append(FP8x23 { mag: 61973159, sign: false }); + data.append(FP8x23 { mag: 46269033, sign: false }); + data.append(FP8x23 { mag: 21503499, sign: false }); + data.append(FP8x23 { mag: 19010224, sign: false }); + data.append(FP8x23 { mag: 51919405, sign: false }); + data.append(FP8x23 { mag: 53133236, sign: false }); + data.append(FP8x23 { mag: 62431439, sign: false }); + data.append(FP8x23 { mag: 22875863, sign: false }); + data.append(FP8x23 { mag: 65788925, sign: false }); + data.append(FP8x23 { mag: 21059738, sign: false }); + data.append(FP8x23 { mag: 81958342, sign: false }); + data.append(FP8x23 { mag: 76995797, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} From a29ca12b825f04c3692e7eff746846c7c28c4be4 Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Mon, 5 Feb 2024 19:41:42 +0800 Subject: [PATCH 26/46] docs: Added RandomUniformLike in README --- docs/framework/operators/tensor/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md index 281135f63..bb46b6edf 100644 --- a/docs/framework/operators/tensor/README.md +++ b/docs/framework/operators/tensor/README.md @@ -120,6 +120,7 @@ use orion::operators::tensor::TensorTrait; | [`tensor.erf`](tensor.erf.md) | Computes the error function of the given input tensor element-wise. | | [`tensor.layer_normalization`](tensor.layer\_normalization.md) | computes the layer normalization of the input tensor. 
| | [`tensor.split`](tensor.split.md) | Split a tensor into a list of tensors, along the specified ‘axis’. | +| [`tensor.random_uniform_like`](tensor.random\_uniform\_like.md) | RandomUniformLike generates a tensor with random values using a uniform distribution, matching the shape of the input tensor. | ## Arithmetic Operations From b0dc2938f8637b2b2a3f7cb037bf603739d76cdc Mon Sep 17 00:00:00 2001 From: chachaleo Date: Tue, 6 Feb 2024 04:59:05 +0100 Subject: [PATCH 27/46] feat: col2im --- docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 1 + .../operators/neural-network/README.md | 1 + .../operators/neural-network/nn.col2im.md | 97 +++++ .../operators/neural-network/nn.gemm.md | 11 +- nodegen/node/col2im.py | 320 +++++++++++++++++ src/numbers.cairo | 69 ++-- .../implementations/fp16x16/core.cairo | 3 +- .../implementations/fp16x16wide/core.cairo | 2 +- .../implementations/fp32x32/core.cairo | 3 +- .../implementations/fp64x64/core.cairo | 3 +- .../implementations/fp8x23/core.cairo | 4 +- .../implementations/fp8x23wide/core.cairo | 2 +- src/operators/nn/core.cairo | 107 ++++++ src/operators/nn/functional.cairo | 1 + src/operators/nn/functional/col2im.cairo | 332 ++++++++++++++++++ .../nn/implementations/nn_fp16x16.cairo | 11 + .../nn/implementations/nn_fp32x32.cairo | 11 + .../nn/implementations/nn_fp64x64.cairo | 11 + .../nn/implementations/nn_fp8x23.cairo | 11 + src/operators/nn/implementations/nn_i32.cairo | 11 + src/operators/nn/implementations/nn_i8.cairo | 11 + src/operators/nn/implementations/nn_u32.cairo | 11 + .../sequence/functional/sequence_at.cairo | 4 +- .../sequence/functional/sequence_erase.cairo | 3 +- .../sequence/functional/sequence_insert.cairo | 4 +- src/operators/tensor/core.cairo | 8 +- src/operators/tensor/helpers.cairo | 2 +- .../tensor/implementations/tensor_i32.cairo | 10 +- .../tensor/implementations/tensor_i8.cairo | 2 +- .../tensor/math/layer_normalization.cairo | 3 +- src/test_helper/tensor/i32.cairo | 3 +- 
src/test_helper/tensor/i8.cairo | 3 +- tests/nodes.cairo | 5 + tests/nodes/clip_fp16x16_2d.cairo | 6 +- tests/nodes/clip_fp16x16_3d.cairo | 6 +- tests/nodes/clip_fp8x23_2d.cairo | 6 +- tests/nodes/clip_fp8x23_3d.cairo | 6 +- tests/nodes/col2im.cairo | 22 ++ tests/nodes/col2im/input_0.cairo | 39 ++ tests/nodes/col2im/output_0.cairo | 40 +++ tests/nodes/col2im_5D.cairo | 27 ++ tests/nodes/col2im_5D/input_0.cairo | 134 +++++++ tests/nodes/col2im_5D/output_0.cairo | 136 +++++++ tests/nodes/col2im_dilations.cairo | 27 ++ tests/nodes/col2im_dilations/input_0.cairo | 34 ++ tests/nodes/col2im_dilations/output_0.cairo | 51 +++ tests/nodes/col2im_pads.cairo | 27 ++ tests/nodes/col2im_pads/input_0.cairo | 89 +++++ tests/nodes/col2im_pads/output_0.cairo | 40 +++ tests/nodes/col2im_strides.cairo | 27 ++ tests/nodes/col2im_strides/input_0.cairo | 50 +++ tests/nodes/col2im_strides/output_0.cairo | 40 +++ tests/nodes/compress_fp16x16_3d_axis1.cairo | 2 +- tests/nodes/compress_fp16x16_3d_axis2.cairo | 2 +- tests/nodes/compress_fp16x16_3d_axis3.cairo | 2 +- tests/nodes/compress_fp16x16_3d_default.cairo | 2 +- tests/nodes/compress_fp16x16_3d_noaxis.cairo | 2 +- tests/nodes/compress_fp8x23_3d_axis1.cairo | 2 +- tests/nodes/compress_fp8x23_3d_axis2.cairo | 2 +- tests/nodes/compress_fp8x23_3d_default.cairo | 2 +- tests/nodes/compress_i32_3d_axis1.cairo | 2 +- tests/nodes/compress_i32_3d_axis2.cairo | 2 +- tests/nodes/compress_i32_3d_default.cairo | 2 +- tests/nodes/compress_i8_3d_axis1.cairo | 2 +- tests/nodes/compress_i8_3d_axis2.cairo | 2 +- tests/nodes/compress_i8_3d_default.cairo | 2 +- tests/nodes/compress_u32_3d_axis1.cairo | 2 +- tests/nodes/compress_u32_3d_axis2.cairo | 2 +- tests/nodes/compress_u32_3d_axis2_2.cairo | 2 +- tests/nodes/compress_u32_3d_axis3.cairo | 2 +- tests/nodes/compress_u32_3d_default.cairo | 2 +- tests/nodes/gather_fp16x16_3d_axis1.cairo | 2 +- tests/nodes/gather_fp16x16_3d_axis2.cairo | 2 +- tests/nodes/gather_fp16x16_3d_default.cairo | 2 +- 
tests/nodes/gather_fp8x23_3d_axis1.cairo | 2 +- tests/nodes/gather_fp8x23_3d_axis2.cairo | 2 +- tests/nodes/gather_fp8x23_3d_default.cairo | 2 +- tests/nodes/gather_i32_3d_axis1.cairo | 2 +- tests/nodes/gather_i32_3d_axis2.cairo | 2 +- tests/nodes/gather_i32_3d_default.cairo | 2 +- tests/nodes/gather_i8_3d_axis1.cairo | 2 +- tests/nodes/gather_i8_3d_axis2.cairo | 2 +- tests/nodes/gather_i8_3d_default.cairo | 2 +- .../gather_nd_fp16x16_3d_batch_dims1.cairo | 2 +- .../gather_nd_fp16x16_3d_batch_dims2.cairo | 2 +- .../nodes/gather_nd_fp16x16_3d_default.cairo | 2 +- .../gather_nd_fp8x23_3d_batch_dims1.cairo | 2 +- .../gather_nd_fp8x23_3d_batch_dims2.cairo | 2 +- tests/nodes/gather_nd_fp8x23_3d_default.cairo | 2 +- .../nodes/gather_nd_i32_3d_batch_dims1.cairo | 2 +- .../nodes/gather_nd_i32_3d_batch_dims2.cairo | 2 +- tests/nodes/gather_nd_i32_3d_default.cairo | 2 +- tests/nodes/gather_nd_i8_3d_batch_dims1.cairo | 2 +- tests/nodes/gather_nd_i8_3d_default.cairo | 2 +- tests/nodes/gather_nd_u32_batch_dims1.cairo | 2 +- tests/nodes/gather_nd_u32_batch_dims2.cairo | 2 +- tests/nodes/gather_nd_u32_default.cairo | 2 +- tests/nodes/gather_u32_3d_axis1.cairo | 2 +- tests/nodes/gather_u32_3d_axis2.cairo | 2 +- tests/nodes/gather_u32_3d_default.cairo | 2 +- tests/nodes/gemm_all_attributes.cairo | 10 +- tests/nodes/gemm_alpha.cairo | 10 +- tests/nodes/gemm_beta.cairo | 10 +- tests/nodes/gemm_default_matrix_bias.cairo | 4 +- tests/nodes/gemm_default_no_bias.cairo | 4 +- tests/nodes/gemm_default_vector_bias.cairo | 4 +- tests/nodes/gemm_transposeA.cairo | 4 +- tests/nodes/gemm_transposeB.cairo | 4 +- tests/nodes/hard_sigmoid_fp16x16.cairo | 4 +- tests/nodes/hard_sigmoid_fp8x23.cairo | 4 +- tests/nodes/is_nan_fp16x16/input_0.cairo | 2 +- ...layer_normalization_3d_axis0_epsilon.cairo | 9 +- ...layer_normalization_3d_axis1_epsilon.cairo | 9 +- ...layer_normalization_3d_axis2_epsilon.cairo | 9 +- ...alization_3d_axis_negative_1_epsilon.cairo | 9 +- 
...alization_3d_axis_negative_2_epsilon.cairo | 9 +- ...alization_3d_axis_negative_3_epsilon.cairo | 9 +- .../nodes/layer_normalization_4d_axis0.cairo | 5 +- .../nodes/layer_normalization_4d_axis1.cairo | 5 +- .../nodes/layer_normalization_4d_axis2.cairo | 5 +- .../nodes/layer_normalization_4d_axis3.cairo | 5 +- ...yer_normalization_4d_axis_negative_1.cairo | 5 +- ...yer_normalization_4d_axis_negative_2.cairo | 5 +- ...yer_normalization_4d_axis_negative_3.cairo | 5 +- ...yer_normalization_4d_axis_negative_4.cairo | 5 +- .../layer_normalization_default_axis.cairo | 5 +- tests/nodes/layer_normalization_test.cairo | 5 +- tests/nodes/scatter_fp16x16_3d_axis1.cairo | 8 +- .../nodes/scatter_fp16x16_3d_axis1_add.cairo | 8 +- tests/nodes/scatter_fp16x16_3d_default.cairo | 8 +- tests/nodes/scatter_fp8x23_axis1.cairo | 8 +- tests/nodes/scatter_fp8x23_default.cairo | 8 +- tests/nodes/scatter_fp8x23_mul.cairo | 8 +- tests/nodes/scatter_i8_axis1.cairo | 8 +- tests/nodes/scatter_i8_axis1_max.cairo | 8 +- tests/nodes/scatter_i8_default.cairo | 8 +- tests/nodes/scatter_u32_add.cairo | 8 +- tests/nodes/scatter_u32_axis1.cairo | 8 +- tests/nodes/scatter_u32_default.cairo | 8 +- tests/nodes/sequence_insert_fp16x16.cairo | 2 +- tests/nodes/sequence_insert_fp8x23.cairo | 2 +- tests/nodes/sequence_insert_i32.cairo | 2 +- tests/nodes/sequence_insert_i8.cairo | 2 +- tests/nodes/sequence_insert_u32.cairo | 2 +- tests/nodes/sequence_length_fp16x16.cairo | 4 +- tests/nodes/shrink_hard_fp16x16.cairo | 4 +- tests/nodes/shrink_hard_fp8x23.cairo | 4 +- tests/nodes/shrink_soft_fp16x16.cairo | 6 +- tests/nodes/shrink_soft_fp8x23.cairo | 6 +- tests/nodes/slice_fp16x16_2d.cairo | 8 +- tests/nodes/slice_fp16x16_3d.cairo | 8 +- tests/nodes/slice_fp8x23_2d.cairo | 8 +- tests/nodes/slice_fp8x23_3d.cairo | 8 +- tests/nodes/slice_i32_2d.cairo | 8 +- tests/nodes/slice_i32_3d.cairo | 8 +- tests/nodes/slice_i8_2d.cairo | 8 +- tests/nodes/slice_i8_3d.cairo | 8 +- tests/nodes/slice_u32_2d.cairo | 8 +- 
tests/nodes/slice_u32_3d.cairo | 8 +- tests/nodes/where_fp16x16.cairo | 2 +- tests/nodes/where_fp16x16_broadcast.cairo | 2 +- tests/nodes/where_fp8x23.cairo | 2 +- tests/nodes/where_fp8x23_broadcast.cairo | 2 +- tests/nodes/where_i32.cairo | 2 +- tests/nodes/where_i32_broadcast.cairo | 2 +- tests/nodes/where_i8.cairo | 2 +- tests/nodes/where_i8_broadcast.cairo | 2 +- tests/nodes/where_u32.cairo | 2 +- tests/nodes/where_u32_broadcast.cairo | 2 +- tests/operators/qlinear_add_test.cairo | 70 +--- tests/operators/qlinear_concat_test.cairo | 77 +--- tests/operators/qlinear_leakyrelu_test.cairo | 10 +- tests/operators/qlinear_matmul_test.cairo | 76 +--- tests/operators/qlinear_mul_test.cairo | 74 +--- 175 files changed, 2210 insertions(+), 475 deletions(-) create mode 100644 docs/framework/operators/neural-network/nn.col2im.md create mode 100644 nodegen/node/col2im.py create mode 100644 src/operators/nn/functional/col2im.cairo create mode 100644 tests/nodes/col2im.cairo create mode 100644 tests/nodes/col2im/input_0.cairo create mode 100644 tests/nodes/col2im/output_0.cairo create mode 100644 tests/nodes/col2im_5D.cairo create mode 100644 tests/nodes/col2im_5D/input_0.cairo create mode 100644 tests/nodes/col2im_5D/output_0.cairo create mode 100644 tests/nodes/col2im_dilations.cairo create mode 100644 tests/nodes/col2im_dilations/input_0.cairo create mode 100644 tests/nodes/col2im_dilations/output_0.cairo create mode 100644 tests/nodes/col2im_pads.cairo create mode 100644 tests/nodes/col2im_pads/input_0.cairo create mode 100644 tests/nodes/col2im_pads/output_0.cairo create mode 100644 tests/nodes/col2im_strides.cairo create mode 100644 tests/nodes/col2im_strides/input_0.cairo create mode 100644 tests/nodes/col2im_strides/output_0.cairo diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 649e411f9..6415ca622 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -160,6 +160,7 @@ * [nn.hard\_sigmoid](framework/operators/neural-network/nn.hard\_sigmoid.md) * 
[nn.thresholded\_relu](framework/operators/neural-network/nn.thresholded\_relu.md) * [nn.gemm](framework/operators/neural-network/nn.gemm.md) + * [nn.col2im](framework/operators/neural-network/nn.col2im.md) * [Machine Learning](framework/operators/machine-learning/README.md) * [Tree Ensemble Classifier](framework/operators/machine-learning/tree-ensemble-classifier/README.md) * [tree\_ensemble\_classifier.predict](framework/operators/machine-learning/tree-ensemble-classifier/tree\_ensemble\_classifier.predict.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index 0e0e5be17..d7076be92 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -43,6 +43,7 @@ You can see below the list of current supported ONNX Operators: | [Softplus](operators/neural-network/nn.softplus.md) | :white\_check\_mark: | | [Linear](operators/neural-network/nn.linear.md) | :white\_check\_mark: | | [HardSigmoid](operators/neural-network/nn.hard\_sigmoid.md) | :white\_check\_mark: | +| [Col2im](operators/neural-network/nn.col2im.md) | :white\_check\_mark: | | [Sinh](operators/tensor/tensor.sinh.md) | :white\_check\_mark: | | [Asinh](operators/tensor/tensor.asinh.md) | :white\_check\_mark: | | [Atanh](operators/tensor/tensor.atanh.md) | :white\_check\_mark: | diff --git a/docs/framework/operators/neural-network/README.md b/docs/framework/operators/neural-network/README.md index 8343d0c90..662c62c01 100644 --- a/docs/framework/operators/neural-network/README.md +++ b/docs/framework/operators/neural-network/README.md @@ -35,4 +35,5 @@ Orion supports currently these `NN` types. | [`nn.hard_sigmoid`](nn.hard\_sigmoid.md) | Applies the Hard Sigmoid function to an n-dimensional input tensor. | | [`nn.thresholded_relu`](nn.thresholded\_relu.md) | Performs the thresholded relu activation function element-wise. | | [`nn.gemm`](nn.gemm.md) | Performs General Matrix multiplication. 
| +[`nn.col2im`](nn.col2im.md) | Rearranges column blocks back into a multidimensional image | diff --git a/docs/framework/operators/neural-network/nn.col2im.md b/docs/framework/operators/neural-network/nn.col2im.md new file mode 100644 index 000000000..fd5e82ffa --- /dev/null +++ b/docs/framework/operators/neural-network/nn.col2im.md @@ -0,0 +1,97 @@ + +# NNTrait::col2im + +```rust + col2im( + data: @Tensor, + image_shape: Span, + block_shape: Span, + dilations: Option>, + pads: Option>, + strides: Option>, +) -> Tensor +``` + +The operator rearranges column blocks back into a multidimensional image + +Col2Im behaves similarly to PyTorch's fold https://pytorch.org/docs/stable/generated/torch.nn.Fold.html, but it only supports batched multi-dimensional image tensors. Another implementation in Python with N-dimension support can be found at https://github.com/f-dangel/unfoldNd/. + +## Args + +* `data`(`@Tensor`) - Input data tensor to be rearranged from column blocks back into an image. This is a 3-dimensional tensor containing [N, C * n-ary-product(block_shape), L], where N is batch dimension, C is image channel dimension and L is number of blocks. +* `image_shape`(`Span`) - The shape of the spatial dimensions of the image after rearranging the column blocks. This is a 1-dimensional tensor with size of at least 2, containing the value [H_img, W_img] for a 2-D image or [dim_i1, dim_i2, ..., dim_iN] for a N-D image. +* `block_shape`(`Span`) - The shape of the block to apply on the input. This is a 1-dimensional tensor of size of at least 2, containing the value [H_block, W_block] for a 2-D image or [dim_b1, dim_b2, ..., dim_bN] for a N-D block. This is the block-shape before dilation is applied to it. +* `dilations`(`Option>`) - 1-dimensional tensor with dilation value along each spatial axis of the image. If not present, the dilation defaults to 1 along each spatial axis of the image. 
+* `pads`(`Option>`) - 1-dimensional tensor with padding value for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. If not present, the padding defaults to 0 along start and end of each spatial axis. +* `strides`(`Option>`) - 1-dimensional tensor with stride value along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. + +## Returns + +A `Tensor` output tensor produced by rearranging blocks into an image. + +## Examples + +```rust +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::nn::FP16x16NN; +use orion::numbers::FP16x16; +use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; + +fn example_col2im() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1507328, 
sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + let mut X = TensorTrait::new(shape.span(), data.span()); + + let image_shape = array![5, 5].span(); + let block_shape = array![1, 5].span(); + + return NNTrait::col2im( + @X, image_shape, block_shape, Option::None, Option::None, Option::None, + ); + + +} +>>> [ + [ + [ + [1.0, 2.0, 3.0, 4.0, 5.0], + [6.0, 7.0, 8.0, 9.0, 0.0], + [11.0, 12.0, 13.0, 14.0, 15.0], + [16.0, 17.0, 18.0, 19.0, 20.0], + [21.0, 22.0, 23.0, 24.0, 25.0], + ] + ] + ] + +```` + diff --git a/docs/framework/operators/neural-network/nn.gemm.md b/docs/framework/operators/neural-network/nn.gemm.md index 4ac734d73..b89d884fc 100644 --- a/docs/framework/operators/neural-network/nn.gemm.md +++ b/docs/framework/operators/neural-network/nn.gemm.md @@ -1,4 +1,4 @@ -# nn.gemm +# NNTrait::gemm ```rust fn gemm( @@ -12,18 +12,19 @@ ) -> Tensor; ``` -Performs General Matrix multiplication: [https://en.wikipedia.org/wiki/Basic\_Linear\_Algebra\_Subprograms#Level\_3](https://en.wikipedia.org/wiki/Basic\_Linear\_Algebra\_Subprograms#Level\_3) +Performs General Matrix multiplication: https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 * A' = transpose(A) if transA else A * B' = transpose(B) if transB else B -Compute `Y = alpha * A' * B' + beta * C`, where input tensor A has shape (M, K) or (K, M), input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N). 
`A` will be transposed before doing the computation if attribute `transA` is `true`, same for `B` and `transB`. +Compute `Y = alpha * A' * B' + beta * C`, where input tensor A has shape (M, K) or (K, M), input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N). +`A` will be transposed before doing the computation if attribute `transA` is `true`, same for `B` and `transB`. ## Args * `A`(`Tensor`) - Input tensor A. The shape of `A` should be (M, K) if `transA` is `false`, or (K, M) if `transA` is `true`. * `B`(`Tensor`) - Input tensor B. The shape of `B` should be (K, N) if `transB` is `false`, or (N, K) if `transB` is `true`. -* `C`(`Option>`) - Optional input tensor C. The shape of C should be unidirectional broadcastable to (M, N). +* `C`(`Option>`) - Optional input tensor C. The shape of C should be unidirectional broadcastable to (M, N). * `alpha`(`Option`) - Optional scalar multiplier for the product of input tensors `A * B`. * `beta`(`Option`) - Optional scalar multiplier for input tensor `C`. * `transA`(`bool`) - Whether `A` should be transposed. @@ -63,4 +64,4 @@ A `Tensor` of shape (M, N). 
return y; } >>> tensor of shape [3;5] -``` +```` diff --git a/nodegen/node/col2im.py b/nodegen/node/col2im.py new file mode 100644 index 000000000..159d25dd0 --- /dev/null +++ b/nodegen/node/col2im.py @@ -0,0 +1,320 @@ + + +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait + + +def col2im(data, image_shape, block_shape, dilations=None, pads=None, strides=None): # type: ignore + if dilations is None: + dilations = [1 for s in image_shape] + if pads is None: + pads = [0 for s in image_shape] * 2 + if strides is None: + strides = [1 for s in image_shape] + bl = np.prod(block_shape) + C = data.shape[1] // bl + data = data.reshape(data.shape[:1] + (C,) + (bl,) + data.shape[2:]) + ks = tuple(block_shape) + res = None + for n in range(data.shape[0]): + for c in range(data.shape[1]): + out = col2im_naive_implementation( + data[n, c, ...], image_shape, ks, dilations, pads, strides + ) + if res is None: + new_shape = data.shape[:2] + out.shape + res = np.empty(new_shape, dtype=data.dtype) + res[n, c, ...] = out + return (res,) # type: ignore + +def _get_indices(i, shape): + res = np.empty((len(shape),), dtype=np.int64) + k = len(shape) - 1 + while k > 0: + m = i % shape[k] + res[k] = m + i -= m + i /= shape[k] + k -= 1 + res[0] = i + return res + +def _col2im_shape_check(X, output_shape, kernel_shape, dilations, pads, strides): # type: ignore + n_input_plane = X.shape[0] + + kernel_size = np.prod(kernel_shape) + + if n_input_plane % kernel_size != 0: + raise ValueError( + f"Expected size of input's dimension 1 to be divisible by the " + f"product of kernel_size={kernel_size}, " + f"but got input.size(1)={n_input_plane} " + f"and kernel_shape={kernel_shape}, X.shape={X.shape}, output_shape={output_shape}." 
+ ) + + input_length = X.shape[1] + n_dims = len(output_shape) + n_blocks = [] + + + for i in range(n_dims): + n_block = ( + output_shape[i] + + pads[i, :].sum() + - dilations[i] * (kernel_shape[i] - 1) + - 1 + ) // strides[i] + 1 + n_blocks.append(n_block) + + + block_size = np.prod(n_blocks) + if input_length != block_size: + raise ValueError( + f"Given n_input_plane={n_input_plane}, X.shape={X.shape}, " + f"output_shape={output_shape}, kernel_shape={kernel_shape}, " + f"dilations={dilations}, pads={pads}, strides={strides}, " + f"expected size of input's dimension 2 to match the calculated number of " + f"sliding blocks {n_blocks} = {block_size}, " + f"but got input.size(2)={input_length}.", + ) + + +def col2im_naive_implementation(data, image_shape, kernel_shape, dilations, pads, strides): # type: ignore + + n_dims = len(pads) // 2 + new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)]) + _col2im_shape_check(data, image_shape, kernel_shape, dilations, new_pads, strides) + + data_col = data + data_im = np.zeros(image_shape, dtype=data.dtype) + + dim_col = [] + for i in range(n_dims): + col = ( + image_shape[i] + + new_pads[i, :].sum() + - (dilations[i] * (kernel_shape[i] - 1) + 1) + ) // strides[i] + 1 + dim_col.append(col) + kernel_size = np.prod(kernel_shape) + col_size = np.prod(dim_col) + for c_col in range(kernel_size): + offset = _get_indices(c_col, kernel_shape) + + for col in range(col_size): + + ind_col = _get_indices(col, dim_col) + ind_im = [] + for i in range(n_dims): + ind = ( + ind_col[i] * strides[i] - new_pads[i, 0] + offset[i] * dilations[i] + ) + ind_im.append(ind) + if not _is_out(ind_im, data_im.shape): + data_im[tuple(ind_im)] += data_col[c_col, col] + + + return data_im + + +def _is_out(ind, shape): + for i, s in zip(ind, shape): + if i < 0: + return True + if i >= s: + return True + return False + + + +class Col2im(RunAll): + + @staticmethod + def export_col2im() -> None: + x = np.array( + [ + [ + [1.0, 6.0, 11.0, 16.0, 
21.0], # (1, 5, 5) + [2.0, 7.0, 12.0, 17.0, 22.0], + [3.0, 8.0, 13.0, 18.0, 23.0], + [4.0, 9.0, 14.0, 19.0, 24.0], + [5.0, 0.0, 15.0, 20.0, 25.0], + ] + ] + ).astype(np.float32) + + image_shape = np.array([5, 5]).astype(np.int64) + block_shape = np.array([1, 5]).astype(np.int64) + + y = col2im(x,image_shape,block_shape) + y = np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "col2im" + func_sig = "NNTrait::col2im(" + func_sig += "@input_0," + func_sig += "array![5, 5].span()," + func_sig += "array![1, 5].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x], y, func_sig, name, Trait.NN) + + + @staticmethod + def export_col2im_strides() -> None: + x = np.array( + [ + [ + [0.0, 0.0, 0.0, 0.0], # (1, 9, 4) + [1.0, 1.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 1.0], + [1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0], + ] + ] + ).astype(np.float32) + image_shape = np.array([5, 5]).astype(np.int64) + block_shape = np.array([3, 3]).astype(np.int64) + + y = col2im(x,image_shape,block_shape,strides=[2, 2]) + y = np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "col2im_strides" + func_sig = "NNTrait::col2im(" + func_sig += "@input_0," + func_sig += "array![5, 5].span()," + func_sig += "array![3, 3].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span()))" + make_test( + [x], y, func_sig, name, Trait.NN) + + @staticmethod + def export_col2im_pads() -> None: + x = np.array( + [ + [ + [ + 1.0, 6.0, 11.0, 16.0, 21.0, 26, 31, 36, 41, 46, 51, 56, 61, 66, 71, + ], # (1, 5, 15) + [ + 2.0, 7.0, 12.0, 17.0, 22.0, 27, 32, 37, 
42, 47, 52, 57, 62, 67, 72, + ], + [ + 3.0, 8.0, 13.0, 18.0, 23.0, 28, 33, 38, 43, 48, 53, 58, 63, 68, 73, + ], + [ + 4.0, 9.0, 14.0, 19.0, 24.0, 29, 34, 39, 44, 49, 54, 59, 64, 69, 74, + ], + [ + 5.0, 10.0, 15.0, 20.0, 25.0, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, + ], + ] + ] + ).astype(np.float32) + image_shape = np.array([5, 5]).astype(np.int64) + block_shape = np.array([1, 5]).astype(np.int64) + + y = col2im(x,image_shape,block_shape,pads=[0, 1, 0, 1]) + y = np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "col2im_pads" + func_sig = "NNTrait::col2im(" + func_sig += "@input_0," + func_sig += "array![5, 5].span()," + func_sig += "array![1, 5].span()," + func_sig += "Option::None," + func_sig += "Option::Some(array![0, 1, 0, 1].span())," + func_sig += "Option::None)" + make_test( + [x], y, func_sig, name, Trait.NN) + + @staticmethod + def export_col2im_dilations() -> None: + x = np.array( + [ + [ + [1.0, 5.0, 9.0, 13.0, 17], # (1, 4, 5) + [2.0, 6.0, 10.0, 14.0, 18], + [3.0, 7.0, 11.0, 15.0, 19], + [4.0, 8.0, 12.0, 16.0, 20], + ] + ] + ).astype(np.float32) + image_shape = np.array([6, 6]).astype(np.int64) + block_shape = np.array([2, 2]).astype(np.int64) + + + y = col2im(x,image_shape,block_shape, dilations=[1, 5]) + y = np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "col2im_dilations" + func_sig = "NNTrait::col2im(" + func_sig += "@input_0," + func_sig += "array![6, 6].span()," + func_sig += "array![2, 2].span()," + func_sig += "Option::Some(array![1, 5].span())," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x], y, func_sig, name, Trait.NN) + + @staticmethod + def export_col2im_5D() -> None: + x = np.array( + [ + [ + [1, 6, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56], # (1, 10, 
12) + [2, 7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57], + [3, 8, 13, 18, 23, 28, 33, 38, 43, 48, 53, 58], + [4, 9, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59], + [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60], + [61, 66, 71, 76, 81, 86, 91, 96, 101, 106, 111, 116], + [62, 67, 72, 77, 82, 87, 92, 97, 102, 107, 112, 117], + [63, 68, 73, 78, 83, 88, 93, 98, 103, 108, 113, 118], + [64, 69, 74, 79, 84, 89, 94, 99, 104, 109, 114, 119], + [65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115, 120], + ] + ] + ).astype(np.float32) + image_shape = np.array([3, 4, 5]).astype(np.int64) + block_shape = np.array([1, 1, 5]).astype(np.int64) + + y = col2im(x,image_shape,block_shape) + y = np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "col2im_5D" + func_sig = "NNTrait::col2im(" + func_sig += "@input_0," + func_sig += "array![3, 4, 5].span()," + func_sig += "array![1, 1, 5].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x], y, func_sig, name, Trait.NN) + + + \ No newline at end of file diff --git a/src/numbers.cairo b/src/numbers.cairo index 936c128e1..1ce8a803d 100644 --- a/src/numbers.cairo +++ b/src/numbers.cairo @@ -2,10 +2,10 @@ mod fixed_point; mod complex_number; use orion::numbers::fixed_point::core::FixedTrait; -use orion::numbers::fixed_point::implementations::fp8x23::core::{ONE as ONE_fp8x23 }; -use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE as ONE_fp16x16 }; -use orion::numbers::fixed_point::implementations::fp64x64::core::{ONE as ONE_fp64x64 }; -use orion::numbers::fixed_point::implementations::fp32x32::core::{ONE as ONE_fp32x32 }; +use orion::numbers::fixed_point::implementations::fp8x23::core::{ONE as ONE_fp8x23}; +use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE as ONE_fp16x16}; +use 
orion::numbers::fixed_point::implementations::fp64x64::core::{ONE as ONE_fp64x64}; +use orion::numbers::fixed_point::implementations::fp32x32::core::{ONE as ONE_fp32x32}; // Common methods from Fixed Point and Signed Integers. trait NumberTrait { @@ -1535,7 +1535,7 @@ impl I8Number of NumberTrait { 0 } fn is_zero(self: i8) -> bool { - self == 0 + self == 0 } fn half() -> i8 { @@ -1571,7 +1571,7 @@ impl I8Number of NumberTrait { } fn max_value() -> i8 { - 127 + 127 } fn min(self: i8, other: i8) -> i8 { @@ -1661,7 +1661,7 @@ impl I8Number of NumberTrait { } fn is_neg_inf(self: i8) -> bool { - self == -127 + self == -127 } fn bitwise_and(lhs: i8, rhs: i8) -> i8 { @@ -1702,7 +1702,7 @@ impl I8Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i8 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -1729,7 +1729,7 @@ impl I8IntoFP8x23 of Into { } let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); - FP8x23 {mag: number_u32 * ONE_fp8x23, sign: number_sign} + FP8x23 { mag: number_u32 * ONE_fp8x23, sign: number_sign } } } @@ -1742,7 +1742,7 @@ impl I8IntoFP16x16 of Into { } let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); - FP16x16 {mag: number_u32 * ONE_fp16x16, sign: number_sign} + FP16x16 { mag: number_u32 * ONE_fp16x16, sign: number_sign } } } @@ -1755,7 +1755,7 @@ impl I8IntoFP64x64 of Into { } let number_felt: felt252 = self_positive.into(); let number_u128: u128 = number_felt.try_into().unwrap(); - FP64x64 {mag: number_u128 * ONE_fp64x64, sign: number_sign} + FP64x64 { mag: number_u128 * ONE_fp64x64, sign: number_sign } } } @@ -1768,7 +1768,7 @@ impl I8IntoFP32x32 of Into { } let number_felt: felt252 = 
self_positive.into(); let number_u128: u64 = number_felt.try_into().unwrap(); - FP32x32 {mag: number_u128 * ONE_fp32x32, sign: number_sign} + FP32x32 { mag: number_u128 * ONE_fp32x32, sign: number_sign } } } @@ -1877,7 +1877,7 @@ impl I16Number of NumberTrait { 0 } fn is_zero(self: i16) -> bool { - self == 0 + self == 0 } fn half() -> i16 { @@ -2003,7 +2003,7 @@ impl I16Number of NumberTrait { } fn is_neg_inf(self: i16) -> bool { - self == -32767 + self == -32767 } fn bitwise_and(lhs: i16, rhs: i16) -> i16 { @@ -2044,7 +2044,7 @@ impl I16Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i16 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -2167,7 +2167,7 @@ impl I32Number of NumberTrait { 0 } fn is_zero(self: i32) -> bool { - self == 0 + self == 0 } fn half() -> i32 { @@ -2203,7 +2203,7 @@ impl I32Number of NumberTrait { } fn max_value() -> i32 { - 2147483647 + 2147483647 } fn min(self: i32, other: i32) -> i32 { @@ -2281,7 +2281,7 @@ impl I32Number of NumberTrait { } fn INF() -> i32 { - 2147483647 + 2147483647 } fn is_inf(self: i32) -> bool { @@ -2289,11 +2289,11 @@ impl I32Number of NumberTrait { } fn is_pos_inf(self: i32) -> bool { - self == 2147483647 + self == 2147483647 } fn is_neg_inf(self: i32) -> bool { - self == -2147483647 + self == -2147483647 } fn bitwise_and(lhs: i32, rhs: i32) -> i32 { @@ -2334,7 +2334,7 @@ impl I32Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i32 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -2470,7 +2470,7 @@ 
impl I64Number of NumberTrait { 0 } fn is_zero(self: i64) -> bool { - self == 0 + self == 0 } fn half() -> i64 { @@ -2506,7 +2506,7 @@ impl I64Number of NumberTrait { } fn max_value() -> i64 { - 9223372036854775807 + 9223372036854775807 } fn min(self: i64, other: i64) -> i64 { @@ -2584,7 +2584,7 @@ impl I64Number of NumberTrait { } fn INF() -> i64 { - 9223372036854775807 + 9223372036854775807 } fn is_inf(self: i64) -> bool { @@ -2592,11 +2592,11 @@ impl I64Number of NumberTrait { } fn is_pos_inf(self: i64) -> bool { - self == 9223372036854775807 + self == 9223372036854775807 } fn is_neg_inf(self: i64) -> bool { - self == -9223372036854775807 + self == -9223372036854775807 } fn bitwise_and(lhs: i64, rhs: i64) -> i64 { @@ -2637,7 +2637,7 @@ impl I64Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i64 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -2760,7 +2760,7 @@ impl I128Number of NumberTrait { 0 } fn is_zero(self: i128) -> bool { - self == 0 + self == 0 } fn half() -> i128 { @@ -2796,7 +2796,7 @@ impl I128Number of NumberTrait { } fn max_value() -> i128 { - 170141183460469231731687303715884105727 + 170141183460469231731687303715884105727 } fn min(self: i128, other: i128) -> i128 { @@ -2874,19 +2874,20 @@ impl I128Number of NumberTrait { } fn INF() -> i128 { - 170141183460469231731687303715884105727 + 170141183460469231731687303715884105727 } fn is_inf(self: i128) -> bool { - (self == 170141183460469231731687303715884105727 || self == -170141183460469231731687303715884105727) + (self == 170141183460469231731687303715884105727 + || self == -170141183460469231731687303715884105727) } fn is_pos_inf(self: i128) -> bool { - self == 170141183460469231731687303715884105727 + self == 
170141183460469231731687303715884105727 } fn is_neg_inf(self: i128) -> bool { - self == -170141183460469231731687303715884105727 + self == -170141183460469231731687303715884105727 } fn bitwise_and(lhs: i128, rhs: i128) -> i128 { @@ -2927,7 +2928,7 @@ impl I128Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i128 = felt_result.try_into().unwrap(); // assigning the sign and returning diff --git a/src/numbers/fixed_point/implementations/fp16x16/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/core.cairo index cff7996af..a260d886f 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/core.cairo @@ -436,9 +436,8 @@ fn _i8_try_from_fp(x: FP16x16) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, - Option::None(_) => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo index b3fe4d39b..176c1a115 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo @@ -451,7 +451,7 @@ fn _i8_try_from_fp(x: FP16x16W) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, Option::None(_) => Option::None(()) } diff --git a/src/numbers/fixed_point/implementations/fp32x32/core.cairo b/src/numbers/fixed_point/implementations/fp32x32/core.cairo index 9fa722e8e..34b06bc44 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/core.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/core.cairo @@ -402,9 +402,8 @@ fn 
_i8_try_from_fp(x: FP32x32) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, - Option::None(_) => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp64x64/core.cairo b/src/numbers/fixed_point/implementations/fp64x64/core.cairo index c98cb7c57..d35cb9cfa 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/core.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/core.cairo @@ -402,9 +402,8 @@ fn _i8_try_from_fp(x: FP64x64) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, - Option::None(_) => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp8x23/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/core.cairo index b1ab1b6ac..6db9a5a43 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/core.cairo @@ -425,7 +425,7 @@ fn _i32_into_fp(x: FP8x23) -> i32 { fn _i8_try_from_fp(x: FP8x23) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); -// Option::Some(i8 { mag: unscaled_mag.unwrap(), sign: x.sign }) + // Option::Some(i8 { mag: unscaled_mag.unwrap(), sign: x.sign }) match unscaled_mag { Option::Some(val) => { let number_felt: felt252 = unscaled_mag.unwrap().into(); @@ -433,7 +433,7 @@ fn _i8_try_from_fp(x: FP8x23) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, Option::None(_) => Option::None(()) } diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo index c4b49c798..9d9b985de 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo @@ -439,7 +439,7 @@ fn _i8_try_from_fp(x: FP8x23W) -> Option { if x.sign { return 
Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, Option::None(_) => Option::None(()) } diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 3c99f4733..82eb9649a 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -14,6 +14,7 @@ use orion::operators::tensor::core::Tensor; /// hard_sigmoid - Applies the Hard Sigmoid function to an n-dimensional input tensor. /// thresholded_relu - Performs the thresholded relu activation function element-wise. /// gemm - Performs General Matrix multiplication. +/// col2im - Rearranges column blocks back into a multidimensional image trait NNTrait { /// # NNTrait::relu /// @@ -694,4 +695,110 @@ trait NNTrait { transA: bool, transB: bool ) -> Tensor; + /// + /// # NNTrait::col2im + /// + /// ```rust + /// col2im( + /// data: @Tensor, + /// image_shape: Span, + /// block_shape: Span, + /// dilations: Option>, + /// pads: Option>, + /// strides: Option>, + /// ) -> Tensor + /// ``` + /// + /// The operator rearranges column blocks back into a multidimensional image + /// + /// Col2Im behaves similarly to PyTorch's fold https://pytorch.org/docs/stable/generated/torch.nn.Fold.html, but it only supports batched multi-dimensional image tensors. Another implementation in Python with N-dimension support can be found at https://github.com/f-dangel/unfoldNd/. + /// + /// ## Args + /// + /// * `data`(`@Tensor`) - Input data tensor to be rearranged from column blocks back into an image. This is a 3-dimensional tensor containing [N, C * n-ary-product(block_shape), L], where N is batch dimension, C is image channel dimension and L is number of blocks. + /// * `image_shape`(`Span`) - The shape of the spatial dimensions of the image after rearranging the column blocks.This is a 1-dimensional tensor with size of at least 2, containing the value [H_img, W_img] for a 2-D image or [dim_i1, dim_i2, ..., dim_iN] for a N-D image. 
+ /// * `block_shape`(`Span`) - The shape of the block to apply on the input.This is a 1-dimensional tensor of size of at least 2, containing the value [H_block, W_block] for a 2-D image or [dim_b1, dim_b2, ..., dim_bN] for a N-D block.This is the block-shape before dilation is applied to it. + /// * `dilations`(`Option>`) - 1-dimensional tensor with dilation value along each spatial axis of the image. If not present, the dilation defaults to 1 along each spatial axis of the image. + /// * `pads`(`Option>`) - 1-dimensional tensor with padding value for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end is the number of pixels added at the end of axis `i`. If not present, the padding defaults to 0 along start and end of each spatial axis. + /// * `strides`(`Option>`) - 1-dimensional tensor with stride value along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. + /// + /// ## Returns + /// + /// A `Tensor` output tensor produced by rearranging blocks into an image. 
+ /// + /// ## Examples + /// + /// ```rust + /// use orion::operators::nn::NNTrait; + /// use orion::numbers::FixedTrait; + /// use orion::operators::nn::FP16x16NN; + /// use orion::numbers::FP16x16; + /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; + /// + /// fn example_col2im() -> Tensor { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(5); + /// shape.append(5); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 393216, sign: false }); + /// data.append(FP16x16 { mag: 720896, sign: false }); + /// data.append(FP16x16 { mag: 1048576, sign: false }); + /// data.append(FP16x16 { mag: 1376256, sign: false }); + /// data.append(FP16x16 { mag: 131072, sign: false }); + /// data.append(FP16x16 { mag: 458752, sign: false }); + /// data.append(FP16x16 { mag: 786432, sign: false }); + /// data.append(FP16x16 { mag: 1114112, sign: false }); + /// data.append(FP16x16 { mag: 1441792, sign: false }); + /// data.append(FP16x16 { mag: 196608, sign: false }); + /// data.append(FP16x16 { mag: 524288, sign: false }); + /// data.append(FP16x16 { mag: 851968, sign: false }); + /// data.append(FP16x16 { mag: 1179648, sign: false }); + /// data.append(FP16x16 { mag: 1507328, sign: false }); + /// data.append(FP16x16 { mag: 262144, sign: false }); + /// data.append(FP16x16 { mag: 589824, sign: false }); + /// data.append(FP16x16 { mag: 917504, sign: false }); + /// data.append(FP16x16 { mag: 1245184, sign: false }); + /// data.append(FP16x16 { mag: 1572864, sign: false }); + /// data.append(FP16x16 { mag: 327680, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 983040, sign: false }); + /// data.append(FP16x16 { mag: 1310720, sign: false }); + /// data.append(FP16x16 { mag: 1638400, sign: false }); + /// let mut X = TensorTrait::new(shape.span(), data.span()); + /// + /// let image_shape = 
array![5, 5].span(); + /// let block_shape = array![1, 5].span(); + /// + /// return NNTrait::col2im( + /// @X, image_shape, block_shape, Option::None, Option::None, Option::None, + /// ); + /// + /// + /// } + /// >>> [ + /// [ + /// [ + /// [1.0, 2.0, 3.0, 4.0, 5.0], + /// [6.0, 7.0, 8.0, 9.0, 0.0], + /// [11.0, 12.0, 13.0, 14.0, 15.0], + /// [16.0, 17.0, 18.0, 19.0, 20.0], + /// [21.0, 22.0, 23.0, 24.0, 25.0], + /// ] + /// ] + /// ] + /// + /// ```` + /// + /// + fn col2im( + data: @Tensor, + image_shape: Span, + block_shape: Span, + dilations: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor; } diff --git a/src/operators/nn/functional.cairo b/src/operators/nn/functional.cairo index a0fd96cc8..29c8132ef 100644 --- a/src/operators/nn/functional.cairo +++ b/src/operators/nn/functional.cairo @@ -10,3 +10,4 @@ mod logsoftmax; mod thresholded_relu; mod hard_sigmoid; mod gemm; +mod col2im; diff --git a/src/operators/nn/functional/col2im.cairo b/src/operators/nn/functional/col2im.cairo new file mode 100644 index 000000000..9dc81a117 --- /dev/null +++ b/src/operators/nn/functional/col2im.cairo @@ -0,0 +1,332 @@ +use orion::numbers::NumberTrait; +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; +use orion::operators::vec::{NullableVec, NullableVecImpl}; +use orion::operators::tensor::core::{stride}; + + +fn col2im, +NumberTrait, +Copy, +Drop, +Add, +Mul,>( + data: @Tensor, + image_shape: Span, + block_shape: Span, + dilations: Option>, + pads: Option>, + strides: Option>, +) -> Tensor { + let dilations = match dilations { + Option::Some(dilations) => dilations, + Option::None => { + let mut dilations = ArrayTrait::new(); + let mut i = 0; + loop { + if i == image_shape.len() { + break; + } + dilations.append(1); + i += 1; + }; + dilations.span() + }, + }; + + let pads = match pads { + Option::Some(pads) => pads, + Option::None => { + let mut pads = ArrayTrait::new(); + let mut i = 0; + loop { + if i == image_shape.len() { + break; + } + 
pads.append(0); + pads.append(0); + i += 1; + }; + pads.span() + }, + }; + let strides = match strides { + Option::Some(strides) => strides, + Option::None => { + let mut strides = ArrayTrait::new(); + let mut i = 0; + loop { + if i == image_shape.len() { + break; + } + strides.append(1); + i += 1; + }; + strides.span() + }, + }; + + let bl = prod(block_shape, 0); + let C = *(*data).shape.at(1) / bl; + + let mut new_shape = array![*(*data).shape.at(0), C, bl]; + let mut i = 2; + loop { + if i == (*data).shape.len() { + break; + } + new_shape.append(*(*data).shape.at(i)); + i += 1; + }; + let data = data.reshape(new_shape.span()); + + let mut res = ArrayTrait::new(); + let data_stride = stride(data.shape); + + let mut n = 0; + loop { + if n == *data.shape.at(0) { + break; + } + let mut c = 0; + loop { + if c == *data.shape.at(1) { + break; + } + let data_n_c = TensorTrait::new( + SpanTrait::slice(data.shape, 2, data.shape.len() - 2), + SpanTrait::slice( + data.data, n * *data_stride.at(0) + c * *data_stride.at(1), *data_stride.at(1) + ) + ); + let mut out = col2im_naive_implementation( + @data_n_c, image_shape, block_shape, dilations, pads, strides + ); + let mut i = 0; + loop { + if i == out.len() { + break; + } + res.append(out.at(i)); + i += 1; + }; + c += 1; + }; + n += 1; + }; + + let mut new_shape = array![*data.shape.at(0), *data.shape.at(1)]; + let mut i = 0; + loop { + if i == image_shape.len() { + break; + } + new_shape.append(*image_shape.at(i)); + i += 1; + }; + + return TensorTrait::new(new_shape.span(), res.span()); +} + +fn get_image, +Copy>(self: @Tensor, row: usize) -> Span { + assert((*self).shape.len() == 2, 'Expected a 2D tensor'); + + let row_length = *self.shape[1]; + let start = row * row_length; + + (*self).data.slice(start, row_length) +} + +fn col2im_naive_implementation< + T, MAG, +TensorTrait, +NumberTrait, +Copy, +Drop, +Add, +>( + data: @Tensor, + image_shape: Span, + kernel_shape: Span, + dilations: Span, + pads: Span, + strides: Span, 
+) -> NullableVec { + let n_dims = pads.len() / 2; + + col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides); + + let data_col = data; + let mut dim_col = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + dim_col + .append( + (*image_shape.at(i) + + (*pads.at(i) + *pads.at(i + n_dims)) + - (*dilations.at(i) * (*kernel_shape.at(i) - 1) + 1)) + / *strides.at(i) + + 1 + ); + + i += 1; + }; + let dim_col = dim_col.span(); + + let stride_img = stride(image_shape); + + let mut data_im = NullableVecImpl::new(); + data_im.set(*image_shape.at(0) * *stride_img.at(0) - 1, NumberTrait::zero()); + + let kernel_size = prod(kernel_shape, 0); + let col_size = prod(dim_col, 0); + let mut c_col = 0; + loop { + if c_col == kernel_size { + break; + } + let offset = get_indices(c_col, kernel_shape).span(); + + let mut col = 0; + loop { + if col == col_size { + break; + } + let ind_col = get_indices(col, dim_col).span(); + let mut ind_im = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + if (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)) < *pads + .at(i) { + let neg_index = *pads.at(i) + - (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)); + ind_im.append(*image_shape.at(i) + neg_index); + } else { + ind_im + .append( + *ind_col.at(i) * *strides.at(i) + + *offset.at(i) * *dilations.at(i) + - *pads.at(i) + ); + } + + i += 1; + }; + let ind_im = ind_im.span(); + if !is_out(ind_im, image_shape) { + let mut index = 0; + let mut i = 0; + loop { + if i == image_shape.len() { + break; + } + index += *stride_img.at(i) * *ind_im.at(i); + i += 1; + }; + data_im.set(index, data_im.at(index) + *(*data).data.at(c_col * col_size + col)); + } + col += 1; + }; + c_col += 1; + }; + + return data_im; +} + +fn col2im_shape_check, +Copy, +Drop,>( + X: @Tensor, + output_shape: Span, + kernel_shape: Span, + dilations: Span, + pads: Span, + strides: Span, +) { + let n_input_plane = 
*(*X).shape.at(0); + + let kernel_size = prod(kernel_shape, 0); + + assert(n_input_plane % kernel_size == 0, 'wrong input dimension'); + + let input_length = *(*X).shape.at(1); + let n_dims = output_shape.len(); + let mut n_blocks = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == n_dims { + break; + } + n_blocks + .append( + (*output_shape.at(i) + + (*pads.at(i) + *pads.at(i + n_dims)) + - *dilations.at(i) * (*kernel_shape.at(i) - 1) + - 1) + / *strides.at(i) + + 1 + ); + i += 1; + }; + + let block_size = prod(n_blocks.span(), 0); + + assert(input_length == block_size, 'input_length != block_size'); +} + + +fn get_indices(index: usize, shape: Span,) -> Array { + let mut i = index; + let mut res = ArrayTrait::new(); + let mut k = shape.len() - 1; + loop { + if k == 0 { + break; + } + let m = i % *shape.at(k); + res.append(m); + i -= m; + i /= *shape.at(k); + k -= 1; + }; + + let mut new_res = ArrayTrait::new(); + new_res.append(i); + let mut i = shape.len() - 1; + loop { + if i == 0 { + break; + } + new_res.append(*res.at(i - 1)); + i -= 1; + }; + return new_res; +} + +fn is_out(ind: Span, shape: Span,) -> bool { + let mut n = 0; + let is_out = loop { + if n == ind.len() { + break false; + } + let s = *shape.at(n); + let i = *ind.at(n); + if i < 0 { + break true; + } + if i >= s { + break true; + } + n += 1; + }; + return is_out; +} + +fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( + pA: Span, start: usize +) -> T { + let mut i = start; + let mut prod = NumberTrait::one(); + loop { + if i == pA.len() { + break; + } + prod = prod * (*pA.at(i)); + i += 1; + }; + return prod; +} diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 785d3c9fa..f802bb208 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -72,4 +72,15 @@ impl FP16x16NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) 
} + + fn col2im( + data: @Tensor, + image_shape: Span, + block_shape: Span, + dilations: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) + } } diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index 0427ea5f7..40abd64be 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -66,4 +66,15 @@ impl FP32x32NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn col2im( + data: @Tensor, + image_shape: Span, + block_shape: Span, + dilations: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) + } } diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index fec810679..e8b087859 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -66,4 +66,15 @@ impl FP64x64NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn col2im( + data: @Tensor, + image_shape: Span, + block_shape: Span, + dilations: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) + } } diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index 9f5416121..947a9a100 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -70,4 +70,15 @@ impl FP8x23NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn col2im( + data: @Tensor, + image_shape: Span, + block_shape: Span, + dilations: Option>, + pads: Option>, + 
strides: Option>, + ) -> Tensor { + functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) + } } diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index 1db66a1c6..239a5773a 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -61,4 +61,15 @@ impl I32NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn col2im( + data: @Tensor, + image_shape: Span, + block_shape: Span, + dilations: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) + } } diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index e67bb7504..0aefa3d31 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -61,4 +61,15 @@ impl I8NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn col2im( + data: @Tensor, + image_shape: Span, + block_shape: Span, + dilations: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) + } } diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 370880e8d..3908cfc30 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -61,4 +61,15 @@ impl U32NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn col2im( + data: @Tensor, + image_shape: Span, + block_shape: Span, + dilations: Option>, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::col2im::col2im(data, image_shape, block_shape, dilations, pads, strides,) + } } diff --git 
a/src/operators/sequence/functional/sequence_at.cairo b/src/operators/sequence/functional/sequence_at.cairo index 7953abb9d..4a4aa9203 100644 --- a/src/operators/sequence/functional/sequence_at.cairo +++ b/src/operators/sequence/functional/sequence_at.cairo @@ -8,7 +8,9 @@ use orion::numbers::{NumberTrait, I32IntoU32, U32IntoI32}; fn sequence_at, impl TCopy: Copy, impl TDrop: Drop>( sequence: Array>, position: Tensor ) -> Tensor { - assert(position.shape.len() == 0 && position.data.len().into() == 1, 'Position must be a scalar'); + assert( + position.shape.len() == 0 && position.data.len().into() == 1, 'Position must be a scalar' + ); let position_value_i32: i32 = *position.data.at(0); let is_negative: bool = position_value_i32 < 0; diff --git a/src/operators/sequence/functional/sequence_erase.cairo b/src/operators/sequence/functional/sequence_erase.cairo index dd2a2aad6..573087b1f 100644 --- a/src/operators/sequence/functional/sequence_erase.cairo +++ b/src/operators/sequence/functional/sequence_erase.cairo @@ -3,7 +3,7 @@ use core::option::OptionTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::I32Tensor; -use orion::numbers::{ NumberTrait, I32IntoU32}; +use orion::numbers::{NumberTrait, I32IntoU32}; /// Cf: SequenceTrait::sequence_erase docstring fn sequence_erase, impl TCopy: Copy, impl TDrop: Drop>( @@ -56,4 +56,3 @@ fn sequence_erase, impl TCopy: Copy, impl TDr return output_sequence; } - diff --git a/src/operators/sequence/functional/sequence_insert.cairo b/src/operators/sequence/functional/sequence_insert.cairo index 256a1b91c..412fc6c4b 100644 --- a/src/operators/sequence/functional/sequence_insert.cairo +++ b/src/operators/sequence/functional/sequence_insert.cairo @@ -3,7 +3,7 @@ use core::option::OptionTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::I32Tensor; -use orion::numbers::{ NumberTrait, I32IntoU32}; +use orion::numbers::{NumberTrait, I32IntoU32}; /// 
Cf: SequenceTrait::sequence_insert docstring fn sequence_insert, impl TCopy: Copy, impl TDrop: Drop>( @@ -55,4 +55,4 @@ fn sequence_insert, impl TCopy: Copy, impl TD }; return new_sequence; -} \ No newline at end of file +} diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 70344eb97..4245b418f 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -5559,10 +5559,14 @@ fn squeeze(self: @Tensor, axes: Option>) -> Tensor { let mut reshape: Array = ArrayTrait::new(); let mut index = 0_i32; let axis = if *axis < 0 { - assert(*axis <= (*self.shape).len().into(), 'axis out of accepted range'); + assert( + *axis <= (*self.shape).len().into(), 'axis out of accepted range' + ); (*self.shape).len().into() - *axis } else { - assert(*axis < (*self.shape).len().into(), 'axis out of accepted range'); + assert( + *axis < (*self.shape).len().into(), 'axis out of accepted range' + ); *axis }; diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo index 8c7e2b359..894dfc8d4 100644 --- a/src/operators/tensor/helpers.cairo +++ b/src/operators/tensor/helpers.cairo @@ -496,4 +496,4 @@ impl SpanPartialOrd, +Copy, +PartialEq, +PartialOrd> of Par fn lt(lhs: Span, rhs: Span) -> bool { span_cmp(lhs, rhs) < 0 } -} \ No newline at end of file +} diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 50383d2df..890a2d3b2 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -3,7 +3,7 @@ use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::{TryInto, Into}; -use orion::numbers::{ I32Div, I32DivEq }; +use orion::numbers::{I32Div, I32DivEq}; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -221,13 +221,7 @@ impl I32Tensor of TensorTrait { 
fn quantize_linear( self: @Tensor, y_scale: @Tensor, y_zero_point: @Tensor ) -> Tensor:: { - quantization::quantize_linear::quantize_linear( - self, - y_scale, - y_zero_point, - -127, - 127 - ) + quantization::quantize_linear::quantize_linear(self, y_scale, y_zero_point, -127, 127) } fn dequantize_linear( diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 7e81d90eb..9366a0347 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -3,7 +3,7 @@ use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::{TryInto, Into}; -use orion::numbers::{ I8Div, I8DivEq }; +use orion::numbers::{I8Div, I8DivEq}; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo index 372d5b1c2..bb0d9579b 100644 --- a/src/operators/tensor/math/layer_normalization.cairo +++ b/src/operators/tensor/math/layer_normalization.cairo @@ -3,7 +3,7 @@ use core::array::ArrayTrait; use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::Into; -use orion::numbers::{ NumberTrait, I32IntoU32}; +use orion::numbers::{NumberTrait, I32IntoU32}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; @@ -51,7 +51,6 @@ fn layer_normalization< Option::None => 1, }; - let axis = if axis < 0 { X_rank - axis.into() } else { diff --git a/src/test_helper/tensor/i32.cairo b/src/test_helper/tensor/i32.cairo index 0451fa442..89979eef0 100644 --- a/src/test_helper/tensor/i32.cairo +++ b/src/test_helper/tensor/i32.cairo @@ -93,7 +93,7 @@ fn i32_tensor_3x3_neg_helper() -> Tensor { sizes.append(3); let mut data = ArrayTrait::new(); - + data.append(0_i32); data.append(-1_i32); 
data.append(-2_i32); @@ -338,7 +338,6 @@ fn i32_tensor_3x3x3_helper() -> Tensor { data.append(24_i32); data.append(25_i32); data.append(26_i32); - let tensor = TensorTrait::new(sizes.span(), data.span()); diff --git a/src/test_helper/tensor/i8.cairo b/src/test_helper/tensor/i8.cairo index e492ad913..6d85e4b3e 100644 --- a/src/test_helper/tensor/i8.cairo +++ b/src/test_helper/tensor/i8.cairo @@ -93,7 +93,7 @@ fn i8_tensor_3x3_neg_helper() -> Tensor { sizes.append(3); let mut data = ArrayTrait::new(); - + data.append(0_i8); data.append(-1_i8); data.append(-2_i8); @@ -338,7 +338,6 @@ fn i8_tensor_3x3x3_helper() -> Tensor { data.append(24_i8); data.append(25_i8); data.append(26_i8); - let tensor = TensorTrait::new(sizes.span(), data.span()); diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 6c70b42cb..f1a731688 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -936,3 +936,8 @@ mod split_fp16x16_2d_variable_parts; mod split_fp16x16_zero_size; mod split_fp16x16_1d_uneven; mod split_fp16x16_2d_uneven; +mod col2im; +mod col2im_5D; +mod col2im_dilations; +mod col2im_pads; +mod col2im_strides; diff --git a/tests/nodes/clip_fp16x16_2d.cairo b/tests/nodes/clip_fp16x16_2d.cairo index d779d2790..b576203eb 100644 --- a/tests/nodes/clip_fp16x16_2d.cairo +++ b/tests/nodes/clip_fp16x16_2d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp16x16_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP16x16 { mag: 655360, sign: true }), Option::Some(FP16x16 { mag: 1310720, sign: false })); + let y = input_0 + .clip( + Option::Some(FP16x16 { mag: 655360, sign: true }), + Option::Some(FP16x16 { mag: 1310720, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/clip_fp16x16_3d.cairo b/tests/nodes/clip_fp16x16_3d.cairo index d82de09dc..98bed1a61 100644 --- a/tests/nodes/clip_fp16x16_3d.cairo +++ b/tests/nodes/clip_fp16x16_3d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp16x16_3d() { let input_0 = input_0::input_0(); let z = 
output_0::output_0(); - let y = input_0.clip(Option::Some(FP16x16 { mag: 655360, sign: true }), Option::Some(FP16x16 { mag: 1310720, sign: false })); + let y = input_0 + .clip( + Option::Some(FP16x16 { mag: 655360, sign: true }), + Option::Some(FP16x16 { mag: 1310720, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/clip_fp8x23_2d.cairo b/tests/nodes/clip_fp8x23_2d.cairo index 64f1792a1..60b38b565 100644 --- a/tests/nodes/clip_fp8x23_2d.cairo +++ b/tests/nodes/clip_fp8x23_2d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp8x23_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP8x23 { mag: 83886080, sign: true }), Option::Some(FP8x23 { mag: 167772160, sign: false })); + let y = input_0 + .clip( + Option::Some(FP8x23 { mag: 83886080, sign: true }), + Option::Some(FP8x23 { mag: 167772160, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/clip_fp8x23_3d.cairo b/tests/nodes/clip_fp8x23_3d.cairo index 511b33859..cc80a61d7 100644 --- a/tests/nodes/clip_fp8x23_3d.cairo +++ b/tests/nodes/clip_fp8x23_3d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp8x23_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP8x23 { mag: 83886080, sign: true }), Option::Some(FP8x23 { mag: 167772160, sign: false })); + let y = input_0 + .clip( + Option::Some(FP8x23 { mag: 83886080, sign: true }), + Option::Some(FP8x23 { mag: 167772160, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/col2im.cairo b/tests/nodes/col2im.cairo new file mode 100644 index 000000000..a2e8466cd --- /dev/null +++ b/tests/nodes/col2im.cairo @@ -0,0 +1,22 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::numbers::FixedTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_col2im() { + let input_0 = 
input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::col2im( + @input_0, array![5, 5].span(), array![1, 5].span(), Option::None, Option::None, Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/col2im/input_0.cairo b/tests/nodes/col2im/input_0.cairo new file mode 100644 index 000000000..08bc60497 --- /dev/null +++ b/tests/nodes/col2im/input_0.cairo @@ -0,0 +1,39 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false 
}); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/col2im/output_0.cairo b/tests/nodes/col2im/output_0.cairo new file mode 100644 index 000000000..b231f7f65 --- /dev/null +++ b/tests/nodes/col2im/output_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { 
mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/col2im_5D.cairo b/tests/nodes/col2im_5D.cairo new file mode 100644 index 000000000..58c3e5f51 --- /dev/null +++ b/tests/nodes/col2im_5D.cairo @@ -0,0 +1,27 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::numbers::FixedTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_col2im_5D() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::col2im( + @input_0, + array![3, 4, 5].span(), + array![1, 1, 5].span(), + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/col2im_5D/input_0.cairo b/tests/nodes/col2im_5D/input_0.cairo new file mode 100644 index 000000000..9aa20029d --- /dev/null +++ b/tests/nodes/col2im_5D/input_0.cairo @@ -0,0 +1,134 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(10); + shape.append(12); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 2686976, sign: false }); + data.append(FP16x16 { mag: 3014656, sign: false }); + 
data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 2424832, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 3080192, sign: false }); + data.append(FP16x16 { mag: 3407872, sign: false }); + data.append(FP16x16 { mag: 3735552, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 2490368, sign: false }); + data.append(FP16x16 { mag: 2818048, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 3473408, sign: false }); + data.append(FP16x16 { mag: 3801088, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1900544, sign: false }); + data.append(FP16x16 { mag: 2228224, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + 
data.append(FP16x16 { mag: 3866624, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 2293760, sign: false }); + data.append(FP16x16 { mag: 2621440, sign: false }); + data.append(FP16x16 { mag: 2949120, sign: false }); + data.append(FP16x16 { mag: 3276800, sign: false }); + data.append(FP16x16 { mag: 3604480, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 3997696, sign: false }); + data.append(FP16x16 { mag: 4325376, sign: false }); + data.append(FP16x16 { mag: 4653056, sign: false }); + data.append(FP16x16 { mag: 4980736, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 5636096, sign: false }); + data.append(FP16x16 { mag: 5963776, sign: false }); + data.append(FP16x16 { mag: 6291456, sign: false }); + data.append(FP16x16 { mag: 6619136, sign: false }); + data.append(FP16x16 { mag: 6946816, sign: false }); + data.append(FP16x16 { mag: 7274496, sign: false }); + data.append(FP16x16 { mag: 7602176, sign: false }); + data.append(FP16x16 { mag: 4063232, sign: false }); + data.append(FP16x16 { mag: 4390912, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 5046272, sign: false }); + data.append(FP16x16 { mag: 5373952, sign: false }); + data.append(FP16x16 { mag: 5701632, sign: false }); + data.append(FP16x16 { mag: 6029312, sign: false }); + data.append(FP16x16 { mag: 6356992, sign: false }); + data.append(FP16x16 { mag: 6684672, sign: false }); + data.append(FP16x16 { mag: 7012352, sign: false }); + data.append(FP16x16 { mag: 7340032, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + 
data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 4456448, sign: false }); + data.append(FP16x16 { mag: 4784128, sign: false }); + data.append(FP16x16 { mag: 5111808, sign: false }); + data.append(FP16x16 { mag: 5439488, sign: false }); + data.append(FP16x16 { mag: 5767168, sign: false }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 6422528, sign: false }); + data.append(FP16x16 { mag: 6750208, sign: false }); + data.append(FP16x16 { mag: 7077888, sign: false }); + data.append(FP16x16 { mag: 7405568, sign: false }); + data.append(FP16x16 { mag: 7733248, sign: false }); + data.append(FP16x16 { mag: 4194304, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 4849664, sign: false }); + data.append(FP16x16 { mag: 5177344, sign: false }); + data.append(FP16x16 { mag: 5505024, sign: false }); + data.append(FP16x16 { mag: 5832704, sign: false }); + data.append(FP16x16 { mag: 6160384, sign: false }); + data.append(FP16x16 { mag: 6488064, sign: false }); + data.append(FP16x16 { mag: 6815744, sign: false }); + data.append(FP16x16 { mag: 7143424, sign: false }); + data.append(FP16x16 { mag: 7471104, sign: false }); + data.append(FP16x16 { mag: 7798784, sign: false }); + data.append(FP16x16 { mag: 4259840, sign: false }); + data.append(FP16x16 { mag: 4587520, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + data.append(FP16x16 { mag: 5570560, sign: false }); + data.append(FP16x16 { mag: 5898240, sign: false }); + data.append(FP16x16 { mag: 6225920, sign: false }); + data.append(FP16x16 { mag: 6553600, sign: false }); + data.append(FP16x16 { mag: 6881280, sign: false }); + data.append(FP16x16 { mag: 7208960, sign: false }); + data.append(FP16x16 { mag: 7536640, sign: false }); + data.append(FP16x16 { mag: 7864320, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff 
--git a/tests/nodes/col2im_5D/output_0.cairo b/tests/nodes/col2im_5D/output_0.cairo new file mode 100644 index 000000000..316fc88fa --- /dev/null +++ b/tests/nodes/col2im_5D/output_0.cairo @@ -0,0 +1,136 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false 
}); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + data.append(FP16x16 { mag: 1900544, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 2228224, sign: false }); + data.append(FP16x16 { mag: 2293760, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 2424832, sign: false }); + data.append(FP16x16 { mag: 2490368, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 2621440, sign: false }); + data.append(FP16x16 { mag: 2686976, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 2818048, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 2949120, sign: false }); + data.append(FP16x16 { mag: 3014656, sign: false }); + data.append(FP16x16 { mag: 3080192, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 3276800, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 3407872, sign: false }); + data.append(FP16x16 { mag: 3473408, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 3604480, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 3735552, sign: false }); + data.append(FP16x16 { mag: 3801088, sign: false }); + data.append(FP16x16 { mag: 3866624, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 3997696, sign: false }); + data.append(FP16x16 { mag: 4063232, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false 
}); + data.append(FP16x16 { mag: 4194304, sign: false }); + data.append(FP16x16 { mag: 4259840, sign: false }); + data.append(FP16x16 { mag: 4325376, sign: false }); + data.append(FP16x16 { mag: 4390912, sign: false }); + data.append(FP16x16 { mag: 4456448, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 4587520, sign: false }); + data.append(FP16x16 { mag: 4653056, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 4784128, sign: false }); + data.append(FP16x16 { mag: 4849664, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + data.append(FP16x16 { mag: 4980736, sign: false }); + data.append(FP16x16 { mag: 5046272, sign: false }); + data.append(FP16x16 { mag: 5111808, sign: false }); + data.append(FP16x16 { mag: 5177344, sign: false }); + data.append(FP16x16 { mag: 5242880, sign: false }); + data.append(FP16x16 { mag: 5308416, sign: false }); + data.append(FP16x16 { mag: 5373952, sign: false }); + data.append(FP16x16 { mag: 5439488, sign: false }); + data.append(FP16x16 { mag: 5505024, sign: false }); + data.append(FP16x16 { mag: 5570560, sign: false }); + data.append(FP16x16 { mag: 5636096, sign: false }); + data.append(FP16x16 { mag: 5701632, sign: false }); + data.append(FP16x16 { mag: 5767168, sign: false }); + data.append(FP16x16 { mag: 5832704, sign: false }); + data.append(FP16x16 { mag: 5898240, sign: false }); + data.append(FP16x16 { mag: 5963776, sign: false }); + data.append(FP16x16 { mag: 6029312, sign: false }); + data.append(FP16x16 { mag: 6094848, sign: false }); + data.append(FP16x16 { mag: 6160384, sign: false }); + data.append(FP16x16 { mag: 6225920, sign: false }); + data.append(FP16x16 { mag: 6291456, sign: false }); + data.append(FP16x16 { mag: 6356992, sign: false }); + data.append(FP16x16 { mag: 6422528, sign: false }); + data.append(FP16x16 { mag: 6488064, sign: false }); + data.append(FP16x16 { mag: 6553600, sign: false 
}); + data.append(FP16x16 { mag: 6619136, sign: false }); + data.append(FP16x16 { mag: 6684672, sign: false }); + data.append(FP16x16 { mag: 6750208, sign: false }); + data.append(FP16x16 { mag: 6815744, sign: false }); + data.append(FP16x16 { mag: 6881280, sign: false }); + data.append(FP16x16 { mag: 6946816, sign: false }); + data.append(FP16x16 { mag: 7012352, sign: false }); + data.append(FP16x16 { mag: 7077888, sign: false }); + data.append(FP16x16 { mag: 7143424, sign: false }); + data.append(FP16x16 { mag: 7208960, sign: false }); + data.append(FP16x16 { mag: 7274496, sign: false }); + data.append(FP16x16 { mag: 7340032, sign: false }); + data.append(FP16x16 { mag: 7405568, sign: false }); + data.append(FP16x16 { mag: 7471104, sign: false }); + data.append(FP16x16 { mag: 7536640, sign: false }); + data.append(FP16x16 { mag: 7602176, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 7733248, sign: false }); + data.append(FP16x16 { mag: 7798784, sign: false }); + data.append(FP16x16 { mag: 7864320, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/col2im_dilations.cairo b/tests/nodes/col2im_dilations.cairo new file mode 100644 index 000000000..04dc504c1 --- /dev/null +++ b/tests/nodes/col2im_dilations.cairo @@ -0,0 +1,27 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::numbers::FixedTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_col2im_dilations() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::col2im( + @input_0, + array![6, 6].span(), + array![2, 2].span(), + Option::Some(array![1, 5].span()), + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/col2im_dilations/input_0.cairo 
b/tests/nodes/col2im_dilations/input_0.cairo new file mode 100644 index 000000000..8bb8662f0 --- /dev/null +++ b/tests/nodes/col2im_dilations/input_0.cairo @@ -0,0 +1,34 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/col2im_dilations/output_0.cairo b/tests/nodes/col2im_dilations/output_0.cairo new file mode 100644 index 000000000..a17966d01 --- /dev/null +++ b/tests/nodes/col2im_dilations/output_0.cairo @@ -0,0 +1,51 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(6); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 2228224, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + 
data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/col2im_pads.cairo b/tests/nodes/col2im_pads.cairo new file mode 100644 index 000000000..21eedcb28 --- /dev/null +++ b/tests/nodes/col2im_pads.cairo @@ -0,0 +1,27 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::numbers::FixedTrait; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_col2im_pads() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::col2im( + @input_0, + array![5, 5].span(), + array![1, 5].span(), + Option::None, + Option::Some(array![0, 1, 0, 1].span()), + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/col2im_pads/input_0.cairo b/tests/nodes/col2im_pads/input_0.cairo new file mode 100644 index 000000000..2f7db424d --- /dev/null +++ b/tests/nodes/col2im_pads/input_0.cairo @@ -0,0 +1,89 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(5); + shape.append(15); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 2031616, sign: false }); + data.append(FP16x16 { mag: 2359296, sign: false }); + data.append(FP16x16 { mag: 2686976, sign: false }); + 
data.append(FP16x16 { mag: 3014656, sign: false }); + data.append(FP16x16 { mag: 3342336, sign: false }); + data.append(FP16x16 { mag: 3670016, sign: false }); + data.append(FP16x16 { mag: 3997696, sign: false }); + data.append(FP16x16 { mag: 4325376, sign: false }); + data.append(FP16x16 { mag: 4653056, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 2424832, sign: false }); + data.append(FP16x16 { mag: 2752512, sign: false }); + data.append(FP16x16 { mag: 3080192, sign: false }); + data.append(FP16x16 { mag: 3407872, sign: false }); + data.append(FP16x16 { mag: 3735552, sign: false }); + data.append(FP16x16 { mag: 4063232, sign: false }); + data.append(FP16x16 { mag: 4390912, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1835008, sign: false }); + data.append(FP16x16 { mag: 2162688, sign: false }); + data.append(FP16x16 { mag: 2490368, sign: false }); + data.append(FP16x16 { mag: 2818048, sign: false }); + data.append(FP16x16 { mag: 3145728, sign: false }); + data.append(FP16x16 { mag: 3473408, sign: false }); + data.append(FP16x16 { mag: 3801088, sign: false }); + data.append(FP16x16 { mag: 4128768, sign: false }); + data.append(FP16x16 { mag: 4456448, sign: false }); + data.append(FP16x16 { mag: 4784128, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + 
data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1900544, sign: false }); + data.append(FP16x16 { mag: 2228224, sign: false }); + data.append(FP16x16 { mag: 2555904, sign: false }); + data.append(FP16x16 { mag: 2883584, sign: false }); + data.append(FP16x16 { mag: 3211264, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 3866624, sign: false }); + data.append(FP16x16 { mag: 4194304, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 4849664, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1966080, sign: false }); + data.append(FP16x16 { mag: 2293760, sign: false }); + data.append(FP16x16 { mag: 2621440, sign: false }); + data.append(FP16x16 { mag: 2949120, sign: false }); + data.append(FP16x16 { mag: 3276800, sign: false }); + data.append(FP16x16 { mag: 3604480, sign: false }); + data.append(FP16x16 { mag: 3932160, sign: false }); + data.append(FP16x16 { mag: 4259840, sign: false }); + data.append(FP16x16 { mag: 4587520, sign: false }); + data.append(FP16x16 { mag: 4915200, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/col2im_pads/output_0.cairo b/tests/nodes/col2im_pads/output_0.cairo new file mode 100644 index 000000000..c921cd238 --- /dev/null +++ b/tests/nodes/col2im_pads/output_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use 
orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1769472, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 2490368, sign: false }); + data.append(FP16x16 { mag: 4325376, sign: false }); + data.append(FP16x16 { mag: 4521984, sign: false }); + data.append(FP16x16 { mag: 4718592, sign: false }); + data.append(FP16x16 { mag: 3538944, sign: false }); + data.append(FP16x16 { mag: 4456448, sign: false }); + data.append(FP16x16 { mag: 7274496, sign: false }); + data.append(FP16x16 { mag: 7471104, sign: false }); + data.append(FP16x16 { mag: 7667712, sign: false }); + data.append(FP16x16 { mag: 5505024, sign: false }); + data.append(FP16x16 { mag: 6422528, sign: false }); + data.append(FP16x16 { mag: 10223616, sign: false }); + data.append(FP16x16 { mag: 10420224, sign: false }); + data.append(FP16x16 { mag: 10616832, sign: false }); + data.append(FP16x16 { mag: 7471104, sign: false }); + data.append(FP16x16 { mag: 8388608, sign: false }); + data.append(FP16x16 { mag: 13172736, sign: false }); + data.append(FP16x16 { mag: 13369344, sign: false }); + data.append(FP16x16 { mag: 13565952, sign: false }); + data.append(FP16x16 { mag: 9437184, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/col2im_strides.cairo b/tests/nodes/col2im_strides.cairo new file mode 100644 index 000000000..65bffba1e --- /dev/null +++ b/tests/nodes/col2im_strides.cairo @@ -0,0 +1,27 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::numbers::FixedTrait; +use 
orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_col2im_strides() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::col2im( + @input_0, + array![5, 5].span(), + array![3, 3].span(), + Option::None, + Option::None, + Option::Some(array![2, 2].span()) + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/col2im_strides/input_0.cairo b/tests/nodes/col2im_strides/input_0.cairo new file mode 100644 index 000000000..12e910f8e --- /dev/null +++ b/tests/nodes/col2im_strides/input_0.cairo @@ -0,0 +1,50 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(9); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: 
false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/col2im_strides/output_0.cairo b/tests/nodes/col2im_strides/output_0.cairo new file mode 100644 index 000000000..8921c4edb --- /dev/null +++ b/tests/nodes/col2im_strides/output_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: 
false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/compress_fp16x16_3d_axis1.cairo b/tests/nodes/compress_fp16x16_3d_axis1.cairo index 2463dfa93..4189bd1e9 100644 --- a/tests/nodes/compress_fp16x16_3d_axis1.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_axis2.cairo b/tests/nodes/compress_fp16x16_3d_axis2.cairo index a425e0988..e17e6bed4 100644 --- a/tests/nodes/compress_fp16x16_3d_axis2.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_axis3.cairo b/tests/nodes/compress_fp16x16_3d_axis3.cairo index 3ad15cc97..fa9efb511 100644 --- 
a/tests/nodes/compress_fp16x16_3d_axis3.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis3.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis3() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(3)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(3)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_default.cairo b/tests/nodes/compress_fp16x16_3d_default.cairo index 4bff29c09..0a8b68bf2 100644 --- a/tests/nodes/compress_fp16x16_3d_default.cairo +++ b/tests/nodes/compress_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_noaxis.cairo b/tests/nodes/compress_fp16x16_3d_noaxis.cairo index e637f47c8..4e1b1620e 100644 --- a/tests/nodes/compress_fp16x16_3d_noaxis.cairo +++ b/tests/nodes/compress_fp16x16_3d_noaxis.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_noaxis() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::None(())); + let y_0 = input_0.compress(condition: input_1, axis: Option::None(())); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_axis1.cairo b/tests/nodes/compress_fp8x23_3d_axis1.cairo index 24829c58f..03bdc8815 100644 --- a/tests/nodes/compress_fp8x23_3d_axis1.cairo +++ b/tests/nodes/compress_fp8x23_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git 
a/tests/nodes/compress_fp8x23_3d_axis2.cairo b/tests/nodes/compress_fp8x23_3d_axis2.cairo index c4cf9a814..ca6bc4ec6 100644 --- a/tests/nodes/compress_fp8x23_3d_axis2.cairo +++ b/tests/nodes/compress_fp8x23_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_default.cairo b/tests/nodes/compress_fp8x23_3d_default.cairo index 6f590b622..f9acf8b7b 100644 --- a/tests/nodes/compress_fp8x23_3d_default.cairo +++ b/tests/nodes/compress_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_axis1.cairo b/tests/nodes/compress_i32_3d_axis1.cairo index e3d6a8072..6d3142fec 100644 --- a/tests/nodes/compress_i32_3d_axis1.cairo +++ b/tests/nodes/compress_i32_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_axis2.cairo b/tests/nodes/compress_i32_3d_axis2.cairo index 3ae5828c8..242aef0ae 100644 --- a/tests/nodes/compress_i32_3d_axis2.cairo +++ b/tests/nodes/compress_i32_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: 
input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_default.cairo b/tests/nodes/compress_i32_3d_default.cairo index dde8e15cf..ab19213b0 100644 --- a/tests/nodes/compress_i32_3d_default.cairo +++ b/tests/nodes/compress_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_axis1.cairo b/tests/nodes/compress_i8_3d_axis1.cairo index 8fd8bb267..4ab02896a 100644 --- a/tests/nodes/compress_i8_3d_axis1.cairo +++ b/tests/nodes/compress_i8_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_axis2.cairo b/tests/nodes/compress_i8_3d_axis2.cairo index 220210744..f0dbaef06 100644 --- a/tests/nodes/compress_i8_3d_axis2.cairo +++ b/tests/nodes/compress_i8_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_default.cairo b/tests/nodes/compress_i8_3d_default.cairo index b802e589c..e4ad1fbc8 100644 --- a/tests/nodes/compress_i8_3d_default.cairo +++ b/tests/nodes/compress_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 
= input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis1.cairo b/tests/nodes/compress_u32_3d_axis1.cairo index 136f8b8ce..41a2adc63 100644 --- a/tests/nodes/compress_u32_3d_axis1.cairo +++ b/tests/nodes/compress_u32_3d_axis1.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis2.cairo b/tests/nodes/compress_u32_3d_axis2.cairo index 347e36676..801886380 100644 --- a/tests/nodes/compress_u32_3d_axis2.cairo +++ b/tests/nodes/compress_u32_3d_axis2.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis2_2.cairo b/tests/nodes/compress_u32_3d_axis2_2.cairo index abc515486..c5a20dbc2 100644 --- a/tests/nodes/compress_u32_3d_axis2_2.cairo +++ b/tests/nodes/compress_u32_3d_axis2_2.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis2_2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis3.cairo b/tests/nodes/compress_u32_3d_axis3.cairo index 10e1e507e..4edd5c8dc 100644 --- a/tests/nodes/compress_u32_3d_axis3.cairo +++ b/tests/nodes/compress_u32_3d_axis3.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis3() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = 
input_0.compress(condition:input_1, axis:Option::Some(3)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(3)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_default.cairo b/tests/nodes/compress_u32_3d_default.cairo index ce12adac8..32068f9b7 100644 --- a/tests/nodes/compress_u32_3d_default.cairo +++ b/tests/nodes/compress_u32_3d_default.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_axis1.cairo b/tests/nodes/gather_fp16x16_3d_axis1.cairo index 8c4af9664..429d085d4 100644 --- a/tests/nodes/gather_fp16x16_3d_axis1.cairo +++ b/tests/nodes/gather_fp16x16_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_axis2.cairo b/tests/nodes/gather_fp16x16_3d_axis2.cairo index 0b4f77ed8..cfb8a61d2 100644 --- a/tests/nodes/gather_fp16x16_3d_axis2.cairo +++ b/tests/nodes/gather_fp16x16_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_default.cairo b/tests/nodes/gather_fp16x16_3d_default.cairo index 91c9ebdd4..ee49aac75 100644 --- a/tests/nodes/gather_fp16x16_3d_default.cairo +++ b/tests/nodes/gather_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_default() { let input_1 = 
input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp8x23_3d_axis1.cairo b/tests/nodes/gather_fp8x23_3d_axis1.cairo index 6a5d1a046..c9c6dcf7f 100644 --- a/tests/nodes/gather_fp8x23_3d_axis1.cairo +++ b/tests/nodes/gather_fp8x23_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_fp8x23_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp8x23_3d_axis2.cairo b/tests/nodes/gather_fp8x23_3d_axis2.cairo index d5a913163..726411dd2 100644 --- a/tests/nodes/gather_fp8x23_3d_axis2.cairo +++ b/tests/nodes/gather_fp8x23_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_fp8x23_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp8x23_3d_default.cairo b/tests/nodes/gather_fp8x23_3d_default.cairo index 7f9492f8d..e844827f9 100644 --- a/tests/nodes/gather_fp8x23_3d_default.cairo +++ b/tests/nodes/gather_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i32_3d_axis1.cairo b/tests/nodes/gather_i32_3d_axis1.cairo index 8b1777d8f..6dbb78c47 100644 --- a/tests/nodes/gather_i32_3d_axis1.cairo +++ b/tests/nodes/gather_i32_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_i32_3d_axis1() { let 
input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i32_3d_axis2.cairo b/tests/nodes/gather_i32_3d_axis2.cairo index bdc557d7a..29bd217b3 100644 --- a/tests/nodes/gather_i32_3d_axis2.cairo +++ b/tests/nodes/gather_i32_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_i32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i32_3d_default.cairo b/tests/nodes/gather_i32_3d_default.cairo index 9288c3dab..4c0b9c9bd 100644 --- a/tests/nodes/gather_i32_3d_default.cairo +++ b/tests/nodes/gather_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i8_3d_axis1.cairo b/tests/nodes/gather_i8_3d_axis1.cairo index 10dd5ce6f..140608123 100644 --- a/tests/nodes/gather_i8_3d_axis1.cairo +++ b/tests/nodes/gather_i8_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_i8_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i8_3d_axis2.cairo b/tests/nodes/gather_i8_3d_axis2.cairo index 35f50077a..992cee33e 100644 --- a/tests/nodes/gather_i8_3d_axis2.cairo +++ b/tests/nodes/gather_i8_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_i8_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = 
output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i8_3d_default.cairo b/tests/nodes/gather_i8_3d_default.cairo index 5bc437a7b..0f8e6dec2 100644 --- a/tests/nodes/gather_i8_3d_default.cairo +++ b/tests/nodes/gather_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_i8_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo b/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo index 86de6e9b9..037d2ad93 100644 --- a/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp16x16_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo b/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo index d2ac3b2ce..3661bb6c5 100644 --- a/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo +++ b/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp16x16_3d_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp16x16_3d_default.cairo b/tests/nodes/gather_nd_fp16x16_3d_default.cairo index 157266adb..60f116c86 100644 --- 
a/tests/nodes/gather_nd_fp16x16_3d_default.cairo +++ b/tests/nodes/gather_nd_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo b/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo index 6da924b6c..c523e0135 100644 --- a/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp8x23_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo b/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo index 251d442ba..edb022910 100644 --- a/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo +++ b/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp8x23_3d_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp8x23_3d_default.cairo b/tests/nodes/gather_nd_fp8x23_3d_default.cairo index 8ce119604..70b25cea1 100644 --- a/tests/nodes/gather_nd_fp8x23_3d_default.cairo +++ b/tests/nodes/gather_nd_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = 
input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo b/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo index 1d275fb4a..923c7f9ba 100644 --- a/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i32_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo b/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo index 6bfa5cf4a..44ed06b2c 100644 --- a/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo +++ b/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i32_3d_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i32_3d_default.cairo b/tests/nodes/gather_nd_i32_3d_default.cairo index 4fa1c55f1..5268e13f4 100644 --- a/tests/nodes/gather_nd_i32_3d_default.cairo +++ b/tests/nodes/gather_nd_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo b/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo index b42d1a430..1d47f72ff 100644 --- a/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn 
test_gather_nd_i8_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i8_3d_default.cairo b/tests/nodes/gather_nd_i8_3d_default.cairo index 6ee8e0a9e..f9152f412 100644 --- a/tests/nodes/gather_nd_i8_3d_default.cairo +++ b/tests/nodes/gather_nd_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i8_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_batch_dims1.cairo b/tests/nodes/gather_nd_u32_batch_dims1.cairo index d1bfb099c..7689359ee 100644 --- a/tests/nodes/gather_nd_u32_batch_dims1.cairo +++ b/tests/nodes/gather_nd_u32_batch_dims1.cairo @@ -16,7 +16,7 @@ fn test_gather_nd_u32_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_batch_dims2.cairo b/tests/nodes/gather_nd_u32_batch_dims2.cairo index 2cd029255..4659cfaa7 100644 --- a/tests/nodes/gather_nd_u32_batch_dims2.cairo +++ b/tests/nodes/gather_nd_u32_batch_dims2.cairo @@ -16,7 +16,7 @@ fn test_gather_nd_u32_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_default.cairo b/tests/nodes/gather_nd_u32_default.cairo index 5893b5017..e226d0eb0 
100644 --- a/tests/nodes/gather_nd_u32_default.cairo +++ b/tests/nodes/gather_nd_u32_default.cairo @@ -16,7 +16,7 @@ fn test_gather_nd_u32_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_u32_3d_axis1.cairo b/tests/nodes/gather_u32_3d_axis1.cairo index 641d67f80..1a7a56d37 100644 --- a/tests/nodes/gather_u32_3d_axis1.cairo +++ b/tests/nodes/gather_u32_3d_axis1.cairo @@ -16,7 +16,7 @@ fn test_gather_u32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_u32_3d_axis2.cairo b/tests/nodes/gather_u32_3d_axis2.cairo index 94f91a138..30d5f6a61 100644 --- a/tests/nodes/gather_u32_3d_axis2.cairo +++ b/tests/nodes/gather_u32_3d_axis2.cairo @@ -16,7 +16,7 @@ fn test_gather_u32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_u32_3d_default.cairo b/tests/nodes/gather_u32_3d_default.cairo index 7931d3e27..8f223c4af 100644 --- a/tests/nodes/gather_u32_3d_default.cairo +++ b/tests/nodes/gather_u32_3d_default.cairo @@ -16,7 +16,7 @@ fn test_gather_u32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gemm_all_attributes.cairo b/tests/nodes/gemm_all_attributes.cairo index c543ddb3b..2cbd9cab3 100644 --- 
a/tests/nodes/gemm_all_attributes.cairo +++ b/tests/nodes/gemm_all_attributes.cairo @@ -18,7 +18,15 @@ fn test_gemm_all_attributes() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::Some(FixedTrait::new(16384, false)), Option::Some(FixedTrait::new(22938, false)), true, true); + let y = NNTrait::gemm( + input_0, + input_1, + Option::Some(input_2), + Option::Some(FixedTrait::new(16384, false)), + Option::Some(FixedTrait::new(22938, false)), + true, + true + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_alpha.cairo b/tests/nodes/gemm_alpha.cairo index 074392584..dad8187f4 100644 --- a/tests/nodes/gemm_alpha.cairo +++ b/tests/nodes/gemm_alpha.cairo @@ -16,7 +16,15 @@ fn test_gemm_alpha() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::Some(FixedTrait::new(32768, false)), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, + input_1, + Option::None(()), + Option::Some(FixedTrait::new(32768, false)), + Option::None(()), + false, + false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_beta.cairo b/tests/nodes/gemm_beta.cairo index 9ec8fe530..9f417e32a 100644 --- a/tests/nodes/gemm_beta.cairo +++ b/tests/nodes/gemm_beta.cairo @@ -18,7 +18,15 @@ fn test_gemm_beta() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::Some(FixedTrait::new(32768, false)), false, false); + let y = NNTrait::gemm( + input_0, + input_1, + Option::Some(input_2), + Option::None(()), + Option::Some(FixedTrait::new(32768, false)), + false, + false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_default_matrix_bias.cairo b/tests/nodes/gemm_default_matrix_bias.cairo index 76c6fff0c..16d00f933 100644 --- a/tests/nodes/gemm_default_matrix_bias.cairo +++ 
b/tests/nodes/gemm_default_matrix_bias.cairo @@ -18,7 +18,9 @@ fn test_gemm_default_matrix_bias() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_default_no_bias.cairo b/tests/nodes/gemm_default_no_bias.cairo index b702bcfc3..ea43cd0fe 100644 --- a/tests/nodes/gemm_default_no_bias.cairo +++ b/tests/nodes/gemm_default_no_bias.cairo @@ -16,7 +16,9 @@ fn test_gemm_default_no_bias() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_default_vector_bias.cairo b/tests/nodes/gemm_default_vector_bias.cairo index 7f4f2646b..24826f739 100644 --- a/tests/nodes/gemm_default_vector_bias.cairo +++ b/tests/nodes/gemm_default_vector_bias.cairo @@ -18,7 +18,9 @@ fn test_gemm_default_vector_bias() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_transposeA.cairo b/tests/nodes/gemm_transposeA.cairo index c0b49d799..76c4592e4 100644 --- a/tests/nodes/gemm_transposeA.cairo +++ b/tests/nodes/gemm_transposeA.cairo @@ -16,7 +16,9 @@ fn test_gemm_transposeA() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), 
Option::None(()), true, false); + let y = NNTrait::gemm( + input_0, input_1, Option::None(()), Option::None(()), Option::None(()), true, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_transposeB.cairo b/tests/nodes/gemm_transposeB.cairo index 4c7ccbef4..1728fd014 100644 --- a/tests/nodes/gemm_transposeB.cairo +++ b/tests/nodes/gemm_transposeB.cairo @@ -16,7 +16,9 @@ fn test_gemm_transposeB() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, true); + let y = NNTrait::gemm( + input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, true + ); assert_eq(y, z); } diff --git a/tests/nodes/hard_sigmoid_fp16x16.cairo b/tests/nodes/hard_sigmoid_fp16x16.cairo index 8a8f8672a..6ad8c8c6c 100644 --- a/tests/nodes/hard_sigmoid_fp16x16.cairo +++ b/tests/nodes/hard_sigmoid_fp16x16.cairo @@ -14,7 +14,9 @@ fn test_hard_sigmoid_fp16x16() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = NNTrait::hard_sigmoid(@input_0, @FixedTrait::new(13107, false), @FixedTrait::new(32768, false)); + let y = NNTrait::hard_sigmoid( + @input_0, @FixedTrait::new(13107, false), @FixedTrait::new(32768, false) + ); assert_eq(y, z); } diff --git a/tests/nodes/hard_sigmoid_fp8x23.cairo b/tests/nodes/hard_sigmoid_fp8x23.cairo index 317c25425..3697b1d7a 100644 --- a/tests/nodes/hard_sigmoid_fp8x23.cairo +++ b/tests/nodes/hard_sigmoid_fp8x23.cairo @@ -14,7 +14,9 @@ fn test_hard_sigmoid_fp8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = NNTrait::hard_sigmoid(@input_0, @FixedTrait::new(1677721, false), @FixedTrait::new(4194304, false)); + let y = NNTrait::hard_sigmoid( + @input_0, @FixedTrait::new(1677721, false), @FixedTrait::new(4194304, false) + ); assert_eq(y, z); } diff --git a/tests/nodes/is_nan_fp16x16/input_0.cairo b/tests/nodes/is_nan_fp16x16/input_0.cairo index 576456503..8c86af4fb 100644 --- 
a/tests/nodes/is_nan_fp16x16/input_0.cairo +++ b/tests/nodes/is_nan_fp16x16/input_0.cairo @@ -15,4 +15,4 @@ fn input_0() -> Tensor { data.append(FixedTrait::NaN()); data.append(FixedTrait::NaN()); TensorTrait::new(shape.span(), data.span()) -} \ No newline at end of file +} diff --git a/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo index 6931c44ec..93373e675 100644 --- a/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis0_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(0),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(0), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo index 1bdb8700d..72d384de1 100644 --- a/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis1_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(1),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(1), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo index 06505280b..44a5f550d 100644 --- 
a/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis2_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(2),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(2), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo index 4c095bf62..0b5b77e17 100644 --- a/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis_negative_1_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-1),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(-1), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo index 0be005ddd..5f632aa6e 100644 --- a/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis_negative_2_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = 
input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-2),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(-2), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo index e3c602e1f..d08c443f8 100644 --- a/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis_negative_3_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-3),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(-3), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis0.cairo b/tests/nodes/layer_normalization_4d_axis0.cairo index 45a825cd5..279acc624 100644 --- a/tests/nodes/layer_normalization_4d_axis0.cairo +++ b/tests/nodes/layer_normalization_4d_axis0.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis0() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(0),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(0), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis1.cairo b/tests/nodes/layer_normalization_4d_axis1.cairo index e7ee8885c..d8e00b332 100644 --- 
a/tests/nodes/layer_normalization_4d_axis1.cairo +++ b/tests/nodes/layer_normalization_4d_axis1.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis1() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(1),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(1), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis2.cairo b/tests/nodes/layer_normalization_4d_axis2.cairo index 3bd45e907..65b738957 100644 --- a/tests/nodes/layer_normalization_4d_axis2.cairo +++ b/tests/nodes/layer_normalization_4d_axis2.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis2() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(2),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(2), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis3.cairo b/tests/nodes/layer_normalization_4d_axis3.cairo index 4b173b4f6..fae5a51c7 100644 --- a/tests/nodes/layer_normalization_4d_axis3.cairo +++ b/tests/nodes/layer_normalization_4d_axis3.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis3() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(3),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(3), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_1.cairo b/tests/nodes/layer_normalization_4d_axis_negative_1.cairo index d7b04e192..2f879f988 100644 --- 
a/tests/nodes/layer_normalization_4d_axis_negative_1.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_1.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_1() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-1),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(-1), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_2.cairo b/tests/nodes/layer_normalization_4d_axis_negative_2.cairo index 5e17a8b52..718c97ad5 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_2.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_2.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_2() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(2),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(2), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_3.cairo b/tests/nodes/layer_normalization_4d_axis_negative_3.cairo index 4188eec6c..b97678d38 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_3.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_3.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_3() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-3),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(-3), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git 
a/tests/nodes/layer_normalization_4d_axis_negative_4.cairo b/tests/nodes/layer_normalization_4d_axis_negative_4.cairo index 5aa5971dc..94be87f32 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_4.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_4.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_4() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-4),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(-4), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_default_axis.cairo b/tests/nodes/layer_normalization_default_axis.cairo index dd792e731..994ab7106 100644 --- a/tests/nodes/layer_normalization_default_axis.cairo +++ b/tests/nodes/layer_normalization_default_axis.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_default_axis() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::None, Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_test.cairo b/tests/nodes/layer_normalization_test.cairo index 631dc6f46..ad8baa5f2 100644 --- a/tests/nodes/layer_normalization_test.cairo +++ b/tests/nodes/layer_normalization_test.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_test() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::None, Option::None, Option::None + ); assert_eq(y_0, 
z_0); } diff --git a/tests/nodes/scatter_fp16x16_3d_axis1.cairo b/tests/nodes/scatter_fp16x16_3d_axis1.cairo index b471e028c..5173d8bd7 100644 --- a/tests/nodes/scatter_fp16x16_3d_axis1.cairo +++ b/tests/nodes/scatter_fp16x16_3d_axis1.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp16x16_3d_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo b/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo index c6fc48b15..be927416d 100644 --- a/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo +++ b/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp16x16_3d_axis1_add() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('add')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('add') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp16x16_3d_default.cairo b/tests/nodes/scatter_fp16x16_3d_default.cairo index c14bbc0a6..b106de54d 100644 --- a/tests/nodes/scatter_fp16x16_3d_default.cairo +++ b/tests/nodes/scatter_fp16x16_3d_default.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp16x16_3d_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp8x23_axis1.cairo b/tests/nodes/scatter_fp8x23_axis1.cairo index 
e0008d409..8ff871c7b 100644 --- a/tests/nodes/scatter_fp8x23_axis1.cairo +++ b/tests/nodes/scatter_fp8x23_axis1.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp8x23_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp8x23_default.cairo b/tests/nodes/scatter_fp8x23_default.cairo index bdaea6568..157aca0bb 100644 --- a/tests/nodes/scatter_fp8x23_default.cairo +++ b/tests/nodes/scatter_fp8x23_default.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp8x23_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp8x23_mul.cairo b/tests/nodes/scatter_fp8x23_mul.cairo index 4430bf041..5b2305aee 100644 --- a/tests/nodes/scatter_fp8x23_mul.cairo +++ b/tests/nodes/scatter_fp8x23_mul.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp8x23_mul() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('mul')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('mul') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_i8_axis1.cairo b/tests/nodes/scatter_i8_axis1.cairo index e143463f1..c42123f3d 100644 --- a/tests/nodes/scatter_i8_axis1.cairo +++ b/tests/nodes/scatter_i8_axis1.cairo @@ -20,7 +20,13 @@ fn test_scatter_i8_axis1() { let input_2 = input_2::input_2(); let z = 
output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_i8_axis1_max.cairo b/tests/nodes/scatter_i8_axis1_max.cairo index 53dabbe40..844911a8d 100644 --- a/tests/nodes/scatter_i8_axis1_max.cairo +++ b/tests/nodes/scatter_i8_axis1_max.cairo @@ -20,7 +20,13 @@ fn test_scatter_i8_axis1_max() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('max')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('max') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_i8_default.cairo b/tests/nodes/scatter_i8_default.cairo index c41b29d7b..f658268ce 100644 --- a/tests/nodes/scatter_i8_default.cairo +++ b/tests/nodes/scatter_i8_default.cairo @@ -20,7 +20,13 @@ fn test_scatter_i8_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_u32_add.cairo b/tests/nodes/scatter_u32_add.cairo index 735b8fb5e..2b14d68d1 100644 --- a/tests/nodes/scatter_u32_add.cairo +++ b/tests/nodes/scatter_u32_add.cairo @@ -18,7 +18,13 @@ fn test_scatter_u32_add() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('add')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + 
reduction: Option::Some('add') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_u32_axis1.cairo b/tests/nodes/scatter_u32_axis1.cairo index e2a96e71b..2c85e2a6c 100644 --- a/tests/nodes/scatter_u32_axis1.cairo +++ b/tests/nodes/scatter_u32_axis1.cairo @@ -18,7 +18,13 @@ fn test_scatter_u32_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_u32_default.cairo b/tests/nodes/scatter_u32_default.cairo index 1ccdac72f..5fb16207c 100644 --- a/tests/nodes/scatter_u32_default.cairo +++ b/tests/nodes/scatter_u32_default.cairo @@ -18,7 +18,13 @@ fn test_scatter_u32_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/sequence_insert_fp16x16.cairo b/tests/nodes/sequence_insert_fp16x16.cairo index d30b0d3e1..70316ebb9 100644 --- a/tests/nodes/sequence_insert_fp16x16.cairo +++ b/tests/nodes/sequence_insert_fp16x16.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_fp16x16() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_fp8x23.cairo b/tests/nodes/sequence_insert_fp8x23.cairo index ad4d12be4..fb474c6d4 100644 --- a/tests/nodes/sequence_insert_fp8x23.cairo +++ b/tests/nodes/sequence_insert_fp8x23.cairo @@ -20,7 +20,7 @@ fn 
test_sequence_insert_fp8x23() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_i32.cairo b/tests/nodes/sequence_insert_i32.cairo index 3a397715d..7bcadba2d 100644 --- a/tests/nodes/sequence_insert_i32.cairo +++ b/tests/nodes/sequence_insert_i32.cairo @@ -18,7 +18,7 @@ fn test_sequence_insert_i32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_i8.cairo b/tests/nodes/sequence_insert_i8.cairo index a304ff2c4..ff1be34fe 100644 --- a/tests/nodes/sequence_insert_i8.cairo +++ b/tests/nodes/sequence_insert_i8.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_i8() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_u32.cairo b/tests/nodes/sequence_insert_u32.cairo index dcd905f72..079d6a4a0 100644 --- a/tests/nodes/sequence_insert_u32.cairo +++ b/tests/nodes/sequence_insert_u32.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_u32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_length_fp16x16.cairo b/tests/nodes/sequence_length_fp16x16.cairo index d971d5569..559ec3ff6 100644 --- a/tests/nodes/sequence_length_fp16x16.cairo +++ b/tests/nodes/sequence_length_fp16x16.cairo @@ -13,10 +13,10 @@ use orion::operators::sequence::SequenceTrait; 
#[test] #[available_gas(2000000000)] fn test_sequence_length_fp16x16() { - let input_0 = input_0::input_0(); + let input_0 = input_0::input_0(); let z = output_0::output_0(); let y = input_0.sequence_length(); assert_eq(y, z); -} +} diff --git a/tests/nodes/shrink_hard_fp16x16.cairo b/tests/nodes/shrink_hard_fp16x16.cairo index 0818844b2..2f5ec5312 100644 --- a/tests/nodes/shrink_hard_fp16x16.cairo +++ b/tests/nodes/shrink_hard_fp16x16.cairo @@ -15,7 +15,9 @@ fn test_shrink_hard_fp16x16() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::None(()), Option::Some(FixedTrait::new(65536, false))); + let y = TensorTrait::shrink( + input_0, Option::None(()), Option::Some(FixedTrait::new(65536, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/shrink_hard_fp8x23.cairo b/tests/nodes/shrink_hard_fp8x23.cairo index 3c054f433..c76eec1ec 100644 --- a/tests/nodes/shrink_hard_fp8x23.cairo +++ b/tests/nodes/shrink_hard_fp8x23.cairo @@ -15,7 +15,9 @@ fn test_shrink_hard_fp8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::None(()), Option::Some(FixedTrait::new(8388608, false))); + let y = TensorTrait::shrink( + input_0, Option::None(()), Option::Some(FixedTrait::new(8388608, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/shrink_soft_fp16x16.cairo b/tests/nodes/shrink_soft_fp16x16.cairo index 924ecfde5..aa975069c 100644 --- a/tests/nodes/shrink_soft_fp16x16.cairo +++ b/tests/nodes/shrink_soft_fp16x16.cairo @@ -15,7 +15,11 @@ fn test_shrink_soft_fp16x16() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::Some(FixedTrait::new(65536, false)), Option::Some(FixedTrait::new(65536, false))); + let y = TensorTrait::shrink( + input_0, + Option::Some(FixedTrait::new(65536, false)), + Option::Some(FixedTrait::new(65536, false)) + ); assert_eq(y, z); } diff --git 
a/tests/nodes/shrink_soft_fp8x23.cairo b/tests/nodes/shrink_soft_fp8x23.cairo index 01a314e10..8413beccd 100644 --- a/tests/nodes/shrink_soft_fp8x23.cairo +++ b/tests/nodes/shrink_soft_fp8x23.cairo @@ -15,7 +15,11 @@ fn test_shrink_soft_fp8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::Some(FixedTrait::new(8388608, false)), Option::Some(FixedTrait::new(8388608, false))); + let y = TensorTrait::shrink( + input_0, + Option::Some(FixedTrait::new(8388608, false)), + Option::Some(FixedTrait::new(8388608, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp16x16_2d.cairo b/tests/nodes/slice_fp16x16_2d.cairo index 5e3d593be..2a95e6e4b 100644 --- a/tests/nodes/slice_fp16x16_2d.cairo +++ b/tests/nodes/slice_fp16x16_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp16x16_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp16x16_3d.cairo b/tests/nodes/slice_fp16x16_3d.cairo index d0b5462c4..a681191ce 100644 --- a/tests/nodes/slice_fp16x16_3d.cairo +++ b/tests/nodes/slice_fp16x16_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp16x16_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp8x23_2d.cairo b/tests/nodes/slice_fp8x23_2d.cairo index 6a80a5422..56fed5a6a 100644 --- 
a/tests/nodes/slice_fp8x23_2d.cairo +++ b/tests/nodes/slice_fp8x23_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp8x23_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp8x23_3d.cairo b/tests/nodes/slice_fp8x23_3d.cairo index 5c2af30b7..fd5e95485 100644 --- a/tests/nodes/slice_fp8x23_3d.cairo +++ b/tests/nodes/slice_fp8x23_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp8x23_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i32_2d.cairo b/tests/nodes/slice_i32_2d.cairo index 082b8f15f..f26a2a809 100644 --- a/tests/nodes/slice_i32_2d.cairo +++ b/tests/nodes/slice_i32_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_i32_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i32_3d.cairo b/tests/nodes/slice_i32_3d.cairo index 1683e6987..16fd3f51b 100644 --- a/tests/nodes/slice_i32_3d.cairo +++ b/tests/nodes/slice_i32_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_i32_3d() { let input_0 = input_0::input_0(); let z = 
output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i8_2d.cairo b/tests/nodes/slice_i8_2d.cairo index fc7f35364..2dc5f6ab4 100644 --- a/tests/nodes/slice_i8_2d.cairo +++ b/tests/nodes/slice_i8_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_i8_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i8_3d.cairo b/tests/nodes/slice_i8_3d.cairo index ec8ea9ffd..a140d8681 100644 --- a/tests/nodes/slice_i8_3d.cairo +++ b/tests/nodes/slice_i8_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_i8_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_u32_2d.cairo b/tests/nodes/slice_u32_2d.cairo index 27678fc0c..c5ad63061 100644 --- a/tests/nodes/slice_u32_2d.cairo +++ b/tests/nodes/slice_u32_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_u32_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 
2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_u32_3d.cairo b/tests/nodes/slice_u32_3d.cairo index a3ca0e1bc..08a77cf55 100644 --- a/tests/nodes/slice_u32_3d.cairo +++ b/tests/nodes/slice_u32_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_u32_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/where_fp16x16.cairo b/tests/nodes/where_fp16x16.cairo index 05467ef51..ae3416d67 100644 --- a/tests/nodes/where_fp16x16.cairo +++ b/tests/nodes/where_fp16x16.cairo @@ -18,7 +18,7 @@ fn test_where_fp16x16() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_fp16x16_broadcast.cairo b/tests/nodes/where_fp16x16_broadcast.cairo index b0d9b9faa..5df239b78 100644 --- a/tests/nodes/where_fp16x16_broadcast.cairo +++ b/tests/nodes/where_fp16x16_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_fp16x16_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_fp8x23.cairo b/tests/nodes/where_fp8x23.cairo index 8661bf163..492db3766 100644 --- a/tests/nodes/where_fp8x23.cairo +++ b/tests/nodes/where_fp8x23.cairo @@ -18,7 +18,7 @@ fn test_where_fp8x23() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff 
--git a/tests/nodes/where_fp8x23_broadcast.cairo b/tests/nodes/where_fp8x23_broadcast.cairo index 771c00bf4..112f9ef74 100644 --- a/tests/nodes/where_fp8x23_broadcast.cairo +++ b/tests/nodes/where_fp8x23_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_fp8x23_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i32.cairo b/tests/nodes/where_i32.cairo index 1662b010d..a455f8ac1 100644 --- a/tests/nodes/where_i32.cairo +++ b/tests/nodes/where_i32.cairo @@ -18,7 +18,7 @@ fn test_where_i32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i32_broadcast.cairo b/tests/nodes/where_i32_broadcast.cairo index 53aaf91e2..62891b235 100644 --- a/tests/nodes/where_i32_broadcast.cairo +++ b/tests/nodes/where_i32_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_i32_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i8.cairo b/tests/nodes/where_i8.cairo index 0627fd33b..6f54a1271 100644 --- a/tests/nodes/where_i8.cairo +++ b/tests/nodes/where_i8.cairo @@ -18,7 +18,7 @@ fn test_where_i8() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i8_broadcast.cairo b/tests/nodes/where_i8_broadcast.cairo index 69e02821f..4bcb86a3d 100644 --- a/tests/nodes/where_i8_broadcast.cairo +++ b/tests/nodes/where_i8_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_i8_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = 
input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_u32.cairo b/tests/nodes/where_u32.cairo index a14d685ac..5f8a3119a 100644 --- a/tests/nodes/where_u32.cairo +++ b/tests/nodes/where_u32.cairo @@ -18,7 +18,7 @@ fn test_where_u32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_u32_broadcast.cairo b/tests/nodes/where_u32_broadcast.cairo index b810f7143..4aedc56a1 100644 --- a/tests/nodes/where_u32_broadcast.cairo +++ b/tests/nodes/where_u32_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_u32_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/operators/qlinear_add_test.cairo b/tests/operators/qlinear_add_test.cairo index 3163fb8e6..fe7f2af47 100644 --- a/tests/operators/qlinear_add_test.cairo +++ b/tests/operators/qlinear_add_test.cairo @@ -13,33 +13,13 @@ fn qlinearadd_test() { i8 >::new( shape: array![4, 2].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 >::new( shape: array![4, 2].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - 10_i8, - 12_i8, - 14_i8, - 16_i8 - ] - .span(), + data: array![2_i8, 4_i8, 6_i8, 8_i8, 10_i8, 12_i8, 14_i8, 16_i8].span(), ); let a_scale = TensorTrait::< @@ -82,30 +62,11 @@ fn qlinearadd_broadcast_test() { i8 >::new( shape: array![2, 4].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 - >::new( - shape: array![1, 4].span(), - data: array![ - 
2_i8, - 4_i8, - 6_i8, - 8_i8, - ] - .span(), - ); + >::new(shape: array![1, 4].span(), data: array![2_i8, 4_i8, 6_i8, 8_i8,].span(),); let a_scale = TensorTrait::< FP16x16 @@ -146,29 +107,10 @@ fn qlinearadd_broadcast_test() { fn test_example_doc() { let a = TensorTrait::< i8 - >::new( - shape: array![2, 3].span(), - data: array![ - 6_i8, - 6_i8, - 6_i8, - 11_i8, - 11_i8, - 11_i8 - ] - .span(), - ); + >::new(shape: array![2, 3].span(), data: array![6_i8, 6_i8, 6_i8, 11_i8, 11_i8, 11_i8].span(),); let b = TensorTrait::< i8 - >::new( - shape: array![1, 3].span(), - data: array![ - 40_i8, - 40_i8, - 40_i8 - ] - .span(), - ); + >::new(shape: array![1, 3].span(), data: array![40_i8, 40_i8, 40_i8].span(),); let a_scale = TensorTrait::< FP16x16 diff --git a/tests/operators/qlinear_concat_test.cairo b/tests/operators/qlinear_concat_test.cairo index 101cefaa8..4c86b3ff8 100644 --- a/tests/operators/qlinear_concat_test.cairo +++ b/tests/operators/qlinear_concat_test.cairo @@ -19,28 +19,10 @@ fn print_span(mut span: Span) { fn qlinear_concat_test() { let tensor1 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 10_i8, - 20_i8, - 30_i8, - 40_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![10_i8, 20_i8, 30_i8, 40_i8,].span(),); let tensor2 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 20_i8, - 40_i8, - 60_i8, - 80_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![20_i8, 40_i8, 60_i8, 80_i8,].span(),); let tensors = array![tensor1, tensor2].span(); @@ -90,40 +72,13 @@ fn qlinear_concat_test() { fn qlinear_concat_test_shape() { let tensor1 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 2_i8, - 2_i8, - 2_i8, - 2_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![2_i8, 2_i8, 2_i8, 2_i8,].span(),); let tensor2 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 8_i8, - 8_i8, - 8_i8, - 8_i8, - ] - 
.span(), - ); + >::new(shape: array![2, 2].span(), data: array![8_i8, 8_i8, 8_i8, 8_i8,].span(),); let tensor3 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 10_i8, - 10_i8, - 10_i8, - 10_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![10_i8, 10_i8, 10_i8, 10_i8,].span(),); let tensors = array![tensor1, tensor2, tensor3].span(); @@ -177,28 +132,10 @@ fn qlinear_concat_test_shape() { fn qlinear_concat_example_doc() { let tensor1 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 5_i8, - 5_i8, - 5_i8, - 5_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![5_i8, 5_i8, 5_i8, 5_i8,].span(),); let tensor2 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 1_i8, - 1_i8, - 1_i8, - 1_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![1_i8, 1_i8, 1_i8, 1_i8,].span(),); let tensors = array![tensor1, tensor2].span(); diff --git a/tests/operators/qlinear_leakyrelu_test.cairo b/tests/operators/qlinear_leakyrelu_test.cairo index 9e6473d06..e180ab33b 100644 --- a/tests/operators/qlinear_leakyrelu_test.cairo +++ b/tests/operators/qlinear_leakyrelu_test.cairo @@ -12,15 +12,7 @@ fn qlinear_leakyrelu_test() { i8 >::new( shape: array![2, 3].span(), - data: array![ - -10_i8, - -10_i8, - -10_i8, - 10_i8, - 10_i8, - 10_i8 - ] - .span(), + data: array![-10_i8, -10_i8, -10_i8, 10_i8, 10_i8, 10_i8].span(), ); let a_scale = TensorTrait::< diff --git a/tests/operators/qlinear_matmul_test.cairo b/tests/operators/qlinear_matmul_test.cairo index bfbe04714..9d3f8fa4b 100644 --- a/tests/operators/qlinear_matmul_test.cairo +++ b/tests/operators/qlinear_matmul_test.cairo @@ -15,36 +15,13 @@ fn qlinearmatmul_2D_test() { i8 >::new( shape: array![2, 4].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< 
i8 >::new( shape: array![4, 3].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - 10_i8, - 12_i8, - 14_i8, - 16_i8, - 18_i8, - 20_i8, - 22_i8, - 24_i8 - ] + data: array![2_i8, 4_i8, 6_i8, 8_i8, 10_i8, 12_i8, 14_i8, 16_i8, 18_i8, 20_i8, 22_i8, 24_i8] .span(), ); @@ -90,18 +67,7 @@ fn qlinearmatmul_3D_test() { >::new( shape: array![2, 2, 3].span(), data: array![ - -1_i8, - -2_i8, - -2_i8, - -3_i8, - -4_i8, - -4_i8, - -5_i8, - -6_i8, - -6_i8, - -7_i8, - -8_i8, - -8_i8 + -1_i8, -2_i8, -2_i8, -3_i8, -4_i8, -4_i8, -5_i8, -6_i8, -6_i8, -7_i8, -8_i8, -8_i8 ] .span(), ); @@ -110,18 +76,7 @@ fn qlinearmatmul_3D_test() { >::new( shape: array![2, 3, 2].span(), data: array![ - -2_i8, - -4_i8, - -6_i8, - -8_i8, - -10_i8, - -12_i8, - -2_i8, - -4_i8, - -6_i8, - -8_i8, - -10_i8, - -12_i8 + -2_i8, -4_i8, -6_i8, -8_i8, -10_i8, -12_i8, -2_i8, -4_i8, -6_i8, -8_i8, -10_i8, -12_i8 ] .span(), ); @@ -167,29 +122,10 @@ fn qlinearmatmul_3D_test() { fn test_example_doc() { let a = TensorTrait::< i8 - >::new( - shape: array![2, 3].span(), - data: array![ - 3_i8, - 4_i8, - 5_i8, - 2_i8, - 4_i8, - 3_i8 - ] - .span(), - ); + >::new(shape: array![2, 3].span(), data: array![3_i8, 4_i8, 5_i8, 2_i8, 4_i8, 3_i8].span(),); let b = TensorTrait::< i8 - >::new( - shape: array![3, 1].span(), - data: array![ - 4_i8, - 8_i8, - 4_i8 - ] - .span(), - ); + >::new(shape: array![3, 1].span(), data: array![4_i8, 8_i8, 4_i8].span(),); let a_scale = TensorTrait::< FP16x16 diff --git a/tests/operators/qlinear_mul_test.cairo b/tests/operators/qlinear_mul_test.cairo index 6bf292bcc..3babc1800 100644 --- a/tests/operators/qlinear_mul_test.cairo +++ b/tests/operators/qlinear_mul_test.cairo @@ -14,40 +14,14 @@ fn qlinearmul_test() { i8 >::new( shape: array![4, 3].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8, - 9_i8, - 10_i8, - 11_i8, - 12_i8 - ] + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8, 9_i8, 10_i8, 11_i8, 12_i8] .span(), ); let b = TensorTrait::< i8 
>::new( shape: array![4, 3].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - 10_i8, - 12_i8, - 14_i8, - 16_i8, - 18_i8, - 20_i8, - 22_i8, - 24_i8 - ] + data: array![2_i8, 4_i8, 6_i8, 8_i8, 10_i8, 12_i8, 14_i8, 16_i8, 18_i8, 20_i8, 22_i8, 24_i8] .span(), ); @@ -96,30 +70,11 @@ fn qlinear_mul_broadcast_test() { i8 >::new( shape: array![2, 4].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 - >::new( - shape: array![1, 4].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - ] - .span(), - ); + >::new(shape: array![1, 4].span(), data: array![2_i8, 4_i8, 6_i8, 8_i8,].span(),); let a_scale = TensorTrait::< FP16x16 @@ -161,28 +116,11 @@ fn test_example_doc() { let a = TensorTrait::< i8 >::new( - shape: array![2, 3].span(), - data: array![ - 21_i8, - 21_i8, - 21_i8, - 41_i8, - 41_i8, - 41_i8 - ] - .span(), + shape: array![2, 3].span(), data: array![21_i8, 21_i8, 21_i8, 41_i8, 41_i8, 41_i8].span(), ); let b = TensorTrait::< i8 - >::new( - shape: array![1, 3].span(), - data: array![ - 4_i8, - 8_i8, - 12_i8 - ] - .span(), - ); + >::new(shape: array![1, 3].span(), data: array![4_i8, 8_i8, 12_i8].span(),); let a_scale = TensorTrait::< FP16x16 From 3d2d13d875e6b9dcf180882e173dee1459ec3cb7 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Tue, 6 Feb 2024 05:07:38 +0100 Subject: [PATCH 28/46] fix: wrong path --- docs/framework/compatibility.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index 2a3adb9ee..e4beb1e7d 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -43,6 +43,7 @@ You can see below the list of current supported ONNX Operators: | [Softplus](operators/neural-network/nn.softplus.md) | :white\_check\_mark: | | [Linear](operators/neural-network/nn.linear.md) | :white\_check\_mark: | | 
[HardSigmoid](operators/neural-network/nn.hard\_sigmoid.md) | :white\_check\_mark: | +| [Conv](operators/neural-network/nn.conv.md) | :white\_check\_mark: | | [Sinh](operators/tensor/tensor.sinh.md) | :white\_check\_mark: | | [Asinh](operators/tensor/tensor.asinh.md) | :white\_check\_mark: | | [Atanh](operators/tensor/tensor.atanh.md) | :white\_check\_mark: | @@ -108,6 +109,5 @@ You can see below the list of current supported ONNX Operators: | [Erf](operators/tensor/tensor.erf.md) | :white\_check\_mark: | | [Compress](operators/tensor/tensor.compress.md) | :white\_check\_mark: | | [Layer_normalization](operators/tensor/tensor.layer_normalization.md) | :white\_check\_mark: | -| [Conv](operators/tensor/tensor.conv.md) | :white\_check\_mark: | Current Operators support: **97/156 (62%)** From fd7454d1892f077904fdf5b0afb61cb8b6e23b13 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Fri, 9 Feb 2024 11:09:22 +0100 Subject: [PATCH 29/46] modif from review --- src/operators/nn/functional/conv.cairo | 67 ++++++++++++++------------ 1 file changed, 36 insertions(+), 31 deletions(-) diff --git a/src/operators/nn/functional/conv.cairo b/src/operators/nn/functional/conv.cairo index 444bcb6ed..c0d5cb2e1 100644 --- a/src/operators/nn/functional/conv.cairo +++ b/src/operators/nn/functional/conv.cairo @@ -6,6 +6,7 @@ use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl}; use orion::operators::tensor::core::{stride}; +use core::clone::Clone; use core::debug::PrintTrait; @@ -39,6 +40,8 @@ fn conv< pads: Option>, strides: Option>, ) -> Tensor { + let nd = (*X).shape.len() - 2; + assert((*X).shape.len() >= 3, 'X must have at least 3 dim'); let dilations = match dilations { Option::Some(dilations) => dilations, @@ -112,8 +115,11 @@ fn conv< }; if group > 1 { + let sN = *(*X).shape.at(0); + let mut res_b = ArrayTrait::new(); let mut res_cv = ArrayTrait::new(); 
+ let mut td = 0; let mg = *(*W).shape.at(0) / group; let dw = *(*W).shape.at(1); @@ -144,7 +150,7 @@ fn conv< let mut b = 0; loop { - if b == *(*X).shape.at(0) { + if b == sN { break; } let mut g = 0; @@ -188,7 +194,7 @@ fn conv< let res_b = res_b.span(); let res_cv = res_cv.span(); - let mut final_shape = array![*(*X).shape.at(0), td]; + let mut final_shape = array![sN, td]; let mut cv = *res_cv.at(0); @@ -269,7 +275,7 @@ fn conv< } // group == 1 - if *dilations.at(0) != 1 || min(dilations) != max(dilations) { + if *dilations.at(0) != 1 || min(dilations.clone()) != max(dilations.clone()) { // computation of the dilated kernel let nd = dilations.len(); let mut new_kernel_shape = ArrayTrait::new(); @@ -357,7 +363,7 @@ fn conv< let mut tail = ArrayTrait::new(); let mut i = 0; loop { - if i == (*X).shape.len() - 2 { + if i == nd { break; } let d = *(*X).shape.at(i); @@ -378,7 +384,7 @@ fn conv< let mut tail = ArrayTrait::new(); let mut i = 0; loop { - if i == (*X).shape.len() - 2 { + if i == nd { break; } let d = *(*X).shape.at(i); @@ -399,7 +405,7 @@ fn conv< let mut tail = ArrayTrait::new(); let mut i = 0; loop { - if i == (*X).shape.len() - 2 { + if i == nd { break; } let d = *(*X).shape.at(i); @@ -1052,7 +1058,6 @@ fn conv< } // if (*X).shape.len() > 5 - let nd = (*X).shape.len() - 2; let sN = *(*X).shape.at(0); let sC = *(*X).shape.at(1); @@ -1403,41 +1408,41 @@ fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul< } -fn min(a: Span) -> usize { +fn min(mut a: Span) -> usize { assert(a.len() > 0, 'span cannot be empty'); let mut min = *a.at(0); - let mut i = 0; loop { - if i == a.len() { - break; - } - let item = *a.at(i); - if item < min { - min = item; - } - i += 1; - }; - return min; + match a.pop_front() { + Option::Some(v) => { + if *v < min { + min = *v; + }; + }, + Option::None => { + break min; + } + }; + } } -fn max(a: Span) -> usize { +fn max(mut a: Span) -> usize { assert(a.len() > 0, 'span cannot be empty'); let mut max = *a.at(0); - let mut i = 0; loop { 
- if i == a.len() { - break; - } - let item = *a.at(i); - if item > max { - max = item; - } - i += 1; - }; - return max; + match a.pop_front() { + Option::Some(v) => { + if *v > max { + max = *v; + }; + }, + Option::None => { + break max; + } + }; + } } fn arange(start: usize, end: usize, step: usize) -> Span { From 33318b9cd30faee0cdaabcc259b3773995b0bebe Mon Sep 17 00:00:00 2001 From: chachaleo Date: Fri, 9 Feb 2024 12:20:48 +0100 Subject: [PATCH 30/46] feat: grid sample --- docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 1 + .../operators/neural-network/README.md | 1 + .../operators/neural-network/nn.gemm.md | 11 +- .../neural-network/nn.grid_sample.md | 97 ++ nodegen/node/grid_sample.py | 700 +++++++++++++ src/numbers.cairo | 69 +- .../implementations/fp16x16/core.cairo | 3 +- .../implementations/fp16x16wide/core.cairo | 2 +- .../implementations/fp32x32/core.cairo | 3 +- .../implementations/fp64x64/core.cairo | 3 +- .../implementations/fp8x23/core.cairo | 4 +- .../implementations/fp8x23wide/core.cairo | 2 +- src/operators/nn/core.cairo | 106 ++ src/operators/nn/functional.cairo | 1 + src/operators/nn/functional/grid_sample.cairo | 986 ++++++++++++++++++ .../nn/implementations/nn_fp16x16.cairo | 10 + .../nn/implementations/nn_fp32x32.cairo | 10 + .../nn/implementations/nn_fp64x64.cairo | 10 + .../nn/implementations/nn_fp8x23.cairo | 10 + src/operators/nn/implementations/nn_i32.cairo | 10 + src/operators/nn/implementations/nn_i8.cairo | 10 + src/operators/nn/implementations/nn_u32.cairo | 10 + .../sequence/functional/sequence_at.cairo | 4 +- .../sequence/functional/sequence_erase.cairo | 3 +- .../sequence/functional/sequence_insert.cairo | 4 +- src/operators/tensor/core.cairo | 8 +- src/operators/tensor/helpers.cairo | 2 +- .../tensor/implementations/tensor_i32.cairo | 10 +- .../tensor/implementations/tensor_i8.cairo | 2 +- .../tensor/math/layer_normalization.cairo | 3 +- src/test_helper/tensor/i32.cairo | 3 +- src/test_helper/tensor/i8.cairo | 3 +- 
tests/nodes.cairo | 8 + tests/nodes/clip_fp16x16_2d.cairo | 6 +- tests/nodes/clip_fp16x16_3d.cairo | 6 +- tests/nodes/clip_fp8x23_2d.cairo | 6 +- tests/nodes/clip_fp8x23_3d.cairo | 6 +- tests/nodes/compress_fp16x16_3d_axis1.cairo | 2 +- tests/nodes/compress_fp16x16_3d_axis2.cairo | 2 +- tests/nodes/compress_fp16x16_3d_axis3.cairo | 2 +- tests/nodes/compress_fp16x16_3d_default.cairo | 2 +- tests/nodes/compress_fp16x16_3d_noaxis.cairo | 2 +- tests/nodes/compress_fp8x23_3d_axis1.cairo | 2 +- tests/nodes/compress_fp8x23_3d_axis2.cairo | 2 +- tests/nodes/compress_fp8x23_3d_default.cairo | 2 +- tests/nodes/compress_i32_3d_axis1.cairo | 2 +- tests/nodes/compress_i32_3d_axis2.cairo | 2 +- tests/nodes/compress_i32_3d_default.cairo | 2 +- tests/nodes/compress_i8_3d_axis1.cairo | 2 +- tests/nodes/compress_i8_3d_axis2.cairo | 2 +- tests/nodes/compress_i8_3d_default.cairo | 2 +- tests/nodes/compress_u32_3d_axis1.cairo | 2 +- tests/nodes/compress_u32_3d_axis2.cairo | 2 +- tests/nodes/compress_u32_3d_axis2_2.cairo | 2 +- tests/nodes/compress_u32_3d_axis3.cairo | 2 +- tests/nodes/compress_u32_3d_default.cairo | 2 +- tests/nodes/gather_fp16x16_3d_axis1.cairo | 2 +- tests/nodes/gather_fp16x16_3d_axis2.cairo | 2 +- tests/nodes/gather_fp16x16_3d_default.cairo | 2 +- tests/nodes/gather_fp8x23_3d_axis1.cairo | 2 +- tests/nodes/gather_fp8x23_3d_axis2.cairo | 2 +- tests/nodes/gather_fp8x23_3d_default.cairo | 2 +- tests/nodes/gather_i32_3d_axis1.cairo | 2 +- tests/nodes/gather_i32_3d_axis2.cairo | 2 +- tests/nodes/gather_i32_3d_default.cairo | 2 +- tests/nodes/gather_i8_3d_axis1.cairo | 2 +- tests/nodes/gather_i8_3d_axis2.cairo | 2 +- tests/nodes/gather_i8_3d_default.cairo | 2 +- .../gather_nd_fp16x16_3d_batch_dims1.cairo | 2 +- .../gather_nd_fp16x16_3d_batch_dims2.cairo | 2 +- .../nodes/gather_nd_fp16x16_3d_default.cairo | 2 +- .../gather_nd_fp8x23_3d_batch_dims1.cairo | 2 +- .../gather_nd_fp8x23_3d_batch_dims2.cairo | 2 +- tests/nodes/gather_nd_fp8x23_3d_default.cairo | 2 +- 
.../nodes/gather_nd_i32_3d_batch_dims1.cairo | 2 +- .../nodes/gather_nd_i32_3d_batch_dims2.cairo | 2 +- tests/nodes/gather_nd_i32_3d_default.cairo | 2 +- tests/nodes/gather_nd_i8_3d_batch_dims1.cairo | 2 +- tests/nodes/gather_nd_i8_3d_default.cairo | 2 +- tests/nodes/gather_nd_u32_batch_dims1.cairo | 2 +- tests/nodes/gather_nd_u32_batch_dims2.cairo | 2 +- tests/nodes/gather_nd_u32_default.cairo | 2 +- tests/nodes/gather_u32_3d_axis1.cairo | 2 +- tests/nodes/gather_u32_3d_axis2.cairo | 2 +- tests/nodes/gather_u32_3d_default.cairo | 2 +- tests/nodes/gemm_all_attributes.cairo | 10 +- tests/nodes/gemm_alpha.cairo | 10 +- tests/nodes/gemm_beta.cairo | 10 +- tests/nodes/gemm_default_matrix_bias.cairo | 4 +- tests/nodes/gemm_default_no_bias.cairo | 4 +- tests/nodes/gemm_default_vector_bias.cairo | 4 +- tests/nodes/gemm_transposeA.cairo | 4 +- tests/nodes/gemm_transposeB.cairo | 4 +- tests/nodes/grid_sample.cairo | 22 + tests/nodes/grid_sample/input_0.cairo | 31 + tests/nodes/grid_sample/input_1.cairo | 87 ++ tests/nodes/grid_sample/output_0.cairo | 51 + tests/nodes/grid_sample_aligncorners.cairo | 22 + .../grid_sample_aligncorners/input_0.cairo | 21 + .../grid_sample_aligncorners/input_1.cairo | 31 + .../grid_sample_aligncorners/output_0.cairo | 23 + tests/nodes/grid_sample_cubic.cairo | 25 + tests/nodes/grid_sample_cubic/input_0.cairo | 21 + tests/nodes/grid_sample_cubic/input_1.cairo | 31 + tests/nodes/grid_sample_cubic/output_0.cairo | 23 + tests/nodes/grid_sample_nearest.cairo | 25 + tests/nodes/grid_sample_nearest/input_0.cairo | 21 + tests/nodes/grid_sample_nearest/input_1.cairo | 31 + .../nodes/grid_sample_nearest/output_0.cairo | 23 + .../grid_sample_nearest_aligncorner.cairo | 25 + .../input_0.cairo | 21 + .../input_1.cairo | 31 + .../output_0.cairo | 23 + tests/nodes/grid_sample_padding_border.cairo | 25 + .../grid_sample_padding_border/input_0.cairo | 21 + .../grid_sample_padding_border/input_1.cairo | 31 + .../grid_sample_padding_border/output_0.cairo | 23 + 
.../grid_sample_padding_reflection.cairo | 25 + .../input_0.cairo | 21 + .../input_1.cairo | 31 + .../output_0.cairo | 23 + tests/nodes/grid_sample_padding_zeros.cairo | 23 + .../grid_sample_padding_zeros/input_0.cairo | 21 + .../grid_sample_padding_zeros/input_1.cairo | 31 + .../grid_sample_padding_zeros/output_0.cairo | 23 + tests/nodes/hard_sigmoid_fp16x16.cairo | 4 +- tests/nodes/hard_sigmoid_fp8x23.cairo | 4 +- tests/nodes/is_nan_fp16x16/input_0.cairo | 2 +- ...layer_normalization_3d_axis0_epsilon.cairo | 9 +- ...layer_normalization_3d_axis1_epsilon.cairo | 9 +- ...layer_normalization_3d_axis2_epsilon.cairo | 9 +- ...alization_3d_axis_negative_1_epsilon.cairo | 9 +- ...alization_3d_axis_negative_2_epsilon.cairo | 9 +- ...alization_3d_axis_negative_3_epsilon.cairo | 9 +- .../nodes/layer_normalization_4d_axis0.cairo | 5 +- .../nodes/layer_normalization_4d_axis1.cairo | 5 +- .../nodes/layer_normalization_4d_axis2.cairo | 5 +- .../nodes/layer_normalization_4d_axis3.cairo | 5 +- ...yer_normalization_4d_axis_negative_1.cairo | 5 +- ...yer_normalization_4d_axis_negative_2.cairo | 5 +- ...yer_normalization_4d_axis_negative_3.cairo | 5 +- ...yer_normalization_4d_axis_negative_4.cairo | 5 +- .../layer_normalization_default_axis.cairo | 5 +- tests/nodes/layer_normalization_test.cairo | 5 +- tests/nodes/scatter_fp16x16_3d_axis1.cairo | 8 +- .../nodes/scatter_fp16x16_3d_axis1_add.cairo | 8 +- tests/nodes/scatter_fp16x16_3d_default.cairo | 8 +- tests/nodes/scatter_fp8x23_axis1.cairo | 8 +- tests/nodes/scatter_fp8x23_default.cairo | 8 +- tests/nodes/scatter_fp8x23_mul.cairo | 8 +- tests/nodes/scatter_i8_axis1.cairo | 8 +- tests/nodes/scatter_i8_axis1_max.cairo | 8 +- tests/nodes/scatter_i8_default.cairo | 8 +- tests/nodes/scatter_u32_add.cairo | 8 +- tests/nodes/scatter_u32_axis1.cairo | 8 +- tests/nodes/scatter_u32_default.cairo | 8 +- tests/nodes/sequence_insert_fp16x16.cairo | 2 +- tests/nodes/sequence_insert_fp8x23.cairo | 2 +- tests/nodes/sequence_insert_i32.cairo | 2 
+- tests/nodes/sequence_insert_i8.cairo | 2 +- tests/nodes/sequence_insert_u32.cairo | 2 +- tests/nodes/sequence_length_fp16x16.cairo | 4 +- tests/nodes/shrink_hard_fp16x16.cairo | 4 +- tests/nodes/shrink_hard_fp8x23.cairo | 4 +- tests/nodes/shrink_soft_fp16x16.cairo | 6 +- tests/nodes/shrink_soft_fp8x23.cairo | 6 +- tests/nodes/slice_fp16x16_2d.cairo | 8 +- tests/nodes/slice_fp16x16_3d.cairo | 8 +- tests/nodes/slice_fp8x23_2d.cairo | 8 +- tests/nodes/slice_fp8x23_3d.cairo | 8 +- tests/nodes/slice_i32_2d.cairo | 8 +- tests/nodes/slice_i32_3d.cairo | 8 +- tests/nodes/slice_i8_2d.cairo | 8 +- tests/nodes/slice_i8_3d.cairo | 8 +- tests/nodes/slice_u32_2d.cairo | 8 +- tests/nodes/slice_u32_3d.cairo | 8 +- tests/nodes/where_fp16x16.cairo | 2 +- tests/nodes/where_fp16x16_broadcast.cairo | 2 +- tests/nodes/where_fp8x23.cairo | 2 +- tests/nodes/where_fp8x23_broadcast.cairo | 2 +- tests/nodes/where_i32.cairo | 2 +- tests/nodes/where_i32_broadcast.cairo | 2 +- tests/nodes/where_i8.cairo | 2 +- tests/nodes/where_i8_broadcast.cairo | 2 +- tests/nodes/where_u32.cairo | 2 +- tests/nodes/where_u32_broadcast.cairo | 2 +- tests/operators/qlinear_add_test.cairo | 70 +- tests/operators/qlinear_concat_test.cairo | 77 +- tests/operators/qlinear_leakyrelu_test.cairo | 10 +- tests/operators/qlinear_matmul_test.cairo | 76 +- tests/operators/qlinear_mul_test.cairo | 74 +- 192 files changed, 3342 insertions(+), 475 deletions(-) create mode 100644 docs/framework/operators/neural-network/nn.grid_sample.md create mode 100644 nodegen/node/grid_sample.py create mode 100644 src/operators/nn/functional/grid_sample.cairo create mode 100644 tests/nodes/grid_sample.cairo create mode 100644 tests/nodes/grid_sample/input_0.cairo create mode 100644 tests/nodes/grid_sample/input_1.cairo create mode 100644 tests/nodes/grid_sample/output_0.cairo create mode 100644 tests/nodes/grid_sample_aligncorners.cairo create mode 100644 tests/nodes/grid_sample_aligncorners/input_0.cairo create mode 100644 
tests/nodes/grid_sample_aligncorners/input_1.cairo create mode 100644 tests/nodes/grid_sample_aligncorners/output_0.cairo create mode 100644 tests/nodes/grid_sample_cubic.cairo create mode 100644 tests/nodes/grid_sample_cubic/input_0.cairo create mode 100644 tests/nodes/grid_sample_cubic/input_1.cairo create mode 100644 tests/nodes/grid_sample_cubic/output_0.cairo create mode 100644 tests/nodes/grid_sample_nearest.cairo create mode 100644 tests/nodes/grid_sample_nearest/input_0.cairo create mode 100644 tests/nodes/grid_sample_nearest/input_1.cairo create mode 100644 tests/nodes/grid_sample_nearest/output_0.cairo create mode 100644 tests/nodes/grid_sample_nearest_aligncorner.cairo create mode 100644 tests/nodes/grid_sample_nearest_aligncorner/input_0.cairo create mode 100644 tests/nodes/grid_sample_nearest_aligncorner/input_1.cairo create mode 100644 tests/nodes/grid_sample_nearest_aligncorner/output_0.cairo create mode 100644 tests/nodes/grid_sample_padding_border.cairo create mode 100644 tests/nodes/grid_sample_padding_border/input_0.cairo create mode 100644 tests/nodes/grid_sample_padding_border/input_1.cairo create mode 100644 tests/nodes/grid_sample_padding_border/output_0.cairo create mode 100644 tests/nodes/grid_sample_padding_reflection.cairo create mode 100644 tests/nodes/grid_sample_padding_reflection/input_0.cairo create mode 100644 tests/nodes/grid_sample_padding_reflection/input_1.cairo create mode 100644 tests/nodes/grid_sample_padding_reflection/output_0.cairo create mode 100644 tests/nodes/grid_sample_padding_zeros.cairo create mode 100644 tests/nodes/grid_sample_padding_zeros/input_0.cairo create mode 100644 tests/nodes/grid_sample_padding_zeros/input_1.cairo create mode 100644 tests/nodes/grid_sample_padding_zeros/output_0.cairo diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 649e411f9..41ddcbf18 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -160,6 +160,7 @@ * [nn.hard\_sigmoid](framework/operators/neural-network/nn.hard\_sigmoid.md) * 
[nn.thresholded\_relu](framework/operators/neural-network/nn.thresholded\_relu.md) * [nn.gemm](framework/operators/neural-network/nn.gemm.md) + * [nn.grid\_sample](framework/operators/neural-network/nn.grid\_sample.md) * [Machine Learning](framework/operators/machine-learning/README.md) * [Tree Ensemble Classifier](framework/operators/machine-learning/tree-ensemble-classifier/README.md) * [tree\_ensemble\_classifier.predict](framework/operators/machine-learning/tree-ensemble-classifier/tree\_ensemble\_classifier.predict.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index 0e0e5be17..6d79d6750 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -43,6 +43,7 @@ You can see below the list of current supported ONNX Operators: | [Softplus](operators/neural-network/nn.softplus.md) | :white\_check\_mark: | | [Linear](operators/neural-network/nn.linear.md) | :white\_check\_mark: | | [HardSigmoid](operators/neural-network/nn.hard\_sigmoid.md) | :white\_check\_mark: | +| [GridSample](operators/neural-network/nn.grid\_sample_.md) | :white\_check\_mark: | | [Sinh](operators/tensor/tensor.sinh.md) | :white\_check\_mark: | | [Asinh](operators/tensor/tensor.asinh.md) | :white\_check\_mark: | | [Atanh](operators/tensor/tensor.atanh.md) | :white\_check\_mark: | diff --git a/docs/framework/operators/neural-network/README.md b/docs/framework/operators/neural-network/README.md index 8343d0c90..d3169b229 100644 --- a/docs/framework/operators/neural-network/README.md +++ b/docs/framework/operators/neural-network/README.md @@ -35,4 +35,5 @@ Orion supports currently these `NN` types. | [`nn.hard_sigmoid`](nn.hard\_sigmoid.md) | Applies the Hard Sigmoid function to an n-dimensional input tensor. | | [`nn.thresholded_relu`](nn.thresholded\_relu.md) | Performs the thresholded relu activation function element-wise. | | [`nn.gemm`](nn.gemm.md) | Performs General Matrix multiplication. 
| +| [`nn.grid_sample`](nn.grid\_sample.md) | Computes the grid sample of the input tensor and input grid. | diff --git a/docs/framework/operators/neural-network/nn.gemm.md b/docs/framework/operators/neural-network/nn.gemm.md index 4ac734d73..b89d884fc 100644 --- a/docs/framework/operators/neural-network/nn.gemm.md +++ b/docs/framework/operators/neural-network/nn.gemm.md @@ -1,4 +1,4 @@ -# nn.gemm +# NNTrait::gemm ```rust fn gemm( @@ -12,18 +12,19 @@ ) -> Tensor; ``` -Performs General Matrix multiplication: [https://en.wikipedia.org/wiki/Basic\_Linear\_Algebra\_Subprograms#Level\_3](https://en.wikipedia.org/wiki/Basic\_Linear\_Algebra\_Subprograms#Level\_3) +Performs General Matrix multiplication: https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 * A' = transpose(A) if transA else A * B' = transpose(B) if transB else B -Compute `Y = alpha * A' * B' + beta * C`, where input tensor A has shape (M, K) or (K, M), input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N). `A` will be transposed before doing the computation if attribute `transA` is `true`, same for `B` and `transB`. +Compute `Y = alpha * A' * B' + beta * C`, where input tensor A has shape (M, K) or (K, M), input tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N), and output tensor Y has shape (M, N). +`A` will be transposed before doing the computation if attribute `transA` is `true`, same for `B` and `transB`. ## Args * `A`(`Tensor`) - Input tensor A. The shape of `A` should be (M, K) if `transA` is `false`, or (K, M) if `transA` is `true`. * `B`(`Tensor`) - Input tensor B. The shape of `B` should be (K, N) if `transB` is `false`, or (N, K) if `transB` is `true`. -* `C`(`Option>`) - Optional input tensor C. The shape of C should be unidirectional broadcastable to (M, N). +* `C`(`Option>`) - Optional input tensor C. The shape of C should be unidirectional broadcastable to (M, N). 
* `alpha`(`Option`) - Optional scalar multiplier for the product of input tensors `A * B`. * `beta`(`Option`) - Optional scalar multiplier for input tensor `C`. * `transA`(`bool`) - Whether `A` should be transposed. @@ -63,4 +64,4 @@ A `Tensor` of shape (M, N). return y; } >>> tensor of shape [3;5] -``` +```` diff --git a/docs/framework/operators/neural-network/nn.grid_sample.md b/docs/framework/operators/neural-network/nn.grid_sample.md new file mode 100644 index 000000000..ea05df395 --- /dev/null +++ b/docs/framework/operators/neural-network/nn.grid_sample.md @@ -0,0 +1,97 @@ +# NNTrait::grid_sample + +```rust + fn grid_sample( + X: @Tensor, + grid: @Tensor, + align_corner: Option, + mode: Option, + padding_mode: Option, +) -> Tensor; +``` + +Given an input X and a flow-field grid, computes the output Y using X values and pixel locations from the grid. + +## Args + +* `X`(`@Tensor`) - Input tensor of shape (N, C, D1, D2, ..., Dr), where N is the batch size, C is the number of channels, D1, D2, ..., Dr are the spatial dimensions. +* `grid`(`@Tensor`) - Input offset of shape (N, D1_out, D2_out, ..., Dr_out, r), where D1_out, D2_out, ..., Dr_out are the spatial dimensions of the grid and output, and r is the number of spatial dimensions. Grid specifies the sampling locations normalized by the input spatial dimensions. +* `align_corners`(`Option`) - default is 0. If align_corners=1, the extrema are considered as referring to the center points of the input's corner pixels. If align_corners=0, they are instead considered as referring to the corner points of the input's corner pixels +* `mode`(`Option`) - default is linear. Three interpolation modes: linear (default), nearest and cubic. +* `padding_mode`(`Option`) - default is zeros. Support padding modes for outside grid values: `zeros`(default), `border`, `reflection`. + +## Returns + +A `Tensor` of shape (N, C, D1_out, D2_out, ..., Dr_out) of the sampled values. 
+ +## Example + +```rust +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::nn::FP16x16NN; +use orion::numbers::FP16x16; +use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; + +fn example_grid_sample() -> Tensor { + + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(4); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 655360, sign: true }); + data.append(FP16x16 { mag: 655360, sign: true }); + data.append(FP16x16 { mag: 327680, sign: true }); + data.append(FP16x16 { mag: 327680, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + + let mut grid = TensorTrait::new(shape.span(), data.span()); + + + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + let mut X = TensorTrait::new(shape.span(), data.span()); + + + return NNTrait::grid_sample( + @X, @grid, Option::None, Option::None, Option::None, + ); + +} + +} +>>> [ + [ + [ + [0.0000, 0.0000, 
1.7000, 0.0000], + [0.0000, 1.7000, 0.0000, 0.0000] + ] + ] + ] + +```` \ No newline at end of file diff --git a/nodegen/node/grid_sample.py b/nodegen/node/grid_sample.py new file mode 100644 index 000000000..a0d1af78e --- /dev/null +++ b/nodegen/node/grid_sample.py @@ -0,0 +1,700 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait +from .resize import _get_all_coords +import numbers +from typing import List + +import numpy as np + +#from onnx.reference.ops.op_resize import _get_all_coords + +def grid_sample(X, grid, mode='linear', padding_mode='zeros', align_corners=0): + x_dims = X.shape + grid_dims = grid.shape + N = x_dims[0] + C = x_dims[1] + y_dims = (N, C, *grid_dims[1:-1]) + if np.prod(y_dims) == 0: + return np.array([], dtype=X.dtype) + Y = np.empty(y_dims, dtype=X.dtype) + for n in range(N): + grid_data = grid[n] + for c in range(C): + X_data = X[n, c] + num_dims = len(x_dims[2:]) + dims = x_dims[2:] + border = _prepare_border(dims, align_corners=align_corners) + for ox in _get_all_coords(Y[n, c]): + nx = grid_data[tuple(ox)] + nx = nx[::-1] + x = _gs_denormalize_coordinates( + n=nx, dims=dims, align_corners=align_corners + ) + if mode == "nearest": + x = np.rint(x) + for i, v in enumerate(x): + x_min = border[i] + x_max = border[i + num_dims] + if v < x_min or v > x_max: + if padding_mode == "border": + x[i] = _clamp(v, 0, dims[i] - 1) + elif padding_mode == "reflection": + x[i] = _gs_reflect(v, x_min, x_max) + if mode == "nearest": + x = x.astype(np.int32) + Y[n][c][tuple(ox)] = _pixel_at_ndarray( + ndarray=X_data, + x=x, + border=border, + padding_mode=padding_mode, + ) + + elif mode == "linear": + Y[n][c][tuple(ox)] = _gs_linear_interpolation_nd_with_x( + data=X_data, x=x, border=border, padding_mode=padding_mode + ) + elif mode == "cubic": + Y[n][c][tuple(ox)] = _gs_cubic_interpolation_nd_with_x( + data=X_data, x=x, border=border, padding_mode=padding_mode + ) + else: + 
raise RuntimeError( + "GridSample interpolation only supports nearest, linear, and cubic modes." + ) + return (Y.astype(X.dtype),) + + +def _gs_denormalize(n, length: int, align_corners: bool): + if align_corners: + x = (n + 1) / 2.0 * (length - 1) + else: + x = ((n + 1) * length - 1) / 2.0 + return x + +def _gs_denormalize_coordinates(n, dims, align_corners: bool): + x = np.zeros(len(n), dtype=np.float32) + for i, (v, dim) in enumerate(zip(n, dims)): + x[i] = _gs_denormalize(n=v, length=dim, align_corners=align_corners) + return x + +def _gs_reflect(x, x_min, x_max): # type: ignore + """Reflect by the near border till within the borders + Use float for borders to avoid potential issues with integer T + """ + fx = x + rng = x_max - x_min + if fx < x_min: + dx = x_min - fx + n = int(dx / rng) + r = dx - n * rng + if n % 2 == 0: + fx = x_min + r + else: + fx = x_max - r + elif fx > x_max: + dx = fx - x_max + n = int(dx / rng) + r = dx - n * rng + if n % 2 == 0: + fx = x_max - r + else: + fx = x_min + r + return fx + +def _gs_get_cubic_coeffs(x, coeffs): # type: ignore + """Calculate cubic convolution interpolation coefficients + ROBERT G. KEYS https://ieeexplore.ieee.org/document/1163711 + Use float to avoid potential issues with integer. 
+ """ + cubic_alpha = -0.75 + x = abs(x) + coeffs[0] = ( + (cubic_alpha * (x + 1) - 5 * cubic_alpha) * (x + 1) + 8 * cubic_alpha + ) * (x + 1) - 4 * cubic_alpha + coeffs[1] = ((cubic_alpha + 2) * x - (cubic_alpha + 3)) * x * x + 1 + coeffs[2] = ((cubic_alpha + 2) * (1 - x) - (cubic_alpha + 3)) * (1 - x) * ( + 1 - x + ) + 1 + coeffs[3] = ( + (cubic_alpha * (2 - x) - 5 * cubic_alpha) * (2 - x) + 8 * cubic_alpha + ) * (2 - x) - 4 * cubic_alpha + +def _gs_get_linear_coeffs(x, coeffs): + x = abs(x) + coeffs[0] = 1 - x + coeffs[1] = x + +def _gs_bicubic_interpolate(p, x, y): # type: ignore + v = np.empty((4,), dtype=p.dtype) + coeffs = np.empty((4,), dtype=p.dtype) + _gs_get_cubic_coeffs(x, coeffs) + for i in range(4): + v[i] = coeffs @ p[i, :] + _gs_get_cubic_coeffs(y, coeffs) + return coeffs @ v + +def _gs_cubic_interpolation_1d_with_x(data, x, border, padding_mode): + v = np.empty((4,), dtype=data.dtype) + coeffs = np.empty((4,), dtype=data.dtype) + x_0 = int(np.floor(x)) + x_1 = x_0 + 1 + x_2 = x_0 + 2 + x_minus_1 = x_0 - 1 + _gs_get_cubic_coeffs(x - x_0, coeffs) + v[0] = _pixel_at_array( + array=data, i=x_minus_1, border=border, padding_mode=padding_mode + ) + v[1] = _pixel_at_array( + array=data, i=x_0, border=border, padding_mode=padding_mode + ) + v[2] = _pixel_at_array( + array=data, i=x_1, border=border, padding_mode=padding_mode + ) + v[3] = _pixel_at_array( + array=data, i=x_2, border=border, padding_mode=padding_mode + ) + return coeffs @ v + +def _gs_linear_interpolation_1d_with_x(data, x, border, padding_mode): + v = np.empty((2,), dtype=data.dtype) + coeffs = np.empty((2,), dtype=data.dtype) + x_0 = int(np.floor(x)) + x_1 = x_0 + 1 + _gs_get_linear_coeffs(x - x_0, coeffs) + v[0] = _pixel_at_array( + array=data, i=x_0, border=border, padding_mode=padding_mode + ) + v[1] = _pixel_at_array( + array=data, i=x_1, border=border, padding_mode=padding_mode + ) + return coeffs @ v + +def _gs_linear_interpolation_nd_with_x(data, x, border, padding_mode): + num_dims 
= data.ndim + assert num_dims == len(x) == int(len(border) / 2) + if num_dims == 1: + return _gs_linear_interpolation_1d_with_x( + data=data, x=x[0], border=border, padding_mode=padding_mode + ) + res1d = [] + for i in range(data.shape[0]): + r = _gs_linear_interpolation_nd_with_x( + data=data[i], + x=x[1:], + border=list(border[1:num_dims]) + + list(border[1 + num_dims : 2 * num_dims]), + padding_mode=padding_mode, + ) + res1d.append(r) + res1d = np.array(res1d) + return _gs_linear_interpolation_1d_with_x( + data=res1d, + x=x[0], + border=[border[0], border[num_dims]], + padding_mode=padding_mode, + ) + +def _gs_cubic_interpolation_nd_with_x(data, x, border, padding_mode): + num_dims = data.ndim + assert num_dims == len(x) == int(len(border) / 2) + if num_dims == 1: + return _gs_cubic_interpolation_1d_with_x( + data=data, x=x[0], border=border, padding_mode=padding_mode + ) + res1d = [] + for i in range(data.shape[0]): + r = _gs_cubic_interpolation_nd_with_x( + data=data[i], + x=x[1:], + border=list(border[1:num_dims]) + + list(border[1 + num_dims : 2 * num_dims]), + padding_mode=padding_mode, + ) + res1d.append(r) + res1d = np.array(res1d) + return _gs_cubic_interpolation_1d_with_x( + data=res1d, + x=x[0], + border=[border[0], border[num_dims]], + padding_mode=padding_mode, + ) + +def _clamp(val, lo, hi): # type: ignore + if val < lo: + return lo + if val > hi: + return hi + return val + +def _pixel_at_ndarray(ndarray, x: List, border, padding_mode): # type: ignore + # boarder: [x_1_min, x_2_min, ..., x_1_max, x_2_max, ...] 
+ num_dims = ndarray.ndim + assert num_dims == len(x) == int(len(border) / 2) + if num_dims == 1: + return _pixel_at_array( + array=ndarray, i=x[0], border=border, padding_mode=padding_mode + ) + i = x[0] + d = ndarray.shape[0] + if padding_mode == "zeros": + if i >= 0 and i < d: + ndarray = ndarray[i] + else: + # Trick + i = 0 + ndarray = np.zeros_like(ndarray[i]) + elif padding_mode == "border": + i = _clamp(i, 0, d - 1) + ndarray = ndarray[i] + else: + i = int(_gs_reflect(i, border[0], border[num_dims])) + ndarray = ndarray[i] + return _pixel_at_ndarray( + ndarray=ndarray, + x=x[1:], + border=list(border[1:num_dims]) + list(border[1 + num_dims : 2 * num_dims]), + padding_mode=padding_mode, + ) +def _pixel_at_array(array, i: int, border, padding_mode): # type: ignore + assert array.ndim == 1 + d = array.shape[0] + if padding_mode == "zeros": + if i >= 0 and i < d: + pixel = array[i] + else: + pixel = 0 + elif padding_mode == "border": + i = _clamp(i, 0, d - 1) + pixel = array[i] + else: + i = int(_gs_reflect(i, border[0], border[1])) + pixel = array[i] + return pixel + +def _prepare_border(dims, align_corners: bool): + # boarder: [x_1_min, x_2_min, ..., x_1_max, x_2_max, ...] 
+ num_dims = len(dims) + borders = np.zeros(num_dims * 2) + for i in range(num_dims): + # min + borders[i] = -0.5 + # max + borders[i + num_dims] = dims[i] - 0.5 + if align_corners: + # min + borders[i] = 0.0 + # max + borders[i + num_dims] = dims[i] - 1.0 + return borders + + +class Grid_sample(RunAll): + + @staticmethod + def export_gridsample() -> None: + x = np.array( + [ + [ + [ + [0.0, 1.0, 2.0, 3.0], + [4.0, 5.0, 6.0, 7.0], + [8.0, 9.0, 10.0, 11.0], + [12.0, 13.0, 14.0, 15.0], + ] + ] + ], + dtype=np.float32, + ) + + grid = np.array( + [ + [ + [ + [-1.0000, -1.0000], + [-0.6000, -1.0000], + [-0.2000, -1.0000], + [0.2000, -1.0000], + [0.6000, -1.0000], + [1.0000, -1.0000], + ], + [ + [-1.0000, -0.6000], + [-0.6000, -0.6000], + [-0.2000, -0.6000], + [0.2000, -0.6000], + [0.6000, -0.6000], + [1.0000, -0.6000], + ], + [ + [-1.0000, -0.2000], + [-0.6000, -0.2000], + [-0.2000, -0.2000], + [0.2000, -0.2000], + [0.6000, -0.2000], + [1.0000, -0.2000], + ], + [ + [-1.0000, 0.2000], + [-0.6000, 0.2000], + [-0.2000, 0.2000], + [0.2000, 0.2000], + [0.6000, 0.2000], + [1.0000, 0.2000], + ], + [ + [-1.0000, 0.6000], + [-0.6000, 0.6000], + [-0.2000, 0.6000], + [0.2000, 0.6000], + [0.6000, 0.6000], + [1.0000, 0.6000], + ], + [ + [-1.0000, 1.0000], + [-0.6000, 1.0000], + [-0.2000, 1.0000], + [0.2000, 1.0000], + [0.6000, 1.0000], + [1.0000, 1.0000], + ], + ] + ], + dtype=np.float32, + ) + + y = grid_sample(x, grid, mode ="linear") + y = np.array(y[0]) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "grid_sample" + func_sig = "NNTrait::grid_sample(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, grid], y, func_sig, name, Trait.NN) + + @staticmethod + def 
export_gridsample_paddingmode_zeros() -> None: + x = np.array( + [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]], + dtype=np.float32, + ) + grid = np.array( + [ + [ + [ + [-10.0000, -10.0000], + [-5.0000, -5.0000], + [-0.2000, -0.2000], + [10.0000, 10.0000], + ], + [ + [10.0000, 10.0000], + [-0.2000, -0.2000], + [5.0000, 5.0000], + [10.0000, 10.0000], + ], + ] + ], + dtype=np.float32, + ) + + y = grid_sample(x, grid, mode ="linear") + y = np.array(y[0]) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "grid_sample_padding_zeros" + func_sig = "NNTrait::grid_sample(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, grid], y, func_sig, name, Trait.NN) + + @staticmethod + def export_gridsample_paddingmode_border() -> None: + x = np.array( + [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]], + dtype=np.float32, + ) + grid = np.array( + [ + [ + [ + [-10.0000, -10.0000], + [-5.0000, -5.0000], + [-0.2000, -0.2000], + [10.0000, 10.0000], + ], + [ + [10.0000, 10.0000], + [-0.2000, -0.2000], + [5.0000, 5.0000], + [10.0000, 10.0000], + ], + ] + ], + dtype=np.float32, + ) + + y = grid_sample(x, grid, mode ="linear", padding_mode="border") + y = np.array(y[0]) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "grid_sample_padding_border" + func_sig = "NNTrait::grid_sample(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(PADDING_MODE::BORDER))" + make_test( + [x, grid], y, func_sig, name, 
Trait.NN) + + @staticmethod + def export_gridsample_paddingmode_reflection() -> None: + x = np.array( + [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]], + dtype=np.float32, + ) + grid = np.array( + [ + [ + [ + [-10.0000, -10.0000], + [-5.0000, -5.0000], + [-0.2000, -0.2000], + [10.0000, 10.0000], + ], + [ + [10.0000, 10.0000], + [-0.2000, -0.2000], + [5.0000, 5.0000], + [10.0000, 10.0000], + ], + ] + ], + dtype=np.float32, + ) + + y = grid_sample(x, grid, mode ="linear", padding_mode="reflection") + y = np.array(y[0]) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "grid_sample_padding_reflection" + func_sig = "NNTrait::grid_sample(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(PADDING_MODE::REFLECTION))" + make_test( + [x, grid], y, func_sig, name, Trait.NN) + + @staticmethod + def export_gridsample_mode_aligncorners() -> None: + x = np.array( + [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]], + dtype=np.float32, + ) + grid = np.array( + [ + [ + [ + [-1.0000, -1.0000], + [-0.5000, -0.5000], + [-0.2000, -0.2000], + [0.0000, 0.0000], + ], + [ + [0.0000, 0.0000], + [-0.2000, -0.2000], + [0.5000, 0.5000], + [1.0000, 1.0000], + ], + ] + ], + dtype=np.float32, + ) + + y = grid_sample(x, grid, mode ="linear", align_corners=1) + y = np.array(y[0]) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "grid_sample_aligncorners" + func_sig = "NNTrait::grid_sample(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::Some(1)," + func_sig += "Option::None," + func_sig += 
"Option::None)" + make_test( + [x, grid], y, func_sig, name, Trait.NN) + + + @staticmethod + def export_gridsample_nearest() -> None: + x = np.array( + [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]], + dtype=np.float32, + ) + grid = np.array( + [ + [ + [ + [-1.0000, -1.0000], + [-0.5000, -0.5000], + [-0.2000, -0.2000], + [0.0000, 0.0000], + ], + [ + [0.0000, 0.0000], + [-0.2000, -0.2000], + [0.5000, 0.5000], + [1.0000, 1.0000], + ], + ] + ], + dtype=np.float32, + ) + + y = grid_sample(x, grid, mode ="nearest", align_corners=0) + y = np.array(y[0]) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "grid_sample_nearest" + func_sig = "NNTrait::grid_sample(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::Some(0)," + func_sig += "Option::Some(MODE::NEAREST)," + func_sig += "Option::None)" + make_test( + [x, grid], y, func_sig, name, Trait.NN) + + + @staticmethod + def export_gridsample_nearest_align_corner() -> None: + x = np.array( + [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]], + dtype=np.float32, + ) + grid = np.array( + [ + [ + [ + [-1.0000, -1.0000], + [-0.5000, -0.5000], + [-0.2000, -0.2000], + [0.0000, 0.0000], + ], + [ + [0.0000, 0.0000], + [-0.2000, -0.2000], + [0.5000, 0.5000], + [1.0000, 1.0000], + ], + ] + ], + dtype=np.float32, + ) + + y = grid_sample(x, grid, mode ="nearest", align_corners=1) + y = np.array(y[0]) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "grid_sample_nearest_aligncorner" + func_sig = "NNTrait::grid_sample(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::Some(1)," + func_sig += 
"Option::Some(MODE::NEAREST)," + func_sig += "Option::None)" + make_test( + [x, grid], y, func_sig, name, Trait.NN) + + @staticmethod + def export_gridsample_cubic() -> None: + x = np.array( + [[[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]]]], + dtype=np.float32, + ) + grid = np.array( + [ + [ + [ + [-1.0000, -1.0000], + [-0.5000, -0.5000], + [-0.2000, -0.2000], + [0.0000, 0.0000], + ], + [ + [0.0000, 0.0000], + [-0.2000, -0.2000], + [0.5000, 0.5000], + [1.0000, 1.0000], + ], + ] + ], + dtype=np.float32, + ) + + y = grid_sample(x, grid, mode ="cubic", align_corners=0) + y = np.array(y[0]) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + grid = Tensor(Dtype.FP16x16, grid.shape, to_fp(grid.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "grid_sample_cubic" + func_sig = "NNTrait::grid_sample(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "Option::Some(0)," + func_sig += "Option::Some(MODE::CUBIC)," + func_sig += "Option::None)" + make_test( + [x, grid], y, func_sig, name, Trait.NN) diff --git a/src/numbers.cairo b/src/numbers.cairo index 936c128e1..1ce8a803d 100644 --- a/src/numbers.cairo +++ b/src/numbers.cairo @@ -2,10 +2,10 @@ mod fixed_point; mod complex_number; use orion::numbers::fixed_point::core::FixedTrait; -use orion::numbers::fixed_point::implementations::fp8x23::core::{ONE as ONE_fp8x23 }; -use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE as ONE_fp16x16 }; -use orion::numbers::fixed_point::implementations::fp64x64::core::{ONE as ONE_fp64x64 }; -use orion::numbers::fixed_point::implementations::fp32x32::core::{ONE as ONE_fp32x32 }; +use orion::numbers::fixed_point::implementations::fp8x23::core::{ONE as ONE_fp8x23}; +use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE as ONE_fp16x16}; +use orion::numbers::fixed_point::implementations::fp64x64::core::{ONE as ONE_fp64x64}; +use 
orion::numbers::fixed_point::implementations::fp32x32::core::{ONE as ONE_fp32x32}; // Common methods from Fixed Point and Signed Integers. trait NumberTrait { @@ -1535,7 +1535,7 @@ impl I8Number of NumberTrait { 0 } fn is_zero(self: i8) -> bool { - self == 0 + self == 0 } fn half() -> i8 { @@ -1571,7 +1571,7 @@ impl I8Number of NumberTrait { } fn max_value() -> i8 { - 127 + 127 } fn min(self: i8, other: i8) -> i8 { @@ -1661,7 +1661,7 @@ impl I8Number of NumberTrait { } fn is_neg_inf(self: i8) -> bool { - self == -127 + self == -127 } fn bitwise_and(lhs: i8, rhs: i8) -> i8 { @@ -1702,7 +1702,7 @@ impl I8Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i8 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -1729,7 +1729,7 @@ impl I8IntoFP8x23 of Into { } let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); - FP8x23 {mag: number_u32 * ONE_fp8x23, sign: number_sign} + FP8x23 { mag: number_u32 * ONE_fp8x23, sign: number_sign } } } @@ -1742,7 +1742,7 @@ impl I8IntoFP16x16 of Into { } let number_felt: felt252 = self_positive.into(); let number_u32: u32 = number_felt.try_into().unwrap(); - FP16x16 {mag: number_u32 * ONE_fp16x16, sign: number_sign} + FP16x16 { mag: number_u32 * ONE_fp16x16, sign: number_sign } } } @@ -1755,7 +1755,7 @@ impl I8IntoFP64x64 of Into { } let number_felt: felt252 = self_positive.into(); let number_u128: u128 = number_felt.try_into().unwrap(); - FP64x64 {mag: number_u128 * ONE_fp64x64, sign: number_sign} + FP64x64 { mag: number_u128 * ONE_fp64x64, sign: number_sign } } } @@ -1768,7 +1768,7 @@ impl I8IntoFP32x32 of Into { } let number_felt: felt252 = self_positive.into(); let number_u128: u64 = number_felt.try_into().unwrap(); - FP32x32 {mag: 
number_u128 * ONE_fp32x32, sign: number_sign} + FP32x32 { mag: number_u128 * ONE_fp32x32, sign: number_sign } } } @@ -1877,7 +1877,7 @@ impl I16Number of NumberTrait { 0 } fn is_zero(self: i16) -> bool { - self == 0 + self == 0 } fn half() -> i16 { @@ -2003,7 +2003,7 @@ impl I16Number of NumberTrait { } fn is_neg_inf(self: i16) -> bool { - self == -32767 + self == -32767 } fn bitwise_and(lhs: i16, rhs: i16) -> i16 { @@ -2044,7 +2044,7 @@ impl I16Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i16 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -2167,7 +2167,7 @@ impl I32Number of NumberTrait { 0 } fn is_zero(self: i32) -> bool { - self == 0 + self == 0 } fn half() -> i32 { @@ -2203,7 +2203,7 @@ impl I32Number of NumberTrait { } fn max_value() -> i32 { - 2147483647 + 2147483647 } fn min(self: i32, other: i32) -> i32 { @@ -2281,7 +2281,7 @@ impl I32Number of NumberTrait { } fn INF() -> i32 { - 2147483647 + 2147483647 } fn is_inf(self: i32) -> bool { @@ -2289,11 +2289,11 @@ impl I32Number of NumberTrait { } fn is_pos_inf(self: i32) -> bool { - self == 2147483647 + self == 2147483647 } fn is_neg_inf(self: i32) -> bool { - self == -2147483647 + self == -2147483647 } fn bitwise_and(lhs: i32, rhs: i32) -> i32 { @@ -2334,7 +2334,7 @@ impl I32Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i32 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -2470,7 +2470,7 @@ impl I64Number of NumberTrait { 0 } fn is_zero(self: i64) -> bool { - self == 0 + self == 0 } 
fn half() -> i64 { @@ -2506,7 +2506,7 @@ impl I64Number of NumberTrait { } fn max_value() -> i64 { - 9223372036854775807 + 9223372036854775807 } fn min(self: i64, other: i64) -> i64 { @@ -2584,7 +2584,7 @@ impl I64Number of NumberTrait { } fn INF() -> i64 { - 9223372036854775807 + 9223372036854775807 } fn is_inf(self: i64) -> bool { @@ -2592,11 +2592,11 @@ impl I64Number of NumberTrait { } fn is_pos_inf(self: i64) -> bool { - self == 9223372036854775807 + self == 9223372036854775807 } fn is_neg_inf(self: i64) -> bool { - self == -9223372036854775807 + self == -9223372036854775807 } fn bitwise_and(lhs: i64, rhs: i64) -> i64 { @@ -2637,7 +2637,7 @@ impl I64Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i64 = felt_result.try_into().unwrap(); if lhs * rhs < 0 { @@ -2760,7 +2760,7 @@ impl I128Number of NumberTrait { 0 } fn is_zero(self: i128) -> bool { - self == 0 + self == 0 } fn half() -> i128 { @@ -2796,7 +2796,7 @@ impl I128Number of NumberTrait { } fn max_value() -> i128 { - 170141183460469231731687303715884105727 + 170141183460469231731687303715884105727 } fn min(self: i128, other: i128) -> i128 { @@ -2874,19 +2874,20 @@ impl I128Number of NumberTrait { } fn INF() -> i128 { - 170141183460469231731687303715884105727 + 170141183460469231731687303715884105727 } fn is_inf(self: i128) -> bool { - (self == 170141183460469231731687303715884105727 || self == -170141183460469231731687303715884105727) + (self == 170141183460469231731687303715884105727 + || self == -170141183460469231731687303715884105727) } fn is_pos_inf(self: i128) -> bool { - self == 170141183460469231731687303715884105727 + self == 170141183460469231731687303715884105727 } fn is_neg_inf(self: i128) -> bool { - self == 
-170141183460469231731687303715884105727 + self == -170141183460469231731687303715884105727 } fn bitwise_and(lhs: i128, rhs: i128) -> i128 { @@ -2927,7 +2928,7 @@ impl I128Div of Div { let rhs_felt: felt252 = rhs_positive.into(); let lhs_u128: u128 = lhs_felt.try_into().unwrap(); let rhs_u128: u128 = rhs_felt.try_into().unwrap(); - let mut result = lhs_u128 / rhs_u128; + let mut result = lhs_u128 / rhs_u128; let felt_result: felt252 = result.into(); let signed_int_result: i128 = felt_result.try_into().unwrap(); // assigning the sign and returning diff --git a/src/numbers/fixed_point/implementations/fp16x16/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/core.cairo index cff7996af..a260d886f 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/core.cairo @@ -436,9 +436,8 @@ fn _i8_try_from_fp(x: FP16x16) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, - Option::None(_) => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo index b3fe4d39b..176c1a115 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo @@ -451,7 +451,7 @@ fn _i8_try_from_fp(x: FP16x16W) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, Option::None(_) => Option::None(()) } diff --git a/src/numbers/fixed_point/implementations/fp32x32/core.cairo b/src/numbers/fixed_point/implementations/fp32x32/core.cairo index 9fa722e8e..34b06bc44 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/core.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/core.cairo @@ -402,9 +402,8 @@ fn _i8_try_from_fp(x: FP32x32) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); 
} - Option::Some(number_i8) + Option::Some(number_i8) }, - Option::None(_) => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp64x64/core.cairo b/src/numbers/fixed_point/implementations/fp64x64/core.cairo index c98cb7c57..d35cb9cfa 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/core.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/core.cairo @@ -402,9 +402,8 @@ fn _i8_try_from_fp(x: FP64x64) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, - Option::None(_) => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp8x23/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/core.cairo index b1ab1b6ac..6db9a5a43 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/core.cairo @@ -425,7 +425,7 @@ fn _i32_into_fp(x: FP8x23) -> i32 { fn _i8_try_from_fp(x: FP8x23) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); -// Option::Some(i8 { mag: unscaled_mag.unwrap(), sign: x.sign }) + // Option::Some(i8 { mag: unscaled_mag.unwrap(), sign: x.sign }) match unscaled_mag { Option::Some(val) => { let number_felt: felt252 = unscaled_mag.unwrap().into(); @@ -433,7 +433,7 @@ fn _i8_try_from_fp(x: FP8x23) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, Option::None(_) => Option::None(()) } diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo index c4b49c798..9d9b985de 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo @@ -439,7 +439,7 @@ fn _i8_try_from_fp(x: FP8x23W) -> Option { if x.sign { return Option::Some(number_i8 * -1_i8); } - Option::Some(number_i8) + Option::Some(number_i8) }, Option::None(_) => 
Option::None(()) } diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 3c99f4733..5cac2b81c 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -14,6 +14,7 @@ use orion::operators::tensor::core::Tensor; /// hard_sigmoid - Applies the Hard Sigmoid function to an n-dimensional input tensor. /// thresholded_relu - Performs the thresholded relu activation function element-wise. /// gemm - Performs General Matrix multiplication. +/// grid_sample - Computes the grid sample of the input tensor and input grid. trait NNTrait { /// # NNTrait::relu /// @@ -694,4 +695,109 @@ trait NNTrait { transA: bool, transB: bool ) -> Tensor; + /// # NNTrait::grid_sample + /// + /// ```rust + /// fn grid_sample( + /// X: @Tensor, + /// grid: @Tensor, + /// align_corner: Option, + /// mode: Option, + /// padding_mode: Option, + /// ) -> Tensor; + /// ``` + /// + /// Given an input X and a flow-field grid, computes the output Y using X values and pixel locations from the grid. + /// + /// ## Args + /// + /// * `X`(`@Tensor`) - Input tensor of shape (N, C, D1, D2, ..., Dr), where N is the batch size, C is the number of channels, D1, D2, ..., Dr are the spatial dimensions. + /// * `grid`(`@Tensor`) - Input offset of shape (N, D1_out, D2_out, ..., Dr_out, r), where D1_out, D2_out, ..., Dr_out are the spatial dimensions of the grid and output, and r is the number of spatial dimensions. Grid specifies the sampling locations normalized by the input spatial dimensions. + /// * `align_corners`(`Option`) - default is 0. If align_corners=1, the extrema are considered as referring to the center points of the input's corner pixels. If align_corners=0, they are instead considered as referring to the corner points of the input's corner pixels + /// * `mode`(`Option`) - default is linear. Three interpolation modes: linear (default), nearest and cubic. + /// * `padding_mode`(`Option`) - default is zeros. 
Support padding modes for outside grid values: `zeros`(default), `border`, `reflection`. + /// + /// ## Returns + /// + /// A `Tensor` of shape (N, C, D1_out, D2_out, ..., Dr_out) of the sampled values. + /// + /// ## Example + /// + /// ```rust + /// use orion::operators::nn::NNTrait; + /// use orion::numbers::FixedTrait; + /// use orion::operators::nn::FP16x16NN; + /// use orion::numbers::FP16x16; + /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; + /// + /// fn example_grid_sample() -> Tensor { + /// + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(2); + /// shape.append(4); + /// shape.append(2); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 655360, sign: true }); + /// data.append(FP16x16 { mag: 655360, sign: true }); + /// data.append(FP16x16 { mag: 327680, sign: true }); + /// data.append(FP16x16 { mag: 327680, sign: true }); + /// data.append(FP16x16 { mag: 13107, sign: true }); + /// data.append(FP16x16 { mag: 13107, sign: true }); + /// data.append(FP16x16 { mag: 655360, sign: false }); + /// data.append(FP16x16 { mag: 655360, sign: false }); + /// data.append(FP16x16 { mag: 655360, sign: false }); + /// data.append(FP16x16 { mag: 655360, sign: false }); + /// data.append(FP16x16 { mag: 13107, sign: true }); + /// data.append(FP16x16 { mag: 13107, sign: true }); + /// data.append(FP16x16 { mag: 327680, sign: false }); + /// data.append(FP16x16 { mag: 327680, sign: false }); + /// data.append(FP16x16 { mag: 655360, sign: false }); + /// data.append(FP16x16 { mag: 655360, sign: false }); + /// + /// let mut grid = TensorTrait::new(shape.span(), data.span()); + /// + /// + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(1); + /// shape.append(3); + /// shape.append(2); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// 
data.append(FP16x16 { mag: 131072, sign: false }); + /// data.append(FP16x16 { mag: 196608, sign: false }); + /// data.append(FP16x16 { mag: 262144, sign: false }); + /// data.append(FP16x16 { mag: 327680, sign: false }); + /// let mut X = TensorTrait::new(shape.span(), data.span()); + /// + /// + /// return NNTrait::grid_sample( + /// @X, @grid, Option::None, Option::None, Option::None, + /// ); + /// + /// } + /// + ///} + /// >>> [ + /// [ + /// [ + /// [0.0000, 0.0000, 1.7000, 0.0000], + /// [0.0000, 1.7000, 0.0000, 0.0000] + /// ] + /// ] + /// ] + /// + /// ```` + + fn grid_sample( + X: @Tensor, + grid: @Tensor, + align_corner: Option, + mode: Option, + padding_mode: Option, + ) -> Tensor; } diff --git a/src/operators/nn/functional.cairo b/src/operators/nn/functional.cairo index a0fd96cc8..d685f16db 100644 --- a/src/operators/nn/functional.cairo +++ b/src/operators/nn/functional.cairo @@ -10,3 +10,4 @@ mod logsoftmax; mod thresholded_relu; mod hard_sigmoid; mod gemm; +mod grid_sample; diff --git a/src/operators/nn/functional/grid_sample.cairo b/src/operators/nn/functional/grid_sample.cairo new file mode 100644 index 000000000..50e94b420 --- /dev/null +++ b/src/operators/nn/functional/grid_sample.cairo @@ -0,0 +1,986 @@ +use core::option::OptionTrait; +use core::traits::TryInto; +use orion::numbers::NumberTrait; +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; +use orion::operators::vec::{NullableVec, NullableVecImpl}; +use orion::operators::tensor::core::{stride}; +use core::debug::PrintTrait; +use orion::numbers::FP16x16; +use orion::operators::tensor::{FP16x16Tensor}; + +#[derive(Copy, Drop)] +enum MODE { + NEAREST, + LINEAR, + CUBIC, +} + +#[derive(Copy, Drop)] +enum PADDING_MODE { + ZEROS, + BORDER, + REFLECTION, +} + +fn grid_sample< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +Add, + +Mul, + +Sub, + +Div, + +AddEq, + +PrintTrait, + +PartialOrd, + +PartialEq, + +TryInto, + +Into, + +Rem, + +Neg, + +SubEq, +>( + X: 
@Tensor, + grid: @Tensor, + align_corner: Option, + mode: Option, + padding_mode: Option, +) -> Tensor { + let align_corner = match align_corner { + Option::Some(align_corner) => align_corner, + Option::None => 0, + }; + + let mode = match mode { + Option::Some(mode) => mode, + Option::None => MODE::LINEAR, + }; + + let padding_mode = match padding_mode { + Option::Some(padding_mode) => padding_mode, + Option::None => PADDING_MODE::ZEROS, + }; + + let x_dims = (*X).shape; + let x_stride = stride((*X).shape); + let grid_dims = (*grid).shape; + let grid_stride = stride((*grid).shape); + + let N = *x_dims.at(0); + let C = *x_dims.at(1); + + let num_dims = x_dims.len() - 2; + let dims = SpanTrait::slice(x_dims, 2, num_dims); + + let border = prepare_border(X, dims, align_corner); + + let mut y_dims = array![N, C]; + y_dims.append_span(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2)); + let y_dims = y_dims.span(); + + if prod(y_dims, 0) == 0 { + return TensorTrait::new(array![].span(), array![].span()); + } + + let mut Y = ArrayTrait::new(); + + let mut n = 0; + loop { + if n == N { + break; + } + let grid_data = SpanTrait::slice((*grid).data, n * *grid_stride.at(0), *grid_stride.at(0)); + let grid_data_stride = SpanTrait::slice(grid_stride, 1, grid_stride.len() - 1); + + let mut c = 0; + loop { + if c == C { + break; + } + let X_data = SpanTrait::slice( + (*X).data, n * *x_stride.at(0) + c * *x_stride.at(1), *x_stride.at(1) + ); + let X_data_stride = SpanTrait::slice(x_stride, 2, grid_stride.len() - 2); + let all_coords = get_all_coords(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2)); + let mut ix = 0; + loop { + if ix == all_coords.len() { + break; + } + + let ox = *all_coords.at(ix); + let nx = get_sub(grid_data, grid_data_stride, ox); + let nx = reverse(nx); + let x = gs_denormalize_coordinates(nx, dims, align_corner); + + let x = match mode { + MODE::NEAREST => { rint(x) }, + MODE::LINEAR => { x }, + MODE::CUBIC => { x }, + }; + + let mut new_x = 
ArrayTrait::new(); + let mut i = 0; + loop { + if i == x.len() { + break; + } + let v = *x.at(i); + + let mut x_min = *border.at(i); + let mut x_max = *border.at(i + num_dims); + let new_v = if v < x_min || v > x_max { + let v = match padding_mode { + PADDING_MODE::ZEROS => { v }, + PADDING_MODE::BORDER => { + clamp( + v, + NumberTrait::zero(), + NumberTrait::new_unscaled((*dims.at(i)).into(), false) + - NumberTrait::one() + ) + }, + PADDING_MODE::REFLECTION => { gs_reflect(v, x_min, x_max) }, + }; + v + } else { + v + }; + new_x.append(new_v); + i += 1; + }; + let x = new_x.span(); + + let y = match mode { + MODE::NEAREST => { + pixel_at_ndarray(X_data, dims, X_data_stride, x, border, padding_mode) + }, + MODE::LINEAR => { + gs_linear_interpolation_nd_with_x( + X_data, dims, X_data_stride, x, border, padding_mode + ) + }, + MODE::CUBIC => { + gs_cubic_interpolation_nd_with_x( + X_data, dims, X_data_stride, x, border, padding_mode + ) + }, + }; + Y.append(y); + + ix += 1; + }; + c += 1; + }; + n += 1; + }; + return TensorTrait::new(y_dims, Y.span()); +} + +fn gs_cubic_interpolation_1d_with_x< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +TensorTrait, + +Mul, + +Add, + +Div, + +Sub, + +AddEq, + +TryInto, + +Into, + +PartialOrd, + +PartialEq, + +Rem, + +PrintTrait, +>( + data: Span, x: T, border: Span, padding_mode: PADDING_MODE +) -> T { + let x_0 = NumberTrait::floor(x); + let x_1 = x_0 + NumberTrait::one(); + let x_2 = x_1 + NumberTrait::one(); + let x_minus_1 = x_0 - NumberTrait::one(); + + let coeffs = gs_get_cubic_coeffs(x - x_0); + + let v_0 = pixel_at_array(data, x_minus_1.try_into().unwrap(), border, padding_mode); + let v_1 = pixel_at_array(data, x_0.try_into().unwrap(), border, padding_mode); + let v_2 = pixel_at_array(data, x_1.try_into().unwrap(), border, padding_mode); + let v_3 = pixel_at_array(data, x_2.try_into().unwrap(), border, padding_mode); + + let v = array![v_0, v_1, v_2, v_3].span(); + + return dot(coeffs, v); +} + +fn gs_get_cubic_coeffs< 
+ T, + MAG, + +TensorTrait, + +NumberTrait, + +PartialOrd, + +PartialEq, + +Copy, + +Drop, + +AddEq, + +Add, + +Div, + +Mul, + +Sub, +>( + x: T +) -> Span { + let one = NumberTrait::one(); + let two = one + NumberTrait::one(); + let three = two + NumberTrait::one(); + let four = three + NumberTrait::one(); + let five = four + NumberTrait::one(); + let eigth = four + four; + + let A = NumberTrait::neg(three / four); + let x = NumberTrait::abs(x); + + let mut coeffs = ArrayTrait::new(); + + coeffs.append(((A * (x + one) - five * A) * (x + one) + eigth * A) * (x + one) - four * A); + coeffs.append(((A + two) * x - (A + three)) * x * x + one); + coeffs.append(((A + two) * (one - x) - (A + three)) * (one - x) * (one - x) + one); + coeffs + .append( + ((A * ((one - x) + one) - five * A) * ((one - x) + one) + eigth * A) * ((one - x) + one) + - four * A + ); + return coeffs.span(); +} + +fn gs_cubic_interpolation_nd_with_x< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +TensorTrait, + +Mul, + +Add, + +Div, + +Sub, + +AddEq, + +TryInto, + +Into, + +PartialOrd, + +PartialEq, + +Rem, + +PrintTrait, +>( + data: Span, + data_dims: Span, + data_stride: Span, + x: Span, + border: Span, + padding_mode: PADDING_MODE +) -> T { + let num_dims = data_dims.len(); + + assert(num_dims == x.len(), 'pixel at nd array: wrong dim'); + assert(num_dims == (border.len() / 2), 'pixel at nd array: wrong dim'); + + if num_dims == 1 { + let a = gs_cubic_interpolation_1d_with_x(data, *x.at(0), border, padding_mode); + return a; + } + + let mut res1d = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == *data_dims.at(0) { + break; + } + let sub_data = SpanTrait::slice(data, i * *data_stride.at(0), *data_stride.at(0)); + let sub_x = SpanTrait::slice(x, 1, x.len() - 1); + + let data_dims_sub = SpanTrait::slice(data_dims, 1, data_dims.len() - 1); + let data_stride_sub = SpanTrait::slice(data_stride, 1, data_stride.len() - 1); + + let border1 = SpanTrait::slice(border, 1, num_dims - 1); + let 
border2 = SpanTrait::slice(border, num_dims + 1, num_dims - 1); + let mut border = ArrayTrait::new(); + border.append_span(border1); + border.append_span(border2); + + let r = gs_cubic_interpolation_nd_with_x( + sub_data, data_dims_sub, data_stride_sub, sub_x, border.span(), padding_mode + ); + res1d.append(r); + i += 1; + }; + + return gs_cubic_interpolation_1d_with_x( + res1d.span(), *x.at(0), array![*border.at(0), *border.at(num_dims)].span(), padding_mode + ); +} + + +fn gs_get_linear_coeffs, +Copy, +NumberTrait, +Sub,>( + x: T +) -> Span { + let x = NumberTrait::abs(x); + return array![NumberTrait::one() - x, x].span(); +} + + +fn gs_linear_interpolation_1d_with_x< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +TensorTrait, + +Mul, + +Add, + +Div, + +Sub, + +TryInto, + +Into, + +PartialOrd, + +PartialEq, + +Rem, + +PrintTrait +>( + data: Span, x: T, border: Span, padding_mode: PADDING_MODE +) -> T { + let x_0 = NumberTrait::floor(x); + let x_1 = x_0 + NumberTrait::one(); + + let coeffs = gs_get_linear_coeffs(x - x_0); + + let v_0 = pixel_at_array(data, x_0.try_into().unwrap(), border, padding_mode); + let v_1 = pixel_at_array(data, x_1.try_into().unwrap(), border, padding_mode); + + let v = array![v_0, v_1].span(); + + return dot(coeffs, v); +} + +fn dot, +Copy, +NumberTrait, +Add, +TensorTrait, +Mul,>( + a: Span, b: Span +) -> T { + assert(a.len() == b.len(), 'dot: wrong len'); + + let mut i = 0; + let mut sum = NumberTrait::zero(); + loop { + if i == a.len() { + break; + } + sum = sum + *a.at(i) * *b.at(i); + i += 1; + }; + + return sum; +} + + +fn gs_linear_interpolation_nd_with_x< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +TensorTrait, + +Mul, + +Add, + +Div, + +Sub, + +TryInto, + +Into, + +PartialOrd, + +PartialEq, + +Rem, + +PrintTrait +>( + data: Span, + data_dims: Span, + data_stride: Span, + x: Span, + border: Span, + padding_mode: PADDING_MODE +) -> T { + let num_dims = data_dims.len(); + + assert(num_dims == x.len(), 'pixel at nd array: 
wrong dim'); + assert(num_dims == (border.len() / 2), 'pixel at nd array: wrong dim'); + + if num_dims == 1 { + let a = gs_linear_interpolation_1d_with_x(data, *x.at(0), border, padding_mode); + return a; + } + + let mut res1d = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == *data_dims.at(0) { + break; + } + let sub_data = SpanTrait::slice(data, i * *data_stride.at(0), *data_stride.at(0)); + let sub_x = SpanTrait::slice(x, 1, x.len() - 1); + + let data_dims_sub = SpanTrait::slice(data_dims, 1, data_dims.len() - 1); + let data_stride_sub = SpanTrait::slice(data_stride, 1, data_stride.len() - 1); + + let border1 = SpanTrait::slice(border, 1, num_dims - 1); + let border2 = SpanTrait::slice(border, num_dims + 1, num_dims - 1); + let mut border = ArrayTrait::new(); + border.append_span(border1); + border.append_span(border2); + + let r = gs_linear_interpolation_nd_with_x( + sub_data, data_dims_sub, data_stride_sub, sub_x, border.span(), padding_mode + ); + res1d.append(r); + i += 1; + }; + + return gs_linear_interpolation_1d_with_x( + res1d.span(), *x.at(0), array![*border.at(0), *border.at(num_dims)].span(), padding_mode + ); +} + + +fn pixel_at_ndarray< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +TensorTrait, + +Mul, + +Add, + +Div, + +Sub, + +TryInto, + +Into, + +PartialOrd, + +PartialEq, + +Rem, + +PrintTrait, +>( + ndarray: Span, + ndarray_dims: Span, + ndarray_stride: Span, + x: Span, + border: Span, + padding_mode: PADDING_MODE +) -> T { + let num_dims = ndarray_dims.len(); + + assert(num_dims == x.len(), 'pixel at nd array: wrong dim'); + assert(num_dims == (border.len() / 2), 'pixel at nd array: wrong dim'); + + let i = *x.at(0); + + if num_dims == 1 { + return pixel_at_array(ndarray, *x.at(0), border, padding_mode); + } + + let d = NumberTrait::new_unscaled((*ndarray_dims.at(0)).into(), false); + + let ndarray = match padding_mode { + PADDING_MODE::ZEROS => { + let ndarray = if i >= NumberTrait::zero() && i < d { + SpanTrait::slice( + ndarray, 
i.try_into().unwrap() * *ndarray_stride.at(0), *ndarray_stride.at(0) + ) + } else { + let ndarray: Span = zeros(*ndarray_stride.at(0)); + ndarray + }; + ndarray + }, + PADDING_MODE::BORDER => { + let i = clamp(i, NumberTrait::zero(), d - NumberTrait::one()); + SpanTrait::slice( + ndarray, i.try_into().unwrap() * *ndarray_stride.at(0), *ndarray_stride.at(0) + ) + }, + PADDING_MODE::REFLECTION => { + let i: usize = (gs_reflect(i, *border.at(0), *border.at(num_dims))).try_into().unwrap(); + SpanTrait::slice(ndarray, i * *ndarray_stride.at(0), *ndarray_stride.at(0)) + }, + }; + + let x = SpanTrait::slice(x, 1, x.len() - 1); + let ndarray_dims = SpanTrait::slice(ndarray_dims, 1, ndarray_dims.len() - 1); + let ndarray_stride = SpanTrait::slice(ndarray_stride, 1, ndarray_stride.len() - 1); + + let border1 = SpanTrait::slice(border, 1, num_dims - 1); + let border2 = SpanTrait::slice(border, num_dims + 1, num_dims - 1); + let mut border = ArrayTrait::new(); + border.append_span(border1); + border.append_span(border2); + + return pixel_at_ndarray(ndarray, ndarray_dims, ndarray_stride, x, border.span(), padding_mode); +} + +fn pixel_at_array< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +TensorTrait, + +Mul, + +Add, + +Div, + +Sub, + +TryInto, + +Into, + +PartialOrd, + +PartialEq, + +Rem, + +PrintTrait, +>( + array: Span, i: T, border: Span, padding_mode: PADDING_MODE +) -> T { + let d = NumberTrait::new_unscaled(array.len().into(), false); + + let pixel = match padding_mode { + PADDING_MODE::ZEROS => { + let pixel = if i >= NumberTrait::zero() && i < d { + *array.at(i.try_into().unwrap()) + } else { + NumberTrait::zero() + }; + pixel + }, + PADDING_MODE::BORDER => { + let i = clamp(i, NumberTrait::zero(), d - NumberTrait::one()); + let pixel = *array.at(i.try_into().unwrap()); + pixel + }, + PADDING_MODE::REFLECTION => { + let i: usize = (gs_reflect(i, *border.at(0), *border.at(1))).try_into().unwrap(); + let pixel = *array.at(i); + pixel + }, + }; + + return pixel; +} + 
+fn zeros, +Copy, +NumberTrait>(n: usize) -> Span { + let mut zeros = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n { + break; + } + zeros.append(NumberTrait::zero()); + i += 1; + }; + + return zeros.span(); +} + +fn rint< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +SubEq, + +Rem, + +PartialEq, + +PartialOrd, + +Add, + +Sub +>( + data: Span +) -> Span { + // round to nearest if ties rounds to the nearest even value. + let mut rint = ArrayTrait::new(); + let two: T = NumberTrait::one() + NumberTrait::one(); + + let mut i = 0; + loop { + if i == data.len() { + break; + } + let x = *data.at(i); + let mut round = NumberTrait::round(x); + + let diff = round - x; + if diff == NumberTrait::half() { + if round % two != NumberTrait::zero() { + round -= NumberTrait::one() + } + } + rint.append(round); + i += 1; + }; + + return rint.span(); +} + +fn clamp, +Copy, +NumberTrait, +PartialOrd>( + val: T, low: T, high: T +) -> T { + if val < low { + return low; + } + if val > high { + return high; + } + return val; +} + +fn gs_reflect< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +PartialOrd, + +PartialEq, + +Add, + +Sub, + +Div, + +Mul, + +Rem, + +PrintTrait, +>( + x: T, x_min: T, x_max: T +) -> T { + let two: T = NumberTrait::one() + NumberTrait::one(); + let mut fx = x; + let rng = x_max - x_min; + + let fx = if fx < x_min { + let dx = x_min - fx; + let n = NumberTrait::floor(dx / rng); + let r = dx - n * rng; + let fx = if NumberTrait::round(n % two) == NumberTrait::zero() { + x_min + r + } else { + x_max - r + }; + fx + } else if fx > x_max { + let dx = fx - x_max; + let n = NumberTrait::floor(dx / rng); + let r = dx - n * rng; + + let fx = if NumberTrait::round(n % two) == NumberTrait::zero() { + x_max - r + } else { + x_min + r + }; + fx + } else { + fx + }; + + return fx; +} + + +fn reverse, +Drop,>(data: Span) -> Span { + let mut rev = ArrayTrait::new(); + let mut i = data.len(); + loop { + if i == 0 { + break; + } + rev.append(*data.at(i - 1)); + + i 
-= 1; + }; + + return rev.span(); +} + +fn get_sub, +Drop,>( + data: Span, stride_data: Span, index: Span, +) -> Span { + let mut acc_indices = 0; + let mut i = 0; + loop { + if i == index.len() { + break; + } + acc_indices += *index.at(i) * *stride_data.at(i); + + i += 1; + }; + + return SpanTrait::slice(data, acc_indices, *stride_data.at(index.len() - 1)); +} + + +fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( + pA: Span, start: usize +) -> T { + let mut i = start; + let mut prod = NumberTrait::one(); + loop { + if i == pA.len() { + break; + } + prod = prod * (*pA.at(i)); + i += 1; + }; + return prod; +} + + +fn prepare_border< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +TensorTrait, + +Mul, + +Add, + +Div, + +Sub, + +Into, + +Neg +>( + self: @Tensor, dims: Span, align_corner: usize +) -> Span { + let num_dims = dims.len(); + + let mut borders1 = ArrayTrait::new(); + let mut borders2 = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == num_dims { + break; + } + if align_corner == 0 { + borders1.append(-NumberTrait::half()); + borders2 + .append( + NumberTrait::new_unscaled((*dims.at(i)).into(), false) - NumberTrait::half() + ); + } else { + borders1.append(NumberTrait::zero()); + borders2 + .append( + NumberTrait::new_unscaled((*dims.at(i)).into(), false) - NumberTrait::one() + ); + } + i += 1; + }; + borders1.append_span(borders2.span()); + return borders1.span(); +} + +fn arange(start: usize, end: usize, step: usize) -> Span { + assert((end - start) % step == 0, 'incompatible step value'); + let mut arr = ArrayTrait::new(); + let mut i = start; + loop { + if i >= end { + break; + } + arr.append(i); + i += step; + }; + return arr.span(); +} + + +fn gs_denormalize_coordinates< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +TensorTrait, + +Mul, + +Add, + +Div, + +Sub, + +Into +>( + n: Span, dims: Span, align_corner: usize +) -> Span { + let mut x = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == n.len() { + break; + } + let v = *n.at(i); 
+ let dim = *dims.at(i); + x.append(gs_denormalize(v, dim, align_corner)); + i += 1; + }; + + return x.span(); +} + +fn gs_denormalize< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +TensorTrait, + +Mul, + +Add, + +Div, + +Sub, + +Into +>( + n: T, length: usize, align_corner: usize +) -> T { + let length = NumberTrait::new_unscaled(length.into(), false); + let two: T = NumberTrait::one() + NumberTrait::one(); + + let x = if align_corner == 0 { + ((n + NumberTrait::one()) * length - NumberTrait::one()) / two + } else { + (n + NumberTrait::one()) / two * (length - NumberTrait::one()) + }; + + return x; +} + +fn get_all_coords(shape: Span) -> Span> { + let mut all_indices = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == shape.len() { + break; + } + all_indices.append(arange(0, *shape.at(i), 1)); + i += 1; + }; + + return cartesian(all_indices.span()); +} + +fn cartesian(mut arrays: Span>,) -> Span> { + let mut n = 1; + let mut i = arrays.len() - 1; + loop { + n = n * (*(arrays.at(i))).len(); + if i == 0 { + break; + } + i -= 1; + }; + + let mut i = 0; + let mut size_arrays = ArrayTrait::new(); + let mut m = n; + loop { + if i == arrays.len() { + break; + } + size_arrays.append((*(arrays.at(i))).len()); + + i += 1; + }; + let size_arrays = size_arrays.span(); + let mut output_arrays = ArrayTrait::>::new(); + let mut m = n; + + let mut i = 0; + loop { + if i == arrays.len() { + break; + } + m = m / (*(arrays.at(i))).len(); + let mut out = repeat(*(arrays.at(i)), m); + out = repeat_2(out, size_arrays, i); + + output_arrays.append(out); + i += 1; + }; + let output_arrays = output_arrays.span(); + + let mut i = 0; + let mut ret = ArrayTrait::new(); + loop { + if i == n { + break; + } + let mut j = 0; + let mut x = ArrayTrait::new(); + loop { + if j == arrays.len() { + break; + } + + x.append(*(output_arrays.at(j)).at(i)); + j += 1; + }; + ret.append(x.span()); + i += 1; + }; + + return ret.span(); +} + + +fn repeat_2(mut array: Array, size_array: Span, index: 
usize) -> Array { + let mut size = array.len(); + let mut i = 0; + loop { + if i == index { + break; + } + let mut j = 1; + loop { + if j == *size_array.at(index - 1 - i) { + break; + } + let mut k = 0; + loop { + if k == size { + break; + } + array.append(*array.at(k)); + k += 1; + }; + j += 1; + }; + size = size * *size_array.at(index - 1 - i); + i += 1; + }; + array +} + +fn repeat(array: Span, m: usize,) -> Array { + let mut out = ArrayTrait::new(); + let mut j = 0; + loop { + if j == array.len() { + break; + } + let mut k = 0; + loop { + if k == m { + break; + } + out.append(*array.at(j)); + k += 1; + }; + j += 1; + }; + + out +} diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 785d3c9fa..42f49e646 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -72,4 +72,14 @@ impl FP16x16NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn grid_sample( + X: @Tensor, + grid: @Tensor, + align_corner: Option, + mode: Option, + padding_mode: Option, + ) -> Tensor { + functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) + } } diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index 0427ea5f7..f6f6b53b3 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -66,4 +66,14 @@ impl FP32x32NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn grid_sample( + X: @Tensor, + grid: @Tensor, + align_corner: Option, + mode: Option, + padding_mode: Option, + ) -> Tensor { + functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) + } } diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index fec810679..d5e6f057d 100644 
--- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -66,4 +66,14 @@ impl FP64x64NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn grid_sample( + X: @Tensor, + grid: @Tensor, + align_corner: Option, + mode: Option, + padding_mode: Option, + ) -> Tensor { + functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) + } } diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index 9f5416121..434023564 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -70,4 +70,14 @@ impl FP8x23NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn grid_sample( + X: @Tensor, + grid: @Tensor, + align_corner: Option, + mode: Option, + padding_mode: Option, + ) -> Tensor { + functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) + } } diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index 1db66a1c6..b50765ff1 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -61,4 +61,14 @@ impl I32NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn grid_sample( + X: @Tensor, + grid: @Tensor, + align_corner: Option, + mode: Option, + padding_mode: Option, + ) -> Tensor { + panic(array!['not supported!']) + } } diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index e67bb7504..96313eace 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -61,4 +61,14 @@ impl I8NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn grid_sample( + X: @Tensor, + grid: 
@Tensor, + align_corner: Option, + mode: Option, + padding_mode: Option, + ) -> Tensor { + panic(array!['not supported!']) + } } diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 370880e8d..a62d3857d 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -61,4 +61,14 @@ impl U32NN of NNTrait { ) -> Tensor { functional::gemm::gemm(A, B, C, alpha, beta, transA, transB) } + + fn grid_sample( + X: @Tensor, + grid: @Tensor, + align_corner: Option, + mode: Option, + padding_mode: Option, + ) -> Tensor { + panic(array!['not supported!']) + } } diff --git a/src/operators/sequence/functional/sequence_at.cairo b/src/operators/sequence/functional/sequence_at.cairo index 7953abb9d..4a4aa9203 100644 --- a/src/operators/sequence/functional/sequence_at.cairo +++ b/src/operators/sequence/functional/sequence_at.cairo @@ -8,7 +8,9 @@ use orion::numbers::{NumberTrait, I32IntoU32, U32IntoI32}; fn sequence_at, impl TCopy: Copy, impl TDrop: Drop>( sequence: Array>, position: Tensor ) -> Tensor { - assert(position.shape.len() == 0 && position.data.len().into() == 1, 'Position must be a scalar'); + assert( + position.shape.len() == 0 && position.data.len().into() == 1, 'Position must be a scalar' + ); let position_value_i32: i32 = *position.data.at(0); let is_negative: bool = position_value_i32 < 0; diff --git a/src/operators/sequence/functional/sequence_erase.cairo b/src/operators/sequence/functional/sequence_erase.cairo index dd2a2aad6..573087b1f 100644 --- a/src/operators/sequence/functional/sequence_erase.cairo +++ b/src/operators/sequence/functional/sequence_erase.cairo @@ -3,7 +3,7 @@ use core::option::OptionTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::I32Tensor; -use orion::numbers::{ NumberTrait, I32IntoU32}; +use orion::numbers::{NumberTrait, I32IntoU32}; /// Cf: SequenceTrait::sequence_erase docstring fn 
sequence_erase, impl TCopy: Copy, impl TDrop: Drop>( @@ -56,4 +56,3 @@ fn sequence_erase, impl TCopy: Copy, impl TDr return output_sequence; } - diff --git a/src/operators/sequence/functional/sequence_insert.cairo b/src/operators/sequence/functional/sequence_insert.cairo index 256a1b91c..412fc6c4b 100644 --- a/src/operators/sequence/functional/sequence_insert.cairo +++ b/src/operators/sequence/functional/sequence_insert.cairo @@ -3,7 +3,7 @@ use core::option::OptionTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; use orion::operators::tensor::I32Tensor; -use orion::numbers::{ NumberTrait, I32IntoU32}; +use orion::numbers::{NumberTrait, I32IntoU32}; /// Cf: SequenceTrait::sequence_insert docstring fn sequence_insert, impl TCopy: Copy, impl TDrop: Drop>( @@ -55,4 +55,4 @@ fn sequence_insert, impl TCopy: Copy, impl TD }; return new_sequence; -} \ No newline at end of file +} diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 70344eb97..4245b418f 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -5559,10 +5559,14 @@ fn squeeze(self: @Tensor, axes: Option>) -> Tensor { let mut reshape: Array = ArrayTrait::new(); let mut index = 0_i32; let axis = if *axis < 0 { - assert(*axis <= (*self.shape).len().into(), 'axis out of accepted range'); + assert( + *axis <= (*self.shape).len().into(), 'axis out of accepted range' + ); (*self.shape).len().into() - *axis } else { - assert(*axis < (*self.shape).len().into(), 'axis out of accepted range'); + assert( + *axis < (*self.shape).len().into(), 'axis out of accepted range' + ); *axis }; diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo index 8c7e2b359..894dfc8d4 100644 --- a/src/operators/tensor/helpers.cairo +++ b/src/operators/tensor/helpers.cairo @@ -496,4 +496,4 @@ impl SpanPartialOrd, +Copy, +PartialEq, +PartialOrd> of Par fn lt(lhs: Span, rhs: Span) -> bool { span_cmp(lhs, rhs) < 0 } -} \ No newline at end 
of file +} diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 50383d2df..890a2d3b2 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -3,7 +3,7 @@ use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::{TryInto, Into}; -use orion::numbers::{ I32Div, I32DivEq }; +use orion::numbers::{I32Div, I32DivEq}; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ @@ -221,13 +221,7 @@ impl I32Tensor of TensorTrait { fn quantize_linear( self: @Tensor, y_scale: @Tensor, y_zero_point: @Tensor ) -> Tensor:: { - quantization::quantize_linear::quantize_linear( - self, - y_scale, - y_zero_point, - -127, - 127 - ) + quantization::quantize_linear::quantize_linear(self, y_scale, y_zero_point, -127, 127) } fn dequantize_linear( diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 7e81d90eb..9366a0347 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -3,7 +3,7 @@ use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::{TryInto, Into}; -use orion::numbers::{ I8Div, I8DivEq }; +use orion::numbers::{I8Div, I8DivEq}; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::helpers::SpanPartialOrd; use orion::operators::tensor::core::{ diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo index 372d5b1c2..bb0d9579b 100644 --- a/src/operators/tensor/math/layer_normalization.cairo +++ b/src/operators/tensor/math/layer_normalization.cairo @@ -3,7 +3,7 @@ use core::array::ArrayTrait; use core::array::SpanTrait; use core::option::OptionTrait; use core::traits::Into; -use 
orion::numbers::{ NumberTrait, I32IntoU32}; +use orion::numbers::{NumberTrait, I32IntoU32}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; @@ -51,7 +51,6 @@ fn layer_normalization< Option::None => 1, }; - let axis = if axis < 0 { X_rank - axis.into() } else { diff --git a/src/test_helper/tensor/i32.cairo b/src/test_helper/tensor/i32.cairo index 0451fa442..89979eef0 100644 --- a/src/test_helper/tensor/i32.cairo +++ b/src/test_helper/tensor/i32.cairo @@ -93,7 +93,7 @@ fn i32_tensor_3x3_neg_helper() -> Tensor { sizes.append(3); let mut data = ArrayTrait::new(); - + data.append(0_i32); data.append(-1_i32); data.append(-2_i32); @@ -338,7 +338,6 @@ fn i32_tensor_3x3x3_helper() -> Tensor { data.append(24_i32); data.append(25_i32); data.append(26_i32); - let tensor = TensorTrait::new(sizes.span(), data.span()); diff --git a/src/test_helper/tensor/i8.cairo b/src/test_helper/tensor/i8.cairo index e492ad913..6d85e4b3e 100644 --- a/src/test_helper/tensor/i8.cairo +++ b/src/test_helper/tensor/i8.cairo @@ -93,7 +93,7 @@ fn i8_tensor_3x3_neg_helper() -> Tensor { sizes.append(3); let mut data = ArrayTrait::new(); - + data.append(0_i8); data.append(-1_i8); data.append(-2_i8); @@ -338,7 +338,6 @@ fn i8_tensor_3x3x3_helper() -> Tensor { data.append(24_i8); data.append(25_i8); data.append(26_i8); - let tensor = TensorTrait::new(sizes.span(), data.span()); diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 6c70b42cb..22d34b5e3 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -936,3 +936,11 @@ mod split_fp16x16_2d_variable_parts; mod split_fp16x16_zero_size; mod split_fp16x16_1d_uneven; mod split_fp16x16_2d_uneven; +mod grid_sample; +mod grid_sample_cubic; +mod grid_sample_aligncorners; +mod grid_sample_nearest; +mod grid_sample_nearest_aligncorner; +mod grid_sample_padding_border; +mod grid_sample_padding_reflection; +mod grid_sample_padding_zeros; diff --git a/tests/nodes/clip_fp16x16_2d.cairo 
b/tests/nodes/clip_fp16x16_2d.cairo index d779d2790..b576203eb 100644 --- a/tests/nodes/clip_fp16x16_2d.cairo +++ b/tests/nodes/clip_fp16x16_2d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp16x16_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP16x16 { mag: 655360, sign: true }), Option::Some(FP16x16 { mag: 1310720, sign: false })); + let y = input_0 + .clip( + Option::Some(FP16x16 { mag: 655360, sign: true }), + Option::Some(FP16x16 { mag: 1310720, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/clip_fp16x16_3d.cairo b/tests/nodes/clip_fp16x16_3d.cairo index d82de09dc..98bed1a61 100644 --- a/tests/nodes/clip_fp16x16_3d.cairo +++ b/tests/nodes/clip_fp16x16_3d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp16x16_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP16x16 { mag: 655360, sign: true }), Option::Some(FP16x16 { mag: 1310720, sign: false })); + let y = input_0 + .clip( + Option::Some(FP16x16 { mag: 655360, sign: true }), + Option::Some(FP16x16 { mag: 1310720, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/clip_fp8x23_2d.cairo b/tests/nodes/clip_fp8x23_2d.cairo index 64f1792a1..60b38b565 100644 --- a/tests/nodes/clip_fp8x23_2d.cairo +++ b/tests/nodes/clip_fp8x23_2d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp8x23_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP8x23 { mag: 83886080, sign: true }), Option::Some(FP8x23 { mag: 167772160, sign: false })); + let y = input_0 + .clip( + Option::Some(FP8x23 { mag: 83886080, sign: true }), + Option::Some(FP8x23 { mag: 167772160, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/clip_fp8x23_3d.cairo b/tests/nodes/clip_fp8x23_3d.cairo index 511b33859..cc80a61d7 100644 --- a/tests/nodes/clip_fp8x23_3d.cairo +++ b/tests/nodes/clip_fp8x23_3d.cairo @@ -15,7 +15,11 @@ fn test_clip_fp8x23_3d() { let input_0 = 
input_0::input_0(); let z = output_0::output_0(); - let y = input_0.clip(Option::Some(FP8x23 { mag: 83886080, sign: true }), Option::Some(FP8x23 { mag: 167772160, sign: false })); + let y = input_0 + .clip( + Option::Some(FP8x23 { mag: 83886080, sign: true }), + Option::Some(FP8x23 { mag: 167772160, sign: false }) + ); assert_eq(y, z); } diff --git a/tests/nodes/compress_fp16x16_3d_axis1.cairo b/tests/nodes/compress_fp16x16_3d_axis1.cairo index 2463dfa93..4189bd1e9 100644 --- a/tests/nodes/compress_fp16x16_3d_axis1.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_axis2.cairo b/tests/nodes/compress_fp16x16_3d_axis2.cairo index a425e0988..e17e6bed4 100644 --- a/tests/nodes/compress_fp16x16_3d_axis2.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_axis3.cairo b/tests/nodes/compress_fp16x16_3d_axis3.cairo index 3ad15cc97..fa9efb511 100644 --- a/tests/nodes/compress_fp16x16_3d_axis3.cairo +++ b/tests/nodes/compress_fp16x16_3d_axis3.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_axis3() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(3)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(3)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_default.cairo 
b/tests/nodes/compress_fp16x16_3d_default.cairo index 4bff29c09..0a8b68bf2 100644 --- a/tests/nodes/compress_fp16x16_3d_default.cairo +++ b/tests/nodes/compress_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp16x16_3d_noaxis.cairo b/tests/nodes/compress_fp16x16_3d_noaxis.cairo index e637f47c8..4e1b1620e 100644 --- a/tests/nodes/compress_fp16x16_3d_noaxis.cairo +++ b/tests/nodes/compress_fp16x16_3d_noaxis.cairo @@ -18,7 +18,7 @@ fn test_compress_fp16x16_3d_noaxis() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::None(())); + let y_0 = input_0.compress(condition: input_1, axis: Option::None(())); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_axis1.cairo b/tests/nodes/compress_fp8x23_3d_axis1.cairo index 24829c58f..03bdc8815 100644 --- a/tests/nodes/compress_fp8x23_3d_axis1.cairo +++ b/tests/nodes/compress_fp8x23_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_axis2.cairo b/tests/nodes/compress_fp8x23_3d_axis2.cairo index c4cf9a814..ca6bc4ec6 100644 --- a/tests/nodes/compress_fp8x23_3d_axis2.cairo +++ b/tests/nodes/compress_fp8x23_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: 
input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_fp8x23_3d_default.cairo b/tests/nodes/compress_fp8x23_3d_default.cairo index 6f590b622..f9acf8b7b 100644 --- a/tests/nodes/compress_fp8x23_3d_default.cairo +++ b/tests/nodes/compress_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_axis1.cairo b/tests/nodes/compress_i32_3d_axis1.cairo index e3d6a8072..6d3142fec 100644 --- a/tests/nodes/compress_i32_3d_axis1.cairo +++ b/tests/nodes/compress_i32_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_axis2.cairo b/tests/nodes/compress_i32_3d_axis2.cairo index 3ae5828c8..242aef0ae 100644 --- a/tests/nodes/compress_i32_3d_axis2.cairo +++ b/tests/nodes/compress_i32_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i32_3d_default.cairo b/tests/nodes/compress_i32_3d_default.cairo index dde8e15cf..ab19213b0 100644 --- a/tests/nodes/compress_i32_3d_default.cairo +++ b/tests/nodes/compress_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, 
axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_axis1.cairo b/tests/nodes/compress_i8_3d_axis1.cairo index 8fd8bb267..4ab02896a 100644 --- a/tests/nodes/compress_i8_3d_axis1.cairo +++ b/tests/nodes/compress_i8_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_axis2.cairo b/tests/nodes/compress_i8_3d_axis2.cairo index 220210744..f0dbaef06 100644 --- a/tests/nodes/compress_i8_3d_axis2.cairo +++ b/tests/nodes/compress_i8_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_i8_3d_default.cairo b/tests/nodes/compress_i8_3d_default.cairo index b802e589c..e4ad1fbc8 100644 --- a/tests/nodes/compress_i8_3d_default.cairo +++ b/tests/nodes/compress_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_compress_i8_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis1.cairo b/tests/nodes/compress_u32_3d_axis1.cairo index 136f8b8ce..41a2adc63 100644 --- a/tests/nodes/compress_u32_3d_axis1.cairo +++ b/tests/nodes/compress_u32_3d_axis1.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = 
input_0.compress(condition:input_1, axis:Option::Some(1)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis2.cairo b/tests/nodes/compress_u32_3d_axis2.cairo index 347e36676..801886380 100644 --- a/tests/nodes/compress_u32_3d_axis2.cairo +++ b/tests/nodes/compress_u32_3d_axis2.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis2_2.cairo b/tests/nodes/compress_u32_3d_axis2_2.cairo index abc515486..c5a20dbc2 100644 --- a/tests/nodes/compress_u32_3d_axis2_2.cairo +++ b/tests/nodes/compress_u32_3d_axis2_2.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis2_2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(2)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_axis3.cairo b/tests/nodes/compress_u32_3d_axis3.cairo index 10e1e507e..4edd5c8dc 100644 --- a/tests/nodes/compress_u32_3d_axis3.cairo +++ b/tests/nodes/compress_u32_3d_axis3.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_axis3() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(3)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(3)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/compress_u32_3d_default.cairo b/tests/nodes/compress_u32_3d_default.cairo index ce12adac8..32068f9b7 100644 --- a/tests/nodes/compress_u32_3d_default.cairo +++ b/tests/nodes/compress_u32_3d_default.cairo @@ -16,7 +16,7 @@ fn test_compress_u32_3d_default() { let input_1 = input_1::input_1(); 
let z_0 = output_0::output_0(); - let y_0 = input_0.compress(condition:input_1, axis:Option::Some(0)); + let y_0 = input_0.compress(condition: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_axis1.cairo b/tests/nodes/gather_fp16x16_3d_axis1.cairo index 8c4af9664..429d085d4 100644 --- a/tests/nodes/gather_fp16x16_3d_axis1.cairo +++ b/tests/nodes/gather_fp16x16_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_axis2.cairo b/tests/nodes/gather_fp16x16_3d_axis2.cairo index 0b4f77ed8..cfb8a61d2 100644 --- a/tests/nodes/gather_fp16x16_3d_axis2.cairo +++ b/tests/nodes/gather_fp16x16_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_default.cairo b/tests/nodes/gather_fp16x16_3d_default.cairo index 91c9ebdd4..ee49aac75 100644 --- a/tests/nodes/gather_fp16x16_3d_default.cairo +++ b/tests/nodes/gather_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp8x23_3d_axis1.cairo b/tests/nodes/gather_fp8x23_3d_axis1.cairo index 6a5d1a046..c9c6dcf7f 100644 --- a/tests/nodes/gather_fp8x23_3d_axis1.cairo +++ b/tests/nodes/gather_fp8x23_3d_axis1.cairo @@ -18,7 +18,7 @@ fn 
test_gather_fp8x23_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp8x23_3d_axis2.cairo b/tests/nodes/gather_fp8x23_3d_axis2.cairo index d5a913163..726411dd2 100644 --- a/tests/nodes/gather_fp8x23_3d_axis2.cairo +++ b/tests/nodes/gather_fp8x23_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_fp8x23_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp8x23_3d_default.cairo b/tests/nodes/gather_fp8x23_3d_default.cairo index 7f9492f8d..e844827f9 100644 --- a/tests/nodes/gather_fp8x23_3d_default.cairo +++ b/tests/nodes/gather_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i32_3d_axis1.cairo b/tests/nodes/gather_i32_3d_axis1.cairo index 8b1777d8f..6dbb78c47 100644 --- a/tests/nodes/gather_i32_3d_axis1.cairo +++ b/tests/nodes/gather_i32_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_i32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i32_3d_axis2.cairo b/tests/nodes/gather_i32_3d_axis2.cairo index bdc557d7a..29bd217b3 100644 --- a/tests/nodes/gather_i32_3d_axis2.cairo +++ b/tests/nodes/gather_i32_3d_axis2.cairo @@ -18,7 +18,7 @@ fn 
test_gather_i32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i32_3d_default.cairo b/tests/nodes/gather_i32_3d_default.cairo index 9288c3dab..4c0b9c9bd 100644 --- a/tests/nodes/gather_i32_3d_default.cairo +++ b/tests/nodes/gather_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i8_3d_axis1.cairo b/tests/nodes/gather_i8_3d_axis1.cairo index 10dd5ce6f..140608123 100644 --- a/tests/nodes/gather_i8_3d_axis1.cairo +++ b/tests/nodes/gather_i8_3d_axis1.cairo @@ -18,7 +18,7 @@ fn test_gather_i8_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i8_3d_axis2.cairo b/tests/nodes/gather_i8_3d_axis2.cairo index 35f50077a..992cee33e 100644 --- a/tests/nodes/gather_i8_3d_axis2.cairo +++ b/tests/nodes/gather_i8_3d_axis2.cairo @@ -18,7 +18,7 @@ fn test_gather_i8_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_i8_3d_default.cairo b/tests/nodes/gather_i8_3d_default.cairo index 5bc437a7b..0f8e6dec2 100644 --- a/tests/nodes/gather_i8_3d_default.cairo +++ b/tests/nodes/gather_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_i8_3d_default() { let input_1 = 
input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo b/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo index 86de6e9b9..037d2ad93 100644 --- a/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_fp16x16_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp16x16_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo b/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo index d2ac3b2ce..3661bb6c5 100644 --- a/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo +++ b/tests/nodes/gather_nd_fp16x16_3d_batch_dims2.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp16x16_3d_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp16x16_3d_default.cairo b/tests/nodes/gather_nd_fp16x16_3d_default.cairo index 157266adb..60f116c86 100644 --- a/tests/nodes/gather_nd_fp16x16_3d_default.cairo +++ b/tests/nodes/gather_nd_fp16x16_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo 
b/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo index 6da924b6c..c523e0135 100644 --- a/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_fp8x23_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp8x23_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo b/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo index 251d442ba..edb022910 100644 --- a/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo +++ b/tests/nodes/gather_nd_fp8x23_3d_batch_dims2.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp8x23_3d_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_fp8x23_3d_default.cairo b/tests/nodes/gather_nd_fp8x23_3d_default.cairo index 8ce119604..70b25cea1 100644 --- a/tests/nodes/gather_nd_fp8x23_3d_default.cairo +++ b/tests/nodes/gather_nd_fp8x23_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_fp8x23_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo b/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo index 1d275fb4a..923c7f9ba 100644 --- a/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_i32_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i32_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 
= input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo b/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo index 6bfa5cf4a..44ed06b2c 100644 --- a/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo +++ b/tests/nodes/gather_nd_i32_3d_batch_dims2.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i32_3d_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i32_3d_default.cairo b/tests/nodes/gather_nd_i32_3d_default.cairo index 4fa1c55f1..5268e13f4 100644 --- a/tests/nodes/gather_nd_i32_3d_default.cairo +++ b/tests/nodes/gather_nd_i32_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo b/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo index b42d1a430..1d47f72ff 100644 --- a/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo +++ b/tests/nodes/gather_nd_i8_3d_batch_dims1.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i8_3d_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_i8_3d_default.cairo b/tests/nodes/gather_nd_i8_3d_default.cairo index 6ee8e0a9e..f9152f412 100644 --- a/tests/nodes/gather_nd_i8_3d_default.cairo +++ 
b/tests/nodes/gather_nd_i8_3d_default.cairo @@ -18,7 +18,7 @@ fn test_gather_nd_i8_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_batch_dims1.cairo b/tests/nodes/gather_nd_u32_batch_dims1.cairo index d1bfb099c..7689359ee 100644 --- a/tests/nodes/gather_nd_u32_batch_dims1.cairo +++ b/tests/nodes/gather_nd_u32_batch_dims1.cairo @@ -16,7 +16,7 @@ fn test_gather_nd_u32_batch_dims1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(1)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_batch_dims2.cairo b/tests/nodes/gather_nd_u32_batch_dims2.cairo index 2cd029255..4659cfaa7 100644 --- a/tests/nodes/gather_nd_u32_batch_dims2.cairo +++ b/tests/nodes/gather_nd_u32_batch_dims2.cairo @@ -16,7 +16,7 @@ fn test_gather_nd_u32_batch_dims2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(2)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_nd_u32_default.cairo b/tests/nodes/gather_nd_u32_default.cairo index 5893b5017..e226d0eb0 100644 --- a/tests/nodes/gather_nd_u32_default.cairo +++ b/tests/nodes/gather_nd_u32_default.cairo @@ -16,7 +16,7 @@ fn test_gather_nd_u32_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather_nd(indices:input_1, batch_dims:Option::Some(0)); + let y_0 = input_0.gather_nd(indices: input_1, batch_dims: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_u32_3d_axis1.cairo 
b/tests/nodes/gather_u32_3d_axis1.cairo index 641d67f80..1a7a56d37 100644 --- a/tests/nodes/gather_u32_3d_axis1.cairo +++ b/tests/nodes/gather_u32_3d_axis1.cairo @@ -16,7 +16,7 @@ fn test_gather_u32_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_u32_3d_axis2.cairo b/tests/nodes/gather_u32_3d_axis2.cairo index 94f91a138..30d5f6a61 100644 --- a/tests/nodes/gather_u32_3d_axis2.cairo +++ b/tests/nodes/gather_u32_3d_axis2.cairo @@ -16,7 +16,7 @@ fn test_gather_u32_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_u32_3d_default.cairo b/tests/nodes/gather_u32_3d_default.cairo index 7931d3e27..8f223c4af 100644 --- a/tests/nodes/gather_u32_3d_default.cairo +++ b/tests/nodes/gather_u32_3d_default.cairo @@ -16,7 +16,7 @@ fn test_gather_u32_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); + let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gemm_all_attributes.cairo b/tests/nodes/gemm_all_attributes.cairo index c543ddb3b..2cbd9cab3 100644 --- a/tests/nodes/gemm_all_attributes.cairo +++ b/tests/nodes/gemm_all_attributes.cairo @@ -18,7 +18,15 @@ fn test_gemm_all_attributes() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::Some(FixedTrait::new(16384, false)), Option::Some(FixedTrait::new(22938, false)), true, true); + let y = NNTrait::gemm( + input_0, + input_1, + Option::Some(input_2), + 
Option::Some(FixedTrait::new(16384, false)), + Option::Some(FixedTrait::new(22938, false)), + true, + true + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_alpha.cairo b/tests/nodes/gemm_alpha.cairo index 074392584..dad8187f4 100644 --- a/tests/nodes/gemm_alpha.cairo +++ b/tests/nodes/gemm_alpha.cairo @@ -16,7 +16,15 @@ fn test_gemm_alpha() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::Some(FixedTrait::new(32768, false)), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, + input_1, + Option::None(()), + Option::Some(FixedTrait::new(32768, false)), + Option::None(()), + false, + false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_beta.cairo b/tests/nodes/gemm_beta.cairo index 9ec8fe530..9f417e32a 100644 --- a/tests/nodes/gemm_beta.cairo +++ b/tests/nodes/gemm_beta.cairo @@ -18,7 +18,15 @@ fn test_gemm_beta() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::Some(FixedTrait::new(32768, false)), false, false); + let y = NNTrait::gemm( + input_0, + input_1, + Option::Some(input_2), + Option::None(()), + Option::Some(FixedTrait::new(32768, false)), + false, + false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_default_matrix_bias.cairo b/tests/nodes/gemm_default_matrix_bias.cairo index 76c6fff0c..16d00f933 100644 --- a/tests/nodes/gemm_default_matrix_bias.cairo +++ b/tests/nodes/gemm_default_matrix_bias.cairo @@ -18,7 +18,9 @@ fn test_gemm_default_matrix_bias() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_default_no_bias.cairo 
b/tests/nodes/gemm_default_no_bias.cairo index b702bcfc3..ea43cd0fe 100644 --- a/tests/nodes/gemm_default_no_bias.cairo +++ b/tests/nodes/gemm_default_no_bias.cairo @@ -16,7 +16,9 @@ fn test_gemm_default_no_bias() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_default_vector_bias.cairo b/tests/nodes/gemm_default_vector_bias.cairo index 7f4f2646b..24826f739 100644 --- a/tests/nodes/gemm_default_vector_bias.cairo +++ b/tests/nodes/gemm_default_vector_bias.cairo @@ -18,7 +18,9 @@ fn test_gemm_default_vector_bias() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false); + let y = NNTrait::gemm( + input_0, input_1, Option::Some(input_2), Option::None(()), Option::None(()), false, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_transposeA.cairo b/tests/nodes/gemm_transposeA.cairo index c0b49d799..76c4592e4 100644 --- a/tests/nodes/gemm_transposeA.cairo +++ b/tests/nodes/gemm_transposeA.cairo @@ -16,7 +16,9 @@ fn test_gemm_transposeA() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), true, false); + let y = NNTrait::gemm( + input_0, input_1, Option::None(()), Option::None(()), Option::None(()), true, false + ); assert_eq(y, z); } diff --git a/tests/nodes/gemm_transposeB.cairo b/tests/nodes/gemm_transposeB.cairo index 4c7ccbef4..1728fd014 100644 --- a/tests/nodes/gemm_transposeB.cairo +++ b/tests/nodes/gemm_transposeB.cairo @@ -16,7 +16,9 @@ fn test_gemm_transposeB() { let input_1 = input_1::input_1(); let z = output_0::output_0(); - 
let y = NNTrait::gemm(input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, true); + let y = NNTrait::gemm( + input_0, input_1, Option::None(()), Option::None(()), Option::None(()), false, true + ); assert_eq(y, z); } diff --git a/tests/nodes/grid_sample.cairo b/tests/nodes/grid_sample.cairo new file mode 100644 index 000000000..f1f536c26 --- /dev/null +++ b/tests/nodes/grid_sample.cairo @@ -0,0 +1,22 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::FP16x16NN; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; + +#[test] +#[available_gas(2000000000)] +fn test_grid_sample() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::grid_sample(@input_0, @input_1, Option::None, Option::None, Option::None); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/grid_sample/input_0.cairo b/tests/nodes/grid_sample/input_0.cairo new file mode 100644 index 000000000..6cad74813 --- /dev/null +++ b/tests/nodes/grid_sample/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: 
false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample/input_1.cairo b/tests/nodes/grid_sample/input_1.cairo new file mode 100644 index 000000000..4c7cbf0f0 --- /dev/null +++ b/tests/nodes/grid_sample/input_1.cairo @@ -0,0 +1,87 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(6); + shape.append(6); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 39321, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 39321, sign: 
true }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 39321, sign: false }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 39321, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 39321, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 39321, sign: false }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 39321, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 39321, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 39321, sign: false }); + data.append(FP16x16 { mag: 
39321, sign: false }); + data.append(FP16x16 { mag: 39321, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 39321, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 39321, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 39321, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample/output_0.cairo b/tests/nodes/grid_sample/output_0.cairo new file mode 100644 index 000000000..9a7d8cf99 --- /dev/null +++ b/tests/nodes/grid_sample/output_0.cairo @@ -0,0 +1,51 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(6); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 9830, sign: false }); + data.append(FP16x16 { mag: 36044, sign: false }); + data.append(FP16x16 { mag: 62259, sign: false }); + data.append(FP16x16 { mag: 88473, sign: false }); + data.append(FP16x16 { mag: 49152, sign: false }); + data.append(FP16x16 { mag: 39321, sign: false }); + data.append(FP16x16 { mag: 98303, sign: false }); + data.append(FP16x16 { mag: 150732, sign: false }); + data.append(FP16x16 { mag: 203161, sign: false }); + data.append(FP16x16 { 
mag: 255590, sign: false }); + data.append(FP16x16 { mag: 137625, sign: false }); + data.append(FP16x16 { mag: 144179, sign: false }); + data.append(FP16x16 { mag: 308019, sign: false }); + data.append(FP16x16 { mag: 360448, sign: false }); + data.append(FP16x16 { mag: 412876, sign: false }); + data.append(FP16x16 { mag: 465305, sign: false }); + data.append(FP16x16 { mag: 242483, sign: false }); + data.append(FP16x16 { mag: 249036, sign: false }); + data.append(FP16x16 { mag: 517734, sign: false }); + data.append(FP16x16 { mag: 570163, sign: false }); + data.append(FP16x16 { mag: 622592, sign: false }); + data.append(FP16x16 { mag: 675020, sign: false }); + data.append(FP16x16 { mag: 347340, sign: false }); + data.append(FP16x16 { mag: 353894, sign: false }); + data.append(FP16x16 { mag: 727449, sign: false }); + data.append(FP16x16 { mag: 779878, sign: false }); + data.append(FP16x16 { mag: 832307, sign: false }); + data.append(FP16x16 { mag: 884736, sign: false }); + data.append(FP16x16 { mag: 452198, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 403046, sign: false }); + data.append(FP16x16 { mag: 429260, sign: false }); + data.append(FP16x16 { mag: 455475, sign: false }); + data.append(FP16x16 { mag: 481689, sign: false }); + data.append(FP16x16 { mag: 245760, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_aligncorners.cairo b/tests/nodes/grid_sample_aligncorners.cairo new file mode 100644 index 000000000..a2fbed701 --- /dev/null +++ b/tests/nodes/grid_sample_aligncorners.cairo @@ -0,0 +1,22 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::FP16x16NN; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; + +#[test] +#[available_gas(2000000000)] +fn test_grid_sample_aligncorners() { + let input_0 = 
input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::grid_sample(@input_0, @input_1, Option::Some(1), Option::None, Option::None); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/grid_sample_aligncorners/input_0.cairo b/tests/nodes/grid_sample_aligncorners/input_0.cairo new file mode 100644 index 000000000..b144a3463 --- /dev/null +++ b/tests/nodes/grid_sample_aligncorners/input_0.cairo @@ -0,0 +1,21 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_aligncorners/input_1.cairo b/tests/nodes/grid_sample_aligncorners/input_1.cairo new file mode 100644 index 000000000..0cb0affba --- /dev/null +++ b/tests/nodes/grid_sample_aligncorners/input_1.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(4); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 32768, 
sign: true }); + data.append(FP16x16 { mag: 32768, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_aligncorners/output_0.cairo b/tests/nodes/grid_sample_aligncorners/output_0.cairo new file mode 100644 index 000000000..1a5af6443 --- /dev/null +++ b/tests/nodes/grid_sample_aligncorners/output_0.cairo @@ -0,0 +1,23 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 81920, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 163840, sign: false }); + data.append(FP16x16 { mag: 163840, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 245760, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_cubic.cairo b/tests/nodes/grid_sample_cubic.cairo new file mode 100644 index 000000000..fc790006b --- /dev/null +++ 
b/tests/nodes/grid_sample_cubic.cairo @@ -0,0 +1,25 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::FP16x16NN; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; +use orion::operators::nn::functional::grid_sample::MODE; + +#[test] +#[available_gas(2000000000)] +fn test_grid_sample_cubic() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::grid_sample( + @input_0, @input_1, Option::Some(0), Option::Some(MODE::CUBIC), Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/grid_sample_cubic/input_0.cairo b/tests/nodes/grid_sample_cubic/input_0.cairo new file mode 100644 index 000000000..b144a3463 --- /dev/null +++ b/tests/nodes/grid_sample_cubic/input_0.cairo @@ -0,0 +1,21 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_cubic/input_1.cairo b/tests/nodes/grid_sample_cubic/input_1.cairo new file mode 100644 index 000000000..0cb0affba --- /dev/null +++ b/tests/nodes/grid_sample_cubic/input_1.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, 
Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(4); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 32768, sign: true }); + data.append(FP16x16 { mag: 32768, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_cubic/output_0.cairo b/tests/nodes/grid_sample_cubic/output_0.cairo new file mode 100644 index 000000000..aa2e9231e --- /dev/null +++ b/tests/nodes/grid_sample_cubic/output_0.cairo @@ -0,0 +1,23 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 9216, sign: true }); + data.append(FP16x16 { mag: 25088, sign: false }); + data.append(FP16x16 { mag: 115051, sign: false }); + data.append(FP16x16 { mag: 194560, sign: false }); + 
data.append(FP16x16 { mag: 194560, sign: false }); + data.append(FP16x16 { mag: 115051, sign: false }); + data.append(FP16x16 { mag: 337152, sign: false }); + data.append(FP16x16 { mag: 91136, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_nearest.cairo b/tests/nodes/grid_sample_nearest.cairo new file mode 100644 index 000000000..3a5dc4a07 --- /dev/null +++ b/tests/nodes/grid_sample_nearest.cairo @@ -0,0 +1,25 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::FP16x16NN; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; +use orion::operators::nn::functional::grid_sample::MODE; + +#[test] +#[available_gas(2000000000)] +fn test_grid_sample_nearest() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::grid_sample( + @input_0, @input_1, Option::Some(0), Option::Some(MODE::NEAREST), Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/grid_sample_nearest/input_0.cairo b/tests/nodes/grid_sample_nearest/input_0.cairo new file mode 100644 index 000000000..b144a3463 --- /dev/null +++ b/tests/nodes/grid_sample_nearest/input_0.cairo @@ -0,0 +1,21 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false 
}); + data.append(FP16x16 { mag: 327680, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_nearest/input_1.cairo b/tests/nodes/grid_sample_nearest/input_1.cairo new file mode 100644 index 000000000..0cb0affba --- /dev/null +++ b/tests/nodes/grid_sample_nearest/input_1.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(4); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 32768, sign: true }); + data.append(FP16x16 { mag: 32768, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_nearest/output_0.cairo b/tests/nodes/grid_sample_nearest/output_0.cairo new file mode 100644 index 000000000..141bc4aea --- /dev/null +++ b/tests/nodes/grid_sample_nearest/output_0.cairo @@ -0,0 +1,23 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use 
orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_nearest_aligncorner.cairo b/tests/nodes/grid_sample_nearest_aligncorner.cairo new file mode 100644 index 000000000..6e24295b3 --- /dev/null +++ b/tests/nodes/grid_sample_nearest_aligncorner.cairo @@ -0,0 +1,25 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::FP16x16NN; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; +use orion::operators::nn::functional::grid_sample::MODE; + +#[test] +#[available_gas(2000000000)] +fn test_grid_sample_nearest_aligncorner() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::grid_sample( + @input_0, @input_1, Option::Some(1), Option::Some(MODE::NEAREST), Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/grid_sample_nearest_aligncorner/input_0.cairo b/tests/nodes/grid_sample_nearest_aligncorner/input_0.cairo new file mode 100644 index 000000000..b144a3463 --- /dev/null +++ b/tests/nodes/grid_sample_nearest_aligncorner/input_0.cairo @@ -0,0 +1,21 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_nearest_aligncorner/input_1.cairo b/tests/nodes/grid_sample_nearest_aligncorner/input_1.cairo new file mode 100644 index 000000000..0cb0affba --- /dev/null +++ b/tests/nodes/grid_sample_nearest_aligncorner/input_1.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(4); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 32768, sign: true }); + data.append(FP16x16 { mag: 32768, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 32768, 
sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_nearest_aligncorner/output_0.cairo b/tests/nodes/grid_sample_nearest_aligncorner/output_0.cairo new file mode 100644 index 000000000..4f580819f --- /dev/null +++ b/tests/nodes/grid_sample_nearest_aligncorner/output_0.cairo @@ -0,0 +1,23 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_padding_border.cairo b/tests/nodes/grid_sample_padding_border.cairo new file mode 100644 index 000000000..135b43eef --- /dev/null +++ b/tests/nodes/grid_sample_padding_border.cairo @@ -0,0 +1,25 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::FP16x16NN; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; +use orion::operators::nn::functional::grid_sample::PADDING_MODE; + +#[test] +#[available_gas(2000000000)] +fn test_grid_sample_padding_border() { + let input_0 = input_0::input_0(); + let input_1 = 
input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::grid_sample( + @input_0, @input_1, Option::None, Option::None, Option::Some(PADDING_MODE::BORDER) + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/grid_sample_padding_border/input_0.cairo b/tests/nodes/grid_sample_padding_border/input_0.cairo new file mode 100644 index 000000000..b144a3463 --- /dev/null +++ b/tests/nodes/grid_sample_padding_border/input_0.cairo @@ -0,0 +1,21 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_padding_border/input_1.cairo b/tests/nodes/grid_sample_padding_border/input_1.cairo new file mode 100644 index 000000000..ed38ea4e4 --- /dev/null +++ b/tests/nodes/grid_sample_padding_border/input_1.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(4); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 655360, sign: true }); + data.append(FP16x16 { mag: 655360, sign: true }); + data.append(FP16x16 { mag: 
327680, sign: true }); + data.append(FP16x16 { mag: 327680, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_padding_border/output_0.cairo b/tests/nodes/grid_sample_padding_border/output_0.cairo new file mode 100644 index 000000000..53913c214 --- /dev/null +++ b/tests/nodes/grid_sample_padding_border/output_0.cairo @@ -0,0 +1,23 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 111411, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 111411, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_padding_reflection.cairo b/tests/nodes/grid_sample_padding_reflection.cairo new file mode 100644 
index 000000000..54590e0ba --- /dev/null +++ b/tests/nodes/grid_sample_padding_reflection.cairo @@ -0,0 +1,25 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::FP16x16NN; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; +use orion::operators::nn::functional::grid_sample::PADDING_MODE; + +#[test] +#[available_gas(2000000000)] +fn test_grid_sample_padding_reflection() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::grid_sample( + @input_0, @input_1, Option::None, Option::None, Option::Some(PADDING_MODE::REFLECTION) + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/grid_sample_padding_reflection/input_0.cairo b/tests/nodes/grid_sample_padding_reflection/input_0.cairo new file mode 100644 index 000000000..b144a3463 --- /dev/null +++ b/tests/nodes/grid_sample_padding_reflection/input_0.cairo @@ -0,0 +1,21 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_padding_reflection/input_1.cairo b/tests/nodes/grid_sample_padding_reflection/input_1.cairo new file mode 100644 index 000000000..ed38ea4e4 --- /dev/null +++ 
b/tests/nodes/grid_sample_padding_reflection/input_1.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(4); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 655360, sign: true }); + data.append(FP16x16 { mag: 655360, sign: true }); + data.append(FP16x16 { mag: 327680, sign: true }); + data.append(FP16x16 { mag: 327680, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_padding_reflection/output_0.cairo b/tests/nodes/grid_sample_padding_reflection/output_0.cairo new file mode 100644 index 000000000..1fa9d7870 --- /dev/null +++ b/tests/nodes/grid_sample_padding_reflection/output_0.cairo @@ -0,0 +1,23 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(4); + + let mut data = 
ArrayTrait::new(); + data.append(FP16x16 { mag: 163840, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 111411, sign: false }); + data.append(FP16x16 { mag: 163840, sign: false }); + data.append(FP16x16 { mag: 163840, sign: false }); + data.append(FP16x16 { mag: 111411, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 163840, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_padding_zeros.cairo b/tests/nodes/grid_sample_padding_zeros.cairo new file mode 100644 index 000000000..b7ff7c6b3 --- /dev/null +++ b/tests/nodes/grid_sample_padding_zeros.cairo @@ -0,0 +1,23 @@ +mod input_0; +mod input_1; +mod output_0; + + +use orion::operators::nn::FP16x16NN; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::NNTrait; +use orion::operators::nn::functional::grid_sample::PADDING_MODE; + +#[test] +#[available_gas(2000000000)] +fn test_grid_sample_padding_zeros() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::grid_sample(@input_0, @input_1, Option::None, Option::None, Option::None); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/grid_sample_padding_zeros/input_0.cairo b/tests/nodes/grid_sample_padding_zeros/input_0.cairo new file mode 100644 index 000000000..b144a3463 --- /dev/null +++ b/tests/nodes/grid_sample_padding_zeros/input_0.cairo @@ -0,0 +1,21 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + 
data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_padding_zeros/input_1.cairo b/tests/nodes/grid_sample_padding_zeros/input_1.cairo new file mode 100644 index 000000000..ed38ea4e4 --- /dev/null +++ b/tests/nodes/grid_sample_padding_zeros/input_1.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(4); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 655360, sign: true }); + data.append(FP16x16 { mag: 655360, sign: true }); + data.append(FP16x16 { mag: 327680, sign: true }); + data.append(FP16x16 { mag: 327680, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/grid_sample_padding_zeros/output_0.cairo 
b/tests/nodes/grid_sample_padding_zeros/output_0.cairo new file mode 100644 index 000000000..1a82d1b43 --- /dev/null +++ b/tests/nodes/grid_sample_padding_zeros/output_0.cairo @@ -0,0 +1,23 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 111411, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 111411, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/hard_sigmoid_fp16x16.cairo b/tests/nodes/hard_sigmoid_fp16x16.cairo index 8a8f8672a..6ad8c8c6c 100644 --- a/tests/nodes/hard_sigmoid_fp16x16.cairo +++ b/tests/nodes/hard_sigmoid_fp16x16.cairo @@ -14,7 +14,9 @@ fn test_hard_sigmoid_fp16x16() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = NNTrait::hard_sigmoid(@input_0, @FixedTrait::new(13107, false), @FixedTrait::new(32768, false)); + let y = NNTrait::hard_sigmoid( + @input_0, @FixedTrait::new(13107, false), @FixedTrait::new(32768, false) + ); assert_eq(y, z); } diff --git a/tests/nodes/hard_sigmoid_fp8x23.cairo b/tests/nodes/hard_sigmoid_fp8x23.cairo index 317c25425..3697b1d7a 100644 --- a/tests/nodes/hard_sigmoid_fp8x23.cairo +++ b/tests/nodes/hard_sigmoid_fp8x23.cairo @@ -14,7 +14,9 @@ fn test_hard_sigmoid_fp8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = NNTrait::hard_sigmoid(@input_0, @FixedTrait::new(1677721, false), 
@FixedTrait::new(4194304, false)); + let y = NNTrait::hard_sigmoid( + @input_0, @FixedTrait::new(1677721, false), @FixedTrait::new(4194304, false) + ); assert_eq(y, z); } diff --git a/tests/nodes/is_nan_fp16x16/input_0.cairo b/tests/nodes/is_nan_fp16x16/input_0.cairo index 576456503..8c86af4fb 100644 --- a/tests/nodes/is_nan_fp16x16/input_0.cairo +++ b/tests/nodes/is_nan_fp16x16/input_0.cairo @@ -15,4 +15,4 @@ fn input_0() -> Tensor { data.append(FixedTrait::NaN()); data.append(FixedTrait::NaN()); TensorTrait::new(shape.span(), data.span()) -} \ No newline at end of file +} diff --git a/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo index 6931c44ec..93373e675 100644 --- a/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis0_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis0_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(0),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(0), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo index 1bdb8700d..72d384de1 100644 --- a/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis1_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis1_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(1),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + 
Option::Some(@input_2), + Option::Some(1), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo index 06505280b..44a5f550d 100644 --- a/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis2_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis2_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(2),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(2), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo index 4c095bf62..0b5b77e17 100644 --- a/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis_negative_1_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis_negative_1_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-1),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(-1), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo index 0be005ddd..5f632aa6e 100644 --- a/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo +++ 
b/tests/nodes/layer_normalization_3d_axis_negative_2_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis_negative_2_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-2),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(-2), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo b/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo index e3c602e1f..d08c443f8 100644 --- a/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo +++ b/tests/nodes/layer_normalization_3d_axis_negative_3_epsilon.cairo @@ -19,7 +19,14 @@ fn test_layer_normalization_3d_axis_negative_3_epsilon() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-3),Option::Some(FixedTrait::new(6554, false)),Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, + Option::Some(@input_2), + Option::Some(-3), + Option::Some(FixedTrait::new(6554, false)), + Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis0.cairo b/tests/nodes/layer_normalization_4d_axis0.cairo index 45a825cd5..279acc624 100644 --- a/tests/nodes/layer_normalization_4d_axis0.cairo +++ b/tests/nodes/layer_normalization_4d_axis0.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis0() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(0),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(0), 
Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis1.cairo b/tests/nodes/layer_normalization_4d_axis1.cairo index e7ee8885c..d8e00b332 100644 --- a/tests/nodes/layer_normalization_4d_axis1.cairo +++ b/tests/nodes/layer_normalization_4d_axis1.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis1() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(1),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(1), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis2.cairo b/tests/nodes/layer_normalization_4d_axis2.cairo index 3bd45e907..65b738957 100644 --- a/tests/nodes/layer_normalization_4d_axis2.cairo +++ b/tests/nodes/layer_normalization_4d_axis2.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis2() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(2),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(2), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis3.cairo b/tests/nodes/layer_normalization_4d_axis3.cairo index 4b173b4f6..fae5a51c7 100644 --- a/tests/nodes/layer_normalization_4d_axis3.cairo +++ b/tests/nodes/layer_normalization_4d_axis3.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis3() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(3),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(3), Option::None, 
Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_1.cairo b/tests/nodes/layer_normalization_4d_axis_negative_1.cairo index d7b04e192..2f879f988 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_1.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_1.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_1() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-1),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(-1), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_2.cairo b/tests/nodes/layer_normalization_4d_axis_negative_2.cairo index 5e17a8b52..718c97ad5 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_2.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_2.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_2() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(2),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(2), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_3.cairo b/tests/nodes/layer_normalization_4d_axis_negative_3.cairo index 4188eec6c..b97678d38 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_3.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_3.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_3() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = 
input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-3),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(-3), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_4d_axis_negative_4.cairo b/tests/nodes/layer_normalization_4d_axis_negative_4.cairo index 5aa5971dc..94be87f32 100644 --- a/tests/nodes/layer_normalization_4d_axis_negative_4.cairo +++ b/tests/nodes/layer_normalization_4d_axis_negative_4.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_4d_axis_negative_4() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(-4),Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::Some(-4), Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_default_axis.cairo b/tests/nodes/layer_normalization_default_axis.cairo index dd792e731..994ab7106 100644 --- a/tests/nodes/layer_normalization_default_axis.cairo +++ b/tests/nodes/layer_normalization_default_axis.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_default_axis() { let input_2 = input_2::input_2(); let z_0 = output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::None, Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/layer_normalization_test.cairo b/tests/nodes/layer_normalization_test.cairo index 631dc6f46..ad8baa5f2 100644 --- a/tests/nodes/layer_normalization_test.cairo +++ b/tests/nodes/layer_normalization_test.cairo @@ -19,7 +19,10 @@ fn test_layer_normalization_test() { let input_2 = input_2::input_2(); let z_0 = 
output_0::output_0(); - let (y_0, _, _) = input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None); + let (y_0, _, _) = input_0 + .layer_normalization( + @input_1, Option::Some(@input_2), Option::None, Option::None, Option::None + ); assert_eq(y_0, z_0); } diff --git a/tests/nodes/scatter_fp16x16_3d_axis1.cairo b/tests/nodes/scatter_fp16x16_3d_axis1.cairo index b471e028c..5173d8bd7 100644 --- a/tests/nodes/scatter_fp16x16_3d_axis1.cairo +++ b/tests/nodes/scatter_fp16x16_3d_axis1.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp16x16_3d_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo b/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo index c6fc48b15..be927416d 100644 --- a/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo +++ b/tests/nodes/scatter_fp16x16_3d_axis1_add.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp16x16_3d_axis1_add() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('add')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('add') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp16x16_3d_default.cairo b/tests/nodes/scatter_fp16x16_3d_default.cairo index c14bbc0a6..b106de54d 100644 --- a/tests/nodes/scatter_fp16x16_3d_default.cairo +++ b/tests/nodes/scatter_fp16x16_3d_default.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp16x16_3d_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, 
axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp8x23_axis1.cairo b/tests/nodes/scatter_fp8x23_axis1.cairo index e0008d409..8ff871c7b 100644 --- a/tests/nodes/scatter_fp8x23_axis1.cairo +++ b/tests/nodes/scatter_fp8x23_axis1.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp8x23_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp8x23_default.cairo b/tests/nodes/scatter_fp8x23_default.cairo index bdaea6568..157aca0bb 100644 --- a/tests/nodes/scatter_fp8x23_default.cairo +++ b/tests/nodes/scatter_fp8x23_default.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp8x23_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_fp8x23_mul.cairo b/tests/nodes/scatter_fp8x23_mul.cairo index 4430bf041..5b2305aee 100644 --- a/tests/nodes/scatter_fp8x23_mul.cairo +++ b/tests/nodes/scatter_fp8x23_mul.cairo @@ -20,7 +20,13 @@ fn test_scatter_fp8x23_mul() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('mul')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('mul') + ); 
assert_eq(y, z); } diff --git a/tests/nodes/scatter_i8_axis1.cairo b/tests/nodes/scatter_i8_axis1.cairo index e143463f1..c42123f3d 100644 --- a/tests/nodes/scatter_i8_axis1.cairo +++ b/tests/nodes/scatter_i8_axis1.cairo @@ -20,7 +20,13 @@ fn test_scatter_i8_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_i8_axis1_max.cairo b/tests/nodes/scatter_i8_axis1_max.cairo index 53dabbe40..844911a8d 100644 --- a/tests/nodes/scatter_i8_axis1_max.cairo +++ b/tests/nodes/scatter_i8_axis1_max.cairo @@ -20,7 +20,13 @@ fn test_scatter_i8_axis1_max() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('max')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('max') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_i8_default.cairo b/tests/nodes/scatter_i8_default.cairo index c41b29d7b..f658268ce 100644 --- a/tests/nodes/scatter_i8_default.cairo +++ b/tests/nodes/scatter_i8_default.cairo @@ -20,7 +20,13 @@ fn test_scatter_i8_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_u32_add.cairo b/tests/nodes/scatter_u32_add.cairo index 735b8fb5e..2b14d68d1 100644 --- a/tests/nodes/scatter_u32_add.cairo +++ b/tests/nodes/scatter_u32_add.cairo @@ -18,7 
+18,13 @@ fn test_scatter_u32_add() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('add')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('add') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_u32_axis1.cairo b/tests/nodes/scatter_u32_axis1.cairo index e2a96e71b..2c85e2a6c 100644 --- a/tests/nodes/scatter_u32_axis1.cairo +++ b/tests/nodes/scatter_u32_axis1.cairo @@ -18,7 +18,13 @@ fn test_scatter_u32_axis1() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(1), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/scatter_u32_default.cairo b/tests/nodes/scatter_u32_default.cairo index 1ccdac72f..5fb16207c 100644 --- a/tests/nodes/scatter_u32_default.cairo +++ b/tests/nodes/scatter_u32_default.cairo @@ -18,7 +18,13 @@ fn test_scatter_u32_default() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none')); + let y = input_0 + .scatter( + updates: input_1, + indices: input_2, + axis: Option::Some(0), + reduction: Option::Some('none') + ); assert_eq(y, z); } diff --git a/tests/nodes/sequence_insert_fp16x16.cairo b/tests/nodes/sequence_insert_fp16x16.cairo index d30b0d3e1..70316ebb9 100644 --- a/tests/nodes/sequence_insert_fp16x16.cairo +++ b/tests/nodes/sequence_insert_fp16x16.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_fp16x16() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = 
input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_fp8x23.cairo b/tests/nodes/sequence_insert_fp8x23.cairo index ad4d12be4..fb474c6d4 100644 --- a/tests/nodes/sequence_insert_fp8x23.cairo +++ b/tests/nodes/sequence_insert_fp8x23.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_fp8x23() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_i32.cairo b/tests/nodes/sequence_insert_i32.cairo index 3a397715d..7bcadba2d 100644 --- a/tests/nodes/sequence_insert_i32.cairo +++ b/tests/nodes/sequence_insert_i32.cairo @@ -18,7 +18,7 @@ fn test_sequence_insert_i32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_i8.cairo b/tests/nodes/sequence_insert_i8.cairo index a304ff2c4..ff1be34fe 100644 --- a/tests/nodes/sequence_insert_i8.cairo +++ b/tests/nodes/sequence_insert_i8.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_i8() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_insert_u32.cairo b/tests/nodes/sequence_insert_u32.cairo index dcd905f72..079d6a4a0 100644 --- a/tests/nodes/sequence_insert_u32.cairo +++ b/tests/nodes/sequence_insert_u32.cairo @@ -20,7 +20,7 @@ fn test_sequence_insert_u32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.sequence_insert(@input_1,Option::Some(input_2)); + let y = input_0.sequence_insert(@input_1, 
Option::Some(input_2)); assert_seq_eq(y, z); } diff --git a/tests/nodes/sequence_length_fp16x16.cairo b/tests/nodes/sequence_length_fp16x16.cairo index d971d5569..559ec3ff6 100644 --- a/tests/nodes/sequence_length_fp16x16.cairo +++ b/tests/nodes/sequence_length_fp16x16.cairo @@ -13,10 +13,10 @@ use orion::operators::sequence::SequenceTrait; #[test] #[available_gas(2000000000)] fn test_sequence_length_fp16x16() { - let input_0 = input_0::input_0(); + let input_0 = input_0::input_0(); let z = output_0::output_0(); let y = input_0.sequence_length(); assert_eq(y, z); -} +} diff --git a/tests/nodes/shrink_hard_fp16x16.cairo b/tests/nodes/shrink_hard_fp16x16.cairo index 0818844b2..2f5ec5312 100644 --- a/tests/nodes/shrink_hard_fp16x16.cairo +++ b/tests/nodes/shrink_hard_fp16x16.cairo @@ -15,7 +15,9 @@ fn test_shrink_hard_fp16x16() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::None(()), Option::Some(FixedTrait::new(65536, false))); + let y = TensorTrait::shrink( + input_0, Option::None(()), Option::Some(FixedTrait::new(65536, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/shrink_hard_fp8x23.cairo b/tests/nodes/shrink_hard_fp8x23.cairo index 3c054f433..c76eec1ec 100644 --- a/tests/nodes/shrink_hard_fp8x23.cairo +++ b/tests/nodes/shrink_hard_fp8x23.cairo @@ -15,7 +15,9 @@ fn test_shrink_hard_fp8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::None(()), Option::Some(FixedTrait::new(8388608, false))); + let y = TensorTrait::shrink( + input_0, Option::None(()), Option::Some(FixedTrait::new(8388608, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/shrink_soft_fp16x16.cairo b/tests/nodes/shrink_soft_fp16x16.cairo index 924ecfde5..aa975069c 100644 --- a/tests/nodes/shrink_soft_fp16x16.cairo +++ b/tests/nodes/shrink_soft_fp16x16.cairo @@ -15,7 +15,11 @@ fn test_shrink_soft_fp16x16() { let input_0 = input_0::input_0(); let z = 
output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::Some(FixedTrait::new(65536, false)), Option::Some(FixedTrait::new(65536, false))); + let y = TensorTrait::shrink( + input_0, + Option::Some(FixedTrait::new(65536, false)), + Option::Some(FixedTrait::new(65536, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/shrink_soft_fp8x23.cairo b/tests/nodes/shrink_soft_fp8x23.cairo index 01a314e10..8413beccd 100644 --- a/tests/nodes/shrink_soft_fp8x23.cairo +++ b/tests/nodes/shrink_soft_fp8x23.cairo @@ -15,7 +15,11 @@ fn test_shrink_soft_fp8x23() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = TensorTrait::shrink(input_0, Option::Some(FixedTrait::new(8388608, false)), Option::Some(FixedTrait::new(8388608, false))); + let y = TensorTrait::shrink( + input_0, + Option::Some(FixedTrait::new(8388608, false)), + Option::Some(FixedTrait::new(8388608, false)) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp16x16_2d.cairo b/tests/nodes/slice_fp16x16_2d.cairo index 5e3d593be..2a95e6e4b 100644 --- a/tests/nodes/slice_fp16x16_2d.cairo +++ b/tests/nodes/slice_fp16x16_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp16x16_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp16x16_3d.cairo b/tests/nodes/slice_fp16x16_3d.cairo index d0b5462c4..a681191ce 100644 --- a/tests/nodes/slice_fp16x16_3d.cairo +++ b/tests/nodes/slice_fp16x16_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp16x16_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 
3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp8x23_2d.cairo b/tests/nodes/slice_fp8x23_2d.cairo index 6a80a5422..56fed5a6a 100644 --- a/tests/nodes/slice_fp8x23_2d.cairo +++ b/tests/nodes/slice_fp8x23_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp8x23_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_fp8x23_3d.cairo b/tests/nodes/slice_fp8x23_3d.cairo index 5c2af30b7..fd5e95485 100644 --- a/tests/nodes/slice_fp8x23_3d.cairo +++ b/tests/nodes/slice_fp8x23_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_fp8x23_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i32_2d.cairo b/tests/nodes/slice_i32_2d.cairo index 082b8f15f..f26a2a809 100644 --- a/tests/nodes/slice_i32_2d.cairo +++ b/tests/nodes/slice_i32_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_i32_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + 
); assert_eq(y, z); } diff --git a/tests/nodes/slice_i32_3d.cairo b/tests/nodes/slice_i32_3d.cairo index 1683e6987..16fd3f51b 100644 --- a/tests/nodes/slice_i32_3d.cairo +++ b/tests/nodes/slice_i32_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_i32_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i8_2d.cairo b/tests/nodes/slice_i8_2d.cairo index fc7f35364..2dc5f6ab4 100644 --- a/tests/nodes/slice_i8_2d.cairo +++ b/tests/nodes/slice_i8_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_i8_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_i8_3d.cairo b/tests/nodes/slice_i8_3d.cairo index ec8ea9ffd..a140d8681 100644 --- a/tests/nodes/slice_i8_3d.cairo +++ b/tests/nodes/slice_i8_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_i8_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_u32_2d.cairo b/tests/nodes/slice_u32_2d.cairo index 27678fc0c..c5ad63061 100644 --- a/tests/nodes/slice_u32_2d.cairo +++ 
b/tests/nodes/slice_u32_2d.cairo @@ -14,7 +14,13 @@ fn test_slice_u32_2d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span())); + let y = input_0 + .slice( + array![0, 2].span(), + array![2, 4].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 1].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/slice_u32_3d.cairo b/tests/nodes/slice_u32_3d.cairo index a3ca0e1bc..08a77cf55 100644 --- a/tests/nodes/slice_u32_3d.cairo +++ b/tests/nodes/slice_u32_3d.cairo @@ -14,7 +14,13 @@ fn test_slice_u32_3d() { let input_0 = input_0::input_0(); let z = output_0::output_0(); - let y = input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span())); + let y = input_0 + .slice( + array![0, 0].span(), + array![3, 10].span(), + Option::Some(array![0, 1].span()), + Option::Some(array![1, 3].span()) + ); assert_eq(y, z); } diff --git a/tests/nodes/where_fp16x16.cairo b/tests/nodes/where_fp16x16.cairo index 05467ef51..ae3416d67 100644 --- a/tests/nodes/where_fp16x16.cairo +++ b/tests/nodes/where_fp16x16.cairo @@ -18,7 +18,7 @@ fn test_where_fp16x16() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_fp16x16_broadcast.cairo b/tests/nodes/where_fp16x16_broadcast.cairo index b0d9b9faa..5df239b78 100644 --- a/tests/nodes/where_fp16x16_broadcast.cairo +++ b/tests/nodes/where_fp16x16_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_fp16x16_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_fp8x23.cairo b/tests/nodes/where_fp8x23.cairo index 
8661bf163..492db3766 100644 --- a/tests/nodes/where_fp8x23.cairo +++ b/tests/nodes/where_fp8x23.cairo @@ -18,7 +18,7 @@ fn test_where_fp8x23() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_fp8x23_broadcast.cairo b/tests/nodes/where_fp8x23_broadcast.cairo index 771c00bf4..112f9ef74 100644 --- a/tests/nodes/where_fp8x23_broadcast.cairo +++ b/tests/nodes/where_fp8x23_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_fp8x23_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i32.cairo b/tests/nodes/where_i32.cairo index 1662b010d..a455f8ac1 100644 --- a/tests/nodes/where_i32.cairo +++ b/tests/nodes/where_i32.cairo @@ -18,7 +18,7 @@ fn test_where_i32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i32_broadcast.cairo b/tests/nodes/where_i32_broadcast.cairo index 53aaf91e2..62891b235 100644 --- a/tests/nodes/where_i32_broadcast.cairo +++ b/tests/nodes/where_i32_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_i32_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_i8.cairo b/tests/nodes/where_i8.cairo index 0627fd33b..6f54a1271 100644 --- a/tests/nodes/where_i8.cairo +++ b/tests/nodes/where_i8.cairo @@ -18,7 +18,7 @@ fn test_where_i8() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git 
a/tests/nodes/where_i8_broadcast.cairo b/tests/nodes/where_i8_broadcast.cairo index 69e02821f..4bcb86a3d 100644 --- a/tests/nodes/where_i8_broadcast.cairo +++ b/tests/nodes/where_i8_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_i8_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_u32.cairo b/tests/nodes/where_u32.cairo index a14d685ac..5f8a3119a 100644 --- a/tests/nodes/where_u32.cairo +++ b/tests/nodes/where_u32.cairo @@ -18,7 +18,7 @@ fn test_where_u32() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/nodes/where_u32_broadcast.cairo b/tests/nodes/where_u32_broadcast.cairo index b810f7143..4aedc56a1 100644 --- a/tests/nodes/where_u32_broadcast.cairo +++ b/tests/nodes/where_u32_broadcast.cairo @@ -18,7 +18,7 @@ fn test_where_u32_broadcast() { let input_2 = input_2::input_2(); let z = output_0::output_0(); - let y = input_0.where(@input_1,@input_2); + let y = input_0.where(@input_1, @input_2); assert_eq(y, z); } diff --git a/tests/operators/qlinear_add_test.cairo b/tests/operators/qlinear_add_test.cairo index 3163fb8e6..fe7f2af47 100644 --- a/tests/operators/qlinear_add_test.cairo +++ b/tests/operators/qlinear_add_test.cairo @@ -13,33 +13,13 @@ fn qlinearadd_test() { i8 >::new( shape: array![4, 2].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 >::new( shape: array![4, 2].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - 10_i8, - 12_i8, - 14_i8, - 16_i8 - ] - .span(), + data: array![2_i8, 4_i8, 6_i8, 8_i8, 10_i8, 12_i8, 14_i8, 16_i8].span(), ); let a_scale = TensorTrait::< @@ -82,30 +62,11 @@ fn 
qlinearadd_broadcast_test() { i8 >::new( shape: array![2, 4].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 - >::new( - shape: array![1, 4].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - ] - .span(), - ); + >::new(shape: array![1, 4].span(), data: array![2_i8, 4_i8, 6_i8, 8_i8,].span(),); let a_scale = TensorTrait::< FP16x16 @@ -146,29 +107,10 @@ fn qlinearadd_broadcast_test() { fn test_example_doc() { let a = TensorTrait::< i8 - >::new( - shape: array![2, 3].span(), - data: array![ - 6_i8, - 6_i8, - 6_i8, - 11_i8, - 11_i8, - 11_i8 - ] - .span(), - ); + >::new(shape: array![2, 3].span(), data: array![6_i8, 6_i8, 6_i8, 11_i8, 11_i8, 11_i8].span(),); let b = TensorTrait::< i8 - >::new( - shape: array![1, 3].span(), - data: array![ - 40_i8, - 40_i8, - 40_i8 - ] - .span(), - ); + >::new(shape: array![1, 3].span(), data: array![40_i8, 40_i8, 40_i8].span(),); let a_scale = TensorTrait::< FP16x16 diff --git a/tests/operators/qlinear_concat_test.cairo b/tests/operators/qlinear_concat_test.cairo index 101cefaa8..4c86b3ff8 100644 --- a/tests/operators/qlinear_concat_test.cairo +++ b/tests/operators/qlinear_concat_test.cairo @@ -19,28 +19,10 @@ fn print_span(mut span: Span) { fn qlinear_concat_test() { let tensor1 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 10_i8, - 20_i8, - 30_i8, - 40_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![10_i8, 20_i8, 30_i8, 40_i8,].span(),); let tensor2 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 20_i8, - 40_i8, - 60_i8, - 80_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![20_i8, 40_i8, 60_i8, 80_i8,].span(),); let tensors = array![tensor1, tensor2].span(); @@ -90,40 +72,13 @@ fn qlinear_concat_test() { fn qlinear_concat_test_shape() { let tensor1 = TensorTrait::< 
i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 2_i8, - 2_i8, - 2_i8, - 2_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![2_i8, 2_i8, 2_i8, 2_i8,].span(),); let tensor2 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 8_i8, - 8_i8, - 8_i8, - 8_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![8_i8, 8_i8, 8_i8, 8_i8,].span(),); let tensor3 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 10_i8, - 10_i8, - 10_i8, - 10_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![10_i8, 10_i8, 10_i8, 10_i8,].span(),); let tensors = array![tensor1, tensor2, tensor3].span(); @@ -177,28 +132,10 @@ fn qlinear_concat_test_shape() { fn qlinear_concat_example_doc() { let tensor1 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 5_i8, - 5_i8, - 5_i8, - 5_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![5_i8, 5_i8, 5_i8, 5_i8,].span(),); let tensor2 = TensorTrait::< i8 - >::new( - shape: array![2, 2].span(), - data: array![ - 1_i8, - 1_i8, - 1_i8, - 1_i8, - ] - .span(), - ); + >::new(shape: array![2, 2].span(), data: array![1_i8, 1_i8, 1_i8, 1_i8,].span(),); let tensors = array![tensor1, tensor2].span(); diff --git a/tests/operators/qlinear_leakyrelu_test.cairo b/tests/operators/qlinear_leakyrelu_test.cairo index 9e6473d06..e180ab33b 100644 --- a/tests/operators/qlinear_leakyrelu_test.cairo +++ b/tests/operators/qlinear_leakyrelu_test.cairo @@ -12,15 +12,7 @@ fn qlinear_leakyrelu_test() { i8 >::new( shape: array![2, 3].span(), - data: array![ - -10_i8, - -10_i8, - -10_i8, - 10_i8, - 10_i8, - 10_i8 - ] - .span(), + data: array![-10_i8, -10_i8, -10_i8, 10_i8, 10_i8, 10_i8].span(), ); let a_scale = TensorTrait::< diff --git a/tests/operators/qlinear_matmul_test.cairo b/tests/operators/qlinear_matmul_test.cairo index bfbe04714..9d3f8fa4b 100644 --- a/tests/operators/qlinear_matmul_test.cairo +++ 
b/tests/operators/qlinear_matmul_test.cairo @@ -15,36 +15,13 @@ fn qlinearmatmul_2D_test() { i8 >::new( shape: array![2, 4].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 >::new( shape: array![4, 3].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - 10_i8, - 12_i8, - 14_i8, - 16_i8, - 18_i8, - 20_i8, - 22_i8, - 24_i8 - ] + data: array![2_i8, 4_i8, 6_i8, 8_i8, 10_i8, 12_i8, 14_i8, 16_i8, 18_i8, 20_i8, 22_i8, 24_i8] .span(), ); @@ -90,18 +67,7 @@ fn qlinearmatmul_3D_test() { >::new( shape: array![2, 2, 3].span(), data: array![ - -1_i8, - -2_i8, - -2_i8, - -3_i8, - -4_i8, - -4_i8, - -5_i8, - -6_i8, - -6_i8, - -7_i8, - -8_i8, - -8_i8 + -1_i8, -2_i8, -2_i8, -3_i8, -4_i8, -4_i8, -5_i8, -6_i8, -6_i8, -7_i8, -8_i8, -8_i8 ] .span(), ); @@ -110,18 +76,7 @@ fn qlinearmatmul_3D_test() { >::new( shape: array![2, 3, 2].span(), data: array![ - -2_i8, - -4_i8, - -6_i8, - -8_i8, - -10_i8, - -12_i8, - -2_i8, - -4_i8, - -6_i8, - -8_i8, - -10_i8, - -12_i8 + -2_i8, -4_i8, -6_i8, -8_i8, -10_i8, -12_i8, -2_i8, -4_i8, -6_i8, -8_i8, -10_i8, -12_i8 ] .span(), ); @@ -167,29 +122,10 @@ fn qlinearmatmul_3D_test() { fn test_example_doc() { let a = TensorTrait::< i8 - >::new( - shape: array![2, 3].span(), - data: array![ - 3_i8, - 4_i8, - 5_i8, - 2_i8, - 4_i8, - 3_i8 - ] - .span(), - ); + >::new(shape: array![2, 3].span(), data: array![3_i8, 4_i8, 5_i8, 2_i8, 4_i8, 3_i8].span(),); let b = TensorTrait::< i8 - >::new( - shape: array![3, 1].span(), - data: array![ - 4_i8, - 8_i8, - 4_i8 - ] - .span(), - ); + >::new(shape: array![3, 1].span(), data: array![4_i8, 8_i8, 4_i8].span(),); let a_scale = TensorTrait::< FP16x16 diff --git a/tests/operators/qlinear_mul_test.cairo b/tests/operators/qlinear_mul_test.cairo index 6bf292bcc..3babc1800 100644 --- a/tests/operators/qlinear_mul_test.cairo +++ b/tests/operators/qlinear_mul_test.cairo @@ 
-14,40 +14,14 @@ fn qlinearmul_test() { i8 >::new( shape: array![4, 3].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8, - 9_i8, - 10_i8, - 11_i8, - 12_i8 - ] + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8, 9_i8, 10_i8, 11_i8, 12_i8] .span(), ); let b = TensorTrait::< i8 >::new( shape: array![4, 3].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - 10_i8, - 12_i8, - 14_i8, - 16_i8, - 18_i8, - 20_i8, - 22_i8, - 24_i8 - ] + data: array![2_i8, 4_i8, 6_i8, 8_i8, 10_i8, 12_i8, 14_i8, 16_i8, 18_i8, 20_i8, 22_i8, 24_i8] .span(), ); @@ -96,30 +70,11 @@ fn qlinear_mul_broadcast_test() { i8 >::new( shape: array![2, 4].span(), - data: array![ - 1_i8, - 2_i8, - 3_i8, - 4_i8, - 5_i8, - 6_i8, - 7_i8, - 8_i8 - ] - .span(), + data: array![1_i8, 2_i8, 3_i8, 4_i8, 5_i8, 6_i8, 7_i8, 8_i8].span(), ); let b = TensorTrait::< i8 - >::new( - shape: array![1, 4].span(), - data: array![ - 2_i8, - 4_i8, - 6_i8, - 8_i8, - ] - .span(), - ); + >::new(shape: array![1, 4].span(), data: array![2_i8, 4_i8, 6_i8, 8_i8,].span(),); let a_scale = TensorTrait::< FP16x16 @@ -161,28 +116,11 @@ fn test_example_doc() { let a = TensorTrait::< i8 >::new( - shape: array![2, 3].span(), - data: array![ - 21_i8, - 21_i8, - 21_i8, - 41_i8, - 41_i8, - 41_i8 - ] - .span(), + shape: array![2, 3].span(), data: array![21_i8, 21_i8, 21_i8, 41_i8, 41_i8, 41_i8].span(), ); let b = TensorTrait::< i8 - >::new( - shape: array![1, 3].span(), - data: array![ - 4_i8, - 8_i8, - 12_i8 - ] - .span(), - ); + >::new(shape: array![1, 3].span(), data: array![4_i8, 8_i8, 12_i8].span(),); let a_scale = TensorTrait::< FP16x16 From 5b239a9501c2a06fe58b1abaf1acef34e9815f28 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 9 Feb 2024 14:59:36 +0200 Subject: [PATCH 31/46] update summary and compatibility docs --- docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 13 +++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md 
index 0a6a07008..d73bf84aa 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -148,6 +148,7 @@ * [tensor.compress](framework/operators/tensor/tensor.compress.md) * [tensor.layer_normalization](framework/operators/tensor/tensor.layer_normalization.md) * [tensor.scatter\_nd](framework/operators/tensor/tensor.scatter\_nd.md) + * [tensor.dynamic_quantize_linear](framework/operators/tensor/tensor.dequantize_linear.md) * [Neural Network](framework/operators/neural-network/README.md) * [nn.relu](framework/operators/neural-network/nn.relu.md) * [nn.leaky\_relu](framework/operators/neural-network/nn.leaky\_relu.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index ee505b742..d6f760cbf 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -37,13 +37,13 @@ You can see below the list of current supported ONNX Operators: | [ThresholdedRelu](operators/neural-network/nn.thresholded\_relu.md) | :white\_check\_mark: | | [Sigmoid](operators/neural-network/nn.sigmoid.md) | :white\_check\_mark: | | [Softmax](operators/neural-network/nn.softmax.md) | :white\_check\_mark: | -| [Softmax_zero](operators/neural-network/nn.softmax_zero.md) | :white\_check\_mark: | +| [Softmax_zero](operators/neural-network/nn.softmax_zero.md) | :white\_check\_mark: | | [LogSoftmax](operators/neural-network/nn.logsoftmax.md) | :white\_check\_mark: | | [Softsign](operators/neural-network/nn.softsign.md) | :white\_check\_mark: | | [Softplus](operators/neural-network/nn.softplus.md) | :white\_check\_mark: | | [Linear](operators/neural-network/nn.linear.md) | :white\_check\_mark: | | [HardSigmoid](operators/neural-network/nn.hard\_sigmoid.md) | :white\_check\_mark: | -| [Conv](operators/neural-network/nn.conv.md) | :white\_check\_mark: | +| [Conv](operators/neural-network/nn.conv.md) | :white\_check\_mark: | | [Sinh](operators/tensor/tensor.sinh.md) | :white\_check\_mark: | | [Asinh](operators/tensor/tensor.asinh.md) | :white\_check\_mark: | | 
[Atanh](operators/tensor/tensor.atanh.md) | :white\_check\_mark: | @@ -104,11 +104,12 @@ You can see below the list of current supported ONNX Operators: | [IsNaN](operators/tensor/tensor.is\_nan.md) | :white\_check\_mark: | | [IsInf](operators/tensor/tensor.is\_inf.md) | :white\_check\_mark: | | [Not](operators/tensor/tensor.not.md) | :white\_check\_mark: | -| [GatherND](operators/tensor/tensor.gather/_nd.md) | :white\_check\_mark: | -| [ReduceLogSum](operators/tensor/tensor.reduce\_log\_sum.md) | :white\_check\_mark: | -| [Erf](operators/tensor/tensor.erf.md) | :white\_check\_mark: | -| [Compress](operators/tensor/tensor.compress.md) | :white\_check\_mark: | +| [GatherND](operators/tensor/tensor.gather/_nd.md) | :white\_check\_mark: | +| [ReduceLogSum](operators/tensor/tensor.reduce\_log\_sum.md) | :white\_check\_mark: | +| [Erf](operators/tensor/tensor.erf.md) | :white\_check\_mark: | +| [Compress](operators/tensor/tensor.compress.md) | :white\_check\_mark: | | [Layer_normalization](operators/tensor/tensor.layer_normalization.md) | :white\_check\_mark: | | [ScatterND](operators/tensor/tensor.scatter/_nd.md) | :white\_check\_mark: | +| [DequantizeLinear](operators/tensor/tensor.dequantize_linear.md) | :white\_check\_mark: | Current Operators support: **98/156 (62%)** From 7a3f35fe965788a27af86f13cca614e0838fecea Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Fri, 9 Feb 2024 13:04:32 +0000 Subject: [PATCH 32/46] docs: update README.md [skip ci] --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 1f48201d3..b22552a19 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ # Orion: An Open-source Framework for Validity and ZK ML ✨ -[![All Contributors](https://img.shields.io/badge/all_contributors-29-orange.svg?style=flat-square)](#contributors-) +[![All 
Contributors](https://img.shields.io/badge/all_contributors-30-orange.svg?style=flat-square)](#contributors-) Orion is an open-source, community-driven framework dedicated to Provable Machine Learning. It provides essential components and a new ONNX runtime for building verifiable Machine Learning models using [STARKs](https://starkware.co/stark/). @@ -106,6 +106,7 @@ Thanks goes to these wonderful people: Vid Kersic
Vid Kersic

💻 + canacechan
canacechan

💻 From ae18b2ae82ec603f417e8c6742b0107e620ebacc Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Fri, 9 Feb 2024 13:04:33 +0000 Subject: [PATCH 33/46] docs: update .all-contributorsrc [skip ci] --- .all-contributorsrc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.all-contributorsrc b/.all-contributorsrc index 7f624a7d6..a63ff26f7 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -269,6 +269,15 @@ "contributions": [ "code" ] + }, + { + "login": "canacechan", + "name": "canacechan", + "avatar_url": "https://avatars.githubusercontent.com/u/127183619?v=4", + "profile": "https://github.com/canacechan", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, From 2cb12fff54c9a84827b9d0457e947fcc08ed1048 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 9 Feb 2024 15:18:56 +0200 Subject: [PATCH 34/46] add missing ops to summarry and compatibility --- docs/SUMMARY.md | 2 ++ docs/framework/compatibility.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index d73bf84aa..2a6ee1def 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -163,6 +163,8 @@ * [nn.thresholded\_relu](framework/operators/neural-network/nn.thresholded\_relu.md) * [nn.gemm](framework/operators/neural-network/nn.gemm.md) * [nn.conv](framework/operators/neural-network/nn.conv.md) + * [nn.depth_to_space](framework/operators/neural-network/nn.depth_to_space.md) + * [nn.space_to_depth](framework/operators/neural-network/nn.space_to_depth.md) * [Machine Learning](framework/operators/machine-learning/README.md) * [Tree Ensemble Classifier](framework/operators/machine-learning/tree-ensemble-classifier/README.md) * [tree\_ensemble\_classifier.predict](framework/operators/machine-learning/tree-ensemble-classifier/tree\_ensemble\_classifier.predict.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index d6f760cbf..12e04e565 100644 --- 
a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -111,5 +111,7 @@ You can see below the list of current supported ONNX Operators: | [Layer_normalization](operators/tensor/tensor.layer_normalization.md) | :white\_check\_mark: | | [ScatterND](operators/tensor/tensor.scatter/_nd.md) | :white\_check\_mark: | | [DequantizeLinear](operators/tensor/tensor.dequantize_linear.md) | :white\_check\_mark: | +| [SpaceToDepth](operators/neural-network/nn.space_to_depth.md) | :white\_check\_mark: | +| [DepthToSpace](operators/neural-network/nn.depth_to_space.md) | :white\_check\_mark: | Current Operators support: **98/156 (62%)** From 6672b77011c97f11fda5bb3f5cb620d25b04fdb0 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 9 Feb 2024 15:36:21 +0200 Subject: [PATCH 35/46] update compatibility and summary docs --- docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 1 + 2 files changed, 2 insertions(+) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 2a6ee1def..f1b411655 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -149,6 +149,7 @@ * [tensor.layer_normalization](framework/operators/tensor/tensor.layer_normalization.md) * [tensor.scatter\_nd](framework/operators/tensor/tensor.scatter\_nd.md) * [tensor.dynamic_quantize_linear](framework/operators/tensor/tensor.dequantize_linear.md) + * [tensor.optional](framework/operators/tensor/tensor.optional.md) * [Neural Network](framework/operators/neural-network/README.md) * [nn.relu](framework/operators/neural-network/nn.relu.md) * [nn.leaky\_relu](framework/operators/neural-network/nn.leaky\_relu.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index 12e04e565..7bc00899c 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -113,5 +113,6 @@ You can see below the list of current supported ONNX Operators: | [DequantizeLinear](operators/tensor/tensor.dequantize_linear.md) | :white\_check\_mark: | | 
[SpaceToDepth](operators/neural-network/nn.space_to_depth.md) | :white\_check\_mark: | | [DepthToSpace](operators/neural-network/nn.depth_to_space.md) | :white\_check\_mark: | +| [Optional](operators/tensor/tensor.optional.md) | :white\_check\_mark: | Current Operators support: **98/156 (62%)** From db8cea0eff15d926c98828c108b977fea55ed298 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 9 Feb 2024 18:03:49 +0200 Subject: [PATCH 36/46] update docs --- .../operators/neural-network/nn.conv.md | 4 +- .../neural-network/nn.depth_to_space.md | 62 ++++++++++--------- docs/framework/operators/tensor/README.md | 2 + .../operators/tensor/tensor.split.md | 1 + 4 files changed, 38 insertions(+), 31 deletions(-) diff --git a/docs/framework/operators/neural-network/nn.conv.md b/docs/framework/operators/neural-network/nn.conv.md index 91d05c369..086737f0b 100644 --- a/docs/framework/operators/neural-network/nn.conv.md +++ b/docs/framework/operators/neural-network/nn.conv.md @@ -1,5 +1,5 @@ -# NNTrait::conv_transpose +# NNTrait::conv ```rust conv( @@ -43,7 +43,7 @@ use orion::numbers::FP16x16; use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; -fn example_conv_transpose() -> Tensor { +fn example_conv() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(1); shape.append(1); diff --git a/docs/framework/operators/neural-network/nn.depth_to_space.md b/docs/framework/operators/neural-network/nn.depth_to_space.md index 7e23e27d0..5c7f4725e 100644 --- a/docs/framework/operators/neural-network/nn.depth_to_space.md +++ b/docs/framework/operators/neural-network/nn.depth_to_space.md @@ -21,34 +21,38 @@ A `Tensor` of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]. 
```rust use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I8Tensor; -use orion::numbers::{IntegerTrait, i8}; - -fn relu_example() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(4); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(i8 { mag: 1, sign: false }); - data.append(i8 { mag: 3, sign: true }); - data.append(i8 { mag: 3, sign: true }); - data.append(i8 { mag: 1, sign: false }); - data.append(i8 { mag: 1, sign: true }); - data.append(i8 { mag: 3, sign: true }); - data.append(i8 { mag: 2, sign: true }); - data.append(i8 { mag: 1, sign: true }); - data.append(i8 { mag: 1, sign: true }); - data.append(i8 { mag: 2, sign: false }); - data.append(i8 { mag: 1, sign: true }); - data.append(i8 { mag: 2, sign: true }); - data.append(i8 { mag: 3, sign: true }); - data.append(i8 { mag: 3, sign: true }); - data.append(i8 { mag: 2, sign: false }); - data.append(i8 { mag: 2, sign: false }); - let tensor = TensorTrait::new(shape.span(), data.span()); +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; +use orion::operators::nn::NNTrait; +use orion::operators::nn::I8NN; +use orion::numbers::FixedTrait; + +fn depth_to_space_example() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(4); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(-2); + data.append(0); + data.append(-1); + data.append(0); + data.append(0); + data.append(-3); + data.append(2); + data.append(1); + data.append(-2); + data.append(-2); + data.append(0); + data.append(-2); + data.append(-1); + data.append(-1); + data.append(2); + data.append(2); + let tensor = TensorTrait::new(shape.span(), data.span()); + return NNTrait::depth_to_space(@tensor, 2, 'DCR'); } ->>> [[[[1, 1, 3, 3], [1, 3, 2, 3], [3, 2, 1, 1], [1, 2, 2, 2]]]] +>>> [[[[-2, 0, 0, -3], 
[-2, -1, -2, -1], [-1, 2, 0, 1], [0, 2, -2, 2]]]] ``` diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md index 4dd0a5608..c4615384c 100644 --- a/docs/framework/operators/tensor/README.md +++ b/docs/framework/operators/tensor/README.md @@ -120,6 +120,8 @@ use orion::operators::tensor::TensorTrait; | [`tensor.erf`](tensor.erf.md) | Computes the error function of the given input tensor element-wise. | | [`tensor.layer_normalization`](tensor.layer\_normalization.md) | computes the layer normalization of the input tensor. | | [`tensor.split`](tensor.split.md) | Split a tensor into a list of tensors, along the specified ‘axis’. | +| [`tensor.optional`](tensor.optional.md) | Constructs an optional-type value containing either an empty optional of a certain type specified by the attribute, or a non-empty value containing the input element. | +| [`tensor.dynamic_quantize_linear`](tensor.dynamic\_quantize\_linear.md) | Computes the Scale, Zero Point and FP32->8Bit conversion of FP32 Input data. | | [`tensor.scatter_nd`](tensor.scatter\_nd.md) | The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data | ## Arithmetic Operations diff --git a/docs/framework/operators/tensor/tensor.split.md b/docs/framework/operators/tensor/tensor.split.md index 7c0e8b157..4fa8510e1 100644 --- a/docs/framework/operators/tensor/tensor.split.md +++ b/docs/framework/operators/tensor/tensor.split.md @@ -7,6 +7,7 @@ ## Args Split a tensor into a list of tensors, along the specified ‘axis’ +## Args * `self`(`@Tensor`) - The input tensor. * `axis`(`usize`) - The axis along which to split on. 
From fcb88e1b1f6672f26034b2cb8132927e3f7eab4f Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Fri, 9 Feb 2024 16:22:00 +0000 Subject: [PATCH 37/46] docs: update README.md [skip ci] --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 1f48201d3..d43e3b3d5 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ # Orion: An Open-source Framework for Validity and ZK ML ✨ -[![All Contributors](https://img.shields.io/badge/all_contributors-29-orange.svg?style=flat-square)](#contributors-) +[![All Contributors](https://img.shields.io/badge/all_contributors-30-orange.svg?style=flat-square)](#contributors-) Orion is an open-source, community-driven framework dedicated to Provable Machine Learning. It provides essential components and a new ONNX runtime for building verifiable Machine Learning models using [STARKs](https://starkware.co/stark/). @@ -106,6 +106,7 @@ Thanks goes to these wonderful people: Vid Kersic
Vid Kersic

💻 + Trunks @ Carbonable
Trunks @ Carbonable

📖 From 5f6af73a2e6524084139abcbb5128ac457f4f16e Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Fri, 9 Feb 2024 16:22:01 +0000 Subject: [PATCH 38/46] docs: update .all-contributorsrc [skip ci] --- .all-contributorsrc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.all-contributorsrc b/.all-contributorsrc index 7f624a7d6..402441186 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -269,6 +269,15 @@ "contributions": [ "code" ] + }, + { + "login": "tekkac", + "name": "Trunks @ Carbonable", + "avatar_url": "https://avatars.githubusercontent.com/u/98529704?v=4", + "profile": "https://github.com/tekkac", + "contributions": [ + "doc" + ] } ], "contributorsPerLine": 7, From 9b060c24a36dfae8f5ecd9fc0737092d368dea69 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sun, 11 Feb 2024 13:10:17 +0200 Subject: [PATCH 39/46] update doc --- docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index c836ac066..739057814 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -150,6 +150,7 @@ * [tensor.scatter\_nd](framework/operators/tensor/tensor.scatter\_nd.md) * [tensor.dynamic_quantize_linear](framework/operators/tensor/tensor.dequantize_linear.md) * [tensor.optional](framework/operators/tensor/tensor.optional.md) + * [tensor.reverse_sequence](framework/operators/tensor/tensor.reverse_sequence.md) * [Neural Network](framework/operators/neural-network/README.md) * [nn.relu](framework/operators/neural-network/nn.relu.md) * [nn.leaky\_relu](framework/operators/neural-network/nn.leaky\_relu.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index d01aac724..2853a1acb 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -43,7 +43,7 @@ You can see below the list of current supported ONNX Operators: | 
[Softplus](operators/neural-network/nn.softplus.md) | :white\_check\_mark: | | [Linear](operators/neural-network/nn.linear.md) | :white\_check\_mark: | | [HardSigmoid](operators/neural-network/nn.hard\_sigmoid.md) | :white\_check\_mark: | -| [ConvTranspose](operators/neural-network/nn.conv\_transpose_.md) | :white\_check\_mark: | +| [ConvTranspose](operators/neural-network/nn.conv\_transpose_.md) | :white\_check\_mark: | | [Conv](operators/neural-network/nn.conv.md) | :white\_check\_mark: | | [Sinh](operators/tensor/tensor.sinh.md) | :white\_check\_mark: | | [Asinh](operators/tensor/tensor.asinh.md) | :white\_check\_mark: | @@ -115,5 +115,7 @@ You can see below the list of current supported ONNX Operators: | [SpaceToDepth](operators/neural-network/nn.space_to_depth.md) | :white\_check\_mark: | | [DepthToSpace](operators/neural-network/nn.depth_to_space.md) | :white\_check\_mark: | | [Optional](operators/tensor/tensor.optional.md) | :white\_check\_mark: | +| [ReverseSequence](operators/tensor/tensor.reverse_sequence.md) | :white\_check\_mark: | + Current Operators support: **98/156 (62%)** From 33403007ecd15ca0dcb74d21bd58118cc02404a8 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sun, 11 Feb 2024 13:18:34 +0200 Subject: [PATCH 40/46] update doc --- docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 739057814..5f9d85581 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -151,6 +151,7 @@ * [tensor.dynamic_quantize_linear](framework/operators/tensor/tensor.dequantize_linear.md) * [tensor.optional](framework/operators/tensor/tensor.optional.md) * [tensor.reverse_sequence](framework/operators/tensor/tensor.reverse_sequence.md) + * [tensor.split_to_sequence](framework/operators/tensor/tensor.split_to_sequence.md) * [Neural Network](framework/operators/neural-network/README.md) * [nn.relu](framework/operators/neural-network/nn.relu.md) * 
[nn.leaky\_relu](framework/operators/neural-network/nn.leaky\_relu.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index 2853a1acb..be95bb7ad 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -116,6 +116,6 @@ You can see below the list of current supported ONNX Operators: | [DepthToSpace](operators/neural-network/nn.depth_to_space.md) | :white\_check\_mark: | | [Optional](operators/tensor/tensor.optional.md) | :white\_check\_mark: | | [ReverseSequence](operators/tensor/tensor.reverse_sequence.md) | :white\_check\_mark: | - +| [SplitToSequence](operators/tensor/tensor.split_to_sequence.md) | :white\_check\_mark: | Current Operators support: **98/156 (62%)** From be72b39fbc21a8bece2e332d606756d78c8d351d Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sun, 11 Feb 2024 13:44:38 +0200 Subject: [PATCH 41/46] update doc --- docs/SUMMARY.md | 4 ++++ docs/framework/compatibility.md | 4 ++++ docs/framework/operators/tensor/README.md | 1 + .../operators/tensor/tensor.reverse_sequence.md | 13 +++++++++---- 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 5f9d85581..93abfeaba 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -152,6 +152,10 @@ * [tensor.optional](framework/operators/tensor/tensor.optional.md) * [tensor.reverse_sequence](framework/operators/tensor/tensor.reverse_sequence.md) * [tensor.split_to_sequence](framework/operators/tensor/tensor.split_to_sequence.md) + * [tensor.range](framework/operators/tensor/tensor.range.md) + * [tensor.hann_window](framework/operators/tensor/tensor.hann_window.md) + * [tensor.hamming_window](framework/operators/tensor/tensor.hamming_window.md) + * [tensor.blackman_window](framework/operators/tensor/tensor.blackman_window.md) * [Neural Network](framework/operators/neural-network/README.md) * [nn.relu](framework/operators/neural-network/nn.relu.md) * 
[nn.leaky\_relu](framework/operators/neural-network/nn.leaky\_relu.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index be95bb7ad..529f0f38e 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -117,5 +117,9 @@ You can see below the list of current supported ONNX Operators: | [Optional](operators/tensor/tensor.optional.md) | :white\_check\_mark: | | [ReverseSequence](operators/tensor/tensor.reverse_sequence.md) | :white\_check\_mark: | | [SplitToSequence](operators/tensor/tensor.split_to_sequence.md) | :white\_check\_mark: | +| [Range](operators/tensor/tensor.range.md) | :white\_check\_mark: | +| [HannWindow](operators/tensor/tensor.tensor.hann_window.md) | :white\_check\_mark: | +| [HammingWindow](operators/tensor/tensor.tensor.hamming_window.md) | :white\_check\_mark: | +| [BlackmanWindow](operators/tensor/tensor.tensor.blackman_window.md) | :white\_check\_mark: | Current Operators support: **98/156 (62%)** diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md index 94c149c67..da1d48b24 100644 --- a/docs/framework/operators/tensor/README.md +++ b/docs/framework/operators/tensor/README.md @@ -120,6 +120,7 @@ use orion::operators::tensor::TensorTrait; | [`tensor.erf`](tensor.erf.md) | Computes the error function of the given input tensor element-wise. | | [`tensor.layer_normalization`](tensor.layer\_normalization.md) | computes the layer normalization of the input tensor. | | [`tensor.split`](tensor.split.md) | Split a tensor into a list of tensors, along the specified ‘axis’. | +| [`tensor.split_to_sequence`](tensor.split\_to\_sequence.md) | Split a tensor into a sequence of tensors, along the specified ‘axis’. | | [`tensor.range`](tensor.range.md) | Generate a tensor containing a sequence of numbers that begin at start and extends by increments of delta up to limit (exclusive). 
| | [`tensor.hann_window`](tensor.hann\_window.md) | Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106. | | [`tensor.hamming_window`](tensor.hamming\_window.md) | Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106. | diff --git a/docs/framework/operators/tensor/tensor.reverse_sequence.md b/docs/framework/operators/tensor/tensor.reverse_sequence.md index d03eaf7b8..3cfbf5b18 100644 --- a/docs/framework/operators/tensor/tensor.reverse_sequence.md +++ b/docs/framework/operators/tensor/tensor.reverse_sequence.md @@ -1,8 +1,8 @@ # tensor.reverse_sequence ```rust - fn reverse_sequence(self: @Array>, sequence_lens: @Tensor, batch_axis: Option, time_axis: Option) -> - Array>; + fn reverse_sequence(self: @Tensor, sequence_lens: @Tensor, batch_axis: Option, time_axis: Option) -> + Tensor; ``` Reverse batch of sequences having different lengths specified by sequence_lens. @@ -34,11 +34,16 @@ fn reverse_sequence_example() -> Tensor { 0, 1, 2, 3, 4, 5, 6, 7,8,9,10,11,12,13,14,15,16 ].span(), ); - let sequence_lens = TensorTrait::::new(array![4,4].span(), array![1,2,3,4].span()); + let sequence_lens = TensorTrait::::new(array![4].span(), array![1,2,3,4].span()); let batch_axis = Option::Some(0); let time_axis = Option::Some(1); // We can call `split` function as follows. 
return tensor.reverse_sequence(sequence_lens, batch_axis, time_axis); } ->>> [0,1,2,3,5,4,6,7,10,9,8,11,15,14,13,12] +>>> [ + [0,1,2,3], + [5,4,6,7], + [10,9,8,11], + [15,14,13,12] + ] ``` From fc3099c9601a304039a29e0ca88ab3dd4620c3ca Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sun, 11 Feb 2024 13:55:30 +0200 Subject: [PATCH 42/46] update doc --- docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 1 + 2 files changed, 2 insertions(+) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 93abfeaba..add0dc41c 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -156,6 +156,7 @@ * [tensor.hann_window](framework/operators/tensor/tensor.hann_window.md) * [tensor.hamming_window](framework/operators/tensor/tensor.hamming_window.md) * [tensor.blackman_window](framework/operators/tensor/tensor.blackman_window.md) + * [tensor.random_uniform_like](framework/operators/tensor/tensor.random_uniform_like.md) * [Neural Network](framework/operators/neural-network/README.md) * [nn.relu](framework/operators/neural-network/nn.relu.md) * [nn.leaky\_relu](framework/operators/neural-network/nn.leaky\_relu.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index 529f0f38e..1bda80f17 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -121,5 +121,6 @@ You can see below the list of current supported ONNX Operators: | [HannWindow](operators/tensor/tensor.tensor.hann_window.md) | :white\_check\_mark: | | [HammingWindow](operators/tensor/tensor.tensor.hamming_window.md) | :white\_check\_mark: | | [BlackmanWindow](operators/tensor/tensor.tensor.blackman_window.md) | :white\_check\_mark: | +| [RandomUniformLike](operators/tensor/tensor.tensor.random_uniform_like.md) | :white\_check\_mark: | Current Operators support: **98/156 (62%)** From 25e29ac186914c5c6ff16efa32a0c6ce9c1fc5d4 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sun, 11 Feb 2024 15:18:53 +0200 Subject: [PATCH 43/46] fix orion warnings --- .tool-versions | 2 
+- ...r-forecasting-aaves-lifetime-repayments.md | 6 +-- ...erifiable-principal-components-analysis.md | 8 +-- .../verifiable-support-vector-machine.md | 6 +-- src/numbers/complex_number/complex64.cairo | 1 - .../implementations/fp16x16/core.cairo | 4 +- .../implementations/fp16x16/helpers.cairo | 4 +- .../implementations/fp16x16/math/core.cairo | 4 +- .../implementations/fp16x16wide/core.cairo | 6 +-- .../implementations/fp16x16wide/helpers.cairo | 4 +- .../fp16x16wide/math/core.cairo | 4 +- .../implementations/fp32x32/core.cairo | 4 +- .../implementations/fp64x64/core.cairo | 4 +- .../implementations/fp8x23/core.cairo | 4 +- .../implementations/fp8x23/helpers.cairo | 4 +- .../implementations/fp8x23/math/core.cairo | 4 +- .../implementations/fp8x23wide/core.cairo | 6 +-- .../implementations/fp8x23wide/helpers.cairo | 4 +- .../fp8x23wide/math/core.cairo | 4 +- .../ml/linear/linear_classifier.cairo | 8 +-- .../ml/linear/linear_regressor.cairo | 2 +- src/operators/ml/svm/svm_classifier.cairo | 3 -- src/operators/ml/tree_ensemble/core.cairo | 2 +- .../tree_ensemble_classifier.cairo | 40 +++++++-------- .../tree_ensemble_regressor.cairo | 28 +++++------ src/operators/nn/functional/col2im.cairo | 1 - src/operators/nn/functional/conv.cairo | 5 -- .../nn/functional/conv_transpose.cairo | 5 +- src/operators/nn/functional/gemm.cairo | 2 +- src/operators/nn/functional/grid_sample.cairo | 1 - .../nn/functional/hard_sigmoid.cairo | 2 +- src/operators/nn/functional/leaky_relu.cairo | 2 +- src/operators/nn/functional/relu.cairo | 2 +- src/operators/nn/functional/sigmoid.cairo | 2 +- .../nn/functional/softmax_zero.cairo | 8 +-- src/operators/nn/functional/softplus.cairo | 2 +- src/operators/nn/functional/softsign.cairo | 2 +- .../nn/functional/thresholded_relu.cairo | 2 +- .../functional/concat_from_sequence.cairo | 6 +-- .../sequence/functional/sequence_erase.cairo | 4 +- .../sequence/functional/sequence_insert.cairo | 4 +- src/operators/tensor/core.cairo | 50 
+++++++++---------- src/operators/tensor/helpers.cairo | 24 ++++----- src/operators/tensor/linalg/matmul.cairo | 6 +-- src/operators/tensor/linalg/trilu.cairo | 4 +- .../manipulation/reverse_sequence.cairo | 2 +- src/operators/tensor/manipulation/split.cairo | 14 +++--- .../manipulation/split_to_sequence.cairo | 12 ++--- src/operators/tensor/math/abs.cairo | 2 +- src/operators/tensor/math/acos.cairo | 2 +- src/operators/tensor/math/acosh.cairo | 2 +- src/operators/tensor/math/argmax.cairo | 8 +-- src/operators/tensor/math/argmin.cairo | 8 +-- src/operators/tensor/math/arithmetic.cairo | 8 +-- src/operators/tensor/math/asin.cairo | 2 +- src/operators/tensor/math/asinh.cairo | 2 +- src/operators/tensor/math/atan.cairo | 2 +- src/operators/tensor/math/binarizer.cairo | 2 +- src/operators/tensor/math/ceil.cairo | 2 +- src/operators/tensor/math/compress.cairo | 18 +++---- src/operators/tensor/math/concat.cairo | 12 ++--- src/operators/tensor/math/cos.cairo | 2 +- src/operators/tensor/math/cosh.cairo | 2 +- src/operators/tensor/math/cumsum.cairo | 7 ++- src/operators/tensor/math/erf.cairo | 2 +- src/operators/tensor/math/exp.cairo | 4 +- src/operators/tensor/math/flatten.cairo | 2 +- src/operators/tensor/math/gather.cairo | 12 ++--- .../tensor/math/gather_elements.cairo | 10 ++-- src/operators/tensor/math/gather_nd.cairo | 17 +++---- src/operators/tensor/math/is_inf.cairo | 6 +-- src/operators/tensor/math/is_nan.cairo | 2 +- .../tensor/math/layer_normalization.cairo | 6 --- src/operators/tensor/math/log.cairo | 2 +- src/operators/tensor/math/max_in_tensor.cairo | 2 +- src/operators/tensor/math/min_in_tensor.cairo | 2 +- src/operators/tensor/math/neg.cairo | 2 +- src/operators/tensor/math/not.cairo | 2 +- src/operators/tensor/math/onehot.cairo | 9 ++-- .../tensor/math/optional_get_element.cairo | 2 +- .../tensor/math/random_uniform_like.cairo | 12 ++--- src/operators/tensor/math/reduce_l2.cairo | 2 +- src/operators/tensor/math/reduce_mean.cairo | 14 +++--- 
src/operators/tensor/math/reduce_min.cairo | 14 +++--- src/operators/tensor/math/reduce_prod.cairo | 2 +- src/operators/tensor/math/reduce_sum.cairo | 2 +- .../tensor/math/reduce_sum_square.cairo | 2 +- src/operators/tensor/math/resize.cairo | 29 ++++------- src/operators/tensor/math/round.cairo | 2 +- src/operators/tensor/math/scatter.cairo | 29 +++++------ src/operators/tensor/math/scatter_nd.cairo | 15 +++--- src/operators/tensor/math/shrink.cairo | 2 +- src/operators/tensor/math/sign.cairo | 2 +- src/operators/tensor/math/sin.cairo | 2 +- src/operators/tensor/math/sinh.cairo | 2 +- src/operators/tensor/math/sqrt.cairo | 2 +- src/operators/tensor/math/tanh.cairo | 2 +- .../tensor/ml/array_feature_extractor.cairo | 8 ++- .../quantization/dequantize_linear.cairo | 3 +- .../tensor/quantization/qlinear_concat.cairo | 2 +- .../quantization/qlinear_leakyrelu.cairo | 3 +- .../tensor/quantization/qlinear_matmul.cairo | 4 +- .../tensor/quantization/quantize_linear.cairo | 2 +- tests/ml/linear_classifier_test.cairo | 2 - tests/ml/tree_ensemble_regressor.cairo | 6 --- tests/numbers/complex_number_test.cairo | 4 +- tests/operators/qlinear_concat_test.cairo | 2 +- tests/operators/qlinear_matmul_test.cairo | 2 +- .../onehot_fp_test/onehot_fp16x16_test.cairo | 6 +-- .../onehot_fp_test/onehot_fp8x23_test.cairo | 6 +-- 110 files changed, 308 insertions(+), 368 deletions(-) diff --git a/.tool-versions b/.tool-versions index 21cfc8077..ebe254233 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1 +1 @@ -scarb 2.4.0 +scarb 2.5.3 diff --git a/docs/academy/tutorials/provable-mlr-forecasting-aaves-lifetime-repayments.md b/docs/academy/tutorials/provable-mlr-forecasting-aaves-lifetime-repayments.md index 68c4daac6..5a630e096 100644 --- a/docs/academy/tutorials/provable-mlr-forecasting-aaves-lifetime-repayments.md +++ b/docs/academy/tutorials/provable-mlr-forecasting-aaves-lifetime-repayments.md @@ -481,7 +481,7 @@ fn normalize_label_data(tensor_data: Tensor) -> Tensor { 
normalized_array.append(diff / range); i += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; // convert normalized array values to tensor format @@ -619,7 +619,7 @@ fn add_bias_term(x_feature: Tensor, axis: u32) -> Tensor { result.append(*x_val); j += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; result.append(FixedTrait::new(65536, false)); //65536=ONE in FP16x16, change accordingly @@ -855,7 +855,7 @@ fn calculate_r_score(Y_values: Tensor, Y_pred_values: Tensor) squared_mean_diff_vals.append(squared_mean_diff); i += 1; }, - Option::None(_) => { break; } + Option::None => { break; } } }; diff --git a/docs/academy/tutorials/verifiable-principal-components-analysis.md b/docs/academy/tutorials/verifiable-principal-components-analysis.md index 5a692686f..f33736810 100644 --- a/docs/academy/tutorials/verifiable-principal-components-analysis.md +++ b/docs/academy/tutorials/verifiable-principal-components-analysis.md @@ -443,7 +443,7 @@ fn div_by_scalar(self: @Tensor, divisor: u32) -> Tensor { Option::Some(elem) => { data_array.append(FixedTrait::new(*elem.mag / divisor, *elem.sign)); }, - Option::None(_) => { + Option::None => { break TensorTrait::::new((*self).shape, data_array.span()); } }; @@ -457,7 +457,7 @@ fn div_by_fp(self: @Tensor, divisor: FP16x16) -> Tensor { loop { match data.pop_front() { Option::Some(elem) => { data_array.append(FP16x16Div::div(*elem, divisor)); }, - Option::None(_) => { + Option::None => { break TensorTrait::::new((*self).shape, data_array.span()); } }; @@ -618,7 +618,7 @@ fn extract_diagonal(self: @Tensor) -> Tensor { Option::Some(elem) => { if x == y { data_array.append(*elem); }; }, - Option::None(_) => { break; } + Option::None => { break; } }; y += 1; }; @@ -672,7 +672,7 @@ fn update_eigen_values( y += 1; index += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/docs/academy/tutorials/verifiable-support-vector-machine.md 
b/docs/academy/tutorials/verifiable-support-vector-machine.md index faffedec0..6d99ba0f0 100644 --- a/docs/academy/tutorials/verifiable-support-vector-machine.md +++ b/docs/academy/tutorials/verifiable-support-vector-machine.md @@ -363,7 +363,7 @@ fn accuracy(y: @Tensor, z: @Tensor) -> FP16x16 { counter += 1; }; }, - Option::None(_) => { + Option::None => { break; } }; @@ -407,7 +407,7 @@ fn less(y: @Tensor, z: @Tensor) -> Tensor { smaller_index = (1 + smaller_index) % smaller_data.len(); }, - Option::None(_) => { + Option::None => { break; } }; @@ -431,7 +431,7 @@ fn sign(z: @Tensor) -> Tensor { }; data_result.append(result); }, - Option::None(_) => { + Option::None => { break; } }; diff --git a/src/numbers/complex_number/complex64.cairo b/src/numbers/complex_number/complex64.cairo index c3b649c6d..20fb57f88 100644 --- a/src/numbers/complex_number/complex64.cairo +++ b/src/numbers/complex_number/complex64.cairo @@ -222,7 +222,6 @@ impl Complex64Impl of ComplexTrait { //atanh(z) = 1/2 * [ln (1 + z) - ln(1 - z)] fn atanh(self: complex64) -> complex64 { let two = Complex64Impl::new(FP64x64Impl::new(TWO, false), FP64x64Impl::ZERO()); - let i = Complex64Impl::new(FP64x64Impl::ZERO(), FP64x64Impl::ONE()); let one = Complex64Impl::new(FP64x64Impl::ONE(), FP64x64Impl::ZERO()); let atanh = (Complex64Impl::ln(one + self) - Complex64Impl::ln(one - self)) / two; diff --git a/src/numbers/fixed_point/implementations/fp16x16/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/core.cairo index a260d886f..d39820ce8 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/core.cairo @@ -430,7 +430,7 @@ fn _i8_try_from_fp(x: FP16x16) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); match unscaled_mag { - Option::Some(val) => { + Option::Some => { let number_felt: felt252 = unscaled_mag.unwrap().into(); let mut number_i8: i8 = number_felt.try_into().unwrap(); if x.sign { @@ -438,6 +438,6 @@ 
fn _i8_try_from_fp(x: FP16x16) -> Option { } Option::Some(number_i8) }, - Option::None(_) => Option::None(()) + Option::None => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo b/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo index 03e0f49fb..0cd5a8f0f 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/helpers.cairo @@ -12,7 +12,7 @@ const DEFAULT_PRECISION: u32 = 7; // 1e-4 fn assert_precise(result: FP16x16, expected: felt252, msg: felt252, custom_precision: Option) { let precision = match custom_precision { Option::Some(val) => val, - Option::None(_) => DEFAULT_PRECISION, + Option::None => DEFAULT_PRECISION, }; let diff = (result - FixedTrait::from_felt(expected)).mag; @@ -28,7 +28,7 @@ fn assert_relative( ) { let precision = match custom_precision { Option::Some(val) => val, - Option::None(_) => DEFAULT_PRECISION, + Option::None => DEFAULT_PRECISION, }; let diff = result - FixedTrait::from_felt(expected); diff --git a/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo b/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo index 6cb9bebfe..d477f051d 100644 --- a/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16/math/core.cairo @@ -211,7 +211,7 @@ fn neg(a: FP16x16) -> FP16x16 { // self is a FP16x16 point value // b is a FP16x16 point value fn pow(a: FP16x16, b: FP16x16) -> FP16x16 { - let (div, rem) = integer::u32_safe_divmod(b.mag, u32_as_non_zero(ONE)); + let (_, rem) = integer::u32_safe_divmod(b.mag, u32_as_non_zero(ONE)); // use the more performant integer pow when y is an int if (rem == 0) { @@ -330,7 +330,7 @@ mod tests { #[should_panic] fn test_negative_try_into_u128() { let a = FixedTrait::::new_unscaled(1, true); - let a: u128 = a.try_into().unwrap(); + let _a: u128 = a.try_into().unwrap(); } #[test] diff --git 
a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo index 0c322ad6a..9c97cce46 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/core.cairo @@ -262,7 +262,7 @@ impl FP16x16WTryIntoFP16x16 of TryInto { fn try_into(self: FP16x16W) -> Option { match self.mag.try_into() { Option::Some(val) => { Option::Some(FP16x16 { mag: val, sign: self.sign }) }, - Option::None(_) => { Option::None(()) } + Option::None => { Option::None(()) } } } } @@ -445,7 +445,7 @@ fn _i8_try_from_fp(x: FP16x16W) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); match unscaled_mag { - Option::Some(val) => { + Option::Some => { let number_felt: felt252 = unscaled_mag.unwrap().into(); let mut number_i8: i8 = number_felt.try_into().unwrap(); if x.sign { @@ -453,6 +453,6 @@ fn _i8_try_from_fp(x: FP16x16W) -> Option { } Option::Some(number_i8) }, - Option::None(_) => Option::None(()) + Option::None => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo b/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo index d6f50b1b5..c9627852a 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/helpers.cairo @@ -14,7 +14,7 @@ fn assert_precise( ) { let precision = match custom_precision { Option::Some(val) => val, - Option::None(_) => DEFAULT_PRECISION, + Option::None => DEFAULT_PRECISION, }; let diff = (result - FixedTrait::from_felt(expected)).mag; @@ -30,7 +30,7 @@ fn assert_relative( ) { let precision = match custom_precision { Option::Some(val) => val, - Option::None(_) => DEFAULT_PRECISION, + Option::None => DEFAULT_PRECISION, }; let diff = result - FixedTrait::from_felt(expected); diff --git a/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo 
b/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo index 62288a0f1..902a54b48 100644 --- a/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo +++ b/src/numbers/fixed_point/implementations/fp16x16wide/math/core.cairo @@ -211,7 +211,7 @@ fn neg(a: FP16x16W) -> FP16x16W { // self is a FP16x16W point value // b is a FP16x16W point value fn pow(a: FP16x16W, b: FP16x16W) -> FP16x16W { - let (div, rem) = integer::u64_safe_divmod(b.mag, u64_as_non_zero(ONE)); + let (_, rem) = integer::u64_safe_divmod(b.mag, u64_as_non_zero(ONE)); // use the more performant integer pow when y is an int if (rem == 0) { @@ -330,7 +330,7 @@ mod tests { #[should_panic] fn test_negative_try_into_u128() { let a = FixedTrait::::new_unscaled(1, true); - let a: u128 = a.try_into().unwrap(); + let _a: u128 = a.try_into().unwrap(); } #[test] diff --git a/src/numbers/fixed_point/implementations/fp32x32/core.cairo b/src/numbers/fixed_point/implementations/fp32x32/core.cairo index 34b06bc44..e7fd8e24d 100644 --- a/src/numbers/fixed_point/implementations/fp32x32/core.cairo +++ b/src/numbers/fixed_point/implementations/fp32x32/core.cairo @@ -396,7 +396,7 @@ fn _i8_try_from_fp(x: FP32x32) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); match unscaled_mag { - Option::Some(val) => { + Option::Some => { let number_felt: felt252 = unscaled_mag.unwrap().into(); let mut number_i8: i8 = number_felt.try_into().unwrap(); if x.sign { @@ -404,6 +404,6 @@ fn _i8_try_from_fp(x: FP32x32) -> Option { } Option::Some(number_i8) }, - Option::None(_) => Option::None(()) + Option::None => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp64x64/core.cairo b/src/numbers/fixed_point/implementations/fp64x64/core.cairo index d35cb9cfa..23af67564 100644 --- a/src/numbers/fixed_point/implementations/fp64x64/core.cairo +++ b/src/numbers/fixed_point/implementations/fp64x64/core.cairo @@ -396,7 +396,7 @@ fn _i8_try_from_fp(x: FP64x64) -> Option { let 
unscaled_mag: Option = (x.mag / ONE).try_into(); match unscaled_mag { - Option::Some(val) => { + Option::Some => { let number_felt: felt252 = unscaled_mag.unwrap().into(); let mut number_i8: i8 = number_felt.try_into().unwrap(); if x.sign { @@ -404,6 +404,6 @@ fn _i8_try_from_fp(x: FP64x64) -> Option { } Option::Some(number_i8) }, - Option::None(_) => Option::None(()) + Option::None => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp8x23/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/core.cairo index 6db9a5a43..20b0788f3 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/core.cairo @@ -427,7 +427,7 @@ fn _i8_try_from_fp(x: FP8x23) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); // Option::Some(i8 { mag: unscaled_mag.unwrap(), sign: x.sign }) match unscaled_mag { - Option::Some(val) => { + Option::Some => { let number_felt: felt252 = unscaled_mag.unwrap().into(); let mut number_i8: i8 = number_felt.try_into().unwrap(); if x.sign { @@ -435,6 +435,6 @@ fn _i8_try_from_fp(x: FP8x23) -> Option { } Option::Some(number_i8) }, - Option::None(_) => Option::None(()) + Option::None => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo b/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo index f019ef08a..58e0ca344 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/helpers.cairo @@ -12,7 +12,7 @@ const DEFAULT_PRECISION: u32 = 8; // 1e-6 fn assert_precise(result: FP8x23, expected: felt252, msg: felt252, custom_precision: Option) { let precision = match custom_precision { Option::Some(val) => val, - Option::None(_) => DEFAULT_PRECISION, + Option::None => DEFAULT_PRECISION, }; let diff = (result - FixedTrait::from_felt(expected)).mag; @@ -26,7 +26,7 @@ fn assert_precise(result: FP8x23, expected: felt252, msg: felt252, 
custom_precis fn assert_relative(result: FP8x23, expected: felt252, msg: felt252, custom_precision: Option) { let precision = match custom_precision { Option::Some(val) => val, - Option::None(_) => DEFAULT_PRECISION, + Option::None => DEFAULT_PRECISION, }; let diff = result - FixedTrait::from_felt(expected); diff --git a/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo b/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo index 9e3d2cdce..0e0b3fa48 100644 --- a/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23/math/core.cairo @@ -212,7 +212,7 @@ fn neg(a: FP8x23) -> FP8x23 { // self is a FP8x23 point value // b is a FP8x23 point value fn pow(a: FP8x23, b: FP8x23) -> FP8x23 { - let (div, rem) = integer::u32_safe_divmod(b.mag, u32_as_non_zero(ONE)); + let (_, rem) = integer::u32_safe_divmod(b.mag, u32_as_non_zero(ONE)); // use the more performant integer pow when y is an int if (rem == 0) { @@ -331,7 +331,7 @@ mod tests { #[should_panic] fn test_negative_try_into_u128() { let a = FixedTrait::::new_unscaled(1, true); - let a: u128 = a.try_into().unwrap(); + let _a: u128 = a.try_into().unwrap(); } #[test] diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo index 30b059b26..1fac5453d 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/core.cairo @@ -256,7 +256,7 @@ impl FP8x23WTryIntoFP8x23 of TryInto { fn try_into(self: FP8x23W) -> Option { match self.mag.try_into() { Option::Some(val) => { Option::Some(FP8x23 { mag: val, sign: self.sign }) }, - Option::None(_) => { Option::None(()) } + Option::None => { Option::None(()) } } } } @@ -443,7 +443,7 @@ fn _i8_try_from_fp(x: FP8x23W) -> Option { let unscaled_mag: Option = (x.mag / ONE).try_into(); match unscaled_mag { - Option::Some(val) => { + Option::Some => { let 
number_felt: felt252 = unscaled_mag.unwrap().into(); let mut number_i8: i8 = number_felt.try_into().unwrap(); if x.sign { @@ -451,6 +451,6 @@ fn _i8_try_from_fp(x: FP8x23W) -> Option { } Option::Some(number_i8) }, - Option::None(_) => Option::None(()) + Option::None => Option::None(()) } } diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo index 292dc44d5..b34475914 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/helpers.cairo @@ -12,7 +12,7 @@ const DEFAULT_PRECISION: u64 = 8; // 1e-6 fn assert_precise(result: FP8x23W, expected: felt252, msg: felt252, custom_precision: Option) { let precision = match custom_precision { Option::Some(val) => val, - Option::None(_) => DEFAULT_PRECISION, + Option::None => DEFAULT_PRECISION, }; let diff = (result - FixedTrait::from_felt(expected)).mag; @@ -28,7 +28,7 @@ fn assert_relative( ) { let precision = match custom_precision { Option::Some(val) => val, - Option::None(_) => DEFAULT_PRECISION, + Option::None => DEFAULT_PRECISION, }; let diff = result - FixedTrait::from_felt(expected); diff --git a/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo b/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo index 9bf6a3db1..3d89ccce0 100644 --- a/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo +++ b/src/numbers/fixed_point/implementations/fp8x23wide/math/core.cairo @@ -212,7 +212,7 @@ fn neg(a: FP8x23W) -> FP8x23W { // self is a FP8x23W point value // b is a FP8x23W point value fn pow(a: FP8x23W, b: FP8x23W) -> FP8x23W { - let (div, rem) = integer::u64_safe_divmod(b.mag, u64_as_non_zero(ONE)); + let (_, rem) = integer::u64_safe_divmod(b.mag, u64_as_non_zero(ONE)); // use the more performant integer pow when y is an int if (rem == 0) { @@ -331,7 +331,7 @@ mod tests { #[should_panic] fn 
test_negative_try_into_u128() { let a = FixedTrait::::new_unscaled(1, true); - let a: u128 = a.try_into().unwrap(); + let _a: u128 = a.try_into().unwrap(); } #[test] diff --git a/src/operators/ml/linear/linear_classifier.cairo b/src/operators/ml/linear/linear_classifier.cairo index fad7ea2d4..b9bed234a 100644 --- a/src/operators/ml/linear/linear_classifier.cairo +++ b/src/operators/ml/linear/linear_classifier.cairo @@ -170,12 +170,12 @@ impl LinearClassifierImpl< let intercepts = TensorTrait::new(shape.span(), intercepts); scores = TensorTrait::add(scores, intercepts); }, - Option::None(_) => {}, + Option::None => {}, }; let (n_classes, classlabels) = match self.classlabels { Option::Some(classlabels) => { (classlabels.len(), classlabels) }, - Option::None(_) => { (0, ArrayTrait::::new().span()) }, + Option::None => { (0, ArrayTrait::::new().span()) }, }; if *coefficients.shape.at(1) == 1 && n_classes == 2 { let mut new_scores = ArrayTrait::new(); @@ -186,7 +186,7 @@ impl LinearClassifierImpl< new_scores.append(NumberTrait::neg(*item)); new_scores.append(*item); }, - Option::None(_) => { break; }, + Option::None => { break; }, } }; scores = TensorTrait::new(array![*scores.shape.at(0), 2].span(), new_scores.span()); @@ -207,7 +207,7 @@ impl LinearClassifierImpl< loop { match labels.data.pop_front() { Option::Some(i) => { labels_list.append(*classlabels[*i]); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } else { diff --git a/src/operators/ml/linear/linear_regressor.cairo b/src/operators/ml/linear/linear_regressor.cairo index 85aec9560..75e461729 100644 --- a/src/operators/ml/linear/linear_regressor.cairo +++ b/src/operators/ml/linear/linear_regressor.cairo @@ -207,7 +207,7 @@ impl LinearRegressorImpl< let intercepts = TensorTrait::new(shape.span(), intercepts); score = TensorTrait::add(score, intercepts); }, - Option::None(_) => {}, + Option::None => {}, }; // Post Transform diff --git a/src/operators/ml/svm/svm_classifier.cairo 
b/src/operators/ml/svm/svm_classifier.cairo index 1c7f4dc2a..fcaee16e3 100644 --- a/src/operators/ml/svm/svm_classifier.cairo +++ b/src/operators/ml/svm/svm_classifier.cairo @@ -678,7 +678,6 @@ fn compute_final_scores< has_proba: bool, classlabels: Span ) -> (usize, Tensor) { - let mut max_weight = 0; let (max_class, max_weight) = if votes.len() > 0 { let max_class = argmax_span(votes); @@ -726,7 +725,6 @@ fn write_scores< >( n_classes: usize, scores: Tensor, post_transform: POST_TRANSFORM, add_second_class: usize ) -> Tensor { - let mut write_additional_scores = 0; let new_scores = if n_classes >= 2 { let new_scores = match post_transform { @@ -895,7 +893,6 @@ fn probablities< *scores.at(index), *self.prob_a.at(index), *self.prob_b.at(index) ); - let mut val2 = NumberTrait::max(val1, NumberTrait::zero()); // ONNX : max(val1, 1.0e-7) let mut val2 = NumberTrait::min( val1, NumberTrait::one() ); // ONNX : min(val2, (1 - 1.0e-7)) diff --git a/src/operators/ml/tree_ensemble/core.cairo b/src/operators/ml/tree_ensemble/core.cairo index 7cbe7122f..08b4b6ef6 100644 --- a/src/operators/ml/tree_ensemble/core.cairo +++ b/src/operators/ml/tree_ensemble/core.cairo @@ -112,7 +112,7 @@ impl TreeEnsembleImpl< TreeEnsembleImpl::::leaf_index_tree(ref self, row_data, *tree_id) ) }, - Option::None(_) => { break; } + Option::None => { break; } }; }; outputs.append_all(ref outs); diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo index 051965260..ab073a5b5 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo @@ -361,7 +361,7 @@ impl TreeEnsembleClassifierImpl< ); t_index.append(class_index.get(key).deref()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; let mut t_index = t_index.span(); @@ -372,7 +372,7 @@ impl TreeEnsembleClassifierImpl< loop { match its.pop_front() { Option::Some(it) => 
{ - let prev_val = match res.get(i, *self.class_ids[*it]) { + match res.get(i, *self.class_ids[*it]) { Option::Some(val) => { res .set( @@ -391,11 +391,11 @@ impl TreeEnsembleClassifierImpl< }, }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; i += 1; @@ -408,8 +408,8 @@ impl TreeEnsembleClassifierImpl< let mut class_id: usize = 0; // Get first class_id in class_ids match class_ids.pop_front() { - Option::Some(c_id) => { let mut class_id = *c_id; }, - Option::None(_) => { let mut class_id: usize = 0; } + Option::Some(c_id) => { class_id = *c_id; }, + Option::None => { class_id = 0; } }; loop { if i == self.class_ids.len() { @@ -425,7 +425,7 @@ impl TreeEnsembleClassifierImpl< break; } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -438,9 +438,9 @@ impl TreeEnsembleClassifierImpl< break; } // Exchange - let res_ele_1 = match res.get(i, 0) { + match res.get(i, 0) { Option::Some(res_0) => { new_res.set(i, 1, res_0); }, - Option::None(_) => { new_res.set(i, 1, NumberTrait::zero()); }, + Option::None => { new_res.set(i, 1, NumberTrait::zero()); }, }; i += 1; }; @@ -452,12 +452,12 @@ impl TreeEnsembleClassifierImpl< break; } // Exchange - let res_ele_0 = match new_res.get(i, 1) { + match new_res.get(i, 1) { Option::Some(res_1) => { let value = NumberTrait::sub(NumberTrait::one(), res_1); new_res.set(i, 0, value); }, - Option::None(_) => { new_res.set(i, 0, NumberTrait::zero()); }, + Option::None => { new_res.set(i, 0, NumberTrait::zero()); }, }; i += 1; }; @@ -469,9 +469,9 @@ impl TreeEnsembleClassifierImpl< break; } // Exchange - let res_ele_0 = match new_res.get(i, 1) { + match new_res.get(i, 1) { Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); }, - Option::None(_) => { new_res.set(i, 0, NumberTrait::zero()); }, + Option::None => { new_res.set(i, 0, NumberTrait::zero()); }, }; i += 1; }; @@ -483,9 +483,9 @@ impl TreeEnsembleClassifierImpl< 
break; } // Exchange - let res_ele_0 = match new_res.get(i, 1) { + match new_res.get(i, 1) { Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); }, - Option::None(_) => { new_res.set(i, 0, NumberTrait::zero()); }, + Option::None => { new_res.set(i, 0, NumberTrait::zero()); }, }; i += 1; }; @@ -497,9 +497,9 @@ impl TreeEnsembleClassifierImpl< break; } // Exchange - let res_ele_0 = match new_res.get(i, 1) { + match new_res.get(i, 1) { Option::Some(res_1) => { new_res.set(i, 0, res_1.neg()); }, - Option::None(_) => { new_res.set(i, 0, NumberTrait::zero()); }, + Option::None => { new_res.set(i, 0, NumberTrait::zero()); }, }; i += 1; }; @@ -511,12 +511,12 @@ impl TreeEnsembleClassifierImpl< break; } // Exchange - let res_ele_0 = match new_res.get(i, 1) { + match new_res.get(i, 1) { Option::Some(res_1) => { let value = NumberTrait::sub(NumberTrait::one(), res_1); new_res.set(i, 0, value); }, - Option::None(_) => { new_res.set(i, 0, NumberTrait::zero()); }, + Option::None => { new_res.set(i, 0, NumberTrait::zero()); }, }; i += 1; }; @@ -541,7 +541,7 @@ impl TreeEnsembleClassifierImpl< loop { match labels.pop_front() { Option::Some(i) => { labels_list.append(*self.classlabels[*i]); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo index 9848efd9e..215ad2a96 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo @@ -301,7 +301,7 @@ impl TreeEnsembleRegressorImpl< ); t_index.append(target_index.get(key).deref()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; let mut t_index = t_index.span(); @@ -354,7 +354,7 @@ impl TreeEnsembleRegressorImpl< POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), }; - return res; + return new_scores; } } @@ -387,7 +387,7 @@ fn compute_res_SUM< loop { 
match its.pop_front() { Option::Some(it) => { - let prev_val = match res.get(i, *self.target_ids[*it]) { + match res.get(i, *self.target_ids[*it]) { Option::Some(val) => { res .set( @@ -401,11 +401,11 @@ fn compute_res_SUM< }, }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } @@ -441,7 +441,7 @@ fn compute_res_AVERAGE< loop { match its.pop_front() { Option::Some(it) => { - let prev_val = match res.get(i, *self.target_ids[*it]) { + match res.get(i, *self.target_ids[*it]) { Option::Some(val) => { res .set( @@ -460,11 +460,11 @@ fn compute_res_AVERAGE< }, }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } @@ -505,7 +505,7 @@ fn compute_res_MIN< loop { match its.pop_front() { Option::Some(it) => { - let prev_val = match res.get(i, *self.target_ids[*it]) { + match res.get(i, *self.target_ids[*it]) { Option::Some(val) => { res .set( @@ -519,11 +519,11 @@ fn compute_res_MIN< }, }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } @@ -565,7 +565,7 @@ fn compute_res_MAX< loop { match its.pop_front() { Option::Some(it) => { - let prev_val = match res.get(i, *self.target_ids[*it]) { + match res.get(i, *self.target_ids[*it]) { Option::Some(val) => { res .set( @@ -579,11 +579,11 @@ fn compute_res_MAX< }, }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } diff --git a/src/operators/nn/functional/col2im.cairo b/src/operators/nn/functional/col2im.cairo index 9dc81a117..1f1aa0d48 100644 --- a/src/operators/nn/functional/col2im.cairo +++ b/src/operators/nn/functional/col2im.cairo @@ -145,7 +145,6 @@ fn col2im_naive_implementation< col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides); - let 
data_col = data; let mut dim_col = ArrayTrait::new(); let mut i = 0; loop { diff --git a/src/operators/nn/functional/conv.cairo b/src/operators/nn/functional/conv.cairo index c0d5cb2e1..926bcb2b5 100644 --- a/src/operators/nn/functional/conv.cairo +++ b/src/operators/nn/functional/conv.cairo @@ -217,7 +217,6 @@ fn conv< if i == res_b.len() { break; } - let b = *res_b.at(i); let cv = *res_cv.at(i); let mut n = 0; @@ -295,7 +294,6 @@ fn conv< }; let new_shape = new_shape.span(); let new_w_strides = stride(new_shape); - let w_strides = stride((*W).shape); let mut new_w = NullableVecImpl::new(); new_w.set(*new_shape.at(0) * *new_w_strides.at(0) - 1, NumberTrait::zero()); @@ -352,8 +350,6 @@ fn conv< prev = flatten_index; i += 1; }; - let W = @TensorTrait::new(new_shape, new_w_arr.span()); - let kernel_shape = new_kernel_shape; } let pads = match auto_pad { @@ -1474,7 +1470,6 @@ fn cartesian(mut arrays: Span>,) -> Span> { let mut i = 0; let mut size_arrays = ArrayTrait::new(); - let mut m = n; loop { if i == arrays.len() { break; diff --git a/src/operators/nn/functional/conv_transpose.cairo b/src/operators/nn/functional/conv_transpose.cairo index 2b84def04..bd324e0d6 100644 --- a/src/operators/nn/functional/conv_transpose.cairo +++ b/src/operators/nn/functional/conv_transpose.cairo @@ -91,7 +91,7 @@ fn conv_transpose< strides.span() }, }; - let (pads, n_dims, output_shape) = match pads { + let (pads, _, output_shape) = match pads { Option::Some(pads) => { let n_dims = (*X).shape.len() - 2; @@ -332,8 +332,6 @@ fn conv_transpose< let n = prod((*X).shape, 2); let k = C / group; - let w_reshaped = TensorTrait::new(array![group, k, m].span(), (*W).data); - let mut final = ArrayTrait::new(); if group == 1 { @@ -560,7 +558,6 @@ fn col2im_naive_implementation< col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides); - let data_col = data; let mut dim_col = ArrayTrait::new(); let mut i = 0; loop { diff --git a/src/operators/nn/functional/gemm.cairo 
b/src/operators/nn/functional/gemm.cairo index f9cce21af..c37bda880 100644 --- a/src/operators/nn/functional/gemm.cairo +++ b/src/operators/nn/functional/gemm.cairo @@ -59,6 +59,6 @@ fn gemm< return mul_by_scalar(@A.matmul(@B), alpha) + mul_by_scalar(@c, beta); }, - Option::None(_) => { return mul_by_scalar(@A.matmul(@B), alpha); } + Option::None => { return mul_by_scalar(@A.matmul(@B), alpha); } } } diff --git a/src/operators/nn/functional/grid_sample.cairo b/src/operators/nn/functional/grid_sample.cairo index 50e94b420..ed1cb01b6 100644 --- a/src/operators/nn/functional/grid_sample.cairo +++ b/src/operators/nn/functional/grid_sample.cairo @@ -885,7 +885,6 @@ fn cartesian(mut arrays: Span>,) -> Span> { let mut i = 0; let mut size_arrays = ArrayTrait::new(); - let mut m = n; loop { if i == arrays.len() { break; diff --git a/src/operators/nn/functional/hard_sigmoid.cairo b/src/operators/nn/functional/hard_sigmoid.cairo index 5d4a69b8d..bd9714757 100644 --- a/src/operators/nn/functional/hard_sigmoid.cairo +++ b/src/operators/nn/functional/hard_sigmoid.cairo @@ -32,7 +32,7 @@ fn hard_sigmoid< let result = temp.min(NumberTrait::one()).max(NumberTrait::zero()); data_result.append(result); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/nn/functional/leaky_relu.cairo b/src/operators/nn/functional/leaky_relu.cairo index 5aa29c5bd..d1677d48f 100644 --- a/src/operators/nn/functional/leaky_relu.cairo +++ b/src/operators/nn/functional/leaky_relu.cairo @@ -34,7 +34,7 @@ fn leaky_relu< data_result.append(*item * *alpha); }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/nn/functional/relu.cairo b/src/operators/nn/functional/relu.cairo index d0fd2e7ca..7555c515d 100644 --- a/src/operators/nn/functional/relu.cairo +++ b/src/operators/nn/functional/relu.cairo @@ -28,7 +28,7 @@ fn relu< data_result.append(*item); }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; 
diff --git a/src/operators/nn/functional/sigmoid.cairo b/src/operators/nn/functional/sigmoid.cairo index 57358a14e..c7ed638aa 100644 --- a/src/operators/nn/functional/sigmoid.cairo +++ b/src/operators/nn/functional/sigmoid.cairo @@ -32,7 +32,7 @@ fn sigmoid< / (NumberTrait::one() + (*item * NumberTrait::neg_one()).exp()); data_result.append(result); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/nn/functional/softmax_zero.cairo b/src/operators/nn/functional/softmax_zero.cairo index c0aa13dac..e90dd4784 100644 --- a/src/operators/nn/functional/softmax_zero.cairo +++ b/src/operators/nn/functional/softmax_zero.cairo @@ -87,7 +87,7 @@ fn exp_zero< result.append((*item).exp()); } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -130,7 +130,7 @@ fn exp_upcast_zero< result.append((TIntoW::into(*item)).exp()); } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -176,10 +176,10 @@ fn reduce_sum_no_zero< let mut index: usize = 0; loop { let output_indices = unravel_index(index, output_shape); - let current_sum = accumulate_sum::(*self.data, *self.shape, output_indices, axis); + let mut current_sum = accumulate_sum::(*self.data, *self.shape, output_indices, axis); if current_sum == NumberTrait::zero() { - let current_sum: T = NumberTrait::one(); + current_sum = NumberTrait::one(); } output_data.append(current_sum); diff --git a/src/operators/nn/functional/softplus.cairo b/src/operators/nn/functional/softplus.cairo index c4f976085..1d876c535 100644 --- a/src/operators/nn/functional/softplus.cairo +++ b/src/operators/nn/functional/softplus.cairo @@ -30,7 +30,7 @@ fn softplus< let result = (FixedTrait::ONE() + (*item).exp()).ln(); data_result.append(result); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/nn/functional/softsign.cairo b/src/operators/nn/functional/softsign.cairo index f3326df57..8d20ff297 100644 --- 
a/src/operators/nn/functional/softsign.cairo +++ b/src/operators/nn/functional/softsign.cairo @@ -30,7 +30,7 @@ fn softsign< let result = *item / (FixedTrait::ONE() + (*item).abs()); data_result.append(result); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/nn/functional/thresholded_relu.cairo b/src/operators/nn/functional/thresholded_relu.cairo index 43d0e5a81..36533660b 100644 --- a/src/operators/nn/functional/thresholded_relu.cairo +++ b/src/operators/nn/functional/thresholded_relu.cairo @@ -28,7 +28,7 @@ fn thresholded_relu< data_result.append(*item); }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/sequence/functional/concat_from_sequence.cairo b/src/operators/sequence/functional/concat_from_sequence.cairo index 4503aa996..336bb0553 100644 --- a/src/operators/sequence/functional/concat_from_sequence.cairo +++ b/src/operators/sequence/functional/concat_from_sequence.cairo @@ -20,7 +20,7 @@ fn concat_from_sequence< assert(val == 0 || val == 1, 'new_axis must be 0 or 1'); val }, - Option::None(_) => 0 + Option::None => 0 }; let first_tensor = *sequence.at(0); @@ -86,7 +86,7 @@ fn concat_with_new_axis< let mut reshaped_tensor = add_new_dimension(input_sequence_value, axis_value); reshaped_sequence.append(reshaped_tensor); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; concat(reshaped_sequence.span(), axis_value) @@ -110,7 +110,7 @@ fn add_new_dimension< new_tensor_shape.append(*tensor_shape_value); tensor_shape_counter += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; if axis >= tensor.shape.len() { diff --git a/src/operators/sequence/functional/sequence_erase.cairo b/src/operators/sequence/functional/sequence_erase.cairo index 573087b1f..3c6a6d57a 100644 --- a/src/operators/sequence/functional/sequence_erase.cairo +++ b/src/operators/sequence/functional/sequence_erase.cairo @@ -11,7 +11,7 @@ fn sequence_erase, impl 
TCopy: Copy, impl TDr ) -> Array> { let position: Tensor = match position { Option::Some(p) => p, - Option::None(_) => { + Option::None => { let mut shape = ArrayTrait::::new(); let mut data = ArrayTrait::::new(); data.append(-1_i32); @@ -49,7 +49,7 @@ fn sequence_erase, impl TCopy: Copy, impl TDr tensor_counter += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/sequence/functional/sequence_insert.cairo b/src/operators/sequence/functional/sequence_insert.cairo index 412fc6c4b..83b333387 100644 --- a/src/operators/sequence/functional/sequence_insert.cairo +++ b/src/operators/sequence/functional/sequence_insert.cairo @@ -11,7 +11,7 @@ fn sequence_insert, impl TCopy: Copy, impl TD ) -> Array> { let position: Tensor = match position { Option::Some(p) => p, - Option::None(_) => { + Option::None => { let mut shape = ArrayTrait::::new(); let mut data = ArrayTrait::::new(); data.append(-1_i32); @@ -50,7 +50,7 @@ fn sequence_insert, impl TCopy: Copy, impl TD position_value -= 1; } }, - Option::None(_) => { break; }, + Option::None => { break; }, }; }; diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 058c0162a..23018044d 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -5709,7 +5709,7 @@ fn ravel_index(mut shape: Span, mut indices: Span) -> usize { stride *= *i; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -5734,7 +5734,7 @@ fn unravel_index(index: usize, mut shape: Span) -> Span { result.append(coord); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -5755,7 +5755,7 @@ fn stride(mut shape: Span) -> Span { temp_result.append(accumulated); accumulated *= *i; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -5763,7 +5763,7 @@ fn stride(mut shape: Span) -> Span { loop { match temp_result.pop_back() { Option::Some(val) => { result.append(*val); }, - Option::None(_) => { break; } + 
Option::None => { break; } }; }; @@ -5820,7 +5820,7 @@ fn slice, impl TCopy: Copy, impl TDrop: Drop< ) -> Tensor { let axes = match axes { Option::Some(axes) => axes, - Option::None(_) => { + Option::None => { let mut ret: Array = ArrayTrait::new(); let mut i: usize = 0; let stop_i = starts.len() - 1; @@ -5836,7 +5836,7 @@ fn slice, impl TCopy: Copy, impl TDrop: Drop< }; let steps = match steps { Option::Some(steps) => steps, - Option::None(_) => { + Option::None => { let mut ret: Array = ArrayTrait::new(); let mut i: usize = 0; let stop_i = starts.len() - 1; @@ -5867,7 +5867,7 @@ fn slice, impl TCopy: Copy, impl TDrop: Drop< Option::Some(ele) => { let (axis_index, is_found) = match axes.index_of(i) { Option::Some(axis_index) => (axis_index, true), - Option::None(_) => (0, false), + Option::None => (0, false), }; let mut processed_params = (0, 0, 0, 0); @@ -5915,7 +5915,7 @@ fn slice, impl TCopy: Copy, impl TDrop: Drop< i += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -5939,7 +5939,7 @@ fn slice, impl TCopy: Copy, impl TDrop: Drop< let mut steps = processed_steps.span(); loop { match shape.pop_front() { - Option::Some(item) => { + Option::Some => { let start = *starts.pop_front().unwrap(); let end = *ends.pop_front().unwrap(); let step = *steps.pop_front().unwrap(); @@ -5956,7 +5956,7 @@ fn slice, impl TCopy: Copy, impl TDrop: Drop< break (); } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -5966,7 +5966,7 @@ fn slice, impl TCopy: Copy, impl TDrop: Drop< j += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -5999,17 +5999,17 @@ fn nonzero< let mut self_shape_copy = *self.shape; loop { match self_shape_copy.pop_front() { - Option::Some(val) => { + Option::Some => { indexes_of_dimensions.append(*indices.at(i)); i += 1; }, - Option::None(_) => { break (); } + Option::None => { break (); } }; }; } j += 1; }, - Option::None(_) => { break (); } + Option::None => { break (); } }; }; @@ 
-6028,7 +6028,7 @@ fn nonzero< loop { match self_shape_copy.pop_front() { - Option::Some(val) => { + Option::Some => { let mut k: usize = 0; loop { @@ -6041,7 +6041,7 @@ fn nonzero< }; i += 1; }, - Option::None(_) => { break (); } + Option::None => { break (); } }; }; @@ -6088,17 +6088,17 @@ fn squeeze(self: @Tensor, axes: Option>) -> Tensor { reshape.append(*shape); } }, - Option::None(_) => { break; }, + Option::None => { break; }, }; index += 1; }; shape = reshape.span(); }, - Option::None(_) => { break shape; }, + Option::None => { break shape; }, }; } }, - Option::None(_) => { + Option::None => { let mut reshape: Array = ArrayTrait::new(); let mut shape = *self.shape; loop { @@ -6106,7 +6106,7 @@ fn squeeze(self: @Tensor, axes: Option>) -> Tensor { Option::Some(shape) => { if *shape != 1 { reshape.append(*shape); } }, - Option::None(_) => { break reshape.span(); }, + Option::None => { break reshape.span(); }, }; } }, @@ -6133,7 +6133,7 @@ fn unsqueeze(self: @Tensor, axes: Span) -> Tensor { output_shape.append(*val); i += 1; }, - Option::None(_) => { break (); } + Option::None => { break (); } }; }; }; @@ -6178,7 +6178,7 @@ fn sign< }; sign_data_array.append(sign_data); }, - Option::None(_) => { + Option::None => { break Tensor:: { shape: *self.shape, data: sign_data_array.span() }; } }; @@ -6199,11 +6199,11 @@ fn clip< ) -> Tensor { let min = match min { Option::Some(min) => min, - Option::None(_) => { NumberTrait::min_value() }, + Option::None => { NumberTrait::min_value() }, }; let max = match max { Option::Some(max) => max, - Option::None(_) => { NumberTrait::max_value() }, + Option::None => { NumberTrait::max_value() }, }; let mut return_data: Array = ArrayTrait::new(); @@ -6220,7 +6220,7 @@ fn clip< return_data.append(*val); } }, - Option::None(_) => { break (); } + Option::None => { break (); } }; }; diff --git a/src/operators/tensor/helpers.cairo b/src/operators/tensor/helpers.cairo index ea29aa110..caa8d3b21 100644 --- 
a/src/operators/tensor/helpers.cairo +++ b/src/operators/tensor/helpers.cairo @@ -23,7 +23,7 @@ fn len_from_shape(mut shape: Span) -> usize { loop { match shape.pop_front() { Option::Some(item) => { result *= *item; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -63,7 +63,7 @@ fn check_compatibility(mut shape_1: Span, mut shape_2: Span) { 'tensors shape must match' ); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } @@ -94,7 +94,7 @@ fn broadcast_index_mapping(mut shape: Span, mut indices: Span) -> let index = (indices_val % *shape_val) * stride_val; result += index; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -133,7 +133,7 @@ fn reduce_output_shape(mut input_shape: Span, axis: usize, keepdims: bool n += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -163,7 +163,7 @@ fn permutation_output_shape(input_shape: Span, mut axes: Span) -> loop { match axes.pop_front() { Option::Some(item) => { output_shape.append(*input_shape[*item]); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -233,7 +233,7 @@ fn find_axis(mut axes: Span, target_axis: usize) -> usize { } axis += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -262,14 +262,14 @@ fn broadcast_shape(mut shape1: Span, mut shape2: Span) -> Span { dim1 = *item; }, - Option::None(_) => { if shape1.len() == 0 && shape2.len() == 0 { + Option::None => { if shape1.len() == 0 && shape2.len() == 0 { break (); }; } }; match shape2.pop_front() { Option::Some(item) => { dim2 = *item; }, - Option::None(_) => { if shape1.len() == 0 && shape2.len() == 0 { + Option::None => { if shape1.len() == 0 && shape2.len() == 0 { break (); }; } }; @@ -310,7 +310,7 @@ fn replace_index(mut shape: Span, index: usize, value: usize) -> Span { break; } + Option::None => { break; } }; }; @@ -512,14 +512,14 @@ fn optional_has_element, +Drop, +TensorTrait,>( x: Option> ) -> Tensor { match x{ - 
Option::Some(ele) => { + Option::Some => { let mut shape = ArrayTrait::::new(); shape.append(1); let mut data = ArrayTrait::::new(); data.append(true); TensorTrait::new(shape.span(), data.span()) }, - Option::None(_) => { + Option::None => { let mut shape = ArrayTrait::::new(); shape.append(1); let mut data = ArrayTrait::::new(); @@ -548,7 +548,7 @@ fn optional_get_element, +Drop, +TensorTrait,>( Option::Some(ele) => { ele }, - Option::None(_) => { + Option::None => { panic(array!['The input is an empty', 'optional-type.']) } } diff --git a/src/operators/tensor/linalg/matmul.cairo b/src/operators/tensor/linalg/matmul.cairo index 435f258a8..5be41efa5 100644 --- a/src/operators/tensor/linalg/matmul.cairo +++ b/src/operators/tensor/linalg/matmul.cairo @@ -78,7 +78,7 @@ fn dot_product< let element_product = *vec1_item * *vec2.pop_front().unwrap(); result += element_product; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -185,7 +185,7 @@ fn prepare_shape_for_matmul(mut shape: Span, is_first_tensor: bool) -> Sp loop { match shape.pop_front() { Option::Some(item) => { shape_adjusted.append(*item); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -197,7 +197,7 @@ fn prepare_shape_for_matmul(mut shape: Span, is_first_tensor: bool) -> Sp loop { match shape.pop_front() { Option::Some(item) => { shape_adjusted.append(*item); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/linalg/trilu.cairo b/src/operators/tensor/linalg/trilu.cairo index 0c2ef1bdb..08bfdcc98 100644 --- a/src/operators/tensor/linalg/trilu.cairo +++ b/src/operators/tensor/linalg/trilu.cairo @@ -41,7 +41,7 @@ fn trilu< i += 1; output_size.append(*val); }, - Option::None(_) => { break (); } + Option::None => { break (); } } } } @@ -79,7 +79,7 @@ fn trilu< NumberTrait::zero() } }, - Option::None(_) => { break (); } + Option::None => { break (); } }; output_data.append(result); diff --git 
a/src/operators/tensor/manipulation/reverse_sequence.cairo b/src/operators/tensor/manipulation/reverse_sequence.cairo index e59485ffd..efec92399 100644 --- a/src/operators/tensor/manipulation/reverse_sequence.cairo +++ b/src/operators/tensor/manipulation/reverse_sequence.cairo @@ -44,7 +44,7 @@ fn reverse_sequence< Option::Some(ele) => { data.append(*((*self).data).at(ele)); }, - Option::None(_) => { + Option::None => { break; } } diff --git a/src/operators/tensor/manipulation/split.cairo b/src/operators/tensor/manipulation/split.cairo index 495fa2ecc..3919c034f 100644 --- a/src/operators/tensor/manipulation/split.cairo +++ b/src/operators/tensor/manipulation/split.cairo @@ -13,11 +13,11 @@ fn split< self: @Tensor, axis: usize, num_outputs: Option, split: Option> ) -> Array> { let has_num_outputs = match num_outputs { - Option::Some(value) => { true }, + Option::Some => { true }, Option::None => false, }; let has_split = match split { - Option::Some(value) => { true }, + Option::Some => { true }, Option::None => false, }; assert(!(has_num_outputs && has_split), 'split or num_outputs not both.'); @@ -73,7 +73,7 @@ fn split_num_outputs, +Drop, +TensorTrait,>( Option::Some(split_last_one) => { split.append(split_last_one + *(*t).shape.at(axis) - div * (num_outputs - 1)); }, - Option::None(_) => { assert(false, 'split is none array'); } + Option::None => { assert(false, 'split is none array'); } } } @@ -101,14 +101,14 @@ fn split_num_outputs, +Drop, +TensorTrait,>( let end_ele_0 = match sli.get(axis, 0) { Option::Some(res) => { res }, - Option::None(_) => { + Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, }; let end_ele_1 = match sli.get(axis, 1) { Option::Some(res) => { res }, - Option::None(_) => { + Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, @@ -154,14 +154,14 @@ fn split_has_split, +Drop, +TensorTrait,>( let end_ele_0 = match sli.get(axis, 0) { Option::Some(res) => { res }, - Option::None(_) => { + Option::None => { 
assert(false, 'Get end_ele_0 is failed'); 0 }, }; let end_ele_1 = match sli.get(axis, 1) { Option::Some(res) => { res }, - Option::None(_) => { + Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, diff --git a/src/operators/tensor/manipulation/split_to_sequence.cairo b/src/operators/tensor/manipulation/split_to_sequence.cairo index 53f0f07bc..7ff3ff8db 100644 --- a/src/operators/tensor/manipulation/split_to_sequence.cairo +++ b/src/operators/tensor/manipulation/split_to_sequence.cairo @@ -13,7 +13,7 @@ fn split_to_sequence< self: @Tensor, axis: usize, keepdims: usize, split: Option> ) -> Array> { let has_split = match split { - Option::Some(value) => { true }, + Option::Some => { true }, Option::None => false, }; let mut has_num_outputs = false; @@ -121,7 +121,7 @@ fn split_num_outputs< Option::Some(split_last_one) => { split.append(split_last_one + *(*t).shape.at(axis) - div * (num_outputs - 1)); }, - Option::None(_) => { assert(false, 'split is none array'); } + Option::None => { assert(false, 'split is none array'); } } } @@ -149,14 +149,14 @@ fn split_num_outputs< let end_ele_0 = match sli.get(axis, 0) { Option::Some(res) => { res }, - Option::None(_) => { + Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, }; let end_ele_1 = match sli.get(axis, 1) { Option::Some(res) => { res }, - Option::None(_) => { + Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, @@ -207,14 +207,14 @@ fn split_has_split< let end_ele_0 = match sli.get(axis, 0) { Option::Some(res) => { res }, - Option::None(_) => { + Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, }; let end_ele_1 = match sli.get(axis, 1) { Option::Some(res) => { res }, - Option::None(_) => { + Option::None => { assert(false, 'Get end_ele_0 is failed'); 0 }, diff --git a/src/operators/tensor/math/abs.cairo b/src/operators/tensor/math/abs.cairo index a7d8956ac..129e05b40 100644 --- a/src/operators/tensor/math/abs.cairo +++ b/src/operators/tensor/math/abs.cairo @@ 
-20,7 +20,7 @@ fn abs< loop { match z.data.pop_front() { Option::Some(item) => { data_result.append((*item).abs()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/acos.cairo b/src/operators/tensor/math/acos.cairo index c36260752..477f11450 100644 --- a/src/operators/tensor/math/acos.cairo +++ b/src/operators/tensor/math/acos.cairo @@ -21,7 +21,7 @@ fn acos< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).acos()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/acosh.cairo b/src/operators/tensor/math/acosh.cairo index f486d5609..c9d159ca0 100644 --- a/src/operators/tensor/math/acosh.cairo +++ b/src/operators/tensor/math/acosh.cairo @@ -23,7 +23,7 @@ fn acosh< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).acosh()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/argmax.cairo b/src/operators/tensor/math/argmax.cairo index 52671766a..d4b54f9ae 100644 --- a/src/operators/tensor/math/argmax.cairo +++ b/src/operators/tensor/math/argmax.cairo @@ -22,12 +22,12 @@ fn argmax< ) -> Tensor { let keepdims = match keepdims { Option::Some(val) => val, - Option::None(_) => true, + Option::None => true, }; let select_last_index = match select_last_index { Option::Some(val) => val, - Option::None(_) => false, + Option::None => false, }; assert(axis <= (*self.shape).len(), 'axis out of dimensions'); @@ -88,7 +88,7 @@ fn find_argmax_1D< let mut max = match input.data.pop_front() { Option::Some(item) => *item, - Option::None(_) => { + Option::None => { return TensorTrait::< usize >::new(reduce_output_shape(input.shape, axis, keepdims), output_data.span()); @@ -111,7 +111,7 @@ fn find_argmax_1D< } }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/argmin.cairo 
b/src/operators/tensor/math/argmin.cairo index 77eef9a99..51502fd52 100644 --- a/src/operators/tensor/math/argmin.cairo +++ b/src/operators/tensor/math/argmin.cairo @@ -22,12 +22,12 @@ fn argmin< ) -> Tensor { let keepdims = match keepdims { Option::Some(val) => val, - Option::None(_) => true, + Option::None => true, }; let select_last_index = match select_last_index { Option::Some(val) => val, - Option::None(_) => false, + Option::None => false, }; assert(axis <= (*self.shape).len(), 'axis out of dimensions'); @@ -89,7 +89,7 @@ fn find_argmin_1D< let mut min = match input.data.pop_front() { Option::Some(item) => *item, - Option::None(_) => { + Option::None => { return TensorTrait::< usize >::new(reduce_output_shape(input.shape, axis, keepdims), output_data.span()); @@ -112,7 +112,7 @@ fn find_argmin_1D< } }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/arithmetic.cairo b/src/operators/tensor/math/arithmetic.cairo index 89f4869bd..fdbbb7863 100644 --- a/src/operators/tensor/math/arithmetic.cairo +++ b/src/operators/tensor/math/arithmetic.cairo @@ -59,7 +59,7 @@ fn add_by_scalar< loop { match input_data.pop_front() { Option::Some(ele) => { data_result.append(*ele + val); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -161,7 +161,7 @@ fn sub_by_scalar< loop { match input_data.pop_front() { Option::Some(ele) => { data_result.append(*ele - val); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -263,7 +263,7 @@ fn mul_by_scalar< loop { match input_data.pop_front() { Option::Some(ele) => { data_result.append(*ele * val); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -365,7 +365,7 @@ fn div_by_scalar< loop { match input_data.pop_front() { Option::Some(ele) => { data_result.append(*ele / val); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/asin.cairo 
b/src/operators/tensor/math/asin.cairo index b33132797..60c440d8d 100644 --- a/src/operators/tensor/math/asin.cairo +++ b/src/operators/tensor/math/asin.cairo @@ -22,7 +22,7 @@ fn asin< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).asin()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/asinh.cairo b/src/operators/tensor/math/asinh.cairo index 8d015554d..b94efa9a4 100644 --- a/src/operators/tensor/math/asinh.cairo +++ b/src/operators/tensor/math/asinh.cairo @@ -24,7 +24,7 @@ fn asinh< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).asinh()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/atan.cairo b/src/operators/tensor/math/atan.cairo index b5f93eb1c..f08271c0c 100644 --- a/src/operators/tensor/math/atan.cairo +++ b/src/operators/tensor/math/atan.cairo @@ -23,7 +23,7 @@ fn atan< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).atan()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/binarizer.cairo b/src/operators/tensor/math/binarizer.cairo index bc42e7ac4..0a02bc91b 100644 --- a/src/operators/tensor/math/binarizer.cairo +++ b/src/operators/tensor/math/binarizer.cairo @@ -34,7 +34,7 @@ fn binarizer< binarized_data.append(NumberTrait::zero()); } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/ceil.cairo b/src/operators/tensor/math/ceil.cairo index 73182f09a..b6448b11d 100644 --- a/src/operators/tensor/math/ceil.cairo +++ b/src/operators/tensor/math/ceil.cairo @@ -21,7 +21,7 @@ fn ceil< loop { match z.data.pop_front() { Option::Some(item) => { data_result.append((*item).ceil()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/compress.cairo 
b/src/operators/tensor/math/compress.cairo index d22eb1d82..80a4f7648 100644 --- a/src/operators/tensor/math/compress.cairo +++ b/src/operators/tensor/math/compress.cairo @@ -19,7 +19,7 @@ fn compress, impl TCopy: Copy, impl TDro ) -> Tensor { let axis = match axis { Option::Some(val) => val, - Option::None(_) => 999 + Option::None => 999 }; let data_rank = (*self.shape).len(); @@ -28,7 +28,6 @@ fn compress, impl TCopy: Copy, impl TDro assert((condition_rank == 1), 'condition rank must be 1'); let mut data_shape = *self.shape; - let mut condition_shape = condition.shape; if (axis != 999) { assert(*data_shape.at(axis) >= condition.data.len(), 'index out of bound'); @@ -38,7 +37,6 @@ fn compress, impl TCopy: Copy, impl TDro let mut index_data = ArrayTrait::new(); let mut output_data = ArrayTrait::new(); - let mut data = *self.data; let mut condition_data = condition.data; let mut ind = 0; @@ -52,7 +50,7 @@ fn compress, impl TCopy: Copy, impl TDro } ind += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -63,7 +61,7 @@ fn compress, impl TCopy: Copy, impl TDro loop { match data_shape.pop_front() { Option::Some(val) => { total_shape *= *val; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -79,7 +77,7 @@ fn compress, impl TCopy: Copy, impl TDro } ind += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } else { @@ -108,15 +106,13 @@ fn compress, impl TCopy: Copy, impl TDro } ind += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; let mut ind = 0; - let mut ind_loop = 0; let mut inner_index: usize = 0; - let mut condition_data_clone = condition_data.clone(); loop { if (ind == other_loop_breaker) { @@ -141,7 +137,7 @@ fn compress, impl TCopy: Copy, impl TDro } inner_index += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -151,7 +147,7 @@ fn compress, impl TCopy: Copy, impl TDro loop { match index_data.pop_front() { Option::Some(val) => { 
output_data.append(*self.data[val]); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } diff --git a/src/operators/tensor/math/concat.cairo b/src/operators/tensor/math/concat.cairo index 77ed9dac3..1826d8d69 100644 --- a/src/operators/tensor/math/concat.cairo +++ b/src/operators/tensor/math/concat.cairo @@ -47,11 +47,11 @@ fn validate_shapes(mut tensors: Span>, mut base_shape: Span, } axis_index += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } @@ -65,7 +65,7 @@ fn compute_output_size( loop { match tensors.pop_front() { Option::Some(tensor) => { axis_size += *(*tensor.shape).at(axis); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -80,7 +80,7 @@ fn compute_output_size( } shape_index += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -117,7 +117,7 @@ fn concatenate_data, impl TDrop: Drop,>( inner_index += 1; }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -141,7 +141,7 @@ fn product_upto(mut shape: Span, upto: usize) -> usize { total *= *val; index += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/cos.cairo b/src/operators/tensor/math/cos.cairo index 5abeb327d..943b6528b 100644 --- a/src/operators/tensor/math/cos.cairo +++ b/src/operators/tensor/math/cos.cairo @@ -24,7 +24,7 @@ fn cos< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).cos()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/cosh.cairo b/src/operators/tensor/math/cosh.cairo index 08c434f5a..df8a7b40c 100644 --- a/src/operators/tensor/math/cosh.cairo +++ b/src/operators/tensor/math/cosh.cairo @@ -24,7 +24,7 @@ fn cosh< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).cosh()); }, - Option::None(_) => { break; } + 
Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/cumsum.cairo b/src/operators/tensor/math/cumsum.cairo index 1ffe51821..99aea3156 100644 --- a/src/operators/tensor/math/cumsum.cairo +++ b/src/operators/tensor/math/cumsum.cairo @@ -23,7 +23,7 @@ fn cumsum< ) -> Tensor { let reverse = match reverse { Option::Some(val) => val, - Option::None(_) => false + Option::None => false }; if reverse { @@ -45,7 +45,7 @@ fn cumsum_forward< ) -> Tensor { let exclusive = match exclusive { Option::Some(val) => val, - Option::None(_) => false, + Option::None => false, }; assert(axis < (*self.shape).len(), 'axis out of dimensions'); @@ -108,7 +108,7 @@ fn cumsum_reverse< ) -> Tensor { let exclusive = match exclusive { Option::Some(val) => val, - Option::None(_) => false, + Option::None => false, }; assert(axis < (*self.shape).len(), 'axis out of dimensions'); @@ -153,7 +153,6 @@ fn cumsum_reverse< let previous_axis_element_index = ravel_index( *self.shape, previous_axis_element_indices ); - let mut z = *(data)[previous_axis_element_index]; if exclusive { output_data.append(*output_data[previous_axis_element_index] - *(data)[index]); diff --git a/src/operators/tensor/math/erf.cairo b/src/operators/tensor/math/erf.cairo index 44a755c15..8cc8ab055 100644 --- a/src/operators/tensor/math/erf.cairo +++ b/src/operators/tensor/math/erf.cairo @@ -23,7 +23,7 @@ fn erf< loop { match z.data.pop_front() { Option::Some(item) => { data_result.append((*item).erf()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/exp.cairo b/src/operators/tensor/math/exp.cairo index 889082d56..0c1700abf 100644 --- a/src/operators/tensor/math/exp.cairo +++ b/src/operators/tensor/math/exp.cairo @@ -24,7 +24,7 @@ fn exp< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).exp()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -54,7 +54,7 @@ fn exp_upcast< loop { match 
self.data.pop_front() { Option::Some(item) => { result.append((TIntoW::into(*item)).exp()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/flatten.cairo b/src/operators/tensor/math/flatten.cairo index 7b0df7457..d8e5b5583 100644 --- a/src/operators/tensor/math/flatten.cairo +++ b/src/operators/tensor/math/flatten.cairo @@ -21,7 +21,7 @@ fn flatten>(self: @Tensor, axis: usize) new_shape_first_axis *= *val; index += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/gather.cairo b/src/operators/tensor/math/gather.cairo index fab9a3dc8..93662868b 100644 --- a/src/operators/tensor/math/gather.cairo +++ b/src/operators/tensor/math/gather.cairo @@ -18,7 +18,7 @@ fn gather, impl TCopy: Copy, impl TDrop: ) -> Tensor { let axis = match axis { Option::Some(val) => val, - Option::None(_) => 0 + Option::None => 0 }; assert(axis < (*self.shape).len(), 'axis out of dimensions'); @@ -38,7 +38,7 @@ fn gather, impl TCopy: Copy, impl TDrop: loop { match indices_shape.pop_front() { Option::Some(item) => { output_size.append(*item); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } else { @@ -46,7 +46,7 @@ fn gather, impl TCopy: Copy, impl TDrop: } i += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -66,7 +66,7 @@ fn gather, impl TCopy: Copy, impl TDrop: divisor /= *val; i += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -80,7 +80,7 @@ fn gather, impl TCopy: Copy, impl TDrop: } break_loop *= *val; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -108,7 +108,7 @@ fn gather, impl TCopy: Copy, impl TDrop: inner_loop += 1; } }, - Option::None(_) => { break; }, + Option::None => { break; }, }; }; diff --git a/src/operators/tensor/math/gather_elements.cairo b/src/operators/tensor/math/gather_elements.cairo index 5032eb2b9..f34e3e6b3 100644 --- 
a/src/operators/tensor/math/gather_elements.cairo +++ b/src/operators/tensor/math/gather_elements.cairo @@ -19,7 +19,7 @@ fn gather_elements, impl TCopy: Copy, im ) -> Tensor { let axis = match axis { Option::Some(val) => val, - Option::None(_) => 0 + Option::None => 0 }; assert(axis < (*self.shape).len(), 'axis out of dimensions'); @@ -44,7 +44,7 @@ fn gather_elements, impl TCopy: Copy, im } ind += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -63,7 +63,7 @@ fn gather_elements, impl TCopy: Copy, im } ind += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -84,7 +84,7 @@ fn gather_elements, impl TCopy: Copy, im } ind += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -109,7 +109,7 @@ fn gather_elements, impl TCopy: Copy, im } i += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/gather_nd.cairo b/src/operators/tensor/math/gather_nd.cairo index 737a4fe32..5d6c75ce1 100644 --- a/src/operators/tensor/math/gather_nd.cairo +++ b/src/operators/tensor/math/gather_nd.cairo @@ -19,7 +19,7 @@ fn gather_nd, impl TCopy: Copy, impl TDr ) -> Tensor { let batch_dims = match batch_dims { Option::Some(val) => val, - Option::None(_) => 0 + Option::None => 0 }; let data_rank = (*self.shape).len(); @@ -57,14 +57,14 @@ fn gather_nd, impl TCopy: Copy, impl TDr batch_dims_shape.append(*val); ind += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; loop { match indices_shape_clone.pop_front() { Option::Some(val) => { batch_dims_shape.append(*val); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -72,7 +72,6 @@ fn gather_nd, impl TCopy: Copy, impl TDr output_shape = batch_dims_shape; } else { let mut ind = 0; - let mut multiple = 1; output_shape = batch_dims_shape; loop { match data_shape_clone.pop_front() { @@ -82,7 +81,7 @@ fn gather_nd, impl TCopy: Copy, impl TDr } ind += 1; }, - Option::None(_) 
=> { break; } + Option::None => { break; } }; }; } @@ -104,7 +103,7 @@ fn gather_nd, impl TCopy: Copy, impl TDr } ind += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -119,7 +118,7 @@ fn gather_nd, impl TCopy: Copy, impl TDr } ind += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -147,14 +146,14 @@ fn gather_nd, impl TCopy: Copy, impl TDr result = 0; }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; loop { match index_data.pop_front() { Option::Some(val) => { output_data.append(*self.data[val]); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/is_inf.cairo b/src/operators/tensor/math/is_inf.cairo index 69d6b252a..d3a5f8f4f 100644 --- a/src/operators/tensor/math/is_inf.cairo +++ b/src/operators/tensor/math/is_inf.cairo @@ -52,7 +52,7 @@ fn is_inf< loop { match y.pop_front() { Option::Some(item) => { data_result.append((*item).is_inf()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -75,7 +75,7 @@ fn is_pos_inf< loop { match y.pop_front() { Option::Some(item) => { data_result.append((*item).is_pos_inf()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -98,7 +98,7 @@ fn is_neg_inf< loop { match y.pop_front() { Option::Some(item) => { data_result.append((*item).is_neg_inf()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/is_nan.cairo b/src/operators/tensor/math/is_nan.cairo index 9d81d79c6..817cf5f4d 100644 --- a/src/operators/tensor/math/is_nan.cairo +++ b/src/operators/tensor/math/is_nan.cairo @@ -22,7 +22,7 @@ fn is_nan< loop { match y.pop_front() { Option::Some(item) => { data_result.append((*item).is_nan()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo index 
bb0d9579b..1417b7e2b 100644 --- a/src/operators/tensor/math/layer_normalization.cairo +++ b/src/operators/tensor/math/layer_normalization.cairo @@ -35,7 +35,6 @@ fn layer_normalization< stash_type: Option, ) -> (Tensor, Tensor, Tensor) { let X_rank = (*self).shape.len(); - let X_shape = (*self).shape; let mut axis = match axis { Option::Some(axis) => axis, @@ -46,11 +45,6 @@ fn layer_normalization< Option::None => NumberTrait::zero(), // default of onnx is 1e-05 }; - let stash_type = match stash_type { - Option::Some(stash_type) => stash_type, - Option::None => 1, - }; - let axis = if axis < 0 { X_rank - axis.into() } else { diff --git a/src/operators/tensor/math/log.cairo b/src/operators/tensor/math/log.cairo index e55291fca..fa153c61b 100644 --- a/src/operators/tensor/math/log.cairo +++ b/src/operators/tensor/math/log.cairo @@ -24,7 +24,7 @@ fn log< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).ln()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/max_in_tensor.cairo b/src/operators/tensor/math/max_in_tensor.cairo index 318138ee2..f1aabdafb 100644 --- a/src/operators/tensor/math/max_in_tensor.cairo +++ b/src/operators/tensor/math/max_in_tensor.cairo @@ -24,7 +24,7 @@ fn max_in_tensor< max_value = check_max; } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/min_in_tensor.cairo b/src/operators/tensor/math/min_in_tensor.cairo index dc02c49dc..efa4356e5 100644 --- a/src/operators/tensor/math/min_in_tensor.cairo +++ b/src/operators/tensor/math/min_in_tensor.cairo @@ -24,7 +24,7 @@ fn min_in_tensor< min_value = check_min; } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/neg.cairo b/src/operators/tensor/math/neg.cairo index e89b5a42a..0eaa8b3da 100644 --- a/src/operators/tensor/math/neg.cairo +++ b/src/operators/tensor/math/neg.cairo @@ -20,7 +20,7 @@ 
fn neg< loop { match z.data.pop_front() { Option::Some(item) => { data_result.append((*item).neg()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/not.cairo b/src/operators/tensor/math/not.cairo index 2dd074c51..93e25c525 100644 --- a/src/operators/tensor/math/not.cairo +++ b/src/operators/tensor/math/not.cairo @@ -14,7 +14,7 @@ fn not(mut z: Tensor) -> Tensor { loop { match z.data.pop_front() { Option::Some(item) => { data_result.append((!*item)); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/onehot.cairo b/src/operators/tensor/math/onehot.cairo index 37b533262..bad9c9ef0 100644 --- a/src/operators/tensor/math/onehot.cairo +++ b/src/operators/tensor/math/onehot.cairo @@ -35,13 +35,11 @@ fn onehot_encode< // using 999 to denote -1, innermost dimension let axis = match axis { Option::Some(val) => val, - Option::None(_) => 999 + Option::None => 999 }; assert(((axis == 999) | (axis.into() <= rank)), 'axis out of dimensions'); - let tensor_len: usize = data.len(); - let mut output_data = ArrayTrait::new(); let mut output_size = ArrayTrait::::new(); @@ -49,7 +47,7 @@ fn onehot_encode< loop { match shape.pop_front() { Option::Some(size) => { output_size.append(*size); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; output_size.append(depth.into()); @@ -84,7 +82,7 @@ fn onehot_encode< inner_index += 1; }; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -106,7 +104,6 @@ fn onehot_encode< index += 1; }; - let mut index: usize = 0; output_tensor = output_tensor.transpose(tranpose_axes.span()); } diff --git a/src/operators/tensor/math/optional_get_element.cairo b/src/operators/tensor/math/optional_get_element.cairo index b8d508d1c..3af112f85 100644 --- a/src/operators/tensor/math/optional_get_element.cairo +++ b/src/operators/tensor/math/optional_get_element.cairo @@ -21,7 +21,7 @@ fn optional_get_element< 
// use of match to get element within and out the array bound match z.data.get(index) { Option::Some(item) => { data_result.append((*item.unbox())); }, - Option::None(_) => {} + Option::None => {} }; return TensorTrait::::new(z.shape, data_result.span()); diff --git a/src/operators/tensor/math/random_uniform_like.cairo b/src/operators/tensor/math/random_uniform_like.cairo index bf9e5f148..0b9c06cda 100644 --- a/src/operators/tensor/math/random_uniform_like.cairo +++ b/src/operators/tensor/math/random_uniform_like.cairo @@ -34,15 +34,15 @@ fn random_uniform_like< let mut seed: usize = match seed { Option::Some(seed) => seed, - Option::None(_) => NumberTrait::max_value(), + Option::None => NumberTrait::max_value(), }; let mut high = match high { Option::Some(high) => high, - Option::None(_) => NumberTrait::one(), + Option::None => NumberTrait::one(), }; let mut low = match low { Option::Some(low) => low, - Option::None(_) => NumberTrait::zero(), + Option::None => NumberTrait::zero(), }; assert!(high > low, "high must be larger than low"); let res = tensor_get_state(tensor,seed,high,low); @@ -71,12 +71,6 @@ fn tensor_get_state< let mut data = ArrayTrait::new(); let mut count = (tensor.data).len(); let mut i = 0; - let one: T = NumberTrait::one(); - - let half: T = NumberTrait::half(); - let two: T = one + one; - let three: T = two + one; - let max: T = NumberTrait::max_value(); loop { if count == i { diff --git a/src/operators/tensor/math/reduce_l2.cairo b/src/operators/tensor/math/reduce_l2.cairo index e633e28d6..8bb5bc888 100644 --- a/src/operators/tensor/math/reduce_l2.cairo +++ b/src/operators/tensor/math/reduce_l2.cairo @@ -27,7 +27,7 @@ fn square< let ele = *item; output_data.append(ele * ele); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/reduce_mean.cairo b/src/operators/tensor/math/reduce_mean.cairo index 0024e4680..87cbd49cc 100644 --- a/src/operators/tensor/math/reduce_mean.cairo +++ 
b/src/operators/tensor/math/reduce_mean.cairo @@ -35,7 +35,7 @@ fn reduce_mean< ) -> Tensor { let noop_with_empty_axes = match noop_with_empty_axes { Option::Some(noop_with_empty_axes) => noop_with_empty_axes, - Option::None(_) => { false }, + Option::None => { false }, }; let axes = match axes { Option::Some(axes) => { @@ -48,14 +48,14 @@ fn reduce_mean< loop { match copy_axes.pop_front() { Option::Some(axis) => { axes_arr.append(*axis); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr).span(); sorted_axes } }, - Option::None(_) => { + Option::None => { if (noop_with_empty_axes == true) { return *self; } @@ -64,7 +64,7 @@ fn reduce_mean< }; let keepdims = match keepdims { Option::Some(keepdims) => keepdims, - Option::None(_) => { true }, + Option::None => { true }, }; let mut axis_c = 0; @@ -99,7 +99,7 @@ fn reduce_mean< data = temp_data.span(); axis_c += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -109,7 +109,7 @@ fn reduce_mean< loop { match axes_copy.pop_front() { Option::Some(axis) => { shape = reduce_output_shape(shape, *axis, true); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; return TensorTrait::::new(shape, data); @@ -169,7 +169,7 @@ fn accumulate_mean< axis_index += NumberTrait::one(); axis_indexu32 += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } diff --git a/src/operators/tensor/math/reduce_min.cairo b/src/operators/tensor/math/reduce_min.cairo index 1ee23eed1..3bfb590b8 100644 --- a/src/operators/tensor/math/reduce_min.cairo +++ b/src/operators/tensor/math/reduce_min.cairo @@ -33,7 +33,7 @@ fn reduce_min< ) -> Tensor { let noop_with_empty_axes = match noop_with_empty_axes { Option::Some(noop_with_empty_axes) => noop_with_empty_axes, - Option::None(_) => { false }, + Option::None => { false }, }; let axes = match axes { Option::Some(axes) => { @@ -46,14 +46,14 @@ fn reduce_min< loop { 
match copy_axes.pop_front() { Option::Some(axis) => { axes_arr.append(*axis); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr).span(); sorted_axes } }, - Option::None(_) => { + Option::None => { if (noop_with_empty_axes == true) { return *self; } @@ -62,7 +62,7 @@ fn reduce_min< }; let keepdims = match keepdims { Option::Some(keepdims) => keepdims, - Option::None(_) => { true }, + Option::None => { true }, }; let mut axis_c = 0; @@ -97,7 +97,7 @@ fn reduce_min< data = temp_data.span(); axis_c += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -107,7 +107,7 @@ fn reduce_min< loop { match axes_copy.pop_front() { Option::Some(axis) => { shape = reduce_output_shape(shape, *axis, true); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; return TensorTrait::::new(shape, data); @@ -165,7 +165,7 @@ fn accumulate_min< Option::Some(item) => { if (*item < min) { min = *item; } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } diff --git a/src/operators/tensor/math/reduce_prod.cairo b/src/operators/tensor/math/reduce_prod.cairo index 5e1111303..cf66dec97 100644 --- a/src/operators/tensor/math/reduce_prod.cairo +++ b/src/operators/tensor/math/reduce_prod.cairo @@ -149,7 +149,7 @@ fn accumulate_production< loop { match input_data.pop_front() { Option::Some(item) => { acc *= *item; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } diff --git a/src/operators/tensor/math/reduce_sum.cairo b/src/operators/tensor/math/reduce_sum.cairo index 2dcd7bc0f..ab834136f 100644 --- a/src/operators/tensor/math/reduce_sum.cairo +++ b/src/operators/tensor/math/reduce_sum.cairo @@ -101,7 +101,7 @@ fn accumulate_sum< loop { match input_data.pop_front() { Option::Some(item) => { acc += *item; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } diff --git a/src/operators/tensor/math/reduce_sum_square.cairo 
b/src/operators/tensor/math/reduce_sum_square.cairo index 7d1cdcc6e..329b8fb4e 100644 --- a/src/operators/tensor/math/reduce_sum_square.cairo +++ b/src/operators/tensor/math/reduce_sum_square.cairo @@ -28,7 +28,7 @@ fn square< let ele = *item; output_data.append(ele * ele); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/resize.cairo b/src/operators/tensor/math/resize.cairo index ecae7b186..5d10d1875 100644 --- a/src/operators/tensor/math/resize.cairo +++ b/src/operators/tensor/math/resize.cairo @@ -428,7 +428,6 @@ fn interpolate_nd< if i == scale_factors.len() { break; } - let scale: usize = (*scale_factors.at(i)).try_into().unwrap(); let item = *scale_factors.at(i) * NumberTrait::new_unscaled((*(*(data).shape).at(i)).into(), false); @@ -514,7 +513,6 @@ fn cartesian(mut arrays: Span>,) -> Array> { let mut i = 0; let mut size_arrays = ArrayTrait::new(); - let mut m = n; loop { if i == arrays.len() { break; @@ -775,7 +773,6 @@ fn get_row_n, +Copy, +Drop,>( let mut output_data = ArrayTrait::new(); let mut output_shape = ArrayTrait::new(); let mut stride_output = 1; - let mut n: usize = 0; let mut i = 0; loop { @@ -834,7 +831,7 @@ fn interpolate_1d_with_x< ) -> Tensor { let coordinate_transformation_mode = match coordinate_transformation_mode { Option::Some(coordinate_transformation_mode) => coordinate_transformation_mode, - Option::None(_) => { TRANSFORMATION_MODE::HALF_PIXEL }, + Option::None => { TRANSFORMATION_MODE::HALF_PIXEL }, }; let input_width = (*data).data.len(); @@ -886,7 +883,7 @@ fn interpolate_1d_with_x< }; x_ori }, - Option::None(_) => { core::panic_with_felt252('roi cannot be None.') }, + Option::None => { core::panic_with_felt252('roi cannot be None.') }, }; x_ori }, @@ -918,10 +915,10 @@ fn interpolate_1d_with_x< let mut coeffs = match mode { MODE::NEAREST => { let coeffs = match antialias { - Option::Some(antialias) => core::panic_with_felt252( + Option::Some => core::panic_with_felt252( 
'antialias not for mode NEAREST' ), - Option::None(_) => { nearest_coeffs(ratio, nearest_mode) }, + Option::None => { nearest_coeffs(ratio, nearest_mode) }, }; coeffs }, @@ -935,16 +932,16 @@ fn interpolate_1d_with_x< }; coeffs }, - Option::None(_) => { linear_coeffs(ratio) }, + Option::None => { linear_coeffs(ratio) }, }; coeffs }, MODE::CUBIC => { let coeffs = match antialias { - Option::Some(antialias) => { + Option::Some => { cubic_coeffs_antialias(ratio, scale_factor, cubic_coeff_a) }, - Option::None(_) => { cubic_coeffs(ratio, cubic_coeff_a) }, + Option::None => { cubic_coeffs(ratio, cubic_coeff_a) }, }; coeffs }, @@ -1092,7 +1089,7 @@ fn get_neighbor_idxes< >( mut x: T, n: usize, limit: usize, ) -> Tensor { - let pad_width: usize = NumberTrait::< + let _pad_width: usize = NumberTrait::< T >::ceil( NumberTrait::new_unscaled(n.into(), false) @@ -1263,12 +1260,11 @@ fn cubic_coeffs< let three = two + NumberTrait::one(); let four = three + NumberTrait::one(); let five = four + NumberTrait::one(); - let five = four + NumberTrait::one(); let eigth = four + four; let A = match A { Option::Some(A) => A, - Option::None(_) => { NumberTrait::neg(three / four) }, + Option::None => { NumberTrait::neg(three / four) }, }; let mut coeffs = ArrayTrait::new(); @@ -1314,9 +1310,6 @@ fn cubic_coeffs_antialias< let two = one + NumberTrait::one(); let three = two + NumberTrait::one(); let four = three + NumberTrait::one(); - let five = four + NumberTrait::one(); - let five = four + NumberTrait::one(); - let eigth = four + four; let scale = NumberTrait::min(scale, NumberTrait::one()); @@ -1326,7 +1319,7 @@ fn cubic_coeffs_antialias< let A = match A { Option::Some(A) => A, - Option::None(_) => { NumberTrait::neg(three / four) }, + Option::None => { NumberTrait::neg(three / four) }, }; let mut coeffs = ArrayTrait::new(); @@ -1418,7 +1411,7 @@ fn nearest_coeffs< ) -> Tensor { let nearest_mode = match nearest_mode { Option::Some(nearest_mode) => { nearest_mode }, - Option::None(_) => 
{ NEAREST_MODE::ROUND_PREFER_FLOOR }, + Option::None => { NEAREST_MODE::ROUND_PREFER_FLOOR }, }; let mut ret = ArrayTrait::new(); diff --git a/src/operators/tensor/math/round.cairo b/src/operators/tensor/math/round.cairo index fafd804d0..5515dad9b 100644 --- a/src/operators/tensor/math/round.cairo +++ b/src/operators/tensor/math/round.cairo @@ -22,7 +22,7 @@ fn round< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).round()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/scatter.cairo b/src/operators/tensor/math/scatter.cairo index 565371584..a108ae4e2 100644 --- a/src/operators/tensor/math/scatter.cairo +++ b/src/operators/tensor/math/scatter.cairo @@ -32,12 +32,12 @@ fn scatter< ) -> Tensor { let mut axis = match axis { Option::Some(val) => val, - Option::None(_) => 0 + Option::None => 0 }; let reduction = match reduction { Option::Some(val) => val, - Option::None(_) => 'none' + Option::None => 'none' }; let data_rank = (*self.shape).len(); @@ -65,9 +65,9 @@ fn scatter< let mut data_shape_copy = data_shape; let mut indices_shape_copy = indices_shape; + *data_shape_copy.pop_front().unwrap(); + *indices_shape_copy.pop_front().unwrap(); - let data_loop_first = *data_shape_copy.pop_front().unwrap(); - let indices_loop_first = *indices_shape_copy.pop_front().unwrap(); let mut indices_loop: usize = 1; let mut data_loop: usize = 1; @@ -76,20 +76,18 @@ fn scatter< loop { match indices_shape_copy.pop_front() { Option::Some(val) => { - let d = *val; indices_loop *= *val; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; loop { match data_shape_copy.pop_front() { Option::Some(val) => { - let d = *val; data_loop *= *val; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } @@ -114,7 +112,6 @@ fn scatter< let mut shift = 0; loop { - let mut i: usize = 0; let mut result: usize = 0; match data_indices.pop_front() { @@ -155,7 +152,7 @@ fn 
scatter< loop { match span.pop_front() { Option::Some(val) => { arr.append(*val); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; arr.append(total_count); @@ -164,7 +161,7 @@ fn scatter< } total_count += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -199,7 +196,7 @@ fn scatter< loop { match span.pop_front() { Option::Some(val) => { result += *data_updates[*val]; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; output_data.append(result); @@ -209,7 +206,7 @@ fn scatter< loop { match span.pop_front() { Option::Some(val) => { result *= *data_updates[*val]; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; output_data.append(result); @@ -224,7 +221,7 @@ fn scatter< result = holder; } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; output_data.append(result); @@ -239,7 +236,7 @@ fn scatter< result = holder; } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; output_data.append(result); @@ -249,7 +246,7 @@ fn scatter< i += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/scatter_nd.cairo b/src/operators/tensor/math/scatter_nd.cairo index ba8dba1c6..61535f618 100644 --- a/src/operators/tensor/math/scatter_nd.cairo +++ b/src/operators/tensor/math/scatter_nd.cairo @@ -32,12 +32,10 @@ fn scatter_nd< let reduction = match reduction { Option::Some(val) => val, - Option::None(_) => 'none' + Option::None => 'none' }; let data_rank = (*self.shape).len(); - let indices_rank = (indices.shape).len(); - let updates_rank = (updates.shape).len(); let mut data_shape = *self.shape; let mut indices_shape = indices.shape; let updates_shape = updates.shape; @@ -56,7 +54,7 @@ fn scatter_nd< loop { match indices_shape.pop_front() { Option::Some(val) => { batch_dims_shape.append(*val);}, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -68,7 +66,7 @@ fn scatter_nd< 
batch_dims_shape.append(*val); } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -78,7 +76,7 @@ fn scatter_nd< Option::Some(val) => { assert(val == *updates_shape[ind], 'must be same'); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -92,7 +90,7 @@ fn scatter_nd< loop { match data_shape_clone.pop_front() { Option::Some(val) => { indexer *= *val;}, - Option::None(_) => { break; } + Option::None => { break; } }; } } @@ -105,7 +103,7 @@ fn scatter_nd< updates_index_dict.insert((*val).into(), dict_ind); dict_ind += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -118,7 +116,6 @@ fn scatter_nd< if (index == num){ break; } - let updates_index = (index/indexer); let comp_index = updates_index_dict.get(index.into()); if (comp_index == 0) { diff --git a/src/operators/tensor/math/shrink.cairo b/src/operators/tensor/math/shrink.cairo index ff1f3cb28..20ed4041f 100644 --- a/src/operators/tensor/math/shrink.cairo +++ b/src/operators/tensor/math/shrink.cairo @@ -44,7 +44,7 @@ fn shrink< data_result.append(NumberTrait::zero()); } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/sign.cairo b/src/operators/tensor/math/sign.cairo index 5ac509d52..557a96995 100644 --- a/src/operators/tensor/math/sign.cairo +++ b/src/operators/tensor/math/sign.cairo @@ -22,7 +22,7 @@ fn sign< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).sign()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/sin.cairo b/src/operators/tensor/math/sin.cairo index cc810eab7..91e5d9949 100644 --- a/src/operators/tensor/math/sin.cairo +++ b/src/operators/tensor/math/sin.cairo @@ -24,7 +24,7 @@ fn sin< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).sin()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git 
a/src/operators/tensor/math/sinh.cairo b/src/operators/tensor/math/sinh.cairo index 7c3373288..72caffd21 100644 --- a/src/operators/tensor/math/sinh.cairo +++ b/src/operators/tensor/math/sinh.cairo @@ -24,7 +24,7 @@ fn sinh< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).sinh()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/sqrt.cairo b/src/operators/tensor/math/sqrt.cairo index f3111bed9..22ca78d77 100644 --- a/src/operators/tensor/math/sqrt.cairo +++ b/src/operators/tensor/math/sqrt.cairo @@ -23,7 +23,7 @@ fn sqrt< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).sqrt()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/tanh.cairo b/src/operators/tensor/math/tanh.cairo index f6f3eb6e1..681f4d8f6 100644 --- a/src/operators/tensor/math/tanh.cairo +++ b/src/operators/tensor/math/tanh.cairo @@ -24,7 +24,7 @@ fn tanh< loop { match self.data.pop_front() { Option::Some(item) => { result.append((*item).tanh()); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/ml/array_feature_extractor.cairo b/src/operators/tensor/ml/array_feature_extractor.cairo index a9f6ff3d3..8605c00ab 100644 --- a/src/operators/tensor/ml/array_feature_extractor.cairo +++ b/src/operators/tensor/ml/array_feature_extractor.cairo @@ -30,8 +30,6 @@ fn process_1D_tensor, impl TCopy: Copy, impl ) -> Tensor { let mut output_data = ArrayTrait::::new(); - let mut indices_counter: usize = 0; - let mut indices_values: Span = indices.data; let self_len = *self.shape.at(0); loop { @@ -41,7 +39,7 @@ fn process_1D_tensor, impl TCopy: Copy, impl let mut current_data_value = *self.data.at(*current_indices_value); output_data.append(current_data_value); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -71,7 +69,7 @@ fn calculate_output_shape< 
input_shape_counter += 1; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; @@ -114,7 +112,7 @@ fn calculate_output_data, impl TCopy: Copy, i let mut current_data_value = *self.data.at(flat_index); output_data.append(current_data_value); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/quantization/dequantize_linear.cairo b/src/operators/tensor/quantization/dequantize_linear.cairo index f1585cad8..b17c4a2d3 100644 --- a/src/operators/tensor/quantization/dequantize_linear.cairo +++ b/src/operators/tensor/quantization/dequantize_linear.cairo @@ -45,7 +45,6 @@ fn dequantize_per_axis< >( x: @Tensor, x_scale: @Tensor, x_zero_point: @Tensor ) -> Tensor:: { - let mut result_data = ArrayTrait::::new(); (*x - *x_zero_point) * *x_scale } @@ -72,7 +71,7 @@ fn dequantize_element_wise< let dequantized = dequantize(*item, x_scale, x_zero_point); result_data.append(dequantized); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/quantization/qlinear_concat.cairo b/src/operators/tensor/quantization/qlinear_concat.cairo index b5505091c..7d6280202 100644 --- a/src/operators/tensor/quantization/qlinear_concat.cairo +++ b/src/operators/tensor/quantization/qlinear_concat.cairo @@ -133,7 +133,7 @@ fn dequantize_tensors< array .append(dequantize_linear(@(*tensor), @(*scales.at(i)), @(*zero_points.at(i)))); }, - Option::None(_) => { break; } + Option::None => { break; } }; i += 1; }; diff --git a/src/operators/tensor/quantization/qlinear_leakyrelu.cairo b/src/operators/tensor/quantization/qlinear_leakyrelu.cairo index b5b614b4a..4fc0db823 100644 --- a/src/operators/tensor/quantization/qlinear_leakyrelu.cairo +++ b/src/operators/tensor/quantization/qlinear_leakyrelu.cairo @@ -37,7 +37,6 @@ fn qlinear_leakyrelu< let mut dequantized_a = dequantize_linear(@(*a), a_scale, a_zero_point); let mut result_data = ArrayTrait::::new(); - let mut i = 0; loop { match 
dequantized_a.data.pop_front() { Option::Some(elem) => { @@ -47,7 +46,7 @@ fn qlinear_leakyrelu< result_data.append(*elem); } }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/src/operators/tensor/quantization/qlinear_matmul.cairo b/src/operators/tensor/quantization/qlinear_matmul.cairo index bbb4c174a..03e542945 100644 --- a/src/operators/tensor/quantization/qlinear_matmul.cairo +++ b/src/operators/tensor/quantization/qlinear_matmul.cairo @@ -107,7 +107,7 @@ fn x_shape(ref x_data: Array, mut shape: Span, m: usize, n: usize) } match shape.pop_front() { Option::Some(elem) => { x_data.append(*elem); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; x_data.append(m); @@ -122,7 +122,7 @@ fn stride(mut shape: Span) -> usize { loop { match shape.pop_back() { Option::Some(i) => { accumulated *= *i; }, - Option::None(_) => { break; } + Option::None => { break; } }; }; return accumulated; diff --git a/src/operators/tensor/quantization/quantize_linear.cairo b/src/operators/tensor/quantization/quantize_linear.cairo index 91cc16155..90633516a 100644 --- a/src/operators/tensor/quantization/quantize_linear.cairo +++ b/src/operators/tensor/quantization/quantize_linear.cairo @@ -78,7 +78,7 @@ fn quantize_element_wise< let quantized = quantize(*item, y_scale, y_zero_point, min, max); result_data.append(quantized); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; diff --git a/tests/ml/linear_classifier_test.cairo b/tests/ml/linear_classifier_test.cairo index e0c892328..8dc59afd9 100644 --- a/tests/ml/linear_classifier_test.cairo +++ b/tests/ml/linear_classifier_test.cairo @@ -259,8 +259,6 @@ fn linear_classifier_helper( let classlabels: Span = array![0, 1, 2].span(); let classlabels = Option::Some(classlabels); - let classlabels_strings: Option> = Option::None; - let coefficients: Span = array![ FP16x16 { mag: 38011, sign: true }, FP16x16 { mag: 19005, sign: true }, diff --git 
a/tests/ml/tree_ensemble_regressor.cairo b/tests/ml/tree_ensemble_regressor.cairo index 2ee505774..5b1aeeb41 100644 --- a/tests/ml/tree_ensemble_regressor.cairo +++ b/tests/ml/tree_ensemble_regressor.cairo @@ -37,8 +37,6 @@ fn test_tree_ensemble_regressor_AVERAGE() { let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X); - let check = @res.get(1, 0).unwrap().mag; - // ASSERT RES assert( relative_eq(@res.get(0, 0).unwrap(), @FP16x16 { mag: 18904, sign: false }) == true, @@ -61,8 +59,6 @@ fn test_tree_ensemble_regressor_MIN() { let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X); - let check = @res.get(1, 0).unwrap().mag; - // ASSERT RES assert( relative_eq(@res.get(0, 0).unwrap(), @FP16x16 { mag: 5041, sign: false }) == true, @@ -85,8 +81,6 @@ fn test_tree_ensemble_regressor_MAX() { let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X); - let check = @res.get(1, 0).unwrap().mag; - // ASSERT RES assert( relative_eq(@res.get(0, 0).unwrap(), @FP16x16 { mag: 32768, sign: false }) == true, diff --git a/tests/numbers/complex_number_test.cairo b/tests/numbers/complex_number_test.cairo index 053bb83ad..b1d45f133 100644 --- a/tests/numbers/complex_number_test.cairo +++ b/tests/numbers/complex_number_test.cairo @@ -236,9 +236,9 @@ fn test_exp() { FixedTrait::new(73786976294838206464, false), FixedTrait::new(774763251095801167872, false) ); // 4 + 42i - let z = ComplexTrait::exp(a); + let _z = ComplexTrait::exp(a); - let z_expected: complex64 = ComplexTrait::new( + let _z_expected: complex64 = ComplexTrait::new( FixedTrait::new(402848450095324460000, true), FixedTrait::new(923082101320478400000, true) ); } diff --git a/tests/operators/qlinear_concat_test.cairo b/tests/operators/qlinear_concat_test.cairo index 4c86b3ff8..7dce1747e 100644 --- a/tests/operators/qlinear_concat_test.cairo +++ b/tests/operators/qlinear_concat_test.cairo @@ -8,7 +8,7 @@ fn print_span(mut span: Span) { loop { match span.pop_front() { Option::Some(i) => { 
(*i).print(); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } diff --git a/tests/operators/qlinear_matmul_test.cairo b/tests/operators/qlinear_matmul_test.cairo index 9d3f8fa4b..1f5cf2dc7 100644 --- a/tests/operators/qlinear_matmul_test.cairo +++ b/tests/operators/qlinear_matmul_test.cairo @@ -166,7 +166,7 @@ fn print_span(mut span: Span) { loop { match span.pop_front() { Option::Some(i) => { (*i).print(); }, - Option::None(_) => { break; } + Option::None => { break; } }; }; } diff --git a/tests/tensor_core/onehot/onehot_fp_test/onehot_fp16x16_test.cairo b/tests/tensor_core/onehot/onehot_fp_test/onehot_fp16x16_test.cairo index dd8406f9f..ff678bb2d 100644 --- a/tests/tensor_core/onehot/onehot_fp_test/onehot_fp16x16_test.cairo +++ b/tests/tensor_core/onehot/onehot_fp_test/onehot_fp16x16_test.cairo @@ -155,7 +155,7 @@ mod tensor_1D { let depth = 3; let axis: Option = Option::Some(3); - let result = tensor.onehot(depth: depth, axis: axis, values: values.span()); + let _result = tensor.onehot(depth: depth, axis: axis, values: values.span()); } #[test] @@ -253,7 +253,7 @@ mod tensor_1D { let depth = 4; let axis: Option = Option::Some(3); - let result = tensor.onehot(depth: depth, axis: axis, values: values.span()); + let _result = tensor.onehot(depth: depth, axis: axis, values: values.span()); } #[test] @@ -384,7 +384,7 @@ mod tensor_1D { let depth = 4; let axis: Option = Option::Some(4); - let result = tensor.onehot(depth: depth, axis: axis, values: values.span()); + let _result = tensor.onehot(depth: depth, axis: axis, values: values.span()); } #[test] diff --git a/tests/tensor_core/onehot/onehot_fp_test/onehot_fp8x23_test.cairo b/tests/tensor_core/onehot/onehot_fp_test/onehot_fp8x23_test.cairo index f95833fd6..aa6705e89 100644 --- a/tests/tensor_core/onehot/onehot_fp_test/onehot_fp8x23_test.cairo +++ b/tests/tensor_core/onehot/onehot_fp_test/onehot_fp8x23_test.cairo @@ -153,7 +153,7 @@ mod tensor_1D { let depth = 3; let axis: Option = 
Option::Some(3); - let result = tensor.onehot(depth: depth, axis: axis, values: values.span()); + let _result = tensor.onehot(depth: depth, axis: axis, values: values.span()); } #[test] @@ -251,7 +251,7 @@ mod tensor_1D { let depth = 4; let axis: Option = Option::Some(3); - let result = tensor.onehot(depth: depth, axis: axis, values: values.span()); + let _result = tensor.onehot(depth: depth, axis: axis, values: values.span()); } #[test] @@ -382,7 +382,7 @@ mod tensor_1D { let depth = 4; let axis: Option = Option::Some(4); - let result = tensor.onehot(depth: depth, axis: axis, values: values.span()); + let _result = tensor.onehot(depth: depth, axis: axis, values: values.span()); } #[test] From 6fb02b00a5285fa3369cef18aa5c044d1cd3a929 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sun, 11 Feb 2024 15:33:45 +0200 Subject: [PATCH 44/46] update alexandria --- Scarb.toml | 6 +++--- src/operators/tensor/math/reduce_mean.cairo | 2 +- src/operators/tensor/math/reduce_min.cairo | 2 +- src/operators/tensor/math/resize.cairo | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Scarb.toml b/Scarb.toml index f9e01ec6e..0ff4c4035 100644 --- a/Scarb.toml +++ b/Scarb.toml @@ -7,9 +7,9 @@ description = "ONNX Runtime in Cairo for verifiable ML inference using STARK" homepage = "https://github.com/gizatechxyz/orion" [dependencies] -alexandria_merkle_tree = { git = "https://github.com/keep-starknet-strange/alexandria.git", rev = "01a7690" } -alexandria_data_structures = { git = "https://github.com/keep-starknet-strange/alexandria.git", rev = "01a7690" } -alexandria_sorting = { git = "https://github.com/keep-starknet-strange/alexandria.git", rev = "01a7690" } +alexandria_merkle_tree = { git = "https://github.com/keep-starknet-strange/alexandria.git", rev = "800f5ad" } +alexandria_data_structures = { git = "https://github.com/keep-starknet-strange/alexandria.git", rev = "800f5ad" } +alexandria_sorting = { git = "https://github.com/keep-starknet-strange/alexandria.git", 
rev = "800f5ad" } cubit = { git = "https://github.com/influenceth/cubit.git", rev = "6275608" } [scripts] diff --git a/src/operators/tensor/math/reduce_mean.cairo b/src/operators/tensor/math/reduce_mean.cairo index 87cbd49cc..a692fdb91 100644 --- a/src/operators/tensor/math/reduce_mean.cairo +++ b/src/operators/tensor/math/reduce_mean.cairo @@ -51,7 +51,7 @@ fn reduce_mean< Option::None => { break; } }; }; - let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr).span(); + let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr, true).span(); sorted_axes } }, diff --git a/src/operators/tensor/math/reduce_min.cairo b/src/operators/tensor/math/reduce_min.cairo index 3bfb590b8..eb268c1f2 100644 --- a/src/operators/tensor/math/reduce_min.cairo +++ b/src/operators/tensor/math/reduce_min.cairo @@ -49,7 +49,7 @@ fn reduce_min< Option::None => { break; } }; }; - let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr).span(); + let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr, true).span(); sorted_axes } }, diff --git a/src/operators/tensor/math/resize.cairo b/src/operators/tensor/math/resize.cairo index 5d10d1875..961fc3853 100644 --- a/src/operators/tensor/math/resize.cairo +++ b/src/operators/tensor/math/resize.cairo @@ -1142,7 +1142,7 @@ fn get_neighbor_idxes< core::panic_with_felt252('MUST BE EVEN'); } - idxes = bubble_sort::bubble_sort_elements(idxes); + idxes = bubble_sort::bubble_sort_elements(idxes, true); let mut shape = ArrayTrait::new(); shape.append(n); From 85f0347e924aea36e48328496db0a5486eb11fe8 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sun, 11 Feb 2024 15:39:54 +0200 Subject: [PATCH 45/46] Update test.yaml --- .github/workflows/test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 33e343ae4..c1fc2cfd6 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -9,5 +9,5 @@ jobs: - uses: actions/checkout@v3 - uses: 
software-mansion/setup-scarb@v1 with: - scarb-version: "2.4.0" + scarb-version: "2.5.3" - run: scarb test --workspace && scarb fmt --workspace \ No newline at end of file From 4d4be27edf87056f26d24684291eec64f65ea096 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sun, 11 Feb 2024 16:03:39 +0200 Subject: [PATCH 46/46] Update Scarb.toml --- Scarb.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Scarb.toml b/Scarb.toml index 0ff4c4035..463e4ac62 100644 --- a/Scarb.toml +++ b/Scarb.toml @@ -1,7 +1,7 @@ [package] name = "orion" -version = "0.2.1" -cairo-version = "2.4.0" +version = "0.2.3" +cairo-version = "2.5.3" edition = "2023_10" description = "ONNX Runtime in Cairo for verifiable ML inference using STARK" homepage = "https://github.com/gizatechxyz/orion"