Merge pull request gizatechxyz#486 from hakymulla/reduce_log_sum
reduce log sum operator
raphaelDkhn authored Dec 10, 2023
2 parents 8443aae + 2b9f0ed commit 1c5c140
Showing 38 changed files with 703 additions and 0 deletions.
5 changes: 5 additions & 0 deletions docs/CHANGELOG.md
@@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased] - 2023-12-01

## Added
- Reduce LogSum Operator

## [Unreleased] - 2023-12-05

## Added
1 change: 1 addition & 0 deletions docs/SUMMARY.md
@@ -124,6 +124,7 @@
* [tensor.is_inf](framework/operators/tensor/tensor.is\_inf.md)
* [tensor.not](framework/operators/tensor/tensor.not.md)
* [tensor.erf](framework/operators/tensor/tensor.erf.md)
* [tensor.reduce_log_sum](framework/operators/tensor/tensor.reduce_log_sum.md)
* [Neural Network](framework/operators/neural-network/README.md)
* [nn.relu](framework/operators/neural-network/nn.relu.md)
* [nn.leaky\_relu](framework/operators/neural-network/nn.leaky\_relu.md)
1 change: 1 addition & 0 deletions docs/framework/compatibility.md
@@ -101,6 +101,7 @@ You can see below the list of current supported ONNX Operators:
| [IsNaN](operators/tensor/tensor.is\_nan.md) | :white\_check\_mark: |
| [IsInf](operators/tensor/tensor.is\_inf.md) | :white\_check\_mark: |
| [Not](operators/tensor/tensor.not.md) | :white\_check\_mark: |
| [ReduceLogSum](operators/tensor/tensor.reduce\_log\_sum.md) | :white\_check\_mark: |
| [Erf](operators/tensor/tensor.erf.md) | :white\_check\_mark: |


1 change: 1 addition & 0 deletions docs/framework/operators/tensor/README.md
@@ -122,6 +122,7 @@ use orion::operators::tensor::TensorTrait;
| [`tensor.is_nan`](tensor.is\_nan.md) | Returns which elements of the input are NaN. |
| [`tensor.is_inf`](tensor.is\_inf.md) | Maps infinity to true and other values to false. |
| [`tensor.not`](tensor.not.md) | Computes the logical negation of all elements in the input tensor. |
| [`tensor.reduce_log_sum`](tensor.reduce\_log\_sum.md) | Computes the natural log of the sum of the input tensor's elements along the provided axis. |
| [`tensor.erf`](tensor.erf.md) | Computes the error function of the given input tensor element-wise. |

## Arithmetic Operations
45 changes: 45 additions & 0 deletions docs/framework/operators/tensor/tensor.reduce_log_sum.md
@@ -0,0 +1,45 @@
## tensor.reduce_log_sum

```rust
fn reduce_log_sum(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
```

Computes the natural log of the sum of the input tensor's elements along the provided axis.

## Args

* `self`(`@Tensor<T>`) - The input tensor.
* `axis`(`usize`) - The dimension to reduce.
* `keepdims`(`bool`) - If true, retains reduced dimensions with length 1.

## Panics

* Panics if axis is not in the range of the input tensor's dimensions.

## Returns

A new `Tensor<T>` instance with the specified axis reduced by summing its elements and applying the natural log to each sum.

## Examples

```rust
use core::array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, FP16x16Tensor};
use orion::numbers::{FixedTrait, FP16x16};

fn reduce_log_sum() -> Tensor<FP16x16> {
    let mut sizes = ArrayTrait::new();
    sizes.append(2);
    sizes.append(2);
    sizes.append(2);

    let mut data = ArrayTrait::new();
    data.append(FixedTrait::new_unscaled(1, false));
    data.append(FixedTrait::new_unscaled(2, false));
    data.append(FixedTrait::new_unscaled(3, false));
    data.append(FixedTrait::new_unscaled(4, false));
    data.append(FixedTrait::new_unscaled(5, false));
    data.append(FixedTrait::new_unscaled(6, false));
    data.append(FixedTrait::new_unscaled(7, false));
    data.append(FixedTrait::new_unscaled(8, false));

    let tensor = TensorTrait::<FP16x16>::new(sizes.span(), data.span());

    // We can call the `reduce_log_sum` function as follows.
    return tensor.reduce_log_sum(axis: 2, keepdims: false);
}
>>> [[0x11938, 0x1f203], [0x265d9, 0x2b540]]
```
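The hex words in the example output are FP16x16 fixed-point values. As a sanity check, here is a minimal NumPy sketch of the same computation, assuming FP16x16 encodes a value as `round(value * 2**16)` (an illustrative assumption; the fixed-point format is defined elsewhere in Orion, not in this diff):

```python
import numpy as np

# Input from the example above: values 1..8 in a (2, 2, 2) tensor, reduced on axis 2.
x = np.arange(1, 9, dtype=np.float64).reshape(2, 2, 2)
y = np.log(np.sum(x, axis=2))  # ln of [[3, 7], [11, 15]]

# Assumed FP16x16 encoding: round(value * 2**16).
print([hex(int(round(v * 2**16))) for v in y.flatten()])
# ['0x1193f', '0x1f227', '0x265dc', '0x2b543'] -- a few ULPs from the documented
# [[0x11938, 0x1f203], [0x265d9, 0x2b540]], consistent with fixed-point ln error.
```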
117 changes: 117 additions & 0 deletions nodegen/node/reduce_log_sum.py
@@ -0,0 +1,117 @@
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl


class Reduce_log_sum(RunAll):
    @staticmethod
    def reduce_log_sum_fp8x23():
        def reduce_log_sum_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
            y = np.log(np.sum(x, axis=tuple(axes), keepdims=False))

            x = Tensor(Dtype.FP8x23, x.shape, to_fp(
                x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(
                y.flatten(), FixedImpl.FP8x23))

            name = "reduce_log_sum_fp8x23_export_do_not_keepdims"
            make_test(
                [x], y, "input_0.reduce_log_sum(2, false)", name)

        def reduce_log_sum_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
            y = np.log(np.sum(x, axis=tuple(axes), keepdims=True))

            x = Tensor(Dtype.FP8x23, x.shape, to_fp(
                x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(
                y.flatten(), FixedImpl.FP8x23))

            name = "reduce_log_sum_fp8x23_export_keepdims"
            make_test(
                [x], y, "input_0.reduce_log_sum(2, true)", name)

        def reduce_log_sum_axis_0():
            shape = [3, 3, 3]
            axes = np.array([0], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1), shape)
            y = np.log(np.sum(x, axis=tuple(axes), keepdims=True))

            x = Tensor(Dtype.FP8x23, x.shape, to_fp(
                x.flatten(), FixedImpl.FP8x23))
            y = Tensor(Dtype.FP8x23, y.shape, to_fp(
                y.flatten(), FixedImpl.FP8x23))

            name = "reduce_log_sum_fp8x23_export_negative_axes_keepdims"
            make_test(
                [x], y, "input_0.reduce_log_sum(0, true)", name)

        reduce_log_sum_export_do_not_keepdims()
        reduce_log_sum_export_keepdims()
        reduce_log_sum_axis_0()

    @staticmethod
    def reduce_log_sum_fp16x16():
        def reduce_log_sum_export_do_not_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = False
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
            y = np.log(np.sum(x, axis=tuple(axes), keepdims=False))

            x = Tensor(Dtype.FP16x16, x.shape, to_fp(
                x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(
                y.flatten(), FixedImpl.FP16x16))

            name = "reduce_log_sum_fp16x16_export_do_not_keepdims"
            make_test(
                [x], y, "input_0.reduce_log_sum(2, false)", name)

        def reduce_log_sum_export_keepdims():
            shape = [3, 2, 2]
            axes = np.array([2], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
            y = np.log(np.sum(x, axis=tuple(axes), keepdims=True))

            x = Tensor(Dtype.FP16x16, x.shape, to_fp(
                x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(
                y.flatten(), FixedImpl.FP16x16))

            name = "reduce_log_sum_fp16x16_export_keepdims"
            make_test(
                [x], y, "input_0.reduce_log_sum(2, true)", name)

        def reduce_log_sum_axis_0():
            shape = [2, 2, 2]
            axes = np.array([0], dtype=np.int64)
            keepdims = True
            x = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
            y = np.log(np.sum(x, axis=tuple(axes), keepdims=True))

            x = Tensor(Dtype.FP16x16, x.shape, to_fp(
                x.flatten(), FixedImpl.FP16x16))
            y = Tensor(Dtype.FP16x16, y.shape, to_fp(
                y.flatten(), FixedImpl.FP16x16))

            name = "reduce_log_sum_fp16x16_export_negative_axes_keepdims"
            make_test(
                [x], y, "input_0.reduce_log_sum(0, true)", name)

        reduce_log_sum_export_do_not_keepdims()
        reduce_log_sum_export_keepdims()
        reduce_log_sum_axis_0()
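The fixtures above pass the expected floats through `to_fp` (imported from `..helpers`, whose source is not part of this diff) before writing test data. As a rough model of that conversion — `to_fixed` below is an illustrative stand-in, not the repo's helper:

```python
import numpy as np

def to_fixed(values: np.ndarray, fractional_bits: int) -> np.ndarray:
    # Illustrative Q-format encoding: scale by 2**fractional_bits and round.
    return np.round(values * 2**fractional_bits).astype(np.int64)

# Expected outputs for the do-not-keepdims tests above: shape (3, 2, 2), axis 2.
x = np.arange(1, 13, dtype=np.float32).reshape(3, 2, 2)
y = np.log(np.sum(x, axis=2))  # ln of [[3, 7], [11, 15], [19, 23]]

print(to_fixed(y, 23))  # FP8x23 words, as in the fp8x23 test
print(to_fixed(y, 16))  # FP16x16 words, as in the fp16x16 test
```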
48 changes: 48 additions & 0 deletions src/operators/tensor/core.cairo
@@ -121,6 +121,7 @@ impl TensorSerde<T, impl TSerde: Serde<T>, impl TDrop: Drop<T>> of Serde<Tensor<
/// is_nan - Returns which elements of the input are NaN.
/// is_inf - Maps infinity to true and other values to false.
/// not - Computes the logical negation of all elements in the input tensor.
/// reduce_log_sum - Computes the natural log of the sum of the input tensor's elements along the provided axis.
/// erf - Computes the error function of the given input tensor element-wise.
trait TensorTrait<T> {
/// # tensor.new
@@ -4854,6 +4855,53 @@ trait TensorTrait<T> {
/// ```
///
fn not(self: @Tensor<T>) -> Tensor<T>;
/// ## tensor.reduce_log_sum
///
/// ```rust
/// fn reduce_log_sum(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ```
///
/// Computes the natural log of the sum of the input tensor's elements along the provided axis.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axis`(`usize`) - The dimension to reduce.
/// * `keepdims`(`bool`) - If true, retains reduced dimensions with length 1.
///
/// ## Panics
///
/// * Panics if axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
/// A new `Tensor<T>` instance with the specified axis reduced by summing its elements and applying the natural log to each sum.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
/// use orion::operators::tensor::{TensorTrait, Tensor, FP16x16Tensor};
/// use orion::numbers::{FixedTrait, FP16x16};
///
/// fn reduce_log_sum() -> Tensor<FP16x16> {
///     let mut sizes = ArrayTrait::new();
///     sizes.append(2);
///     sizes.append(2);
///     sizes.append(2);
///
///     let mut data = ArrayTrait::new();
///     data.append(FixedTrait::new_unscaled(1, false));
///     data.append(FixedTrait::new_unscaled(2, false));
///     data.append(FixedTrait::new_unscaled(3, false));
///     data.append(FixedTrait::new_unscaled(4, false));
///     data.append(FixedTrait::new_unscaled(5, false));
///     data.append(FixedTrait::new_unscaled(6, false));
///     data.append(FixedTrait::new_unscaled(7, false));
///     data.append(FixedTrait::new_unscaled(8, false));
///
///     let tensor = TensorTrait::<FP16x16>::new(sizes.span(), data.span());
///
///     // We can call the `reduce_log_sum` function as follows.
///     return tensor.reduce_log_sum(axis: 2, keepdims: false);
/// }
/// >>> [[0x11938, 0x1f203], [0x265d9, 0x2b540]]
/// ```
///
fn reduce_log_sum(self: @Tensor<T>, axis: usize, keepdims: bool) -> Tensor<T>;
/// ## tensor.erf
///
/// ```rust
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_bool.cairo
@@ -461,6 +461,10 @@ impl BoolTensor of TensorTrait<bool> {
    fn erf(self: @Tensor<bool>) -> Tensor<bool> {
        panic(array!['not supported!'])
    }

    fn reduce_log_sum(self: @Tensor<bool>, axis: usize, keepdims: bool) -> Tensor<bool> {
        panic(array!['not supported!'])
    }
}

/// Implements partial equal for two `Tensor<bool>` using the `PartialEq` trait.
5 changes: 5 additions & 0 deletions src/operators/tensor/implementations/tensor_fp16x16.cairo
@@ -518,6 +518,11 @@ impl FP16x16Tensor of TensorTrait<FP16x16> {
    ) -> Tensor<FP16x16> {
        math::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
    }

    fn reduce_log_sum(self: @Tensor<FP16x16>, axis: usize, keepdims: bool) -> Tensor<FP16x16> {
        math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
    }

    fn erf(self: @Tensor<FP16x16>) -> Tensor<FP16x16> {
        math::erf::erf(*self)
5 changes: 5 additions & 0 deletions src/operators/tensor/implementations/tensor_fp16x16wide.cairo
@@ -484,6 +484,11 @@ impl FP16x16WTensor of TensorTrait<FP16x16W> {
    ) -> Tensor<FP16x16W> {
        math::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
    }

    fn reduce_log_sum(self: @Tensor<FP16x16W>, axis: usize, keepdims: bool) -> Tensor<FP16x16W> {
        math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
    }

    fn erf(self: @Tensor<FP16x16W>) -> Tensor<FP16x16W> {
        math::erf::erf(*self)
5 changes: 5 additions & 0 deletions src/operators/tensor/implementations/tensor_fp32x32.cairo
@@ -519,6 +519,11 @@ impl FP32x32Tensor of TensorTrait<FP32x32> {
    ) -> Tensor<FP32x32> {
        math::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
    }

    fn reduce_log_sum(self: @Tensor<FP32x32>, axis: usize, keepdims: bool) -> Tensor<FP32x32> {
        math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
    }

    fn erf(self: @Tensor<FP32x32>) -> Tensor<FP32x32> {
        math::erf::erf(*self)
5 changes: 5 additions & 0 deletions src/operators/tensor/implementations/tensor_fp64x64.cairo
@@ -520,6 +520,11 @@ impl FP64x64Tensor of TensorTrait<FP64x64> {
    ) -> Tensor<FP64x64> {
        math::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
    }

    fn reduce_log_sum(self: @Tensor<FP64x64>, axis: usize, keepdims: bool) -> Tensor<FP64x64> {
        math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
    }

    fn erf(self: @Tensor<FP64x64>) -> Tensor<FP64x64> {
        math::erf::erf(*self)
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_fp8x23.cairo
@@ -519,6 +519,10 @@ impl FP8x23Tensor of TensorTrait<FP8x23> {
        math::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
    }

    fn reduce_log_sum(self: @Tensor<FP8x23>, axis: usize, keepdims: bool) -> Tensor<FP8x23> {
        math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
    }

    fn erf(self: @Tensor<FP8x23>) -> Tensor<FP8x23> {
        math::erf::erf(*self)
    }
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_fp8x23wide.cairo
@@ -472,6 +472,10 @@ impl FP8x23WTensor of TensorTrait<FP8x23W> {
        math::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
    }

    fn reduce_log_sum(self: @Tensor<FP8x23W>, axis: usize, keepdims: bool) -> Tensor<FP8x23W> {
        math::reduce_log_sum::reduce_log_sum(self, axis, keepdims)
    }

    fn erf(self: @Tensor<FP8x23W>) -> Tensor<FP8x23W> {
        math::erf::erf(*self)
    }
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_i32.cairo
@@ -516,6 +516,10 @@ impl I32Tensor of TensorTrait<i32> {
        math::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
    }

    fn reduce_log_sum(self: @Tensor<i32>, axis: usize, keepdims: bool) -> Tensor<i32> {
        panic(array!['not supported!'])
    }

    fn erf(self: @Tensor<i32>) -> Tensor<i32> {
        panic(array!['not supported!'])
    }
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_i8.cairo
@@ -514,6 +514,10 @@ impl I8Tensor of TensorTrait<i8> {
        math::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
    }

    fn reduce_log_sum(self: @Tensor<i8>, axis: usize, keepdims: bool) -> Tensor<i8> {
        panic(array!['not supported!'])
    }

    fn erf(self: @Tensor<i8>) -> Tensor<i8> {
        panic(array!['not supported!'])
    }
4 changes: 4 additions & 0 deletions src/operators/tensor/implementations/tensor_u32.cairo
@@ -457,6 +457,10 @@ impl U32Tensor of TensorTrait<u32> {
        math::concat_from_sequence::concat_from_sequence(sequence, axis, new_axis)
    }

    fn reduce_log_sum(self: @Tensor<u32>, axis: usize, keepdims: bool) -> Tensor<u32> {
        panic(array!['not supported!'])
    }

    fn erf(self: @Tensor<u32>) -> Tensor<u32> {
        panic(array!['not supported!'])
    }
1 change: 1 addition & 0 deletions src/operators/tensor/math.cairo
@@ -62,4 +62,5 @@ mod sequence_insert;
mod concat_from_sequence;
mod is_nan;
mod is_inf;
mod reduce_log_sum;
mod erf;
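Each fixed-point implementation above dispatches to the `math::reduce_log_sum` module registered here; the module's Cairo source is outside this excerpt. Judging from the nodegen fixtures, its contract is a sum-reduction followed by an elementwise natural log — a minimal NumPy model of that behavior:

```python
import numpy as np

def reduce_log_sum(x: np.ndarray, axis: int, keepdims: bool) -> np.ndarray:
    """log(sum(x)) along one axis -- mirrors the operator contract in the fixtures."""
    if not 0 <= axis < x.ndim:
        raise ValueError("axis out of range")  # the Cairo operator panics instead
    return np.log(np.sum(x, axis=axis, keepdims=keepdims))

x = np.arange(1, 9, dtype=np.float64).reshape(2, 2, 2)
print(reduce_log_sum(x, axis=2, keepdims=False))
# [[1.09861229 1.94591015]
#  [2.39789527 2.7080502 ]]
```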