diff --git a/.all-contributorsrc b/.all-contributorsrc
index 8a41f9cbe..7f624a7d6 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -260,6 +260,15 @@
"contributions": [
"code"
]
+ },
+ {
+ "login": "Vid201",
+ "name": "Vid Kersic",
+ "avatar_url": "https://avatars.githubusercontent.com/u/38610409?v=4",
+ "profile": "https://github.com/Vid201",
+ "contributions": [
+ "code"
+ ]
}
],
"contributorsPerLine": 7,
diff --git a/.tool-versions b/.tool-versions
new file mode 100644
index 000000000..21cfc8077
--- /dev/null
+++ b/.tool-versions
@@ -0,0 +1 @@
+scarb 2.4.0
diff --git a/README.md b/README.md
index d0b3055e4..1f48201d3 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@
# Orion: An Open-source Framework for Validity and ZK ML ✨
-[![All Contributors](https://img.shields.io/badge/all_contributors-28-orange.svg?style=flat-square)](#contributors-)
+[![All Contributors](https://img.shields.io/badge/all_contributors-29-orange.svg?style=flat-square)](#contributors-)
Orion is an open-source, community-driven framework dedicated to Provable Machine Learning. It provides essential components and a new ONNX runtime for building verifiable Machine Learning models using [STARKs](https://starkware.co/stark/).
@@ -104,6 +104,9 @@ Thanks goes to these wonderful people:
Bilgin Koçak 💻 |
akhercha 💻 |
+
+ Vid Kersic 💻 |
+
diff --git a/Scarb.toml b/Scarb.toml
index 6ebf1fd1f..cf6169b1f 100644
--- a/Scarb.toml
+++ b/Scarb.toml
@@ -1,6 +1,6 @@
[package]
name = "orion"
-version = "0.1.9"
+version = "0.2.0"
cairo-version = "2.4.0"
edition = "2023_10"
description = "ONNX Runtime in Cairo for verifiable ML inference using STARK"
diff --git a/docgen/src/main.rs b/docgen/src/main.rs
index 20426c832..ed2a69460 100644
--- a/docgen/src/main.rs
+++ b/docgen/src/main.rs
@@ -19,6 +19,14 @@ fn main() {
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
+ // SEQUENCE DOC
+ let trait_path = "src/operators/sequence/core.cairo";
+ let doc_path = "docs/framework/operators/sequence";
+ let label = "sequence";
+ let trait_name = "SequenceTrait";
+ doc_trait(trait_path, doc_path, label);
+ doc_functions(trait_path, doc_path, trait_name, label);
+
// FIXED POINT DOC
let trait_path = "src/numbers/fixed_point/core.cairo";
let doc_path = "docs/framework/numbers/fixed-point";
@@ -66,6 +74,22 @@ fn main() {
let trait_name: &str = "LinearRegressorTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
+
+ // LINEAR CLASSIFIER DOC
+ let trait_path = "src/operators/ml/linear/linear_classifier.cairo";
+ let doc_path = "docs/framework/operators/machine-learning/linear-classifier";
+ let label = "linear_classifier";
+ let trait_name: &str = "LinearClassifierTrait";
+ doc_trait(trait_path, doc_path, label);
+ doc_functions(trait_path, doc_path, trait_name, label);
+
+ // SVM REGRESSOR DOC
+ let trait_path = "src/operators/ml/svm/svm_regressor.cairo";
+ let doc_path = "docs/framework/operators/machine-learning/svm-regressor";
+ let label = "svm_regressor";
+ let trait_name: &str = "SVMRegressorTrait";
+ doc_trait(trait_path, doc_path, label);
+ doc_functions(trait_path, doc_path, trait_name, label);
}
fn doc_trait(trait_path: &str, doc_path: &str, label: &str) {
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 9c22caa01..a7ec58d5f 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -4,7 +4,11 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [Unreleased] - 2023-12-25
+## Added
+- Compress Operator.
+
## [Unreleased] - 2023-12-14
## Added
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 62ae2a2b3..f956159ec 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -100,6 +100,7 @@
* [tensor.bitwise\_and](framework/operators/tensor/tensor.bitwise\_and.md)
* [tensor.bitwise\_xor](framework/operators/tensor/tensor.bitwise\_xor.md)
* [tensor.bitwise\_or](framework/operators/tensor/tensor.bitwise\_or.md)
+ * [tensor.resize](framework/operators/tensor/tensor.resize.md)
* [tensor.round](framework/operators/tensor/tensor.round.md)
* [tensor.scatter](framework/operators/tensor/tensor.scatter.md)
* [tensor.array\_feature\_extractor](framework/operators/tensor/tensor.array\_feature\_extractor.md)
@@ -110,28 +111,24 @@
* [tensor.reduce\_prod](framework/operators/tensor/tensor.reduce\_prod.md)
* [tensor.gather\_elements](framework/operators/tensor/tensor.gather\_elements.md)
* [tensor.gather\_nd](framework/operators/tensor/tensor.gather\_nd.md)
- * [tensor.sequence\_length](framework/operators/tensor/tensor.sequence\_length.md)
- * [tensor.sequence\_at](framework/operators/tensor/tensor.sequence\_at.md)
* [tensor.reduce\_min](framework/operators/tensor/tensor.reduce\_min.md)
- * [tensor.sequence\_construct](framework/operators/tensor/tensor.sequence\_construct.md)
* [tensor.shrink](framework/operators/tensor/tensor.shrink.md)
- * [tensor.sequence\_empty](framework/operators/tensor/tensor.sequence\_empty.md)
* [tensor.reduce\_mean](framework/operators/tensor/tensor.reduce\_mean.md)
* [tensor.pow](framework/operators/tensor/tensor.pow.md)
- * [tensor.sequence\_erase](framework/operators/tensor/tensor.sequence\_erase.md)
- * [tensor.sequence\_insert](framework/operators/tensor/tensor.sequence\_insert.md)
- * [tensor.concat\_from\_sequence](framework/operators/tensor/tensor.concat\_from\_sequence.md)
* [tensor.is\_nan](framework/operators/tensor/tensor.is\_nan.md)
* [tensor.is\_inf](framework/operators/tensor/tensor.is\_inf.md)
* [tensor.not](framework/operators/tensor/tensor.not.md)
* [tensor.erf](framework/operators/tensor/tensor.erf.md)
* [tensor.reduce\_log\_sum](framework/operators/tensor/tensor.reduce\_log\_sum.md)
* [tensor.unique](framework/operators/tensor/tensor.unique.md)
+ * [tensor.compress](framework/operators/tensor/tensor.compress.md)
+  * [tensor.layer\_normalization](framework/operators/tensor/tensor.layer\_normalization.md)
* [Neural Network](framework/operators/neural-network/README.md)
* [nn.relu](framework/operators/neural-network/nn.relu.md)
* [nn.leaky\_relu](framework/operators/neural-network/nn.leaky\_relu.md)
* [nn.sigmoid](framework/operators/neural-network/nn.sigmoid.md)
* [nn.softmax](framework/operators/neural-network/nn.softmax.md)
+  * [nn.softmax\_zero](framework/operators/neural-network/nn.softmax\_zero.md)
* [nn.logsoftmax](framework/operators/neural-network/nn.logsoftmax.md)
* [nn.softsign](framework/operators/neural-network/nn.softsign.md)
* [nn.softplus](framework/operators/neural-network/nn.softplus.md)
@@ -142,6 +139,24 @@
* [Machine Learning](framework/operators/machine-learning/README.md)
* [Tree Ensemble Classifier](framework/operators/machine-learning/tree-ensemble-classifier/README.md)
* [tree\_ensemble\_classifier.predict](framework/operators/machine-learning/tree-ensemble-classifier/tree\_ensemble\_classifier.predict.md)
+ * [Tree Ensemble Regressor](framework/operators/machine-learning/tree-ensemble-regressor/README.md)
+ * [tree\_ensemble\_regressor.predict](framework/operators/machine-learning/tree-ensemble-regressor/tree\_ensemble\_regressor.predict.md)
+ * [Linear Classifier](framework/operators/machine-learning/linear-classifier/README.md)
+ * [linear\_classifier.predict](framework/operators/machine-learning/linear-classifier/linear\_classifier.predict.md)
+ * [Linear Regressor](framework/operators/machine-learning/linear-regressor/README.md)
+ * [linear\_regressor.predict](framework/operators/machine-learning/linear-regressor/linear\_regressor.predict.md)
+ * [SVM Regressor](framework/operators/machine-learning/svm-regressor/README.md)
+ * [svm\_regressor.predict](framework/operators/machine-learning/svm-regressor/svm\_regressor.predict.md)
+ * [Sequence](framework/operators/sequence/README.md)
+ * [sequence.sequence\_construct](framework/operators/sequence/sequence.sequence\_construct.md)
+ * [sequence.sequence\_empty](framework/operators/sequence/sequence.sequence\_empty.md)
+ * [sequence.sequence\_length](framework/operators/sequence/sequence.sequence\_length.md)
+ * [sequence.sequence\_at](framework/operators/sequence/sequence.sequence\_at.md)
+ * [sequence.sequence\_erase](framework/operators/sequence/sequence.sequence\_erase.md)
+ * [sequence.sequence\_insert](framework/operators/sequence/sequence.sequence\_insert.md)
+ * [sequence.concat\_from\_sequence](framework/operators/sequence/sequence.concat\_from\_sequence.md)
+
## 🏛 Hub
diff --git a/docs/academy/tutorials/provable-mlr-forecasting-aaves-lifetime-repayments.md b/docs/academy/tutorials/provable-mlr-forecasting-aaves-lifetime-repayments.md
index 54b35181e..68c4daac6 100644
--- a/docs/academy/tutorials/provable-mlr-forecasting-aaves-lifetime-repayments.md
+++ b/docs/academy/tutorials/provable-mlr-forecasting-aaves-lifetime-repayments.md
@@ -205,7 +205,7 @@ X_min = np.min(X_original, axis=0)
X_max = np.max(X_original, axis=0)
X_range = X_max - X_min
df_forecast_data_normalized = (df_forecast_data - X_min) / X_range
-# tranpose the matrix and add bias
+# transpose the matrix and add bias
df_forecast_data_normalized_transposed= df_forecast_data_normalized.T
df_forecast_data_normalized_transposed_with_bias = np.vstack((df_forecast_data_normalized_transposed, np.ones(df_forecast_data_normalized_transposed.shape[1])))
#normalized forecasts
@@ -913,7 +913,7 @@ fn normalize_user_x_inputs(
shape: array![data_len].span(), data: x_range.span()
);
- // for normalizing 2D user inputed feature vals
+ // for normalizing 2D user inputted feature vals
if x_inputs.shape.len() > 1 {
let mut j: u32 = 0;
loop {
@@ -1069,7 +1069,7 @@ let mut rescale_forecasts = rescale_predictions(forecast_results, main_y_vals);
Our model will get tested under the `multiple_linear_regression_test()` function which will follow these steps:
-1. Data retrival: The function initiates by fetching the AAVE dataset's x and y values.
+1. Data retrieval: The function initiates by fetching the AAVE dataset's x and y values.
2. Dataset construction and normalization: A new Dataset object gets initialized by passing the x and y variables. It is then normalized using the built-in `normalize_dataset()` method.
3. Model fitting: Using the `MultipleLinearRegression` function we fit the normalized dataset and compute the regression coefficients.
4. Computing accuracy of the model: To calculate the accuracy we utilize the `predict` method to compute the dot product between the model's regression coefficients and the x values. We then compute the R-squared score to measure the accuracy of our model.
diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md
index 68cd44241..0e0e5be17 100644
--- a/docs/framework/compatibility.md
+++ b/docs/framework/compatibility.md
@@ -37,6 +37,7 @@ You can see below the list of current supported ONNX Operators:
| [ThresholdedRelu](operators/neural-network/nn.thresholded\_relu.md) | :white\_check\_mark: |
| [Sigmoid](operators/neural-network/nn.sigmoid.md) | :white\_check\_mark: |
| [Softmax](operators/neural-network/nn.softmax.md) | :white\_check\_mark: |
+| [Softmax_zero](operators/neural-network/nn.softmax_zero.md) | :white\_check\_mark: |
| [LogSoftmax](operators/neural-network/nn.logsoftmax.md) | :white\_check\_mark: |
| [Softsign](operators/neural-network/nn.softsign.md) | :white\_check\_mark: |
| [Softplus](operators/neural-network/nn.softplus.md) | :white\_check\_mark: |
@@ -77,6 +78,7 @@ You can see below the list of current supported ONNX Operators:
| [BitwiseAnd](operators/tensor/tensor.bitwise_and.md) | :white\_check\_mark: |
| [BitwiseOr](operators/tensor/tensor.bitwise_or.md) | :white\_check\_mark: |
| [BitwiseXor](operators/tensor/tensor.bitwise_xor.md) | :white\_check\_mark: |
+| [Resize](operators/tensor/tensor.resize.md) | :white\_check\_mark: |
| [Round](operators/tensor/tensor.round.md) | :white\_check\_mark: |
| [MaxInTensor](operators/tensor/tensor.max\_in\_tensor.md) | :white\_check\_mark: |
| [Max](operators/tensor/tensor.max.md) | :white\_check\_mark: |
@@ -89,20 +91,22 @@ You can see below the list of current supported ONNX Operators:
| [ReduceL1](operators/tensor/tensor.reduce\_l1.md) | :white\_check\_mark: |
| [ReduceL2](operators/tensor/tensor.reduce\_l2.md) | :white\_check\_mark: |
| [GatherElements](operators/tensor/tensor.gather\_elements.md) | :white\_check\_mark: |
-| [SequenceLength](operators/tensor/tensor.sequence\_length.md) | :white\_check\_mark: |
-| [SequenceAt](operators/tensor/tensor.sequence\_at.md) | :white\_check\_mark: |
-| [SequenceConstruct](operators/tensor/tensor.sequence\_construct.md) | :white\_check\_mark: |
+| [SequenceLength](operators/sequence/sequence.sequence\_length.md) | :white\_check\_mark: |
+| [SequenceAt](operators/sequence/sequence.sequence\_at.md) | :white\_check\_mark: |
+| [SequenceConstruct](operators/sequence/sequence.sequence\_construct.md) | :white\_check\_mark: |
| [Shrink](operators/tensor/tensor.shrink.md) | :white\_check\_mark: |
-| [SequenceEmpty](operators/tensor/tensor.sequence\_empty.md) | :white\_check\_mark: |
+| [SequenceEmpty](operators/sequence/sequence.sequence\_empty.md) | :white\_check\_mark: |
| [ReduceL2](operators/tensor/tensor.reduce\_l2.md) | :white\_check\_mark: |
-| [SequenceErase](operators/tensor/tensor.sequence\_erase.md) | :white\_check\_mark: |
-| [SequenceInsert](operators/tensor/tensor.sequence\_insert.md) | :white\_check\_mark: |
-| [ConcatFromSequence](operators/tensor/tensor.concat\_from\_sequence.md) | :white\_check\_mark: |
+| [SequenceErase](operators/sequence/sequence.sequence\_erase.md) | :white\_check\_mark: |
+| [SequenceInsert](operators/sequence/sequence.sequence\_insert.md) | :white\_check\_mark: |
+| [ConcatFromSequence](operators/sequence/sequence.concat\_from\_sequence.md) | :white\_check\_mark: |
| [IsNaN](operators/tensor/tensor.is\_nan.md) | :white\_check\_mark: |
| [IsInf](operators/tensor/tensor.is\_inf.md) | :white\_check\_mark: |
| [Not](operators/tensor/tensor.not.md) | :white\_check\_mark: |
| [GatherND](operators/tensor/tensor.gather\_nd.md) | :white\_check\_mark: |
| [ReduceLogSum](operators/tensor/tensor.reduce\_log\_sum.md) | :white\_check\_mark: |
-| [Erf](operators/tensor/tensor.erf.md) | :white\_check\_mark: |
+| [Erf](operators/tensor/tensor.erf.md) | :white\_check\_mark: |
+| [Compress](operators/tensor/tensor.compress.md) | :white\_check\_mark: |
+| [LayerNormalization](operators/tensor/tensor.layer_normalization.md) | :white\_check\_mark: |
-Current Operators support: **96/156 (62%)**
+Current Operators support: **97/156 (62%)**
diff --git a/docs/framework/operators/machine-learning/linear-classifier/README.md b/docs/framework/operators/machine-learning/linear-classifier/README.md
new file mode 100644
index 000000000..7323f8b7f
--- /dev/null
+++ b/docs/framework/operators/machine-learning/linear-classifier/README.md
@@ -0,0 +1,23 @@
+# Linear Classifier
+
+`LinearClassifierTrait` provides a trait definition for the linear classification problem.
+
+```rust
+use orion::operators::ml::LinearClassifierTrait;
+```
+
+### Data types
+
+Orion currently supports only fixed point data types for `LinearClassifierTrait`.
+
+| Data type | dtype |
+| -------------------- | ------------------------------------------------------------- |
+| Fixed point (signed) | `LinearClassifierTrait` |
+
+
+***
+
+| function | description |
+| --- | --- |
+| [`linear_classifier.predict`](linear_classifier.predict.md) | Performs the linear classification. |
+
diff --git a/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md b/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md
new file mode 100644
index 000000000..aec154f68
--- /dev/null
+++ b/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md
@@ -0,0 +1,100 @@
+# LinearClassifierTrait::predict
+
+```rust
+   fn predict(ref self: LinearClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>);
+```
+
+Linear Classifier. Performs the linear classification.
+
+## Args
+
+* `self`: LinearClassifier - A LinearClassifier object.
+* `X`: Input 2D tensor.
+
+## Returns
+
+* A `Span<usize>` containing the predicted labels, and a `Tensor<T>` containing the classification scores of the input X.
+
+## Type Constraints
+
+`LinearClassifier` and `X` must be fixed points
+
+## Examples
+
+```rust
+use orion::numbers::FP16x16;
+use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
+
+use orion::operators::ml::linear::linear_classifier::{
+ LinearClassifierTrait, POST_TRANSFORM, LinearClassifier
+};
+
+fn linear_classifier_helper(
+    post_transform: POST_TRANSFORM
+) -> (LinearClassifier<FP16x16>, Tensor<FP16x16>) {
+
+    let classlabels: Span<usize> = array![0, 1, 2].span();
+    let classlabels = Option::Some(classlabels);
+
+    let classlabels_strings: Option<Span<FP16x16>> = Option::None;
+
+    let coefficients: Span<FP16x16> = array![
+ FP16x16 { mag: 38011, sign: true },
+ FP16x16 { mag: 19005, sign: true },
+ FP16x16 { mag: 5898, sign: true },
+ FP16x16 { mag: 38011, sign: false },
+ FP16x16 { mag: 19005, sign: false },
+ FP16x16 { mag: 5898, sign: false },
+ ]
+ .span();
+
+    let intercepts: Span<FP16x16> = array![
+ FP16x16 { mag: 176947, sign: false },
+ FP16x16 { mag: 176947, sign: true },
+ FP16x16 { mag: 32768, sign: false },
+ ]
+ .span();
+ let intercepts = Option::Some(intercepts);
+
+ let multi_class: usize = 0;
+
+    let mut classifier: LinearClassifier<FP16x16> = LinearClassifier {
+ classlabels,
+ coefficients,
+ intercepts,
+ multi_class,
+ post_transform
+ };
+
+    let mut X: Tensor<FP16x16> = TensorTrait::new(
+ array![3, 2].span(),
+ array![
+ FP16x16 { mag: 0, sign: false },
+ FP16x16 { mag: 65536, sign: false },
+ FP16x16 { mag: 131072, sign: false },
+ FP16x16 { mag: 196608, sign: false },
+ FP16x16 { mag: 262144, sign: false },
+ FP16x16 { mag: 327680, sign: false },
+ ]
+ .span()
+ );
+
+ (classifier, X)
+}
+
+fn linear_classifier_multi_softmax() -> (Span<usize>, Tensor<FP16x16>) {
+ let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX);
+
+ let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X);
+
+ (labels, scores)
+}
+
+>>>
+([0, 2, 2],
+ [
+ [0.852656, 0.009192, 0.138152],
+ [0.318722, 0.05216, 0.629118],
+ [0.036323, 0.090237, 0.87344]
+ ])
+```
\ No newline at end of file
diff --git a/docs/framework/operators/machine-learning/svm-regressor/README.md b/docs/framework/operators/machine-learning/svm-regressor/README.md
new file mode 100644
index 000000000..f659cbbd1
--- /dev/null
+++ b/docs/framework/operators/machine-learning/svm-regressor/README.md
@@ -0,0 +1,23 @@
+# SVM Regressor
+
+`SVMRegressorTrait` provides a trait definition for the SVM regression problem.
+
+```rust
+use orion::operators::ml::SVMRegressorTrait;
+```
+
+### Data types
+
+Orion currently supports only fixed point data types for `SVMRegressorTrait`.
+
+| Data type | dtype |
+| -------------------- | ------------------------------------------------------------- |
+| Fixed point (signed) | `SVMRegressorTrait` |
+
+
+***
+
+| function | description |
+| --- | --- |
+| [`svm_regressor.predict`](svm_regressor.predict.md) | Returns the regressed values for each input in N. |
+
diff --git a/docs/framework/operators/machine-learning/svm-regressor/svm_regressor.predict.md b/docs/framework/operators/machine-learning/svm-regressor/svm_regressor.predict.md
new file mode 100644
index 000000000..68be4c922
--- /dev/null
+++ b/docs/framework/operators/machine-learning/svm-regressor/svm_regressor.predict.md
@@ -0,0 +1,111 @@
+# SVMRegressorTrait::predict
+
+```rust
+   fn predict(ref self: SVMRegressor<T>, X: Tensor<T>) -> Tensor<T>;
+```
+
+Support Vector Machine regression prediction and one-class SVM anomaly detection.
+
+## Args
+
+* `self`: SVMRegressor - A SVMRegressor object.
+* `X`: Input 2D tensor.
+
+## Returns
+
+* Tensor containing the Support Vector Machine regression prediction and one-class SVM anomaly detection of the input X.
+
+## Type Constraints
+
+`SVMRegressor` and `X` must be fixed points
+
+## Examples
+
+```rust
+use orion::numbers::FP16x16;
+use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
+use orion::operators::tensor::FP16x16TensorPartialEq;
+
+use orion::operators::ml::svm::svm_regressor::{SVMRegressorTrait, POST_TRANSFORM, SVMRegressor};
+use orion::operators::ml::svm::core::{KERNEL_TYPE};
+
+fn example_svm_regressor_linear() -> Tensor<FP16x16> {
+    let coefficients: Span<FP16x16> = array![
+ FP16x16 { mag: 65536, sign: false },
+ FP16x16 { mag: 65536, sign: true },
+ FP16x16 { mag: 54959, sign: false },
+ FP16x16 { mag: 54959, sign: true },
+ FP16x16 { mag: 29299, sign: false },
+ FP16x16 { mag: 65536, sign: true },
+ FP16x16 { mag: 36236, sign: false }
+ ]
+ .span();
+ let n_supports: usize = 7;
+ let one_class: usize = 0;
+    let rho: Span<FP16x16> = array![FP16x16 { mag: 35788, sign: false }].span();
+    let support_vectors: Span<FP16x16> = array![
+ FP16x16 { mag: 8421, sign: true },
+ FP16x16 { mag: 5842, sign: false },
+ FP16x16 { mag: 4510, sign: false },
+ FP16x16 { mag: 5202, sign: true },
+ FP16x16 { mag: 14783, sign: true },
+ FP16x16 { mag: 17380, sign: true },
+ FP16x16 { mag: 60595, sign: false },
+ FP16x16 { mag: 1674, sign: true },
+ FP16x16 { mag: 38669, sign: true },
+ FP16x16 { mag: 63803, sign: false },
+ FP16x16 { mag: 87720, sign: true },
+ FP16x16 { mag: 22236, sign: false },
+ FP16x16 { mag: 61816, sign: false },
+ FP16x16 { mag: 34267, sign: true },
+ FP16x16 { mag: 36418, sign: false },
+ FP16x16 { mag: 27471, sign: false },
+ FP16x16 { mag: 28421, sign: false },
+ FP16x16 { mag: 69270, sign: true },
+ FP16x16 { mag: 152819, sign: false },
+ FP16x16 { mag: 4065, sign: false },
+ FP16x16 { mag: 62274, sign: true }
+ ]
+ .span();
+ let post_transform = POST_TRANSFORM::NONE;
+    let kernel_params: Span<FP16x16> = array![
+ FP16x16 { mag: 27812, sign: false },
+ FP16x16 { mag: 0, sign: false },
+ FP16x16 { mag: 196608, sign: false }
+ ]
+ .span();
+ let kernel_type = KERNEL_TYPE::LINEAR;
+
+    let mut regressor: SVMRegressor<FP16x16> = SVMRegressor {
+ coefficients,
+ kernel_params,
+ kernel_type,
+ n_supports,
+ one_class,
+ post_transform,
+ rho,
+ support_vectors,
+ };
+
+    let mut X: Tensor<FP16x16> = TensorTrait::new(
+ array![3, 3].span(),
+ array![
+ FP16x16 { mag: 32768, sign: true },
+ FP16x16 { mag: 26214, sign: true },
+ FP16x16 { mag: 19660, sign: true },
+ FP16x16 { mag: 13107, sign: true },
+ FP16x16 { mag: 6553, sign: true },
+ FP16x16 { mag: 0, sign: false },
+ FP16x16 { mag: 6553, sign: false },
+ FP16x16 { mag: 13107, sign: false },
+ FP16x16 { mag: 19660, sign: false },
+ ]
+ .span()
+ );
+
+ return SVMRegressorTrait::predict(ref regressor, X);
+}
+
+>>> [[-0.468206], [0.227487], [0.92318]]
+```
+
diff --git a/docs/framework/operators/machine-learning/tree-ensemble-regressor/README.md b/docs/framework/operators/machine-learning/tree-ensemble-regressor/README.md
index cbd3747cf..ae21bfcb1 100644
--- a/docs/framework/operators/machine-learning/tree-ensemble-regressor/README.md
+++ b/docs/framework/operators/machine-learning/tree-ensemble-regressor/README.md
@@ -14,6 +14,9 @@ Orion supports currently only fixed point data types for `TreeEnsembleRegressorT
| -------------------- | ------------------------------------------------------------- |
| Fixed point (signed) | `TreeRegressorTrait` |
+### How to construct `TreeEnsembleRegressor`
+
+You can utilize [this notebook](https://colab.research.google.com/drive/1zZC0tM7I5Mt542_cBsxaWcGPWzgxybGs?usp=sharing#scrollTo=VkXxLxDejrf3) to translate parameters from your ONNX TreeEnsembleRegressor model into Cairo code. Efforts are underway to integrate this functionality into Giza-CLI, aiming to enhance the user experience.
***
diff --git a/docs/framework/operators/neural-network/README.md b/docs/framework/operators/neural-network/README.md
index cd1c92f8d..8343d0c90 100644
--- a/docs/framework/operators/neural-network/README.md
+++ b/docs/framework/operators/neural-network/README.md
@@ -27,6 +27,7 @@ Orion supports currently these `NN` types.
| [`nn.leaky_relu`](nn.leaky\_relu.md) | Applies the leaky rectified linear unit (Leaky ReLU) activation function element-wise. |
| [`nn.sigmoid`](nn.sigmoid.md) | Applies the Sigmoid function to an n-dimensional input tensor. |
| [`nn.softmax`](nn.softmax.md) | Computes softmax activations. |
+| [`nn.softmax_zero`](nn.softmax\_zero.md) | Computes softmax zero. |
| [`nn.logsoftmax`](nn.logsoftmax.md) | Applies the natural log to Softmax function to an n-dimensional input Tensor. |
| [`nn.softsign`](nn.softsign.md) | Applies the Softsign function element-wise. |
| [`nn.softplus`](nn.softplus.md) | Applies the Softplus function element-wise. |
diff --git a/docs/framework/operators/neural-network/nn.softmax_zero.md b/docs/framework/operators/neural-network/nn.softmax_zero.md
new file mode 100644
index 000000000..f5cc9159b
--- /dev/null
+++ b/docs/framework/operators/neural-network/nn.softmax_zero.md
@@ -0,0 +1,64 @@
+# NNTrait::softmax_zero
+
+```rust
+   fn softmax_zero(tensor: @Tensor<T>, axis: usize) -> Tensor<T>;
+```
+
+Applies the Softmax zero function to an n-dimensional input Tensor, rescaling the elements so that they lie in the range \[0,1] and sum to 1, while keeping the zero elements at zero.
+
+The softmax zero on the set $\mathbf{x} = (x_1, ..., x_n)$ is given by:
+
+$$
+\text{softmax zero}(x_i) = \begin{cases}
+0 & \qquad x_i = 0 \\
+\frac{e^{x_i}}{ \sum_{x \in {S}} e^{x}} & \qquad \text{otherwise}
+\end{cases}
+$$
+where $S$ is the subset of the nonzero elements of $\mathbf{x}$, given by
+
+$$
+S = \{ x_j \mid 1 \leq j \leq n,\ x_j \neq 0 \}
+$$
+
+## Args
+
+* `tensor`(`@Tensor<T>`) - The input tensor.
+* `axis`(`usize`) - The axis along which to compute the softmax zero.
+
+## Returns
+
+A Tensor of fixed point numbers with the same shape as the input Tensor.
+
+## Type Constraints
+
+Constrain input and output types to fixed point tensors.
+
+## Examples
+
+```rust
+use core::array::{ArrayTrait, SpanTrait};
+
+use orion::operators::tensor::{TensorTrait, Tensor, FP8x23Tensor};
+use orion::operators::nn::{NNTrait, FP8x23NN};
+use orion::numbers::{FP8x23, FixedTrait};
+
+use core::debug::PrintTrait;
+
+fn softmax_zero_example() -> Tensor<FP8x23> {
+    let tensor = TensorTrait::<FP8x23>::new(
+ shape: array![2, 2].span(),
+ data: array![
+ FixedTrait::new(0, false),
+ FixedTrait::new(8388608, false),
+ FixedTrait::new(16777216, false),
+ FixedTrait::new(25165824, false),
+ ]
+ .span(),
+ );
+
+ return NNTrait::softmax_zero(@tensor, 1);
+}
+>>> [[0,0x800000],[2256043,6132564]]
+ // The fixed point representation of
+ // [[0, 1],[0.2689, 0.7311]]
+```
diff --git a/docs/framework/operators/sequence/README.md b/docs/framework/operators/sequence/README.md
new file mode 100644
index 000000000..6b3599e84
--- /dev/null
+++ b/docs/framework/operators/sequence/README.md
@@ -0,0 +1,32 @@
+# Sequence
+
+A Sequence represents an array of tensors.
+
+```rust
+use orion::operators::sequence;
+```
+
+### Data types
+
+Orion currently supports these `Sequence` types.
+
+| Data type | dtype |
+| ------------------------- | -------------------------------------------------------- |
+| 32-bit integer (signed)   | `Array<Tensor<i32>>`                                      |
+| 8-bit integer (signed)    | `Array<Tensor<i8>>`                                       |
+| 32-bit integer (unsigned) | `Array<Tensor<u32>>`                                      |
+| Fixed point (signed)      | `Array<Tensor<FixedType>>`                                |
+
+### Sequence**Trait**
+
+`SequenceTrait` defines the operations that can be performed on a Sequence of tensors.
+
+| function | description |
+| --- | --- |
+| [`sequence.sequence_construct`](sequence.sequence\_construct.md) | Constructs a tensor sequence containing the input tensors. |
+| [`sequence.sequence_empty`](sequence.sequence\_empty.md) | Returns an empty tensor sequence. |
+| [`sequence.sequence_length`](sequence.sequence\_length.md) | Returns the length of the input sequence. |
+| [`sequence.sequence_insert`](sequence.sequence\_insert.md) | Insert a tensor into a sequence. |
+| [`sequence.sequence_at`](sequence.sequence\_at.md) | Outputs the tensor at the specified position in the input sequence. |
+| [`sequence.sequence_erase`](sequence.sequence\_erase.md) | Erases the tensor at the specified position from the input sequence. |
+| [`sequence.concat_from_sequence`](sequence.concat\_from\_sequence.md) | Concatenate a sequence of tensors into a single tensor. |
+
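+A minimal sketch combining two of these operators (the function name is illustrative):
+
+```rust
+use array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
+use orion::operators::sequence::SequenceTrait;
+
+fn sequence_example() -> Tensor<u32> {
+    // build a sequence from two 2x2 tensors, then query its length
+    let tensor1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span());
+    let tensor2 = TensorTrait::new(shape: array![2, 2].span(), data: array![4, 5, 6, 7].span());
+    let sequence = SequenceTrait::sequence_construct(tensors: array![tensor1, tensor2]);
+    return SequenceTrait::sequence_length(sequence);
+}
+>>> [2]
+```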
diff --git a/docs/framework/operators/tensor/tensor.concat_from_sequence.md b/docs/framework/operators/sequence/sequence.concat_from_sequence.md
similarity index 100%
rename from docs/framework/operators/tensor/tensor.concat_from_sequence.md
rename to docs/framework/operators/sequence/sequence.concat_from_sequence.md
diff --git a/docs/framework/operators/tensor/tensor.sequence_at.md b/docs/framework/operators/sequence/sequence.sequence_at.md
similarity index 100%
rename from docs/framework/operators/tensor/tensor.sequence_at.md
rename to docs/framework/operators/sequence/sequence.sequence_at.md
diff --git a/docs/framework/operators/tensor/tensor.sequence_construct.md b/docs/framework/operators/sequence/sequence.sequence_construct.md
similarity index 78%
rename from docs/framework/operators/tensor/tensor.sequence_construct.md
rename to docs/framework/operators/sequence/sequence.sequence_construct.md
index d5e627bd1..92f005020 100644
--- a/docs/framework/operators/tensor/tensor.sequence_construct.md
+++ b/docs/framework/operators/sequence/sequence.sequence_construct.md
@@ -1,4 +1,4 @@
-## tensor.sequence_construct
+## sequence.sequence_construct
```rust
fn sequence_construct(tensors: Array<Tensor<T>>) -> Array<Tensor<T>>;
A tensor sequence `Array<Tensor<T>>` containing the input tensors.
## Examples
```rust
-use core::array::{ArrayTrait, SpanTrait};
+use array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
+use orion::operators::sequence::SequenceTrait;
fn sequence_construct_example() -> Array> {
let tensor1 = TensorTrait::new(shape: array![2, 2].span(), data: array![0, 1, 2, 3].span());
let tensor2 = TensorTrait::new(shape: array![2, 2].span(), data: array![4, 5, 6, 7].span());
- let result = TensorTrait::sequence_construct(tensors: array![tensor1, tensor2]);
+ let result = SequenceTrait::sequence_construct(tensors: array![tensor1, tensor2]);
return result;
}
>>> [[0, 1, 2, 3], [4, 5, 6, 7]]
diff --git a/docs/framework/operators/tensor/tensor.sequence_empty.md b/docs/framework/operators/sequence/sequence.sequence_empty.md
similarity index 73%
rename from docs/framework/operators/tensor/tensor.sequence_empty.md
rename to docs/framework/operators/sequence/sequence.sequence_empty.md
index 60ea380e5..8c2568759 100644
--- a/docs/framework/operators/tensor/tensor.sequence_empty.md
+++ b/docs/framework/operators/sequence/sequence.sequence_empty.md
@@ -1,4 +1,4 @@
-# tensor.sequence_empty
+## sequence.sequence_empty
```rust
fn sequence_empty() -> Array<Tensor<T>>;
An empty `Array<Tensor<T>>` instance.
Let's create a new empty sequence.
```rust
-use core::array::{ArrayTrait, SpanTrait};
+use array::{ArrayTrait, SpanTrait};
use orion::operators::tensor::{
TensorTrait, // we import the trait
Tensor, // we import the type
U32Tensor // we import the implementation.
};
+use orion::operators::sequence::SequenceTrait;
fn sequence_empty_example() -> Array> {
- let sequence = TensorTrait::sequence_empty();
+ let sequence = SequenceTrait::sequence_empty();
return sequence;
}
diff --git a/docs/framework/operators/tensor/tensor.sequence_erase.md b/docs/framework/operators/sequence/sequence.sequence_erase.md
similarity index 100%
rename from docs/framework/operators/tensor/tensor.sequence_erase.md
rename to docs/framework/operators/sequence/sequence.sequence_erase.md
diff --git a/docs/framework/operators/tensor/tensor.sequence_insert.md b/docs/framework/operators/sequence/sequence.sequence_insert.md
similarity index 100%
rename from docs/framework/operators/tensor/tensor.sequence_insert.md
rename to docs/framework/operators/sequence/sequence.sequence_insert.md
diff --git a/docs/framework/operators/tensor/tensor.sequence_length.md b/docs/framework/operators/sequence/sequence.sequence_length.md
similarity index 100%
rename from docs/framework/operators/tensor/tensor.sequence_length.md
rename to docs/framework/operators/sequence/sequence.sequence_length.md
diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md
index 777e00b78..281135f63 100644
--- a/docs/framework/operators/tensor/README.md
+++ b/docs/framework/operators/tensor/README.md
@@ -98,6 +98,7 @@ use orion::operators::tensor::TensorTrait;
| [`tensor.bitwise_and`](tensor.bitwise\_and.md) | Computes the bitwise AND of two tensors element-wise. |
| [`tensor.bitwise_xor`](tensor.bitwise\_xor.md) | Computes the bitwise XOR of two tensors element-wise. |
| [`tensor.bitwise_or`](tensor.bitwise\_or.md) | Computes the bitwise OR of two tensors element-wise. |
+| [`tensor.resize`](tensor.resize.md) | Resizes the input tensor. |
| [`tensor.round`](tensor.round.md) | Computes the round value of all elements in the input tensor. |
| [`tensor.reduce_l1`](tensor.reduce\_l1.md) | Computes the L1 norm of the input tensor's elements along the provided axes. |
| [`tensor.trilu`](tensor.trilu.md) | Returns the upper or lower triangular part of a tensor or a batch of 2D matrices. |
@@ -106,25 +107,19 @@ use orion::operators::tensor::TensorTrait;
| [`tensor.reduce_l2`](tensor.reduce\_l2.md) | Computes the L2 norm of the input tensor's elements along the provided axes. |
| [`tensor.gather_elements`](tensor.gather\_elements.md) | GatherElements is an indexing operation that produces its output by indexing into the input data tensor at index positions determined by elements of the indices tensor. |
| [`tensor.reduce_min`](tensor.reduce\_min.md) | Computes the min of the input tensor's elements along the provided axes. |
-| [`tensor.sequence_empty`](tensor.sequence\_empty.md) | Returns an empty tensor sequence. |
-| [`tensor.sequence_length`](tensor.sequence\_length.md) | Returns the length of the input sequence. |
-| [`tensor.sequence_insert`](tensor.sequence\_insert.md) | Insert a tensor into a sequence. |
-| [`tensor.sequence_at`](tensor.sequence\_at.md) | Outputs the tensor at the specified position in the input sequence. |
-| [`tensor.sequence_construct`](tensor.sequence\_construct.md) | Constructs a tensor sequence containing the input tensors. |
-| [`tensor.shrink`](tensor.shrink.md) | Shrinks the input tensor element-wise to the output tensor. |
| [`tensor.reduce_mean`](tensor.reduce\_mean.md) | Computes the mean of the input tensor's elements along the provided axes. |
| [`tensor.pow`](tensor.pow.md) | Pow takes input data (Tensor) and exponent Tensor, and produces one output data (Tensor) where the function f(x) = x^exponent, is applied to the data tensor elementwise. |
-| [`tensor.sequence_empty`](tensor.sequence\_empty.md) | Returns an empty tensor sequence. |
| [`tensor.binarizer`](tensor.binarizer.md) | Maps the values of a tensor element-wise to 0 or 1 based on the comparison against a threshold value. |
| [`tensor.array_feature_extractor`](tensor.array\_feature\_extractor.md) | Selects elements of the input tensor based on the indices passed applied to the last tensor axis. |
| [`tensor.reduce_min`](tensor.reduce\_min.md) | Computes the min of the input tensor's elements along the provided axes. |
-| [`tensor.concat_from_sequence`](tensor.concat\_from\_sequence.md) | Concatenate a sequence of tensors into a single tensor. |
| [`tensor.is_nan`](tensor.is\_nan.md) | Returns which elements of the input are NaN. |
| [`tensor.is_inf`](tensor.is\_inf.md) | Maps infinity to true and other values to false. |
| [`tensor.not`](tensor.not.md) | Computes the logical negation of all elements in the input tensor. |
| [`tensor.gather_nd`](tensor.gather\_nd.md) | Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims integer b, this operator gathers slices of data into an output tensor of rank q + r - indices_shape[-1] - 1 - b. |
| [`tensor.reduce_log_sum`](tensor.reduce\_log\_sum.md) | Computes the log sum of the input tensor's elements along the provided axes. |
| [`tensor.erf`](tensor.erf.md) | Computes the error function of the given input tensor element-wise. |
+| [`tensor.layer_normalization`](tensor.layer\_normalization.md) | Computes the layer normalization of the input tensor. |
+| [`tensor.split`](tensor.split.md) | Split a tensor into a list of tensors, along the specified ‘axis’. |
## Arithmetic Operations
diff --git a/docs/framework/operators/tensor/tensor.compress.md b/docs/framework/operators/tensor/tensor.compress.md
new file mode 100644
index 000000000..59cb043b3
--- /dev/null
+++ b/docs/framework/operators/tensor/tensor.compress.md
@@ -0,0 +1,39 @@
+# tensor.compress
+
+```rust
+   fn compress(self: @Tensor<T>, condition: Tensor<usize>, axis: Option<usize>) -> Tensor<T>;
+```
+
+Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index. In case axis is not provided, input is flattened before elements are selected.
+
+## Args
+
+* `self`(`@Tensor<T>`) - The input tensor.
+* `condition`(`Tensor<usize>`) - Rank 1 tensor of booleans to indicate which slices or data elements to be selected. Its length can be less than the input length along the axis or the flattened input size if axis is not specified. In such cases data slices or elements exceeding the condition length are discarded.
+* `axis`(`Option<usize>`) - (Optional) Axis along which to take slices. If not specified, input is flattened before elements being selected. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input).
+
+## Panics
+
+* Panics if condition rank is not equal to 1.
+
+## Returns
+
+A new `Tensor<T>`.
+
+## Example
+
+```rust
+use core::array::{ArrayTrait, SpanTrait};
+
+use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
+
+fn compress_example() -> Tensor<u32> {
+    let tensor = TensorTrait::<u32>::new(
+        shape: array![3, 2].span(),
+        data: array![1, 2, 3, 4, 5, 6].span(),
+    );
+    let condition = TensorTrait::<u32>::new(
+ shape: array![3].span(),
+ data: array![0, 1, 1].span(),
+ );
+
+ return tensor.compress(
+ condition: condition,
+ axis: Option::Some((0)),
+ );
+}
+>>> [[3, 4],
+ [5, 6]]
+```
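+
+When `axis` is `Option::None`, the input is flattened before selection. A hedged sketch reusing the imports and tensors above (elements beyond the condition length are discarded):
+
+```rust
+fn compress_flattened_example() -> Tensor<u32> {
+    let tensor = TensorTrait::<u32>::new(
+        shape: array![3, 2].span(),
+        data: array![1, 2, 3, 4, 5, 6].span(),
+    );
+    let condition = TensorTrait::<u32>::new(
+        shape: array![3].span(),
+        data: array![0, 1, 1].span(),
+    );
+
+    // flattened input is [1, 2, 3, 4, 5, 6]; the condition keeps flattened indices 1 and 2
+    return tensor.compress(condition: condition, axis: Option::None(()));
+}
+>>> [2, 3]
+```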
diff --git a/docs/framework/operators/tensor/tensor.erf.md b/docs/framework/operators/tensor/tensor.erf.md
index 19ce86a94..384a941d0 100644
--- a/docs/framework/operators/tensor/tensor.erf.md
+++ b/docs/framework/operators/tensor/tensor.erf.md
@@ -6,10 +6,6 @@
Computes the error function of the given input tensor element-wise.
-## Args
-
-* `self`(`@Tensor`) - The input tensor.
-
## Returns
A new `Tensor` of the same shape as the input tensor with
diff --git a/docs/framework/operators/tensor/tensor.gather_nd.md b/docs/framework/operators/tensor/tensor.gather_nd.md
index 021d4f235..a922b41ad 100644
--- a/docs/framework/operators/tensor/tensor.gather_nd.md
+++ b/docs/framework/operators/tensor/tensor.gather_nd.md
@@ -21,14 +21,6 @@ Given data tensor of rank r >= 1, indices tensor of rank q >= 1, and batch_dims
## Returns
A new `Tensor<T>`.
-
-## Example
-
-```rust
-use array::{ArrayTrait, SpanTrait};
-
-use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
-
fn gather_nd_example() -> Tensor<u32> {
    let tensor = TensorTrait::<u32>::new(
shape: array![2, 2].span(),
@@ -41,7 +33,7 @@ fn gather_nd_example() -> Tensor {
return tensor.gather_nd(
indices: indices,
- axis: Option::None((0)),
+ axis: Option::Some((0)),
);
}
>>> [[0, 1],
diff --git a/docs/framework/operators/tensor/tensor.layer_normalization.md b/docs/framework/operators/tensor/tensor.layer_normalization.md
new file mode 100644
index 000000000..e52476b1c
--- /dev/null
+++ b/docs/framework/operators/tensor/tensor.layer_normalization.md
@@ -0,0 +1,89 @@
+# tensor.layer_normalization
+
+```rust
+    fn layer_normalization(
+    self: @Tensor<T>,
+    scale: @Tensor<T>,
+    B: Option<@Tensor<T>>,
+    axis: Option<i32>,
+    epsilon: Option<T>,
+    stash_type: Option<usize>,
+) -> (Tensor<T>, Tensor<T>, Tensor<T>);
+```
+
+Layer normalization of the input, in two stages.
+The first stage is standardization, which makes the normalized elements have zero mean and unit variance.
+The second stage then scales and shifts the outcome of the first stage.
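+
+Concretely, following the ONNX LayerNormalization definition (with mean and variance taken over the normalization axes):
+
+$$
+\hat{x} = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}}, \qquad y = \hat{x} \cdot \text{scale} + B
+$$
+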
+## Args
+
+* `self`(`@Tensor<T>`) - The input tensor.
+* `scale`(`@Tensor<T>`) - Scale tensor.
+* `B`(`Option<@Tensor<T>>`) - Bias tensor.
+* `axis`(`Option<i32>`) (default is -1) - The first normalization dimension. If rank(X) is r, axis' allowed range is [-r, r). Negative value means counting dimensions from the back.
+* `epsilon`(`Option<T>`) (default is 0) - The epsilon value to use to avoid division by zero.
+* `stash_type`(`Option<usize>`) - Specifies the computation precision. Unused: the precision is defined by the type of the tensor.
+## Panics
+
+* Panics if condition rank is not equal to 1.
+
+## Returns
+
+A new normalized tensor `Tensor<T>`.
+A tensor containing the mean `Tensor<T>`.
+A tensor containing the inverse standard deviation `Tensor<T>`.
+
+## Example
+
+```rust
+use orion::operators::tensor::{TensorTrait, Tensor};
+use orion::operators::tensor::FP16x16TensorPartialEq;
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::FP16x16Tensor;
+use orion::numbers::{FixedTrait, FP16x16};
+
+fn layer_normalization_example() -> (Tensor<FP16x16>, Tensor<FP16x16>, Tensor<FP16x16>) {
+    let mut shape = ArrayTrait::<usize>::new();
+ shape.append(3);
+ shape.append(4);
+
+ let mut data = ArrayTrait::new();
+ data.append(FP16x16 { mag: 41143, sign: true });
+ data.append(FP16x16 { mag: 51803, sign: false });
+ data.append(FP16x16 { mag: 113556, sign: false });
+ data.append(FP16x16 { mag: 64774, sign: false });
+ data.append(FP16x16 { mag: 866, sign: false });
+ data.append(FP16x16 { mag: 698, sign: true });
+ data.append(FP16x16 { mag: 106500, sign: false });
+ data.append(FP16x16 { mag: 98929, sign: false });
+ data.append(FP16x16 { mag: 7551, sign: false });
+ data.append(FP16x16 { mag: 30689, sign: true });
+ data.append(FP16x16 { mag: 38325, sign: false });
+ data.append(FP16x16 { mag: 48164, sign: false });
+ let X = TensorTrait::new(shape.span(), data.span());
+
+    let mut shape = ArrayTrait::<usize>::new();
+ shape.append(4);
+ let mut data = ArrayTrait::new();
+ data.append(FP16x16 { mag: 49855, sign: false });
+ data.append(FP16x16 { mag: 150787, sign: false });
+ data.append(FP16x16 { mag: 83498, sign: true });
+ data.append(FP16x16 { mag: 30346, sign: false });
+ let scale = TensorTrait::new(shape.span(), data.span());
+
+
+    let mut shape = ArrayTrait::<usize>::new();
+ shape.append(4);
+ let mut data = ArrayTrait::new();
+ data.append(FP16x16 { mag: 54864, sign: true });
+ data.append(FP16x16 { mag: 50952, sign: false });
+ data.append(FP16x16 { mag: 8870, sign: true });
+ data.append(FP16x16 { mag: 23216, sign: true });
+ let bias = TensorTrait::new(shape.span(), data.span());
+
+ return X.layer_normalization(@scale,Option::Some(@bias),Option::None,Option::None,Option::None);
+}
+>>> [[-0.48926553 1.0185822 -0.02138367 -0.39223218]
+ [-0.7945549 0.99696046 0.04332176 -0.412645 ]
+ [-0.5664707 0.7491956 -0.7896356 -0.5320859 ]]
+
+```
diff --git a/docs/framework/operators/tensor/tensor.resize.md b/docs/framework/operators/tensor/tensor.resize.md
new file mode 100644
index 000000000..35164970d
--- /dev/null
+++ b/docs/framework/operators/tensor/tensor.resize.md
@@ -0,0 +1,229 @@
+# tensor.resize
+
+```rust
+    fn resize(
+        self: @Tensor<T>,
+        roi: Option<Tensor<T>>,
+        scales: Option<Span<T>>,
+        sizes: Option<Span<usize>>,
+        antialias: Option<usize>,
+        axes: Option<Span<usize>>,
+        coordinate_transformation_mode: Option<TRANSFORMATION_MODE>,
+        cubic_coeff_a: Option<T>,
+        exclude_outside: Option<bool>,
+        extrapolation_value: Option<T>,
+        keep_aspect_ratio_policy: Option<KEEP_ASPECT_RATIO_POLICY>,
+        mode: Option<MODE>,
+        nearest_mode: Option<NEAREST_MODE>,
+    ) -> Tensor<T>;
+```
+
+Resizes the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood in the input tensor.
+
+## Args
+
+* `self`(`@Tensor<T>`) - The input tensor.
+* `roi` (`Option<Tensor<T>>`) (optional) - 1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is the rank of X or the length of axes, if provided. It only takes effect when coordinate_transformation_mode is "tf_crop_and_resize".
+* `scales` (`Option<Span<T>>`) (optional) - The scale array along each dimension. It takes a value greater than 0. If it's less than 1, it's sampling down, otherwise, it's upsampling. The number of elements of 'scales' should be the same as the rank of input 'X' or the length of 'axes', if provided. One and only one of 'scales' and 'sizes' MUST be specified.
+* `sizes` (`Option<Span<usize>>`) (optional) - Target size of the output tensor. Its interpretation depends on the 'keep_aspect_ratio_policy' value. The number of elements of 'sizes' should be the same as the rank of input 'X', or the length of 'axes', if provided. One and only one of 'scales' and 'sizes' MUST be specified.
+* `antialias` (`Option<usize>`) (default is 0) - If set to 1, "linear" and "cubic" interpolation modes will use an antialiasing filter when downscaling. Antialiasing is achieved by stretching the resampling filter by a factor max(1, 1 / scale).
+* `axes`(`Option<Span<usize>>`) - If provided, it specifies a subset of axes that 'roi', 'scales' and 'sizes' refer to. If not provided, all axes are assumed [0, 1, ..., r-1], where r = rank(data).
+* `coordinate_transformation_mode` (`Option<TRANSFORMATION_MODE>`) (default is half_pixel) - This attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.
+* `cubic_coeff_a` (`Option<T>`) (default is -0.75) - The coefficient 'a' used in cubic interpolation.
+* `exclude_outside` (`Option<bool>`) (default is false) - If set to true, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0.
+* `extrapolation_value` (`Option<T>`) (default is 0.0) - When coordinate_transformation_mode is "tf_crop_and_resize" and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value.
+* `keep_aspect_ratio_policy` (`Option<KEEP_ASPECT_RATIO_POLICY>`) (default is stretch) - This attribute describes how to interpret the `sizes` input with regard to keeping the original aspect ratio of the input, and it is not applicable when the `scales` input is used.
+* `mode` (`Option<MODE>`) (default is nearest) - Three interpolation modes: "nearest", "linear" and "cubic".
+* `nearest_mode` (`Option<NEAREST_MODE>`) (default is round_prefer_floor) - Four modes: "round_prefer_floor" (also known as round half down), "round_prefer_ceil" (also known as round half up), "floor", "ceil". Only used by nearest interpolation.
+
+## Panics
+
+* Panics if both scales and sizes are `Option::None`.
+* Panics if roi is `Option::None` for the coordinate_transformation_mode `tf_crop_and_resize`.
+* Panics if antialias is not `Option::None` for mode `nearest`.
+
+## Returns
+
+A new resized `Tensor<T>` of the dimension given by output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if scale is specified, or output_size if size is specified (note that some values of the parameter `keep_aspect_ratio_policy` can change sizes and therefore the dimension of the output tensor).
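+For instance, in the first example below, scales of 0.6 on the two spatial axes give floor(2 * 0.6) = 1 and floor(4 * 0.6) = 2, so the 2x4 input resizes to a 1x2 output.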
+
+## Example
+
+```rust
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor, FP16x16Tensor, FP16x16TensorPartialEq};
+use orion::operators::tensor::math::resize::{
+ MODE, NEAREST_MODE, KEEP_ASPECT_RATIO_POLICY, TRANSFORMATION_MODE
+};
+use orion::numbers::{FP16x16, FP16x16Impl, FixedTrait};
+use core::debug::PrintTrait;
+
+fn example_resize_downsample_scales_linear() -> Tensor<FP16x16> {
+ let mut data = TensorTrait::<
+ FP16x16
+ >::new(
+ shape: array![1, 1, 2, 4].span(),
+ data: array![
+            FixedTrait::<FP16x16>::new(65536, false), //1
+            FixedTrait::<FP16x16>::new(131072, false), //2
+            FixedTrait::<FP16x16>::new(196608, false), //3
+            FixedTrait::<FP16x16>::new(262144, false), //4
+            FixedTrait::<FP16x16>::new(327680, false), //5
+            FixedTrait::<FP16x16>::new(393216, false), //6
+            FixedTrait::<FP16x16>::new(458752, false), //7
+            FixedTrait::<FP16x16>::new(524288, false), //8
+ ]
+ .span(),
+ );
+ let mut scales = array![
+        FixedTrait::<FP16x16>::new(65536, false), //1
+        FixedTrait::<FP16x16>::new(65536, false),
+        FixedTrait::<FP16x16>::new(39322, false), //0.6
+        FixedTrait::<FP16x16>::new(39322, false)
+ ]
+ .span();
+
+ let scales = Option::Some(scales);
+
+ return data.resize(
+ Option::None,
+ scales,
+ Option::None,
+ Option::None,
+ Option::None,
+ Option::None,
+ Option::None,
+ Option::None,
+ Option::None,
+ Option::None,
+ Option::Some(MODE::LINEAR),
+ Option::None,
+ );
+
+}
+>>> [[[[2.6666665 4.3333331]]]]
+
+
+
+fn example_resize_tf_crop_and_resize_extrapolation_value() -> Tensor<FP16x16> {
+ let mut data = TensorTrait::<
+ FP16x16
+ >::new(
+ shape: array![1, 1, 4, 4].span(),
+ data: array![
+            FixedTrait::<FP16x16>::new(65536, false),
+            FixedTrait::<FP16x16>::new(131072, false),
+            FixedTrait::<FP16x16>::new(196608, false),
+            FixedTrait::<FP16x16>::new(262144, false),
+            FixedTrait::<FP16x16>::new(327680, false),
+            FixedTrait::<FP16x16>::new(393216, false),
+            FixedTrait::<FP16x16>::new(458752, false),
+            FixedTrait::<FP16x16>::new(524288, false),
+            FixedTrait::<FP16x16>::new(589824, false),
+            FixedTrait::<FP16x16>::new(655360, false),
+            FixedTrait::<FP16x16>::new(720896, false),
+            FixedTrait::<FP16x16>::new(786432, false),
+            FixedTrait::<FP16x16>::new(851968, false),
+            FixedTrait::<FP16x16>::new(917504, false),
+            FixedTrait::<FP16x16>::new(983040, false),
+            FixedTrait::<FP16x16>::new(1048576, false),
+ ]
+ .span(),
+ );
+
+ let mut roi = TensorTrait::<
+ FP16x16
+ >::new(
+ shape: array![8].span(),
+ data: array![
+            FixedTrait::<FP16x16>::new(0, false),
+            FixedTrait::<FP16x16>::new(0, false),
+            FixedTrait::<FP16x16>::new(26214, false),
+            FixedTrait::<FP16x16>::new(39322, false),
+            FixedTrait::<FP16x16>::new(65536, false),
+            FixedTrait::<FP16x16>::new(65536, false),
+            FixedTrait::<FP16x16>::new(78643, false),
+            FixedTrait::<FP16x16>::new(111411, false),
+ ]
+ .span(),
+ );
+ let roi = Option::Some(roi);
+
+ let mut sizes = array![1, 1, 3, 3].span();
+ let sizes = Option::Some(sizes);
+
+    let extrapolation_value = Option::Some(FixedTrait::<FP16x16>::new(655360, false));
+
+ return data.resize(
+ roi,
+ Option::None,
+ sizes,
+ Option::None,
+ Option::None,
+ Option::Some(TRANSFORMATION_MODE::TF_CROP_AND_RESIZE),
+ Option::None,
+ Option::None,
+ extrapolation_value,
+ Option::None,
+ Option::Some(MODE::LINEAR),
+ Option::None,
+ );
+
+}
+>>> [[[[ 7.6000004 10. 10. ]
+ [12.400001 10. 10. ]
+ [10. 10. 10. ]]]]
+
+
+
+fn example_resize_downsample_sizes_cubic_antialias() -> Tensor<FP16x16> {
+ let mut data = TensorTrait::<
+ FP16x16
+ >::new(
+ shape: array![1, 1, 4, 4].span(),
+ data: array![
+            FixedTrait::<FP16x16>::new(65536, false),
+            FixedTrait::<FP16x16>::new(131072, false),
+            FixedTrait::<FP16x16>::new(196608, false),
+            FixedTrait::<FP16x16>::new(262144, false),
+            FixedTrait::<FP16x16>::new(327680, false),
+            FixedTrait::<FP16x16>::new(393216, false),
+            FixedTrait::<FP16x16>::new(458752, false),
+            FixedTrait::<FP16x16>::new(524288, false),
+            FixedTrait::<FP16x16>::new(589824, false),
+            FixedTrait::<FP16x16>::new(655360, false),
+            FixedTrait::<FP16x16>::new(720896, false),
+            FixedTrait::<FP16x16>::new(786432, false),
+            FixedTrait::<FP16x16>::new(851968, false),
+            FixedTrait::<FP16x16>::new(917504, false),
+            FixedTrait::<FP16x16>::new(983040, false),
+            FixedTrait::<FP16x16>::new(1048576, false),
+ ]
+ .span(),
+ );
+
+ let antialias = Option::Some(1);
+
+ let mut sizes = array![1, 1, 3, 3].span();
+ let sizes = Option::Some(sizes);
+
+ return data.resize(
+ Option::None,
+ Option::None,
+ sizes,
+ antialias,
+ Option::None,
+ Option::None,
+ Option::None,
+ Option::None,
+ Option::None,
+ Option::None,
+ Option::Some(MODE::CUBIC),
+ Option::None,
+ );
+}
+
+>>> [[[[ 1.7750092 3.1200073 4.4650054]
+ [ 7.1550016 8.5 9.844998 ]
+ [12.534994 13.8799925 15.224991 ]]]]
+
+```
diff --git a/docs/framework/operators/tensor/tensor.split.md b/docs/framework/operators/tensor/tensor.split.md
new file mode 100644
index 000000000..26b4a546f
--- /dev/null
+++ b/docs/framework/operators/tensor/tensor.split.md
@@ -0,0 +1,47 @@
+# tensor.split
+
+```rust
+   fn split(self: @Tensor<T>, axis: usize, num_outputs: Option<usize>, split: Option<Tensor<usize>>
+    ) -> Array<Tensor<T>>;
+```
+
+Split a tensor into a list of tensors, along the specified ‘axis’.
+
+## Args
+
+* `self`(`@Tensor<T>`) - The input tensor.
+* `axis`(`usize`) - The axis along which to split.
+* `num_outputs`(`Option<usize>`) - Number of outputs to split parts of the tensor into.
+* `split`(`Option<Tensor<usize>>`) - Optional length of each output.
+
+## Panics
+
+* Panics if 'axis' is outside the accepted range [-rank, rank-1] where rank = rank(input).
+* Panics if any 'split' value is negative, or if the sum of the 'split' values differs from the dimension at the specified ‘axis’.
+* Panics if 'split' and 'num_outputs' are both specified, or if neither is specified.
+
+## Returns
+
+One or more outputs forming list of tensors after splitting.
+
+## Examples
+
+```rust
+use core::array::{ArrayTrait, SpanTrait};
+use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
+use core::option::OptionTrait;
+fn split_tensor_example() -> Array<Tensor<u32>> {
+    let tensor: Tensor<u32> = TensorTrait::<u32>::new(
+ shape: array![2,4].span(),
+ data: array![
+ 0, 1, 2, 3, 4, 5, 6, 7
+ ].span(),
+ );
+ let num_outputs = Option::Some(2);
+    // split = Option::Some(TensorTrait::<usize>::new(shape: array![2].span(), data: array![1, 1].span()));
+    let split_num: Option<Tensor<usize>> = Option::None(());
+ // We can call `split` function as follows.
+ return tensor.split(0, num_outputs, split_num);
+}
+>>> [[0,1],[4,5]]
+ [[2,3],[6,7]]
+```
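+
+Alternatively, explicit section lengths can be passed through `split` instead of `num_outputs`. A minimal sketch reusing the imports above (the lengths are illustrative and must sum to the dimension at the specified ‘axis’):
+
+```rust
+fn split_with_sizes_example() -> Array<Tensor<u32>> {
+    let tensor: Tensor<u32> = TensorTrait::<u32>::new(
+        shape: array![2, 4].span(),
+        data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
+    );
+    // lengths 1 + 3 along axis 1 sum to the dim value 4
+    let split = Option::Some(TensorTrait::<usize>::new(shape: array![2].span(), data: array![1, 3].span()));
+    return tensor.split(1, Option::None(()), split);
+}
+>>> [[0],[4]]
+    [[1,2,3],[5,6,7]]
+```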
diff --git a/nodegen/helpers.py b/nodegen/helpers.py
index cf876ccc0..03fae966c 100644
--- a/nodegen/helpers.py
+++ b/nodegen/helpers.py
@@ -10,6 +10,8 @@
class FixedImpl(Enum):
FP8x23 = 'FP8x23'
FP16x16 = 'FP16x16'
+ FP64x64 = 'FP64x64'
+
def to_fp(x: np.ndarray, fp_impl: FixedImpl):
@@ -18,15 +20,19 @@ def to_fp(x: np.ndarray, fp_impl: FixedImpl):
return (x * 2**23).astype(np.int64)
case FixedImpl.FP16x16:
return (x * 2**16).astype(np.int64)
+ case FixedImpl.FP64x64:
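+            # not cast to int64: magnitudes scaled by 2**64 can exceed the int64 range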
+ return (x * 2**64)
class Dtype(Enum):
FP8x23 = 'FP8x23'
FP16x16 = 'FP16x16'
+ FP64x64 = 'FP64x64'
I8 = 'i8'
I32 = 'i32'
U32 = 'u32'
BOOL = 'bool'
+ COMPLEX64 = 'complex64'
class Tensor:
@@ -42,6 +48,7 @@ def __init__(self, dtype: Dtype, shape: tuple, data: np.ndarray):
class Trait(Enum):
TENSOR = 'TENSOR'
NN = 'NN'
+ SEQUENCE = 'SEQUENCE'
def make_test(inputs: list[Tensor | Sequence], output: Tensor | Sequence, func_sig: str, name: str, trait: Trait = Trait.TENSOR):
@@ -166,8 +173,15 @@ def get_data_statement(data: np.ndarray, dtype: Dtype) -> list[str]:
return ["FP8x23 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
case Dtype.FP16x16:
return ["FP16x16 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
+ case Dtype.FP64x64:
+ return ["FP64x64 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
case Dtype.BOOL:
return [str(x).lower() for x in data.flatten()]
+ case Dtype.COMPLEX64:
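+            # each complex value is emitted as a complex64 struct with FP64x64 real and img parts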
+ return ["complex64 { "+"real: FP64x64 { "+f"mag: {abs(int(np.real(x)))}, sign: {str(np.real(x) < 0).lower()} "+"} , img: FP64x64 { "+f"mag: {abs(int(np.imag(x)))}, sign: {str(np.imag(x) < 0).lower()} "+"} }" for x in data.flatten()]
+
+
+
def get_data_statement_for_sequences(data: Sequence, dtype: Dtype) -> list[list[str]]:
@@ -186,7 +200,13 @@ def get_test_refs(dtype: Dtype, trait: Trait) -> list[str]:
if trait == Trait.NN and dtype == Dtype.BOOL:
raise Exception("NN trait does not support bool dtype")
- dtype_ref = dtype_to_nn[dtype] if trait == Trait.NN else dtype_to_tensor[dtype]
+ if trait == Trait.NN:
+ dtype_ref = dtype_to_nn[dtype]
+ elif trait == Trait.SEQUENCE:
+ dtype_ref = dtype_to_sequence[dtype]
+ else:
+ dtype_ref = dtype_to_tensor[dtype]
+
refs = [
*trait_to_ref[trait],
*dtype_ref,
@@ -217,6 +237,10 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
"orion::numbers::FixedTrait",
"orion::operators::nn::NNTrait",
],
+ Trait.SEQUENCE: [
+ "array::{ArrayTrait, SpanTrait}",
+ "orion::operators::sequence::SequenceTrait",
+ ],
}
@@ -227,6 +251,7 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
Dtype.FP8x23: ["orion::operators::tensor::FP8x23Tensor",],
Dtype.FP16x16: ["orion::operators::tensor::FP16x16Tensor",],
Dtype.BOOL: ["orion::operators::tensor::BoolTensor",],
+ Dtype.COMPLEX64: ["orion::operators::tensor::Complex64Tensor",],
}
@@ -239,6 +264,15 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
}
+dtype_to_sequence = {
+ Dtype.U32: ["orion::operators::sequence::U32Sequence",],
+ Dtype.I32: ["orion::operators::sequence::I32Sequence",],
+ Dtype.I8: ["orion::operators::sequence::I8Sequence",],
+ Dtype.FP8x23: ["orion::operators::sequence::FP8x23Sequence",],
+ Dtype.FP16x16: ["orion::operators::sequence::FP16x16Sequence",],
+}
+
+
dtype_to_partial_eq = {
Dtype.U32: ["orion::operators::tensor::U32TensorPartialEq",],
Dtype.I32: ["orion::operators::tensor::I32TensorPartialEq",],
@@ -246,6 +280,7 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
Dtype.FP8x23: ["orion::operators::tensor::FP8x23TensorPartialEq",],
Dtype.FP16x16: ["orion::operators::tensor::FP16x16TensorPartialEq",],
Dtype.BOOL: ["orion::operators::tensor::BoolTensorPartialEq",],
+ Dtype.COMPLEX64: ["orion::operators::tensor::Complex64TensorPartialEq",],
}
@@ -256,4 +291,5 @@ def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
Dtype.FP8x23: ["orion::numbers::{FixedTrait, FP8x23}",],
Dtype.FP16x16: ["orion::numbers::{FixedTrait, FP16x16}",],
Dtype.BOOL: [],
+ Dtype.COMPLEX64: ["orion::numbers::{NumberTrait, complex64}",],
}
\ No newline at end of file
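
The helpers changes above wire in three independent pieces: FP64x64/complex64 literal emission in get_data_statement, a Trait.SEQUENCE branch in get_test_refs, and the dtype_to_sequence import table. As a rough sketch of what the complex64 branch emits (assuming the same 2**64 scaling that to_fp applies for FixedImpl.FP64x64; the real emitter receives pre-scaled, flattened tensor data), a single value renders like this:

    import numpy as np

    # Hypothetical standalone rendering of one complex64 Cairo literal.
    def complex64_literal(x: complex, scale: int = 2**64) -> str:
        re, im = int(np.real(x) * scale), int(np.imag(x) * scale)
        return ("complex64 { "
                + f"real: FP64x64 {{ mag: {abs(re)}, sign: {str(re < 0).lower()} }} , "
                + f"img: FP64x64 {{ mag: {abs(im)}, sign: {str(im < 0).lower()} }} }}")

    # complex64 { real: FP64x64 { mag: 18446744073709551616, sign: false } , img: ... }
    print(complex64_literal(1 + 2j))
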
diff --git a/nodegen/node/compress.py b/nodegen/node/compress.py
new file mode 100644
index 000000000..cca518f54
--- /dev/null
+++ b/nodegen/node/compress.py
@@ -0,0 +1,326 @@
+import numpy as np
+from nodegen.node import RunAll
+from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
+
+class Compress(RunAll):
+
+ @staticmethod
+ def compress_fp16x16():
+
+ def compress_3D():
+ def default():
+ x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
+ x2 = np.array([0, 1, 1]).astype(np.uint32)
+ y = x1.compress(x2, axis=0)
+
+ x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(
+ y.flatten(), FixedImpl.FP16x16))
+
+ name = "compress_fp16x16_3d_default"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))",
+ name= name)
+
+ def axis1():
+ x1 = np.arange(0,180).reshape(3,4,3,5).astype(np.int64)
+ x2 = np.array([1, 1, 1, 0]).astype(np.int64)
+ y = x1.compress(x2, axis=1)
+
+ x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(
+ y.flatten(), FixedImpl.FP16x16))
+
+ name = "compress_fp16x16_3d_axis1"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))",
+ name= name)
+
+ def axis2():
+ x1 = np.arange(0,48).reshape(4,3,4).astype(np.int64)
+ x2 = np.array([1, 0, 1, 1]).astype(np.int64)
+ y = x1.compress(x2, axis=2)
+
+ x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(
+ y.flatten(), FixedImpl.FP16x16))
+
+ name = "compress_fp16x16_3d_axis2"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
+ name= name)
+
+ def axis3():
+ x1 = np.arange(0,96).reshape(4,3,4, 2).astype(np.int64)
+ x2 = np.array([1, 0]).astype(np.int64)
+ y = x1.compress(x2, axis=3)
+
+ x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(
+ y.flatten(), FixedImpl.FP16x16))
+
+ name = "compress_fp16x16_3d_axis3"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(3))",
+ name= name)
+
+ def noaxis():
+ x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
+ x2 = np.array([1, 0, 1, 0, 1, 1, 1, 1, 1]).astype(np.int64)
+ y = x1.compress(x2)
+
+ x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(
+ y.flatten(), FixedImpl.FP16x16))
+
+ name = "compress_fp16x16_3d_noaxis"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::None(()))",
+ name= name)
+
+ default()
+ axis1()
+ axis2()
+ axis3()
+ noaxis()
+ compress_3D()
+
+ @staticmethod
+ def compress_fp8x23():
+
+ def compress_3D():
+ def default():
+ x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
+ x2 = np.array([0, 1, 1]).astype(np.uint32)
+ y = x1.compress(x2, axis=0)
+
+ x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
+
+ name = "compress_fp8x23_3d_default"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))",
+ name= name)
+
+ def axis1():
+ x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
+ x2 = np.array([0, 1, 1]).astype(np.uint32)
+ y = x1.compress(x2, axis=1)
+
+ x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
+
+ name = "compress_fp8x23_3d_axis1"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))",
+ name= name)
+
+ def axis2():
+ x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
+ x2 = np.array([0, 1, 1]).astype(np.uint32)
+ y = x1.compress(x2, axis=2)
+
+ x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
+
+ name = "compress_fp8x23_3d_axis2"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
+ name= name)
+
+ default()
+ axis1()
+ axis2()
+ compress_3D()
+
+ @staticmethod
+ def compress_i8():
+
+ def compress_3D():
+ def default():
+ x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8)
+ x2 = np.array([0, 1, 1]).astype(np.uint8)
+ y = x1.compress(x2, axis=0)
+
+ x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.I8, y.shape, y.flatten())
+
+ name = "compress_i8_3d_default"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))",
+ name= name)
+
+ def axis1():
+ x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8)
+ x2 = np.array([0, 1, 1]).astype(np.uint8)
+ y = x1.compress(x2, axis=1)
+
+ x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.I8, y.shape, y.flatten())
+
+ name = "compress_i8_3d_axis1"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))",
+ name= name)
+
+ def axis2():
+ x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8)
+ x2 = np.array([0, 1, 1]).astype(np.uint8)
+ y = x1.compress(x2, axis=2)
+
+ x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.I8, y.shape, y.flatten())
+
+ name = "compress_i8_3d_axis2"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
+ name= name)
+
+ default()
+ axis1()
+ axis2()
+ compress_3D()
+
+
+ @staticmethod
+ def compress_i32():
+
+ def compress_3D():
+ def default():
+ x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32)
+ x2 = np.array([0, 1, 1]).astype(np.int32)
+ y = x1.compress(x2, axis=0)
+
+ x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.I32, y.shape, y.flatten())
+
+ name = "compress_i32_3d_default"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))",
+ name= name)
+
+ def axis1():
+ x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32)
+ x2 = np.array([0, 1, 1]).astype(np.int32)
+ y = x1.compress(x2, axis=1)
+
+ x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.I32, y.shape, y.flatten())
+
+ name = "compress_i32_3d_axis1"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))",
+ name= name)
+
+ def axis2():
+ x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32)
+ x2 = np.array([0, 1, 1]).astype(np.int32)
+ y = x1.compress(x2, axis=2)
+
+ x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.I32, y.shape, y.flatten())
+
+ name = "compress_i32_3d_axis2"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
+ name= name)
+
+ default()
+ axis1()
+ axis2()
+ compress_3D()
+
+ @staticmethod
+ def compress_u32():
+
+ def compress_3D():
+ def default():
+ x1 = np.arange(0,48).reshape(4,4,3).astype(np.uint32)
+ x2 = np.array([1, 1]).astype(np.uint32)
+ y = x1.compress(x2, axis=0)
+
+ x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.U32, y.shape, y.flatten())
+
+ name = "compress_u32_3d_default"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))",
+ name= name)
+
+ def axis1():
+ x1 = np.arange(0,36).reshape(3,4,3).astype(np.uint32)
+ x2 = np.array([0, 1, 1]).astype(np.uint32)
+ y = x1.compress(x2, axis=1)
+
+ x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.U32, y.shape, y.flatten())
+
+ name = "compress_u32_3d_axis1"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))",
+ name= name)
+
+ def axis2():
+ x1 = np.arange(0,48).reshape(3,4,4).astype(np.uint32)
+ x2 = np.array([0, 1, 1]).astype(np.uint32)
+ y = x1.compress(x2, axis=2)
+
+ x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.U32, y.shape, y.flatten())
+
+ name = "compress_u32_3d_axis2"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
+ name= name)
+
+ def axis2_2():
+ x1 = np.arange(0,60).reshape(3,4,5).astype(np.uint32)
+ x2 = np.array([0, 1, 1]).astype(np.uint32)
+ y = x1.compress(x2, axis=2)
+
+ x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.U32, y.shape, y.flatten())
+
+ name = "compress_u32_3d_axis2_2"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
+ name= name)
+
+ def axis3():
+ x1 = np.arange(0,270).reshape(3,3,5,6).astype(np.uint32)
+ x2 = np.array([0, 1, 1,1,0,1]).astype(np.uint32)
+ y = x1.compress(x2, axis=3)
+
+ x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
+ x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
+ y = Tensor(Dtype.U32, y.shape, y.flatten())
+
+ name = "compress_u32_3d_axis3"
+ make_test(
+ inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(3))",
+ name= name)
+
+ default()
+ axis1()
+ axis2()
+ axis2_2()
+ axis3()
+ compress_3D()
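
Each generator above defers to numpy's compress for the expected output, so the generated Cairo tests inherit its semantics. A minimal reminder of those semantics (illustration only, not part of the patch):

    import numpy as np

    # compress keeps the slices whose condition entry is truthy along `axis`;
    # without an axis it operates on the flattened array (the `noaxis` case).
    x = np.arange(27).reshape(3, 3, 3)
    assert x.compress([0, 1, 1], axis=0).shape == (2, 3, 3)
    assert x.compress([1, 0, 1, 0, 1, 1, 1, 1, 1]).shape == (7,)
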
diff --git a/nodegen/node/concat_from_sequence.py b/nodegen/node/concat_from_sequence.py
index 4918785ea..eb6d6c9e1 100644
--- a/nodegen/node/concat_from_sequence.py
+++ b/nodegen/node/concat_from_sequence.py
@@ -1,6 +1,6 @@
import numpy as np
from nodegen.node import RunAll
-from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
+from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
class Concat_from_sequence(RunAll):
@@ -25,7 +25,7 @@ def new_axis_zero():
concatenated_tensor = Tensor(Dtype.U32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_u32_new_axis_zero"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(0))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(0))", name, Trait.SEQUENCE)
def new_axis_one():
sequence = []
@@ -45,7 +45,7 @@ def new_axis_one():
concatenated_tensor = Tensor(Dtype.U32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_u32_new_axis_one"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(1))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(1))", name, Trait.SEQUENCE)
def new_axis_default():
sequence = []
@@ -65,7 +65,7 @@ def new_axis_default():
concatenated_tensor = Tensor(Dtype.U32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_u32_new_axis_default"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::None(()))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::None(()))", name, Trait.SEQUENCE)
new_axis_zero()
new_axis_one()
@@ -92,7 +92,7 @@ def new_axis_zero():
concatenated_tensor = Tensor(Dtype.I32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i32_new_axis_zero"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(0))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(0))", name, Trait.SEQUENCE)
def new_axis_one():
sequence = []
@@ -112,7 +112,7 @@ def new_axis_one():
concatenated_tensor = Tensor(Dtype.I32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i32_new_axis_one"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(1))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(1))", name, Trait.SEQUENCE)
def new_axis_default():
sequence = []
@@ -132,7 +132,7 @@ def new_axis_default():
concatenated_tensor = Tensor(Dtype.I32, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i32_new_axis_default"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::None(()))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::None(()))", name, Trait.SEQUENCE)
new_axis_zero()
new_axis_one()
@@ -159,7 +159,7 @@ def new_axis_zero():
concatenated_tensor = Tensor(Dtype.I8, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i8_new_axis_zero"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(0))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(0))", name, Trait.SEQUENCE)
def new_axis_one():
sequence = []
@@ -179,7 +179,7 @@ def new_axis_one():
concatenated_tensor = Tensor(Dtype.I8, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i8_new_axis_one"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(1))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(1))", name, Trait.SEQUENCE)
def new_axis_default():
sequence = []
@@ -199,7 +199,7 @@ def new_axis_default():
concatenated_tensor = Tensor(Dtype.I8, concatenated_tensor.shape, concatenated_tensor.flatten())
name = "concat_from_sequence_i8_new_axis_default"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::None(()))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::None(()))", name, Trait.SEQUENCE)
new_axis_zero()
new_axis_one()
@@ -226,7 +226,7 @@ def new_axis_zero():
concatenated_tensor = Tensor(Dtype.FP8x23, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP8x23))
name = "concat_from_sequence_fp8x23_new_axis_zero"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(0))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(0))", name, Trait.SEQUENCE)
def new_axis_one():
sequence = []
@@ -246,7 +246,7 @@ def new_axis_one():
concatenated_tensor = Tensor(Dtype.FP8x23, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP8x23))
name = "concat_from_sequence_fp8x23_new_axis_one"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(1))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(1))", name, Trait.SEQUENCE)
def new_axis_default():
sequence = []
@@ -266,7 +266,7 @@ def new_axis_default():
concatenated_tensor = Tensor(Dtype.FP8x23, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP8x23))
name = "concat_from_sequence_fp8x23_new_axis_default"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::None(()))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::None(()))", name, Trait.SEQUENCE)
new_axis_zero()
new_axis_one()
@@ -293,7 +293,7 @@ def new_axis_zero():
concatenated_tensor = Tensor(Dtype.FP16x16, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP16x16))
name = "concat_from_sequence_fp16x16_new_axis_zero"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(0))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(0))", name, Trait.SEQUENCE)
def new_axis_one():
sequence = []
@@ -313,7 +313,7 @@ def new_axis_one():
concatenated_tensor = Tensor(Dtype.FP16x16, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP16x16))
name = "concat_from_sequence_fp16x16_new_axis_one"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(1))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::Some(1))", name, Trait.SEQUENCE)
def new_axis_default():
sequence = []
@@ -333,7 +333,7 @@ def new_axis_default():
concatenated_tensor = Tensor(Dtype.FP16x16, concatenated_tensor.shape, to_fp(concatenated_tensor.flatten(), FixedImpl.FP16x16))
name = "concat_from_sequence_fp16x16_new_axis_default"
- make_test([sequence], concatenated_tensor, "TensorTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::None(()))", name)
+ make_test([sequence], concatenated_tensor, "SequenceTrait::concat_from_sequence(input_0, IntegerTrait::<i32>::new(1, false), Option::None(()))", name, Trait.SEQUENCE)
new_axis_zero()
new_axis_one()
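
The rewrite above only swaps the trait the generated Cairo code calls (SequenceTrait instead of TensorTrait) and passes Trait.SEQUENCE so make_test pulls in the sequence imports; the numpy reference values are untouched. For orientation, the new_axis flag in these tests corresponds to the usual stack-versus-concatenate distinction (a sketch of the assumed semantics, not code from the patch):

    import numpy as np

    # new_axis == 1 stacks along a fresh axis; new_axis == 0 (or the default)
    # concatenates along the existing axis.
    seq = [np.full((2, 2), i) for i in range(3)]
    assert np.concatenate(seq, axis=0).shape == (6, 2)   # new_axis == 0
    assert np.stack(seq, axis=0).shape == (3, 2, 2)      # new_axis == 1
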
diff --git a/nodegen/node/layer_normalization.py b/nodegen/node/layer_normalization.py
new file mode 100644
index 000000000..54f6e63fb
--- /dev/null
+++ b/nodegen/node/layer_normalization.py
@@ -0,0 +1,152 @@
+import numpy as np
+from nodegen.node import RunAll
+from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
+
+import onnx
+from onnx.backend.test.case.base import Base
+from onnx.backend.test.case.node import expect
+
+
+def _layer_normalization(X, W, B, axis=-1, epsilon=1e-5):
+ X_shape = X.shape
+ X_rank = len(X_shape)
+ if axis < 0:
+ axis = axis + X_rank
+ unsqueezed_rank = X_rank - axis
+ reduction_shape = X_shape[0:axis] + (1,) * unsqueezed_rank
+
+ row_number = 1
+ col_number = 1
+ for i in range(X_rank):
+ if i < axis:
+ row_number *= X_shape[i]
+ else:
+ col_number *= X_shape[i]
+ x_mat = np.reshape(X, (row_number, col_number))
+ x_mean = np.sum(x_mat, axis=1, keepdims=True) / col_number
+ x_diff = x_mat - x_mean
+ x_squared_diff = x_diff * x_diff
+ variance = np.sum(x_squared_diff, axis=1, keepdims=True) / col_number
+ variance_eps = variance + epsilon
+ std_dev = np.sqrt(variance_eps)
+ inv_std_dev = np.reciprocal(std_dev)
+ y_mat = x_diff * inv_std_dev
+ Y = np.reshape(y_mat, X_shape) * W + B
+ X_mean = np.reshape(x_mean, reduction_shape)
+ X_inv_std_dev = np.reshape(inv_std_dev, reduction_shape)
+
+ return Y, X_mean, X_inv_std_dev
+
+
+def calculate_normalized_shape(X_shape, axis):
+ X_rank = len(X_shape)
+ if axis < 0:
+ axis = axis + X_rank
+ return X_shape[axis:]
+
+
+class Layer_normalization(RunAll):
+ @staticmethod
+ def export4d() -> None:
+ X = np.random.randn(2, 3, 4, 5).astype(np.float32)
+
+ def case(axis: int) -> None:
+ normalized_shape = calculate_normalized_shape(X.shape, axis)
+ W = np.random.randn(*normalized_shape).astype(np.float32)
+ B = np.random.randn(*normalized_shape).astype(np.float32)
+ Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis)
+
+ if axis < 0:
+ name = f"layer_normalization_4d_axis_negative_{-axis}"
+ func_sig = f"input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(IntegerTrait::::new({-axis}, true)),Option::None,Option::None)"
+ else:
+ name = f"layer_normalization_4d_axis{axis}"
+ func_sig = f"input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(IntegerTrait::::new({axis}, false)),Option::None,Option::None)"
+
+
+ x = Tensor(Dtype.FP8x23, X.shape, to_fp(X.flatten(), FixedImpl.FP8x23))
+ w = Tensor(Dtype.FP8x23, W.shape, to_fp(W.flatten(), FixedImpl.FP8x23))
+ b = Tensor(Dtype.FP8x23, B.shape, to_fp(B.flatten(), FixedImpl.FP8x23))
+ y = Tensor(Dtype.FP8x23, Y.shape, to_fp(Y.flatten(), FixedImpl.FP8x23))
+
+ make_test([x,w,b], y, func_sig, name)
+
+
+ for i in range(len(X.shape)):
+ case(i)
+ case(i - len(X.shape))
+
+ @staticmethod
+ def export_default_axis() -> None:
+ X = np.random.randn(2, 3, 4, 5).astype(np.float32)
+
+ normalized_shape = calculate_normalized_shape(X.shape, -1)
+ W = np.random.randn(*normalized_shape).astype(np.float32)
+ B = np.random.randn(*normalized_shape).astype(np.float32)
+ Y, mean, inv_std_dev = _layer_normalization(X, W, B)
+
+ x = Tensor(Dtype.FP16x16, X.shape, to_fp(X.flatten(), FixedImpl.FP16x16))
+ w = Tensor(Dtype.FP16x16, W.shape, to_fp(W.flatten(), FixedImpl.FP16x16))
+ b = Tensor(Dtype.FP16x16, B.shape, to_fp(B.flatten(), FixedImpl.FP16x16))
+ y = Tensor(Dtype.FP16x16, Y.shape, to_fp(Y.flatten(), FixedImpl.FP16x16))
+
+ name = "layer_normalization_default_axis"
+ make_test([x,w,b], y, "input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None)", name)
+
+ @staticmethod
+ def export3d_epsilon() -> None:
+ epsilon = 1e-1
+ X = np.random.randn(2, 3, 5).astype(np.float32)
+
+ def case(axis: int) -> None:
+ normalized_shape = calculate_normalized_shape(X.shape, axis)
+ W = np.random.randn(*normalized_shape).astype(np.float32)
+ B = np.random.randn(*normalized_shape).astype(np.float32)
+ Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis, epsilon)
+
+ if axis < 0:
+ name = f"layer_normalization_3d_axis_negative_{-axis}_epsilon"
+ func_sig = f"input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(IntegerTrait::::new({-axis}, true)),Option::Some(FixedTrait::new(6554, false)),Option::None)"
+ else:
+ name = f"layer_normalization_3d_axis{axis}_epsilon"
+ func_sig = f"input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::Some(IntegerTrait::::new({axis}, false)),Option::Some(FixedTrait::new(6554, false)),Option::None)"
+
+ x = Tensor(Dtype.FP16x16, X.shape, to_fp(X.flatten(), FixedImpl.FP16x16))
+ w = Tensor(Dtype.FP16x16, W.shape, to_fp(W.flatten(), FixedImpl.FP16x16))
+ b = Tensor(Dtype.FP16x16, B.shape, to_fp(B.flatten(), FixedImpl.FP16x16))
+ y = Tensor(Dtype.FP16x16, Y.shape, to_fp(Y.flatten(), FixedImpl.FP16x16))
+
+ make_test([x,w,b], y, func_sig, name)
+
+
+ for i in range(len(X.shape)):
+ case(i)
+ case(i - len(X.shape))
+
+ @staticmethod
+ def test_2d_example() -> None:
+ X = np.random.randn(3, 4).astype(np.float32)
+
+ def case(axis: int) -> None:
+ normalized_shape = calculate_normalized_shape(X.shape, axis)
+ W = np.random.randn(*normalized_shape).astype(np.float32)
+ B = np.random.randn(*normalized_shape).astype(np.float32)
+ Y, mean, inv_std_dev = _layer_normalization(X, W, B, axis=axis)
+
+ node = onnx.helper.make_node(
+ "LayerNormalization",
+ inputs=["X", "W", "B"],
+ outputs=["Y", "Mean", "InvStdDev"],
+ axis=axis,
+ )
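+ # Carried over from the ONNX reference test but unused here: make_test
+ # below drives the Cairo codegen directly from the numpy reference output.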
+
+ x = Tensor(Dtype.FP16x16, X.shape, to_fp(X.flatten(), FixedImpl.FP16x16))
+ w = Tensor(Dtype.FP16x16, W.shape, to_fp(W.flatten(), FixedImpl.FP16x16))
+ b = Tensor(Dtype.FP16x16, B.shape, to_fp(B.flatten(), FixedImpl.FP16x16))
+ y = Tensor(Dtype.FP16x16, Y.shape, to_fp(Y.flatten(), FixedImpl.FP16x16))
+
+ name = "layer_normalization_test"
+ make_test([x,w,b], y, "input_0.layer_normalization(@input_1,Option::Some(@input_2),Option::None,Option::None,Option::None)", name)
+
+ case(-1)
\ No newline at end of file
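
_layer_normalization folds all axes from `axis` onward into one column dimension before normalizing, which is equivalent to taking the mean and population variance over those trailing axes directly. A quick consistency check against that direct formulation (assuming the function defined above is importable, and float tolerance):

    import numpy as np

    X = np.random.randn(2, 3, 4)
    mean = X.mean(axis=(1, 2), keepdims=True)
    var = X.var(axis=(1, 2), keepdims=True)          # population variance, ddof=0
    direct = (X - mean) / np.sqrt(var + 1e-5)
    Y, _, _ = _layer_normalization(X, np.ones((3, 4)), np.zeros((3, 4)), axis=1)
    assert np.allclose(Y, direct)
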
diff --git a/nodegen/node/reduce_l2.py b/nodegen/node/reduce_l2.py
index 081125f54..67c8cfa10 100644
--- a/nodegen/node/reduce_l2.py
+++ b/nodegen/node/reduce_l2.py
@@ -4,6 +4,7 @@
import numpy as np
+
class Reduce_l2(RunAll):
@staticmethod
def reduce_l2_fp8x23():
@@ -107,4 +108,29 @@ def reduce_l2_axis_0():
reduce_l2_export_do_not_keepdims()
reduce_l2_export_keepdims()
+ reduce_l2_axis_0()
+
+ @staticmethod
+ def reduce_l2_complex64():
+
+ def reduce_l2_axis_0():
+ shape = [2, 3]
+ axes = np.array([0], dtype=np.int64)
+ keepdims = True
+ x = np.reshape(np.array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j, 4.-1.j]), shape)
+ y = np.sqrt(np.sum(a=np.square(abs(x)), axis=tuple(axes), keepdims=keepdims))
+
+ x = Tensor(Dtype.COMPLEX64, x.shape, to_fp(
+ x.flatten(), FixedImpl.FP64x64))
+
+ y = Tensor(Dtype.COMPLEX64, y.shape, to_fp(
+ y.flatten(), FixedImpl.FP64x64))
+
+ name = "reduce_l2_complex64_axis_0"
+ make_test(
+ [x], y, "input_0.reduce_l2(0, true)", name)
+
reduce_l2_axis_0()
\ No newline at end of file
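
For complex input, reduce_l2 sums |x|² along the axis and takes the square root, so the expected values can be checked by hand. For the first column of the test data above, |1+2j|² = 5 and |3-2j|² = 13, giving √18 ≈ 4.2426:

    import numpy as np

    x = np.array([[1.+2.j, 2.-1.j, 3.-3.j],
                  [3.-2.j, 3.+5.j, 4.-1.j]])
    y = np.sqrt(np.sum(np.square(np.abs(x)), axis=0, keepdims=True))
    assert np.isclose(y[0, 0], np.sqrt(18.0))
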
diff --git a/nodegen/node/resize.py b/nodegen/node/resize.py
new file mode 100644
index 000000000..65cafa9ba
--- /dev/null
+++ b/nodegen/node/resize.py
@@ -0,0 +1,2204 @@
+# Python test implementation from the ONNX library: https://github.com/onnx/onnx/blob/main/onnx/reference/ops/op_resize.py
+
+import numpy as np
+from typing import Any, Callable
+
+from nodegen.node import RunAll
+from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
+
+
+def _cartesian(arrays: list[np.ndarray], out: np.ndarray | None = None) -> np.ndarray:
+ # From https://stackoverflow.com/a/1235363
+ arrays = [np.asarray(x) for x in arrays]
+ dtype = arrays[0].dtype
+
+ n = np.prod([x.size for x in arrays])
+ if out is None:
+ out = np.zeros([n, len(arrays)], dtype=dtype)
+
+ m = n // arrays[0].size
+ out[:, 0] = np.repeat(arrays[0], m)
+ if arrays[1:]:
+ _cartesian(arrays[1:], out=out[0:m, 1:])
+ for j in range(1, arrays[0].size):
+ out[j * m : (j + 1) * m, 1:] = out[0:m, 1:]
+ return out
+
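+# e.g. _cartesian([[0, 1], [0, 1, 2]]) enumerates index tuples row-wise:
+# [[0,0],[0,1],[0,2],[1,0],[1,1],[1,2]]; _get_all_coords below uses this to
+# visit every coordinate of the output tensor.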
+
+def _get_neighbor_idxes(x: float, n: int, limit: int) -> np.ndarray:
+ idxes = sorted(range(limit), key=lambda idx: (abs(x - idx), idx))[:n]
+ idxes = sorted(idxes)
+ return np.array(idxes)
+
+
+def _get_neighbor(x: float, n: int, data: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
+
+ pad_width = np.ceil(n / 2).astype(int)
+ padded = np.pad(data, pad_width, mode="edge")
+ x += pad_width
+
+ idxes = _get_neighbor_idxes(x, n, len(padded))
+
+
+ ret = padded[idxes]
+ return idxes - pad_width, ret
+
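+# e.g. with data = [10, 20, 30], _get_neighbor(0.4, 2, data) edge-pads the
+# array, picks the two indexes closest to x = 0.4, and returns
+# (array([0, 1]), array([10, 20])); indexes that fall outside the original
+# range are later zeroed out when exclude_outside is set.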
+def linear_coeffs(ratio: float, scale: float | None = None) -> np.ndarray:
+ del scale
+ return np.array([1 - ratio, ratio])
+
+
+def linear_coeffs_antialias(ratio: float, scale: float) -> np.ndarray:
+ scale = min(scale, 1.0)
+
+ start = int(np.floor(-1 / scale) + 1)
+ footprint = 2 - 2 * start
+ args = (np.arange(start, start + footprint) - ratio) * scale
+ coeffs = np.clip(1 - np.abs(args), 0, 1)
+
+ return np.array(coeffs) / sum(coeffs)
+
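+# When downscaling, the triangle filter is widened by 1/scale and renormalized:
+# e.g. linear_coeffs_antialias(0.5, 0.5) yields four taps
+# [0.125, 0.375, 0.375, 0.125] instead of the two-tap [0.5, 0.5].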
+def cubic_coeffs_antialias(ratio: float, scale: float, A: float = -0.75) -> np.ndarray:
+ scale = min(scale, 1.0)
+
+ def compute_coeff(x: float) -> float:
+ x = abs(x)
+ x_2 = x * x
+ x_3 = x * x_2
+ if x <= 1:
+ return (A + 2) * x_3 - (A + 3) * x_2 + 1
+ if x < 2:
+ return A * x_3 - 5 * A * x_2 + 8 * A * x - 4 * A
+ return 0.0
+
+ i_start = int(np.floor(-2 / scale) + 1)
+ i_end = 2 - i_start
+ args = [scale * (i - ratio) for i in range(i_start, i_end)]
+ coeffs = [compute_coeff(x) for x in args]
+ return np.array(coeffs) / sum(coeffs)
+
+def nearest_coeffs(
+ ratio: float | int | np.ndarray, mode: str = "round_prefer_floor"
+) -> np.ndarray:
+ if isinstance(ratio, int) or ratio.is_integer():
+ return np.array([0, 1])
+ if mode == "round_prefer_floor":
+ return np.array([ratio <= 0.5, ratio > 0.5])
+ if mode == "round_prefer_ceil":
+ return np.array([ratio < 0.5, ratio >= 0.5])
+ if mode == "floor":
+ return np.array([1, 0])
+ if mode == "ceil":
+ return np.array([0, 1])
+ raise ValueError(f"Unexpected value {mode!r}.")
+
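+# The returned pair weights the (floor, ceil) neighbors: nearest_coeffs(0.3)
+# gives [True, False] (pick floor) under round_prefer_floor, while
+# nearest_coeffs(0.5, mode="round_prefer_ceil") gives [False, True];
+# integral ratios always resolve to [0, 1].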
+
+
+def _interpolate_1d_with_x(
+ data: np.ndarray,
+ scale_factor: float,
+ output_width_int: int,
+ x: float,
+ get_coeffs: Callable[[float, float], np.ndarray],
+ roi: np.ndarray | None = None,
+ extrapolation_value: float = 0.0,
+ coordinate_transformation_mode: str = "half_pixel",
+ exclude_outside: bool = False,
+) -> np.ndarray:
+
+ input_width = len(data)
+ output_width = scale_factor * input_width
+
+ if coordinate_transformation_mode == "align_corners":
+ if output_width == 1:
+ x_ori = 0.0
+ else:
+ x_ori = x * (input_width - 1) / (output_width - 1)
+ elif coordinate_transformation_mode == "asymmetric":
+ x_ori = x / scale_factor
+ elif coordinate_transformation_mode == "tf_crop_and_resize":
+ if roi is None:
+ raise ValueError("roi cannot be None.")
+ if output_width == 1:
+ x_ori = (roi[1] - roi[0]) * (input_width - 1) / 2
+ else:
+ x_ori = x * (roi[1] - roi[0]) * (input_width - 1) / (output_width - 1)
+ x_ori += roi[0] * (input_width - 1)
+
+ if x_ori < 0 or x_ori > input_width - 1:
+ return np.array(extrapolation_value)
+ elif coordinate_transformation_mode == "pytorch_half_pixel":
+ if output_width == 1:
+ x_ori = -0.5
+ else:
+ x_ori = (x + 0.5) / scale_factor - 0.5
+ elif coordinate_transformation_mode == "half_pixel":
+ x_ori = (x + 0.5) / scale_factor - 0.5
+ elif coordinate_transformation_mode == "half_pixel_symmetric":
+ adjustment = output_width_int / output_width
+ center = input_width / 2
+ offset = center * (1 - adjustment)
+ x_ori = offset + (x + 0.5) / scale_factor - 0.5
+ else:
+ raise ValueError(
+ f"Invalid coordinate_transformation_mode: {coordinate_transformation_mode!r}."
+ )
+
+ x_ori_int = np.floor(x_ori).astype(int).item()
+
+ if x_ori.is_integer():
+ ratio = 1
+ else:
+ ratio = x_ori - x_ori_int
+
+ coeffs = get_coeffs(ratio, scale_factor)
+ n = len(coeffs)
+
+ idxes, points = _get_neighbor(x_ori, n, data)
+
+ if exclude_outside:
+ for i, idx in enumerate(idxes):
+ if idx < 0 or idx >= input_width:
+ coeffs[i] = 0
+ coeffs /= sum(coeffs)
+
+ return np.dot(coeffs, points).item()
+
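+# Worked example, half_pixel: upscaling a width-2 row by 2 maps output x = 1
+# to x_ori = (1 + 0.5) / 2 - 0.5 = 0.25, so linear_coeffs(0.25) blends the
+# two input neighbors with weights [0.75, 0.25].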
+
+def _interpolate_nd_with_x(
+ data: np.ndarray,
+ n: int,
+ scale_factors: list[float],
+ output_size: list[int],
+ x: list[float],
+ get_coeffs: Callable[[float, float], np.ndarray],
+ roi: np.ndarray | None = None,
+ exclude_outside: bool = False,
+ **kwargs: Any,
+) -> np.ndarray:
+
+ if n == 1:
+ return _interpolate_1d_with_x(
+ data,
+ scale_factors[0],
+ output_size[0],
+ x[0],
+ get_coeffs,
+ roi=roi,
+ exclude_outside=exclude_outside,
+ **kwargs,
+ )
+ res1d = []
+
+ for i in range(data.shape[0]):
+ r = _interpolate_nd_with_x(
+ data[i],
+ n - 1,
+ scale_factors[1:],
+ output_size[1:],
+ x[1:],
+ get_coeffs,
+ roi=None if roi is None else np.concatenate([roi[1:n], roi[n + 1 :]]),
+ exclude_outside=exclude_outside,
+ **kwargs,
+ )
+ res1d.append(r)
+
+
+ return _interpolate_1d_with_x(
+ res1d,
+ scale_factors[0],
+ output_size[0],
+ x[0],
+ get_coeffs,
+ roi=None if roi is None else [roi[0], roi[n]],
+ exclude_outside=exclude_outside,
+ **kwargs,
+ )
+
+
+def _get_all_coords(data: np.ndarray) -> np.ndarray:
+ return _cartesian(
+ [list(range(data.shape[i])) for i in range(len(data.shape))]
+ )
+
+
+def interpolate_nd(
+ data: np.ndarray,
+ get_coeffs: Callable[[float, float], np.ndarray],
+ output_size: list[int] | None = None,
+ scale_factors: list[float] | None = None,
+ axes: list[int] | None = None,
+ roi: np.ndarray | None = None,
+ keep_aspect_ratio_policy: str | None = "stretch",
+ exclude_outside: bool = False,
+ **kwargs: Any,
+) -> np.ndarray:
+ if output_size is None and scale_factors is None:
+ raise ValueError("output_size is None and scale_factors is None.")
+
+ r = len(data.shape)
+ if axes is not None:
+ if scale_factors is not None:
+ new_scale_factors = [1.0] * r
+ for i, d in enumerate(axes):
+ new_scale_factors[d] = scale_factors[i]
+ scale_factors = new_scale_factors
+
+ if output_size is not None:
+ new_output_size = [data.shape[i] for i in range(r)]
+ for i, d in enumerate(axes):
+ new_output_size[d] = output_size[i]
+ output_size = new_output_size
+
+
+ if roi is not None:
+ new_roi = ([0.0] * r) + ([1.0] * r)
+ naxes = len(axes)
+ for i, d in enumerate(axes):
+ new_roi[d] = roi[i]
+ new_roi[r + d] = roi[naxes + i]
+ roi = new_roi
+ else:
+ axes = list(range(r))
+
+ if output_size is not None:
+ scale_factors = [output_size[i] / data.shape[i] for i in range(r)]
+ if keep_aspect_ratio_policy != "stretch":
+ if keep_aspect_ratio_policy == "not_larger":
+ scale = np.array(scale_factors)[axes].min()
+ elif keep_aspect_ratio_policy == "not_smaller":
+ scale = np.array(scale_factors)[axes].max()
+ else:
+ raise ValueError(
+ f"Invalid keep_aspect_ratio_policy={keep_aspect_ratio_policy!r}"
+ )
+
+ scale_factors = [scale if i in axes else 1.0 for i in range(r)]
+
+ def round_half_up(x: float) -> int:
+ return int(x + 0.5)
+
+ output_size = [
+ round_half_up(scale * data.shape[i]) if i in axes else data.shape[i]
+ for i in range(r)
+ ]
+
+ else:
+ output_size = (scale_factors * np.array(data.shape)).astype(int)
+ if scale_factors is None:
+ raise ValueError("scale_factors is None.")
+ if output_size is None:
+ raise ValueError("output_size is None.")
+
+ ret = np.zeros(output_size)
+ for x in _get_all_coords(ret):
+ ret[tuple(x)] = _interpolate_nd_with_x(
+ data,
+ len(data.shape),
+ scale_factors,
+ output_size,
+ x,
+ get_coeffs,
+ roi=roi,
+ exclude_outside=exclude_outside,
+ **kwargs,
+ )
+ return ret
+
+
+def cubic_coeffs(
+ ratio: float, scale: float | None = None, A: float = -0.75
+) -> np.ndarray:
+ del scale # Unused
+ coeffs = [
+ ((A * (ratio + 1) - 5 * A) * (ratio + 1) + 8 * A) * (ratio + 1) - 4 * A,
+ ((A + 2) * ratio - (A + 3)) * ratio * ratio + 1,
+ ((A + 2) * (1 - ratio) - (A + 3)) * (1 - ratio) * (1 - ratio) + 1,
+ ((A * ((1 - ratio) + 1) - 5 * A) * ((1 - ratio) + 1) + 8 * A)
+ * ((1 - ratio) + 1)
+ - 4 * A,
+ ]
+ return np.array(coeffs)
+
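+# Keys cubic kernel sanity check: the four weights sum to 1 for any ratio,
+# and at ratio == 0 they reduce to [0, 1, 0, 0] (the sample itself).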
+
+
+
+class Resize(RunAll):
+
+ @staticmethod
+ def resize_upsample_scales_nearest() -> None:
+
+ data = np.array(
+ [
+ [
+ [
+ [1, 2],
+ [3, 4],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)
+
+ output = interpolate_nd(
+ data, lambda x, _: nearest_coeffs(x), scale_factors=scales
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_scales_nearest"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::HALF_PIXEL_SYMMETRIC),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_downsample_scales_nearest() -> None:
+
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
+
+ output = interpolate_nd(
+ data, lambda x, _: nearest_coeffs(x), scale_factors=scales
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_downsample_scales_nearest"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_upsample_sizes_nearest() -> None:
+
+ data = np.array(
+ [
+ [
+ [
+ [1, 2],
+ [3, 4],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([1, 1, 7, 8], dtype=np.int64)
+
+ output = interpolate_nd(
+ data, lambda x, _: nearest_coeffs(x), output_size=sizes
+ ).astype(np.float32)
+
+ x = [data, sizes]
+ y = output
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_sizes_nearest"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_downsample_sizes_nearest() -> None:
+
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([1, 1, 1, 3], dtype=np.int64)
+
+ output = interpolate_nd(
+ data, lambda x, _: nearest_coeffs(x), output_size=sizes
+ ).astype(np.float32)
+
+ x = [data, sizes]
+ y = output
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_downsample_sizes_nearest"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+ @staticmethod
+ def resize_upsample_scales_linear() -> None:
+
+ data = np.array(
+ [
+ [
+ [
+ [1, 2],
+ [3, 4],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
+
+ output = interpolate_nd(
+ data, lambda x, _: linear_coeffs(x, None), scale_factors=scales
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_scales_linear"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_upsample_scales_linear_align_corners() -> None:
+
+ data = np.array(
+ [
+ [
+ [
+ [1, 2],
+ [3, 4],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: linear_coeffs(x, None),
+ scale_factors=scales,
+ coordinate_transformation_mode="align_corners",
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_scales_linear_align_corners"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::ALIGN_CORNERS),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_downsample_scales_linear() -> None:
+
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
+
+ output = interpolate_nd(
+ data, lambda x, _: linear_coeffs(x, None), scale_factors=scales
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_scales_linear"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+ @staticmethod
+ def resize_downsample_scales_linear_align_corners() -> None:
+
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: linear_coeffs(x, None),
+ scale_factors=scales,
+ coordinate_transformation_mode="align_corners",
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_downsample_scales_linear_align_corners"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::ALIGN_CORNERS),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+ @staticmethod
+ def resize_upsample_scales_cubic() -> None:
+
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
+
+ output = interpolate_nd(
+ data, lambda x, _: cubic_coeffs(x, None), scale_factors=scales
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_scales_cubic"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::CUBIC),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+ @staticmethod
+ def resize_upsample_scales_cubic_align_corners() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: cubic_coeffs(x),
+ scale_factors=scales,
+ coordinate_transformation_mode="align_corners",
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_scales_cubic_align_corners"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::ALIGN_CORNERS),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::CUBIC),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+ @staticmethod
+ def resize_downsample_scales_cubic() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
+ output = interpolate_nd(
+ data, lambda x, _: cubic_coeffs(x), scale_factors=scales
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_downsample_scales_cubic"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::CUBIC),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_downsample_scales_cubic_align_corners() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: cubic_coeffs(x),
+ scale_factors=scales,
+ coordinate_transformation_mode="align_corners",
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_downsample_scales_cubic_align_corners"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::ALIGN_CORNERS),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::CUBIC),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_upsample_sizes_cubic() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([1, 1, 9, 10], dtype=np.int64)
+ output = interpolate_nd(
+ data, lambda x, _: cubic_coeffs(x), output_size=sizes
+ ).astype(np.float32)
+
+ x = [data, sizes]
+ y = output
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_sizes_cubic"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::CUBIC),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_downsample_sizes_cubic() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([1, 1, 3, 3], dtype=np.int64)
+
+ output = interpolate_nd(
+ data, lambda x, _: cubic_coeffs(x), output_size=sizes
+ ).astype(np.float32)
+
+ x = [data, sizes]
+ y = output
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_downsample_sizes_cubic"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::CUBIC),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_upsample_scales_cubic_A_n0p5_exclude_outside() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
+ output = interpolate_nd(
+ data,
+ lambda x, _: cubic_coeffs(x, A=-0.5),
+ scale_factors=scales,
+ exclude_outside=True,
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_scales_cubic_A_n0p5_exclude_outside"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(FixedTrait::::new(32768, true)),"
+ func_sig += "Option::Some(true),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::CUBIC),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_downsample_scales_cubic_A_n0p5_exclude_outside() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
+ output = interpolate_nd(
+ data,
+ lambda x, _: cubic_coeffs(x, A=-0.5),
+ scale_factors=scales,
+ exclude_outside=True,
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_downsample_scales_cubic_A_n0p5_exclude_outside"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(FixedTrait::::new(32768, true)),"
+ func_sig += "Option::Some(true),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::CUBIC),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+
+ @staticmethod
+ def resize_upsample_scales_cubic_asymmetric() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: cubic_coeffs(x, A=-0.75),
+ scale_factors=scales,
+ coordinate_transformation_mode="asymmetric",
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_scales_cubic_asymmetric"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::ASYMMETRIC),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::CUBIC),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+
+ @staticmethod
+ def resize_tf_crop_and_resize() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+ roi = np.array([0, 0, 0.4, 0.6, 1, 1, 0.6, 0.8], dtype=np.float32)
+ sizes = np.array([1, 1, 3, 3], dtype=np.int64)
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: linear_coeffs(x),
+ output_size=sizes,
+ roi=roi,
+ coordinate_transformation_mode="tf_crop_and_resize",
+ ).astype(np.float32)
+ x = [data, sizes, roi]
+ y = output
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+ x[2] = Tensor(Dtype.FP16x16, x[2].shape, to_fp(x[2].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+ name = "resize_tf_crop_and_resize"
+ func_sig = "data.resize("
+ func_sig += "roi,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::TF_CROP_AND_RESIZE),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1], x[2]], y, func_sig, name)
+
+ @staticmethod
+ def resize_tf_crop_and_resize_extrapolation_value() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ roi = np.array([0, 0, 0.4, 0.6, 1, 1, 1.2, 1.7], dtype=np.float32)
+ sizes = np.array([1, 1, 3, 3], dtype=np.int64)
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: linear_coeffs(x),
+ output_size=sizes,
+ roi=roi,
+ coordinate_transformation_mode="tf_crop_and_resize",
+ extrapolation_value=10.0,
+ ).astype(np.float32)
+
+ x = [data, sizes, roi]
+ y = output
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+ x[2] = Tensor(Dtype.FP16x16, x[2].shape, to_fp(x[2].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+ name = "resize_tf_crop_and_resize_extrapolation_value"
+ func_sig = "data.resize("
+ func_sig += "roi,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::TF_CROP_AND_RESIZE),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(FixedTrait::::new(655360, false)),"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1], x[2]], y, func_sig, name)
+
+ @staticmethod
+ def resize_downsample_sizes_linear_pytorch_half_pixel() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([1, 1, 3, 1], dtype=np.int64)
+ output = interpolate_nd(
+ data,
+ lambda x, _: linear_coeffs(x),
+ output_size=sizes,
+ coordinate_transformation_mode="pytorch_half_pixel",
+ ).astype(np.float32)
+
+ x = [data, sizes]
+ y = output
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+ name = "resize_downsample_sizes_linear_pytorch_half_pixel"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::PYTORCH_HALF_PIXEL),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+
+ @staticmethod
+ def resize_upsample_sizes_nearest_floor_align_corners() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([1, 1, 8, 8], dtype=np.int64)
+ output = interpolate_nd(
+ data,
+ lambda x, _: nearest_coeffs(x, mode="floor"),
+ output_size=sizes,
+ coordinate_transformation_mode="align_corners",
+ ).astype(np.float32)
+
+ x = [data, sizes]
+ y = output
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+ name = "resize_upsample_sizes_nearest_floor_align_corners"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::ALIGN_CORNERS),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::Some(NEAREST_MODE::FLOOR),)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+ @staticmethod
+ def resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([1, 1, 8, 8], dtype=np.int64)
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: nearest_coeffs(x, mode="round_prefer_ceil"),
+ output_size=sizes,
+ coordinate_transformation_mode="asymmetric",
+ ).astype(np.float32)
+
+ x = [data, sizes]
+ y = output
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+ name = "resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::ASYMMETRIC),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::Some(NEAREST_MODE::ROUND_PREFER_CEIL),)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_upsample_sizes_nearest_ceil_half_pixel() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([1, 1, 8, 8], dtype=np.int64)
+
+ output = interpolate_nd(
+ data, lambda x, _: nearest_coeffs(x, mode="ceil"), output_size=sizes
+ ).astype(np.float32)
+
+ x = [data, sizes]
+ y = output
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+ name = "resize_upsample_sizes_nearest_ceil_half_pixel"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::HALF_PIXEL),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::Some(NEAREST_MODE::CEIL),)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+ @staticmethod
+ def resize_downsample_scales_linear_antialias() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
+
+ output = interpolate_nd(
+ data, linear_coeffs_antialias, scale_factors=scales
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_downsample_scales_linear_antialias"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(1),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+ @staticmethod
+ def resize_downsample_sizes_linear_antialias() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([1, 1, 3, 3], dtype=np.int64)
+
+ output = interpolate_nd(
+ data, linear_coeffs_antialias, output_size=sizes
+ ).astype(np.float32)
+
+ x = [data, sizes]
+ y = output
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+ name = "resize_downsample_sizes_linear_pytorch_half_pixel"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::Some(1),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_downsample_scales_cubic_antialias() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
+
+ output = interpolate_nd(
+ data, cubic_coeffs_antialias, scale_factors=scales
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_downsample_scales_cubic_antialias"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(1),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::CUBIC),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+ @staticmethod
+ def resize_downsample_sizes_cubic_antialias() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([1, 1, 3, 3], dtype=np.int64)
+
+ output = interpolate_nd(data, cubic_coeffs_antialias, output_size=sizes).astype(
+ np.float32
+ )
+ x = [data, sizes]
+ y = output
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+ name = "resize_downsample_sizes_cubic_antialias"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::Some(1),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::CUBIC),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+ @staticmethod
+ def resize_upsample_scales_nearest_axes_2_3() -> None:
+ axes = np.array([2, 3], dtype=np.int64)
+ data = np.array(
+ [
+ [
+ [
+ [1, 2],
+ [3, 4],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([2.0, 3.0], dtype=np.float32)
+
+ output = interpolate_nd(
+ data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes
+ ).astype(np.float32)
+
+ x = [data, scales, axes]
+ y = output
+
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.FP16x16, x[1].shape, to_fp(x[1].flatten(), FixedImpl.FP16x16))
+ x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_scales_nearest_axes_2_3"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "axes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1], x[2]], y, func_sig, name)
+
+ @staticmethod
+ def resize_upsample_scales_nearest_axes_3_2() -> None:
+ axes = np.array([3, 2], dtype=np.int64)
+ data = np.array(
+ [
+ [
+ [
+ [1, 2],
+ [3, 4],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ scales = np.array([3.0, 2.0], dtype=np.float32)
+
+ output = interpolate_nd(
+ data, lambda x, _: nearest_coeffs(x), scale_factors=scales, axes=axes
+ ).astype(np.float32)
+ x = [data, scales, axes]
+ y = output
+
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.FP16x16, x[1].shape, to_fp(x[1].flatten(), FixedImpl.FP16x16))
+ x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_scales_nearest_axes_3_2"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "axes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1], x[2]], y, func_sig, name)
+
+ @staticmethod
+ def resize_upsample_sizes_nearest_axes_2_3() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2],
+ [3, 4],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([7, 8], dtype=np.int64)
+ axes = np.array([2, 3], dtype=np.int64)
+
+ output = interpolate_nd(
+ data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes
+ ).astype(np.float32)
+
+ x = [data, sizes, axes]
+ y = output
+
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+ x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_sizes_nearest_axes_2_3"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "axes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1], x[2]], y, func_sig, name)
+
+ @staticmethod
+ def resize_upsample_sizes_nearest_axes_3_2() -> None:
+ data = np.array(
+ [
+ [
+ [
+ [1, 2],
+ [3, 4],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([8, 7], dtype=np.int64)
+ axes = np.array([3, 2], dtype=np.int64)
+
+ output = interpolate_nd(
+ data, lambda x, _: nearest_coeffs(x), output_size=sizes, axes=axes
+ ).astype(np.float32)
+
+ x = [data, sizes, axes]
+ y = output
+
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+ x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_sizes_nearest_axes_3_2"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "axes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1], x[2]], y, func_sig, name)
+
+
+ @staticmethod
+ def resize_tf_crop_and_resize_axes_2_3() -> None:
+ axes = np.array([2, 3], dtype=np.int64)
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ roi = np.array([0.4, 0.6, 0.6, 0.8], dtype=np.float32)
+ sizes = np.array([3, 3], dtype=np.int64)
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: linear_coeffs(x),
+ output_size=sizes,
+ roi=roi,
+ axes=axes,
+ coordinate_transformation_mode="tf_crop_and_resize",
+ ).astype(np.float32)
+
+ x = [data, sizes, roi, axes]
+ y = output
+
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+ x[2] = Tensor(Dtype.FP16x16, x[2].shape, to_fp(x[2].flatten(), FixedImpl.FP16x16))
+ x[3] = Tensor(Dtype.U32, x[3].shape, x[3].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_tf_crop_and_resize_axes_2_3"
+ func_sig = "data.resize("
+ func_sig += "roi,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "axes,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::TF_CROP_AND_RESIZE),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1], x[2], x[3]], y, func_sig, name)
+
+ @staticmethod
+ def resize_tf_crop_and_resize_axes_3_2() -> None:
+ axes = np.array([3, 2], dtype=np.int64)
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ roi = np.array([0.6, 0.4, 0.8, 0.6], dtype=np.float32)
+ sizes = np.array([3, 3], dtype=np.int64)
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: linear_coeffs(x),
+ output_size=sizes,
+ roi=roi,
+ axes=axes,
+ coordinate_transformation_mode="tf_crop_and_resize",
+ ).astype(np.float32)
+
+ x = [data, sizes, roi, axes]
+ y = output
+
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+ x[2] = Tensor(Dtype.FP16x16, x[2].shape, to_fp(x[2].flatten(), FixedImpl.FP16x16))
+ x[3] = Tensor(Dtype.U32, x[3].shape, x[3].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_tf_crop_and_resize_axes_3_2"
+ func_sig = "data.resize("
+ func_sig += "roi,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "axes,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::TF_CROP_AND_RESIZE),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1], x[2], x[3]], y, func_sig, name)
+
+
+
+ @staticmethod
+ def resize_upsample_sizes_nearest_not_larger() -> None:
+ keep_aspect_ratio_policy = "not_larger"
+ axes = np.array([2, 3], dtype=np.int64)
+ data = np.array(
+ [
+ [
+ [
+ [1, 2],
+ [3, 4],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([7, 8], dtype=np.int64)
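+ # not_larger keeps the smaller scale (3.5x on a 2x2 input), so the result is 7x7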
+ output = interpolate_nd(
+ data,
+ lambda x, _: nearest_coeffs(x),
+ output_size=sizes,
+ axes=axes,
+ keep_aspect_ratio_policy=keep_aspect_ratio_policy,
+ ).astype(np.float32)
+
+ x = [data, sizes, axes]
+ y = output
+
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+ x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_sizes_nearest_not_larger"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "axes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(KEEP_ASPECT_RATIO_POLICY::NOT_LARGER),"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1], x[2]], y, func_sig, name)
+
+
+
+ @staticmethod
+ def resize_upsample_sizes_nearest_not_smaller() -> None:
+ keep_aspect_ratio_policy = "not_smaller"
+ axes = np.array([2, 3], dtype=np.int64)
+ data = np.array(
+ [
+ [
+ [
+ [1, 2],
+ [3, 4],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([7, 8], dtype=np.int64) # Results in 8x8
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: nearest_coeffs(x),
+ output_size=sizes,
+ axes=axes,
+ keep_aspect_ratio_policy=keep_aspect_ratio_policy,
+ ).astype(np.float32)
+
+ x = [data, sizes, axes]
+ y = output
+
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+ x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_sizes_nearest_not_smaller"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "axes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(KEEP_ASPECT_RATIO_POLICY::NOT_SMALLER),"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1], x[2]], y, func_sig, name)
+
+
+
+
+ @staticmethod
+ def resize_downsample_sizes_nearest_not_larger() -> None:
+ keep_aspect_ratio_policy = "not_larger"
+ axes = np.array([2, 3], dtype=np.int64)
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([1, 3], dtype=np.int64)
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: nearest_coeffs(x),
+ output_size=sizes,
+ axes=axes,
+ keep_aspect_ratio_policy=keep_aspect_ratio_policy,
+ ).astype(np.float32)
+
+ x = [data, sizes, axes]
+ y = output
+
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+ x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_downsample_sizes_nearest_not_larger"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "axes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(KEEP_ASPECT_RATIO_POLICY::NOT_LARGER),"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1], x[2]], y, func_sig, name)
+
+
+
+ @staticmethod
+ def resize_downsample_sizes_nearest_not_smaller() -> None:
+ keep_aspect_ratio_policy = "not_smaller"
+ axes = np.array([2, 3], dtype=np.int64)
+
+ data = np.array(
+ [
+ [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ]
+ ]
+ ],
+ dtype=np.float32,
+ )
+
+ sizes = np.array([1, 3], dtype=np.int64)
+ output = interpolate_nd(
+ data,
+ lambda x, _: nearest_coeffs(x),
+ output_size=sizes,
+ axes=axes,
+ keep_aspect_ratio_policy=keep_aspect_ratio_policy,
+ ).astype(np.float32)
+
+ x = [data, sizes, axes]
+ y = output
+
+ x[0] = Tensor(Dtype.FP16x16, x[0].shape, to_fp(x[0].flatten(), FixedImpl.FP16x16))
+ x[1] = Tensor(Dtype.U32, x[1].shape, x[1].flatten())
+ x[2] = Tensor(Dtype.U32, x[2].shape, x[2].flatten())
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_downsample_sizes_nearest_not_smaller"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "sizes,"
+ func_sig += "Option::None,"
+ func_sig += "axes,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(KEEP_ASPECT_RATIO_POLICY::NOT_SMALLER),"
+ func_sig += "Option::Some(MODE::NEAREST),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1], x[2]], y, func_sig, name)
+
+
+
+
+ @staticmethod
+ def resize_downsample_scales_linear_half_pixel_symmetric() -> None:
+ data = np.array([[[[1, 2, 3, 4]]]], dtype=np.float32)
+ scales = np.array([1.0, 1.0, 1.0, 0.6], dtype=np.float32)
+
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: linear_coeffs(x),
+ scale_factors=scales,
+ coordinate_transformation_mode="half_pixel_symmetric",
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_downsample_scales_linear_half_pixel_symmetric"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::HALF_PIXEL_SYMMETRIC),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
+
+ @staticmethod
+ def resize_upsample_scales_linear_half_pixel_symmetric() -> None:
+ data = np.array([[[[1, 2], [3, 4]]]], dtype=np.float32)
+ scales = np.array([1.0, 1.0, 2.3, 2.94], dtype=np.float32)
+
+ output = interpolate_nd(
+ data,
+ lambda x, _: linear_coeffs(x),
+ scale_factors=scales,
+ coordinate_transformation_mode="half_pixel_symmetric",
+ ).astype(np.float32)
+
+ x = [data, scales]
+ y = output
+ for i in range(len(x)):
+ x[i] = Tensor(Dtype.FP16x16, x[i].shape, to_fp(x[i].flatten(), FixedImpl.FP16x16))
+
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
+
+ name = "resize_upsample_scales_linear_half_pixel_symmetric"
+ func_sig = "data.resize("
+ func_sig += "Option::None,"
+ func_sig += "scales,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(TRANSFORMATION_MODE::HALF_PIXEL_SYMMETRIC),"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::None,"
+ func_sig += "Option::Some(MODE::LINEAR),"
+ func_sig += "Option::None,)"
+ make_test([x[0], x[1]], y, func_sig, name)
+
+
diff --git a/nodegen/node/sequence_at.py b/nodegen/node/sequence_at.py
index b65ac5dae..a108ad52a 100644
--- a/nodegen/node/sequence_at.py
+++ b/nodegen/node/sequence_at.py
@@ -1,6 +1,6 @@
import numpy as np
from nodegen.node import RunAll
-from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
+from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
scalar = lambda x: Tensor(Dtype.I32, (), np.array([x]).astype(np.int32).flatten())
@@ -23,7 +23,7 @@ def positive_position():
position = scalar(2)
name = "sequence_at_u32_positive"
- make_test([sequence, position], sequence[2], "TensorTrait::sequence_at(input_0, input_1)", name)
+ make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
def negative_position():
sequence = []
@@ -38,7 +38,7 @@ def negative_position():
position = scalar(-2)
name = "sequence_at_u32_negative"
- make_test([sequence, position], sequence[-2], "TensorTrait::sequence_at(input_0, input_1)", name)
+ make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
positive_position()
negative_position()
@@ -59,7 +59,7 @@ def positive_position():
position = scalar(2)
name = "sequence_at_i32_positive"
- make_test([sequence, position], sequence[2], "TensorTrait::sequence_at(input_0, input_1)", name)
+ make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
def negative_position():
sequence = []
@@ -74,7 +74,7 @@ def negative_position():
position = scalar(-2)
name = "sequence_at_i32_negative"
- make_test([sequence, position], sequence[-2], "TensorTrait::sequence_at(input_0, input_1)", name)
+ make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
positive_position()
negative_position()
@@ -95,7 +95,7 @@ def positive_position():
position = scalar(2)
name = "sequence_at_i8_positive"
- make_test([sequence, position], sequence[2], "TensorTrait::sequence_at(input_0, input_1)", name)
+ make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
def negative_position():
sequence = []
@@ -110,7 +110,7 @@ def negative_position():
position = scalar(-2)
name = "sequence_at_i8_negative"
- make_test([sequence, position], sequence[-2], "TensorTrait::sequence_at(input_0, input_1)", name)
+ make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
positive_position()
negative_position()
@@ -131,7 +131,7 @@ def positive_position():
position = scalar(2)
name = "sequence_at_fp8x23_positive"
- make_test([sequence, position], sequence[2], "TensorTrait::sequence_at(input_0, input_1)", name)
+ make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
def negative_position():
sequence = []
@@ -146,7 +146,7 @@ def negative_position():
position = scalar(-2)
name = "sequence_at_fp8x23_negative"
- make_test([sequence, position], sequence[-2], "TensorTrait::sequence_at(input_0, input_1)", name)
+ make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
positive_position()
negative_position()
@@ -167,7 +167,7 @@ def positive_position():
position = scalar(2)
name = "sequence_at_fp16x16_positive"
- make_test([sequence, position], sequence[2], "TensorTrait::sequence_at(input_0, input_1)", name)
+ make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
def negative_position():
sequence = []
@@ -182,7 +182,7 @@ def negative_position():
position = scalar(-2)
name = "sequence_at_fp16x16_negative"
- make_test([sequence, position], sequence[-2], "TensorTrait::sequence_at(input_0, input_1)", name)
+ make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
positive_position()
negative_position()
diff --git a/nodegen/node/sequence_construct.py b/nodegen/node/sequence_construct.py
index 7c76d1532..fba919f56 100644
--- a/nodegen/node/sequence_construct.py
+++ b/nodegen/node/sequence_construct.py
@@ -1,6 +1,6 @@
import numpy as np
from nodegen.node import RunAll
-from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
+from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
class Sequence_construct(RunAll):
@@ -18,7 +18,7 @@ def sequence_construct_u32():
sequence.append(tensor)
name = "sequence_construct_u32"
- make_test([sequence], sequence, "TensorTrait::sequence_construct(input_0)", name)
+ make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)
@staticmethod
@@ -34,7 +34,7 @@ def sequence_construct_i32():
sequence.append(tensor)
name = "sequence_construct_i32"
- make_test([sequence], sequence, "TensorTrait::sequence_construct(input_0)", name)
+ make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)
@staticmethod
@@ -50,7 +50,7 @@ def sequence_construct_i8():
sequence.append(tensor)
name = "sequence_construct_i8"
- make_test([sequence], sequence, "TensorTrait::sequence_construct(input_0)", name)
+ make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)
@staticmethod
@@ -66,7 +66,7 @@ def sequence_construct_fp8x23():
sequence.append(tensor)
name = "sequence_construct_fp8x23"
- make_test([sequence], sequence, "TensorTrait::sequence_construct(input_0)", name)
+ make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)
@staticmethod
@@ -82,4 +82,4 @@ def sequence_construct_fp16x16():
sequence.append(tensor)
name = "sequence_construct_fp16x16"
- make_test([sequence], sequence, "TensorTrait::sequence_construct(input_0)", name)
+ make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)
diff --git a/nodegen/node/sequence_empty.py b/nodegen/node/sequence_empty.py
index 91dc78dbc..779859fa5 100644
--- a/nodegen/node/sequence_empty.py
+++ b/nodegen/node/sequence_empty.py
@@ -1,6 +1,6 @@
import numpy as np
from nodegen.node import RunAll
-from ..helpers import make_test, Dtype, Tensor
+from ..helpers import make_test, Dtype, Tensor, Trait
class Sequence_empty(RunAll):
@@ -14,8 +14,9 @@ def default():
make_test(
inputs=[],
output=[t],
- func_sig="TensorTrait::sequence_empty()",
+ func_sig="SequenceTrait::sequence_empty()",
name="sequence_empty_u32",
+ trait=Trait.SEQUENCE
)
default()
@@ -29,8 +30,9 @@ def default():
make_test(
inputs=[],
output=[t],
- func_sig="TensorTrait::sequence_empty()",
+ func_sig="SequenceTrait::sequence_empty()",
name="sequence_empty_i32",
+ trait=Trait.SEQUENCE
)
default()
@@ -44,8 +46,9 @@ def default():
make_test(
inputs=[],
output=[t],
- func_sig="TensorTrait::sequence_empty()",
+ func_sig="SequenceTrait::sequence_empty()",
name="sequence_empty_i8",
+ trait=Trait.SEQUENCE
)
default()
@@ -59,8 +62,9 @@ def default():
make_test(
inputs=[],
output=[t],
- func_sig="TensorTrait::sequence_empty()",
+ func_sig="SequenceTrait::sequence_empty()",
name="sequence_empty_fp8x23",
+ trait=Trait.SEQUENCE
)
default()
@@ -74,8 +78,9 @@ def default():
make_test(
inputs=[],
output=[t],
- func_sig="TensorTrait::sequence_empty()",
+ func_sig="SequenceTrait::sequence_empty()",
name="sequence_empty_fp16x16",
+ trait=Trait.SEQUENCE
)
default()
diff --git a/nodegen/node/sequence_erase.py b/nodegen/node/sequence_erase.py
index 8f8f07c46..a57b2e7d2 100644
--- a/nodegen/node/sequence_erase.py
+++ b/nodegen/node/sequence_erase.py
@@ -1,6 +1,6 @@
import numpy as np
from nodegen.node import RunAll
-from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
+from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
scalar = lambda x: Tensor(Dtype.I32, (), np.array([x]).astype(np.int32).flatten())
@@ -26,7 +26,7 @@ def positive_position():
output_sequence.pop(2)
name = "sequence_erase_u32_positive"
- make_test([sequence, position], output_sequence, "TensorTrait::sequence_erase(input_0, Option::Some(input_1))", name)
+ make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def negative_position():
sequence = []
@@ -44,7 +44,7 @@ def negative_position():
output_sequence.pop(-2)
name = "sequence_erase_u32_negative"
- make_test([sequence, position], output_sequence, "TensorTrait::sequence_erase(input_0, Option::Some(input_1))", name)
+ make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def empty_position():
sequence = []
@@ -60,7 +60,7 @@ def empty_position():
output_sequence.pop(-1)
name = "sequence_erase_u32_empty"
- make_test([sequence], output_sequence, "TensorTrait::sequence_erase(input_0, Option::None(()))", name)
+ make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)
positive_position()
negative_position()
@@ -85,7 +85,7 @@ def positive_position():
output_sequence.pop(2)
name = "sequence_erase_i32_positive"
- make_test([sequence, position], output_sequence, "TensorTrait::sequence_erase(input_0, Option::Some(input_1))", name)
+ make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def negative_position():
sequence = []
@@ -103,7 +103,7 @@ def negative_position():
output_sequence.pop(-2)
name = "sequence_erase_i32_negative"
- make_test([sequence, position], output_sequence, "TensorTrait::sequence_erase(input_0, Option::Some(input_1))", name)
+ make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def empty_position():
sequence = []
@@ -119,7 +119,7 @@ def empty_position():
output_sequence.pop(-1)
name = "sequence_erase_i32_empty"
- make_test([sequence], output_sequence, "TensorTrait::sequence_erase(input_0, Option::None(()))", name)
+ make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)
positive_position()
negative_position()
@@ -144,7 +144,7 @@ def positive_position():
output_sequence.pop(2)
name = "sequence_erase_i8_positive"
- make_test([sequence, position], output_sequence, "TensorTrait::sequence_erase(input_0, Option::Some(input_1))", name)
+ make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def negative_position():
sequence = []
@@ -162,7 +162,7 @@ def negative_position():
output_sequence.pop(-2)
name = "sequence_erase_i8_negative"
- make_test([sequence, position], output_sequence, "TensorTrait::sequence_erase(input_0, Option::Some(input_1))", name)
+ make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def empty_position():
sequence = []
@@ -178,7 +178,7 @@ def empty_position():
output_sequence.pop(-1)
name = "sequence_erase_i8_empty"
- make_test([sequence], output_sequence, "TensorTrait::sequence_erase(input_0, Option::None(()))", name)
+ make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)
positive_position()
negative_position()
@@ -203,7 +203,7 @@ def positive_position():
output_sequence.pop(2)
name = "sequence_erase_fp8x23_positive"
- make_test([sequence, position], output_sequence, "TensorTrait::sequence_erase(input_0, Option::Some(input_1))", name)
+ make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def negative_position():
sequence = []
@@ -221,7 +221,7 @@ def negative_position():
output_sequence.pop(-2)
name = "sequence_erase_fp8x23_negative"
- make_test([sequence, position], output_sequence, "TensorTrait::sequence_erase(input_0, Option::Some(input_1))", name)
+ make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def empty_position():
sequence = []
@@ -237,7 +237,7 @@ def empty_position():
output_sequence.pop(-1)
name = "sequence_erase_fp8x23_empty"
- make_test([sequence], output_sequence, "TensorTrait::sequence_erase(input_0, Option::None(()))", name)
+ make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)
positive_position()
negative_position()
@@ -262,7 +262,7 @@ def positive_position():
output_sequence.pop(2)
name = "sequence_erase_fp16x16_positive"
- make_test([sequence, position], output_sequence, "TensorTrait::sequence_erase(input_0, Option::Some(input_1))", name)
+ make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def negative_position():
sequence = []
@@ -280,7 +280,7 @@ def negative_position():
output_sequence.pop(-2)
name = "sequence_erase_fp16x16_negative"
- make_test([sequence, position], output_sequence, "TensorTrait::sequence_erase(input_0, Option::Some(input_1))", name)
+ make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def empty_position():
sequence = []
@@ -296,7 +296,7 @@ def empty_position():
output_sequence.pop(-1)
name = "sequence_erase_fp16x16_empty"
- make_test([sequence], output_sequence, "TensorTrait::sequence_erase(input_0, Option::None(()))", name)
+ make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)
positive_position()
negative_position()
diff --git a/nodegen/node/sequence_insert.py b/nodegen/node/sequence_insert.py
index 9cc2ceb82..e4ae7be55 100644
--- a/nodegen/node/sequence_insert.py
+++ b/nodegen/node/sequence_insert.py
@@ -1,6 +1,6 @@
import numpy as np
from nodegen.node import RunAll
-from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
+from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
scalar = lambda x: Tensor(Dtype.I32, (), np.array([x]).astype(np.int32).flatten())
@@ -30,7 +30,7 @@ def default():
expected_sequence.insert(position, tensor)
name = "sequence_insert_u32"
- make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name)
+ make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)
default()
@@ -56,7 +56,7 @@ def default():
expected_sequence.insert(position, tensor)
name = "sequence_insert_i32"
- make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name)
+ make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)
default()
@@ -82,7 +82,7 @@ def default():
expected_sequence.insert(position, tensor)
name = "sequence_insert_i8"
- make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name)
+ make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)
default()
@@ -110,7 +110,7 @@ def default():
expected_sequence.insert(position, tensor)
name = "sequence_insert_fp8x23"
- make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name)
+ make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)
default()
@@ -138,6 +138,6 @@ def default():
expected_sequence.insert(position, tensor)
name = "sequence_insert_fp16x16"
- make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name)
+ make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)
default()
diff --git a/nodegen/node/sequence_length.py b/nodegen/node/sequence_length.py
index 87f0dcd01..7b8993309 100644
--- a/nodegen/node/sequence_length.py
+++ b/nodegen/node/sequence_length.py
@@ -1,6 +1,6 @@
import numpy as np
from nodegen.node import RunAll
-from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
+from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
scalar = lambda x: Tensor(Dtype.U32, (), np.array([x]).astype(np.uint32).flatten())
@@ -21,7 +21,7 @@ def default():
sequence.append(tensor)
name = "sequence_length_u32"
- make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name)
+ make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
def broadcast():
sequence = []
@@ -35,7 +35,7 @@ def broadcast():
sequence.append(tensor)
name = "sequence_length_u32_broadcast"
- make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name)
+ make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
default()
broadcast()
@@ -54,7 +54,7 @@ def default():
sequence.append(tensor)
name = "sequence_length_i32"
- make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name)
+ make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
def broadcast():
sequence = []
@@ -68,7 +68,7 @@ def broadcast():
sequence.append(tensor)
name = "sequence_length_i32_broadcast"
- make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name)
+ make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
default()
broadcast()
@@ -87,7 +87,7 @@ def default():
sequence.append(tensor)
name = "sequence_length_i8"
- make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name)
+ make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
def broadcast():
sequence = []
@@ -101,7 +101,7 @@ def broadcast():
sequence.append(tensor)
name = "sequence_length_i8_broadcast"
- make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name)
+ make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
default()
broadcast()
@@ -120,7 +120,7 @@ def default():
sequence.append(tensor)
name = "sequence_length_fp8x23"
- make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name)
+ make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
def broadcast():
sequence = []
@@ -134,7 +134,7 @@ def broadcast():
sequence.append(tensor)
name = "sequence_length_fp8x23_broadcast"
- make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name)
+ make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
default()
broadcast()
@@ -153,7 +153,7 @@ def default():
sequence.append(tensor)
name = "sequence_length_fp16x16"
- make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name)
+ make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
def broadcast():
sequence = []
@@ -167,7 +167,7 @@ def broadcast():
sequence.append(tensor)
name = "sequence_length_fp16x16_broadcast"
- make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name)
+ make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
default()
broadcast()
diff --git a/nodegen/node/softmax_zero.py b/nodegen/node/softmax_zero.py
new file mode 100644
index 000000000..40e5528cb
--- /dev/null
+++ b/nodegen/node/softmax_zero.py
@@ -0,0 +1,48 @@
+import numpy as np
+from nodegen.node import RunAll
+from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
+
+
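+# Reference implementation of softmax_zero: a softmax along `axis` in which
+# entries that are exactly zero stay zero and are excluded from the
+# normalizing sum; an all-zero slice keeps a denominator of 1 to avoid 0/0.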
+def softmax_zero(x: np.ndarray, axis: int = -1) -> np.ndarray:
+ x_max = np.max(x, axis=axis, keepdims=True)
+ tmp = np.exp(x - x_max)
+ tmp = np.where(x == 0.0, 0.0, tmp)
+
+ s = np.sum(tmp, axis=axis, keepdims=True)
+ s = np.where(s == 0.0, 1, s)
+
+
+ return tmp / s
+
+
+class Softmax_zero(RunAll):
+
+
+ @staticmethod
+ def fp8x23():
+ x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
+ y = softmax_zero(x)
+
+ x = Tensor(Dtype.FP8x23, x.shape, to_fp(
+ x.flatten(), FixedImpl.FP8x23))
+ y = Tensor(Dtype.FP8x23, y.shape, to_fp(
+ y.flatten(), FixedImpl.FP8x23))
+
+ name = "softmax_zero_fp8x23"
+ make_test([x], y, "NNTrait::softmax_zero(@input_0, 1)",
+ name, Trait.NN)
+
+ @staticmethod
+ def fp16x16():
+ x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
+ y = softmax_zero(x)
+
+ x = Tensor(Dtype.FP16x16, x.shape, to_fp(
+ x.flatten(), FixedImpl.FP16x16))
+ y = Tensor(Dtype.FP16x16, y.shape, to_fp(
+ y.flatten(), FixedImpl.FP16x16))
+
+ name = "softmax_zero_fp16x16"
+ make_test([x], y, "NNTrait::softmax_zero(@input_0, 1)",
+ name, Trait.NN)
+
diff --git a/nodegen/node/split.py b/nodegen/node/split.py
new file mode 100644
index 000000000..8e765141e
--- /dev/null
+++ b/nodegen/node/split.py
@@ -0,0 +1,261 @@
+import numpy as np
+from nodegen.node import RunAll
+from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
+
+
+class Split(RunAll):
+ @staticmethod
+ def split_u32():
+ def split_1D():
+ x = np.random.randint(0, 255, 6).astype(np.uint32)
+ y = [
+ np.array(x[0:2]).astype(np.uint32),
+ np.array(x[2:4]).astype(np.uint32),
+ np.array(x[4:6]).astype(np.uint32),
+ ]
+
+ _x = Tensor(Dtype.U32, x.shape, x.flatten())
+ _y = [
+ Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
+ Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
+ ]
+
+ name = "split_u32_1d_equal_parts"
+ make_test(
+ [_x], _y, "input_0.split(0, Option::Some(3), Option::None(()))", name)
+ y = [
+ np.array(x[0:2]).astype(np.uint32),
+ np.array(x[2:6]).astype(np.uint32),
+ ]
+ _y = [
+ Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
+ ]
+ name = "split_u32_1d_variable_parts"
+ make_test(
+ [_x], _y, "input_0.split(0, Option::None(()), Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)
+ def split_2D():
+ x = np.random.randint(0, 255, (2, 6)).astype(np.uint32)
+ y = [
+ np.array(x[0:2, 0:3]).astype(np.uint32),
+ np.array(x[0:2, 3:6]).astype(np.uint32),
+ ]
+ _x = Tensor(Dtype.U32, x.shape, x.flatten())
+ _y = [
+ Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
+ ]
+ name = "split_u32_2d_equal_parts"
+ make_test(
+ [_x], _y, "input_0.split(1, Option::Some(2), Option::None(()))", name)
+
+ y = [
+ np.array(x[0:2, 0:2]).astype(np.uint32),
+ np.array(x[0:2, 2:6]).astype(np.uint32)
+ ]
+ _y = [
+ Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
+ ]
+ name = "split_u32_2d_variable_parts"
+ make_test(
+ [_x], _y, "input_0.split(1, Option::None(()), Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)
+
+ def split_zero_size():
+ # 1-dimensional tensor with dimension_size=0
+ x = np.array([]).astype(np.uint32)
+ y = [
+ np.array([]).astype(np.uint32),
+ np.array([]).astype(np.uint32),
+ np.array([]).astype(np.uint32),
+ ]
+ _x = Tensor(Dtype.U32, x.shape, x.flatten())
+ _y = [
+ Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
+ Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
+ ]
+ # Split empty tensor into tensors of size zero
+ name = "split_u32_zero_size"
+ make_test(
+ [_x], _y, "input_0.split(0, Option::None(()), Option::Some(TensorTrait::::new(shape: array![3].span(), data: array![0, 0, 0].span(),)))", name)
+
+
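+ # With num_outputs=4 on a length-7 axis the chunks are ceil(7/4)=2 wide
+ # and the last chunk takes the remainder: sizes 2, 2, 2, 1 (see slices below).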
+ def split_1d_uneven():
+ x = np.random.randint(0, 255, 7).astype(np.uint32)
+ y = [
+ np.array(x[0:2]).astype(np.uint32),
+ np.array(x[2:4]).astype(np.uint32),
+ np.array(x[4:6]).astype(np.uint32),
+ np.array(x[6:7]).astype(np.uint32),
+ ]
+
+ _x = Tensor(Dtype.U32, x.shape, x.flatten())
+ _y = [
+ Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
+ Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
+ Tensor(Dtype.U32, y[3].shape, y[3].flatten()),
+ ]
+
+ name = "split_u32_1d_uneven"
+ make_test(
+ [_x], _y, "input_0.split(0, Option::Some(4), Option::None(()))", name)
+
+
+ def split_2d_uneven():
+ x = np.random.randint(0, 255, (2, 8)).astype(np.uint32)
+ y = [
+ np.array(x[0:2, 0:3]).astype(np.uint32),
+ np.array(x[0:2, 3:6]).astype(np.uint32),
+ np.array(x[0:2, 6:8]).astype(np.uint32)
+ ]
+ _x = Tensor(Dtype.U32, x.shape, x.flatten())
+ _y = [
+ Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
+ Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
+ ]
+
+ name = "split_u32_2d_uneven"
+ make_test(
+ [_x], _y, "input_0.split(1, Option::Some(3), Option::None(()))", name)
+
+
+ split_1D()
+ split_2D()
+ split_zero_size()
+ split_1d_uneven()
+ split_2d_uneven()
+
+ @staticmethod
+ def split_fp16x16():
+ def split_1D():
+ x = to_fp(np.random.randint(-127, 127, 6).astype(np.int64), FixedImpl.FP16x16)
+ y = [
+ np.array(x[0:2]).astype(np.int64),
+ np.array(x[2:4]).astype(np.int64),
+ np.array(x[4:6]).astype(np.int64),
+ ]
+
+ _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
+ _y = [
+ Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
+ Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
+ ]
+
+ name = "split_fp16x16_1d_equal_parts"
+ make_test(
+ [_x], _y, "input_0.split(0, Option::Some(3), Option::None(()))", name)
+ y = [
+ np.array(x[0:2]).astype(np.int64),
+ np.array(x[2:6]).astype(np.int64),
+ ]
+ _y = [
+ Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
+ ]
+ name = "split_fp16x16_1d_variable_parts"
+ make_test(
+ [_x], _y, "input_0.split(0, Option::None(()), Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)
+ def split_2D():
+ x = to_fp(np.random.randint(-127, 127, (2, 6)).astype(np.int64), FixedImpl.FP16x16)
+ y = [
+ np.array(x[0:2, 0:3]).astype(np.int64),
+ np.array(x[0:2, 3:6]).astype(np.int64),
+ ]
+ _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
+ _y = [
+ Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
+ ]
+ name = "split_fp16x16_2d_equal_parts"
+ make_test(
+ [_x], _y, "input_0.split(1, Option::Some(2), Option::None(()))", name)
+
+ y = [
+ np.array(x[0:2, 0:2]).astype(np.int64),
+ np.array(x[0:2, 2:6]).astype(np.int64)
+ ]
+ _y = [
+ Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
+ ]
+ name = "split_fp16x16_2d_variable_parts"
+ make_test(
+ [_x], _y, "input_0.split(1, Option::None(()), Option::Some(TensorTrait::::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)
+
+ def split_zero_size():
+ # 1-dimensional tensor with dimension_size=0
+ x = to_fp(np.array([]).astype(np.int64), FixedImpl.FP16x16)
+ y = [
+ np.array([]).astype(np.int64),
+ np.array([]).astype(np.int64),
+ np.array([]).astype(np.int64),
+ ]
+ _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
+ _y = [
+ Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
+ Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
+ ]
+ # Split empty tensor into tensors of size zero
+ name = "split_fp16x16_zero_size"
+ make_test(
+ [_x], _y, "input_0.split(0, Option::None(()), Option::Some(TensorTrait::::new(shape: array![3].span(), data: array![0, 0, 0].span(),)))", name)
+
+
+ def split_1d_uneven():
+ x = to_fp(np.random.randint(-127, 127, 7).astype(np.int64), FixedImpl.FP16x16)
+ y = [
+ np.array(x[0:2]).astype(np.int64),
+ np.array(x[2:4]).astype(np.int64),
+ np.array(x[4:6]).astype(np.int64),
+ np.array(x[6:7]).astype(np.int64),
+ ]
+
+ _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
+ _y = [
+ Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
+ Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
+ Tensor(Dtype.FP16x16, y[3].shape, y[3].flatten()),
+ ]
+
+ name = "split_fp16x16_1d_uneven"
+ make_test(
+ [_x], _y, "input_0.split(0, Option::Some(4), Option::None(()))", name)
+
+
+ def split_2d_uneven():
+ x = to_fp(np.random.randint(-127, 127, (2, 8)).astype(np.int64), FixedImpl.FP16x16)
+ y = [
+ np.array(x[0:2, 0:3]).astype(np.int64),
+ np.array(x[0:2, 3:6]).astype(np.int64),
+ np.array(x[0:2, 6:8]).astype(np.int64)
+ ]
+ _x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
+ _y = [
+ Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
+ Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
+ Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
+ ]
+
+ name = "split_fp16x16_2d_uneven"
+ make_test(
+ [_x], _y, "input_0.split(1, Option::Some(3), Option::None(()))", name)
+
+
+ split_1D()
+ split_2D()
+ split_zero_size()
+ split_1d_uneven()
+ split_2d_uneven()
+
\ No newline at end of file
diff --git a/src/numbers/complex_number/complex64.cairo b/src/numbers/complex_number/complex64.cairo
index fe7b70581..c3b649c6d 100644
--- a/src/numbers/complex_number/complex64.cairo
+++ b/src/numbers/complex_number/complex64.cairo
@@ -73,7 +73,12 @@ impl Complex64Impl of ComplexTrait {
let y = self.img;
let two = FP64x64Impl::new(TWO, false);
let real = (((x.pow(two) + y.pow(two)).sqrt() + x) / two).sqrt();
- let img = (((x.pow(two) + y.pow(two)).sqrt() - x) / two).sqrt();
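+ // Keep the imaginary part exactly zero for purely real inputs instead of
+ // recomputing it through the general square-root formula.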
+ let img = if y == FP64x64Impl::ZERO() {
+ FP64x64Impl::ZERO()
+ } else {
+ (((x.pow(two) + y.pow(two)).sqrt() - x) / two).sqrt()
+ };
+
let img = FP64x64Impl::new(img.mag, y.sign);
complex64 { real, img }
}
diff --git a/src/operators.cairo b/src/operators.cairo
index 1ca6cdee2..f125386a2 100644
--- a/src/operators.cairo
+++ b/src/operators.cairo
@@ -3,3 +3,4 @@ mod nn;
mod ml;
mod matrix;
mod vec;
+mod sequence;
diff --git a/src/operators/ml.cairo b/src/operators/ml.cairo
index 4bfd10060..724664216 100644
--- a/src/operators/ml.cairo
+++ b/src/operators/ml.cairo
@@ -1,5 +1,6 @@
mod tree_ensemble;
mod linear;
+mod svm;
use orion::operators::ml::tree_ensemble::core::{
TreeEnsemble, TreeEnsembleAttributes, TreeEnsembleImpl, NODE_MODES
@@ -11,4 +12,11 @@ use orion::operators::ml::tree_ensemble::tree_ensemble_classifier::{
use orion::operators::ml::tree_ensemble::tree_ensemble_regressor::{
TreeEnsembleRegressor, TreeEnsembleRegressorImpl, TreeEnsembleRegressorTrait, AGGREGATE_FUNCTION
};
-use orion::operators::ml::linear::linear_regressor::{LinearRegressorTrait, LinearRegressorImpl, LinearRegressor};
+
+use orion::operators::ml::linear::linear_regressor::{
+ LinearRegressorTrait, LinearRegressorImpl, LinearRegressor
+};
+
+use orion::operators::ml::linear::linear_classifier::{
+ LinearClassifierTrait, LinearClassifierImpl, LinearClassifier
+};
diff --git a/src/operators/ml/linear.cairo b/src/operators/ml/linear.cairo
index bc8062bd1..ef33bae6b 100644
--- a/src/operators/ml/linear.cairo
+++ b/src/operators/ml/linear.cairo
@@ -1 +1,2 @@
mod linear_regressor;
+mod linear_classifier;
diff --git a/src/operators/ml/linear/linear_classifier.cairo b/src/operators/ml/linear/linear_classifier.cairo
new file mode 100644
index 000000000..fad7ea2d4
--- /dev/null
+++ b/src/operators/ml/linear/linear_classifier.cairo
@@ -0,0 +1,284 @@
+use core::array::ArrayTrait;
+use core::array::SpanTrait;
+use orion::numbers::FP16x16;
+
+use orion::operators::tensor::{Tensor, TensorTrait};
+use orion::numbers::NumberTrait;
+use orion::operators::tensor::{I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, FP16x16TensorAdd};
+use orion::numbers::{FP32x32, FP32x32Impl, FixedTrait};
+use orion::operators::nn::{NNTrait, FP16x16NN};
+
+
+#[derive(Destruct)]
+struct LinearClassifier<T> {
+ classlabels: Option<Span<usize>>,
+ coefficients: Span<T>,
+ intercepts: Option<Span<T>>,
+ multi_class: usize,
+ post_transform: POST_TRANSFORM,
+}
+
+
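+// Post-transform applied to the raw scores; mirrors the post_transform
+// attribute of the ONNX ai.onnx.ml LinearClassifier: NONE (identity),
+// SOFTMAX, LOGISTIC (sigmoid), SOFTMAXZERO (softmax that leaves zero
+// entries at zero), PROBIT.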
+#[derive(Copy, Drop)]
+enum POST_TRANSFORM {
+ NONE,
+ SOFTMAX,
+ LOGISTIC,
+ SOFTMAXZERO,
+ PROBIT,
+}
+
+/// Trait
+///
+/// predict - Performs the linear classification.
+trait LinearClassifierTrait<T> {
+ /// # LinearClassifierTrait::predict
+ ///
+ /// ```rust
+ /// fn predict(ref self: LinearClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>);
+ /// ```
+ ///
+ /// Linear Classifier. Performs the linear classification.
+ ///
+ /// ## Args
+ ///
+ /// * `self`: LinearClassifier - A LinearClassifier object.
+ /// * `X`: Input 2D tensor.
+ ///
+ /// ## Returns
+ ///
+ /// * The predicted labels as a `Span<usize>` and a `Tensor<T>` containing the linear classification scores for the input X.
+ ///
+ /// ## Type Constraints
+ ///
+ /// `LinearClassifier` and `X` must use a fixed point type.
+ ///
+ /// ## Examples
+ ///
+ /// ```rust
+ /// use orion::numbers::FP16x16;
+ /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor};
+ ///
+ /// use orion::operators::ml::linear::linear_classifier::{
+ /// LinearClassifierTrait, POST_TRANSFORM, LinearClassifier
+ /// };
+ ///
+ /// fn linear_classifier_helper(
+ /// post_transform: POST_TRANSFORM
+ /// ) -> (LinearClassifier<FP16x16>, Tensor<FP16x16>) {
+ ///
+ /// let classlabels: Span<usize> = array![0, 1, 2].span();
+ /// let classlabels = Option::Some(classlabels);
+ ///
+ /// let classlabels_strings: Option<Span<FP16x16>> = Option::None;
+ ///
+ /// let coefficients: Span<FP16x16> = array![
+ /// FP16x16 { mag: 38011, sign: true },
+ /// FP16x16 { mag: 19005, sign: true },
+ /// FP16x16 { mag: 5898, sign: true },
+ /// FP16x16 { mag: 38011, sign: false },
+ /// FP16x16 { mag: 19005, sign: false },
+ /// FP16x16 { mag: 5898, sign: false },
+ /// ]
+ /// .span();
+ ///
+ /// let intercepts: Span<FP16x16> = array![
+ /// FP16x16 { mag: 176947, sign: false },
+ /// FP16x16 { mag: 176947, sign: true },
+ /// FP16x16 { mag: 32768, sign: false },
+ /// ]
+ /// .span();
+ /// let intercepts = Option::Some(intercepts);
+ ///
+ /// let multi_class: usize = 0;
+ ///
+ /// let mut classifier: LinearClassifier<FP16x16> = LinearClassifier {
+ /// classlabels,
+ /// coefficients,
+ /// intercepts,
+ /// multi_class,
+ /// post_transform
+ /// };
+ ///
+ /// let mut X: Tensor<FP16x16> = TensorTrait::new(
+ /// array![3, 2].span(),
+ /// array![
+ /// FP16x16 { mag: 0, sign: false },
+ /// FP16x16 { mag: 65536, sign: false },
+ /// FP16x16 { mag: 131072, sign: false },
+ /// FP16x16 { mag: 196608, sign: false },
+ /// FP16x16 { mag: 262144, sign: false },
+ /// FP16x16 { mag: 327680, sign: false },
+ /// ]
+ /// .span()
+ /// );
+ ///
+ /// (classifier, X)
+ /// }
+ ///
+ /// fn linear_classifier_multi_softmax() -> (Span<usize>, Tensor<FP16x16>) {
+ /// let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX);
+ ///
+ /// let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X);
+ ///
+ /// (labels, scores)
+ /// }
+ ///
+ /// >>>
+ /// ([0, 2, 2],
+ /// [
+ /// [0.852656, 0.009192, 0.138152],
+ /// [0.318722, 0.05216, 0.629118],
+ /// [0.036323, 0.090237, 0.87344]
+ /// ])
+ /// ```
+ fn predict(ref self: LinearClassifier<T>, X: Tensor<T>) -> (Span<usize>, Tensor<T>);
+}
+
+impl LinearClassifierImpl<
+ T,
+ MAG,
+ +Drop<T>,
+ +Copy<T>,
+ +NumberTrait<T, MAG>,
+ +PartialOrd<T>,