Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

DRAFT CFe fuse Mul Add to Fullyconnected #13415

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion compiler/luci-pass-value-py-test/test.lst
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,14 @@
# Format:
# eval(MODEL PASS)
# MODEL: tflite model file name in build/compiler/common-artifacts folder.
# PASS: Optimization Pass to test. Supports only one Pass for now.
# PASS: Optimization Pass to test. Supports one or more Passes.
#

# eval(Net_Preactivation_BN_000 fuse_preactivation_batchnorm) : value diff exist
# --> https://github.com/Samsung/ONE/issues/5782
eval(FullyConnected_007 replace_non_const_fc_with_batch_matmul)
eval(HardSwish_001 decompose_hardswish)
eval(Mul_002 fold_mul)
eval(Net_Add_FloorMod_Gather_000 remove_gather_guard)
eval(Net_Add_FullyConnected_000 fuse_add_to_fullyconnected_bias)
eval(Net_Add_FullyConnected_001 fuse_add_to_fullyconnected_bias)
Expand Down
9 changes: 7 additions & 2 deletions compiler/luci-pass-value-py-test/test_luci_eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,8 +95,13 @@ def luci_eval_verify(test_name,
assert np.allclose(
luci_output_data, intp_output_data, rtol=rtolint, atol=atolint), err_msg
elif output_details["dtype"] == np.float32:
assert np.allclose(
luci_output_data, intp_output_data, rtol=rtolf32, atol=atolf32), err_msg
diff_comp = np.allclose(
luci_output_data, intp_output_data, rtol=rtolf32, atol=atolf32)
if not diff_comp:
print("\r\ntflite:\r\n", intp_output_data, flush=True)
print("\r\ncircle:\r\n", luci_output_data, flush=True)
print("\r\nDiff:\r\n", intp_output_data - luci_output_data, flush=True)
assert diff_comp, err_msg
elif output_details["dtype"] == np.int64:
assert np.allclose(
luci_output_data, intp_output_data, rtol=rtolint, atol=atolint), err_msg
Expand Down
35 changes: 35 additions & 0 deletions res/TensorFlowLiteRecipes/Mul_002/test.recipe
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
operand {
name: "ifm1"
type: FLOAT32
shape { dim: 3 dim: 1 dim: 5 }
filler {
tag: "gaussian"
arg: "0.0"
arg: "1.0"
}
}
operand {
name: "ifm2"
type: FLOAT32
shape { dim: 5 }
filler {
tag: "gaussian"
arg: "0.0"
arg: "1.0"
}
}
operand {
name: "ofm"
type: FLOAT32
shape { dim: 3 dim: 1 dim: 5 }
}
operation {
type: "Mul"
input: "ifm1"
input: "ifm2"
output: "ofm"
mul_options {
activation: NONE
}
}
output: "ofm"