Skip to content

Commit

Permalink
DRAFT CFE fix S64 paddings in Pad
Browse files Browse the repository at this point in the history
Ongoing draft to fix S64 paddings in the Pad operator.

Signed-off-by: SaeHie Park <[email protected]>
  • Loading branch information
seanshpark committed Jul 24, 2024
1 parent 8a76a55 commit 3b04d5c
Show file tree
Hide file tree
Showing 5 changed files with 184 additions and 3 deletions.
38 changes: 38 additions & 0 deletions compiler/luci/pass/include/luci/Pass/CanonicalizePass.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef __LUCI_CANONICALIZE_PASS_H__
#define __LUCI_CANONICALIZE_PASS_H__

#include <logo/Pass.h>

namespace luci
{

/**
 * @brief Class to canonicalize CircleNodes
 *
 * Rewrites nodes into a preferred normal form before other passes run,
 * e.g. converting S64 constant paddings of Pad into S32.
 */
struct CanonicalizePass final : public logo::Pass
{
  const char *name(void) const final { return "luci::CanonicalizePass"; }

  bool run(loco::Graph *g) final;
};

} // namespace luci

#endif // __LUCI_CANONICALIZE_PASS_H__
99 changes: 99 additions & 0 deletions compiler/luci/pass/src/CanonicalizePass.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
/*
* Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "luci/Pass/CanonicalizePass.h"

#include <luci/IR/CircleNodes.h>

#include <loco/IR/DataType.h>

#include <limits>

// Return false from the enclosing function when `condition` does not hold.
// Wrapped in do/while(false) so the macro expands to a single statement and
// stays safe inside un-braced if/else (avoids the dangling-else hazard of a
// bare `if` macro).
#define CHECK_OR_FALSE(condition) \
  do                              \
  {                               \
    if (not(condition))           \
      return false;               \
  } while (false)

namespace
{

/**
 * Convert the S64 CircleConst paddings of `pad` to an equivalent S32 constant.
 *
 * @return true when the paddings input was replaced with a new S32 constant;
 *         false when conversion is not applicable (paddings is not a
 *         CircleConst, is not S64, or holds a value outside int32_t range).
 */
bool paddings_to_s32(luci::CirclePad *pad)
{
  // check conditions
  auto paddings = dynamic_cast<luci::CircleConst *>(pad->paddings());
  CHECK_OR_FALSE(paddings);
  CHECK_OR_FALSE(paddings->dtype() == loco::DataType::S64);

  // TODO relocate to helpers/CreateCircleConst.h when necessary
  const auto num_elements = paddings->size<loco::DataType::S64>();
  // int32_t bounds are loop-invariant; INT32_MAX/INT32_MIN themselves are
  // representable, so the comparison must be inclusive (<=, >=)
  constexpr auto hval = static_cast<int64_t>(std::numeric_limits<int32_t>::max());
  constexpr auto lval = static_cast<int64_t>(std::numeric_limits<int32_t>::lowest());
  for (uint32_t i = 0; i < num_elements; i++)
  {
    auto v64 = paddings->at<loco::DataType::S64>(i);
    CHECK_OR_FALSE(v64 <= hval);
    CHECK_OR_FALSE(v64 >= lval);
  }

  // build the replacement constant with identical name (suffixed), shape
  // and values, but S32 dtype
  auto paddings_s32 = pad->graph()->nodes()->create<luci::CircleConst>();
  paddings_s32->name(paddings->name() + "_S32");
  paddings_s32->dtype(loco::DataType::S32);
  paddings_s32->rank(paddings->rank());
  for (uint32_t i = 0; i < paddings->rank(); i++)
    paddings_s32->dim(i).set(paddings->dim(i).value());
  paddings_s32->shape_status(luci::ShapeStatus::VALID);

  paddings_s32->size<loco::DataType::S32>(num_elements);
  for (uint32_t i = 0; i < num_elements; i++)
  {
    auto v64 = paddings->at<loco::DataType::S64>(i);
    // safe: every element was range-checked above
    paddings_s32->at<loco::DataType::S32>(i) = static_cast<int32_t>(v64);
  }

  // replace paddings with S32 dtype
  pad->paddings(paddings_s32);

  return true;
}

} // namespace

namespace luci
{

/**
 * Canonicalize circle nodes
 *
 * Walks every node reachable from the graph outputs and applies the
 * available canonicalization rewrites.
 */
bool CanonicalizePass::run(loco::Graph *g)
{
  bool graph_changed = false;

  for (auto node : loco::active_nodes(loco::output_nodes(g)))
  {
    auto pad_node = dynamic_cast<luci::CirclePad *>(node);
    if (pad_node != nullptr)
    {
      // call first so the rewrite is attempted regardless of prior changes
      graph_changed = paddings_to_s32(pad_node) || graph_changed;
    }

    // TODO add more canonicalization
  }

  return graph_changed;
}

} // namespace luci
4 changes: 4 additions & 0 deletions compiler/luci/pass/src/CircleOptimizer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@

#include "luci/CircleOptimizer.h"

#include "luci/Pass/CanonicalizePass.h"
#include "luci/Pass/ConvertNCHWToNHWCPass.h"
#include "luci/Pass/CommonSubExpressionEliminationPass.h"
#include "luci/Pass/ExpandBroadcastConstPass.h"
Expand Down Expand Up @@ -260,6 +261,9 @@ void CircleOptimizer::optimize(loco::Graph *g) const
phase.emplace_back(std::make_unique<luci::CircleShapeInferencePass>());
phase.emplace_back(std::make_unique<luci::CircleTypeInferencePass>());

// Run canonicalization
phase.emplace_back(std::make_unique<luci::CanonicalizePass>());

if (_options->query(Options::Algorithm::CommonSubExpressionElimination))
{
phase.emplace_back(std::make_unique<luci::CommonSubExpressionEliminationPass>());
Expand Down
15 changes: 12 additions & 3 deletions compiler/luci/service/src/CircleShapeInferenceRule.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -215,11 +215,12 @@ template <class CIRCLENODE>
loco::NodeShape use_paddings(const CIRCLENODE *node, const luci::CircleConst *paddings)
{
const loco::DataType S32 = loco::DataType::S32;
const loco::DataType S64 = loco::DataType::S64;

auto input_shape = luci::shape_get(node->input()).template as<loco::TensorShape>();

// TODO support other data type
LUCI_ASSERT(paddings->dtype() == S32, "Only support int 32 for now");
LUCI_ASSERT(paddings->dtype() == S32 || paddings->dtype() == S64, "Support int 32/64 for now");
LUCI_ASSERT(paddings->rank() == 2, "paddings should be rank 2")

int32_t n = paddings->dim(0).value();
Expand All @@ -236,8 +237,16 @@ loco::NodeShape use_paddings(const CIRCLENODE *node, const luci::CircleConst *pa
{
int32_t idx = ni * 2;
int value = input_shape.dim(ni).value();
value += paddings->at<S32>(idx + 0); // left
value += paddings->at<S32>(idx + 1); // right
if (paddings->dtype() == S32)
{
value += paddings->at<S32>(idx + 0); // left
value += paddings->at<S32>(idx + 1); // right
}
else
{
value += static_cast<int32_t>(paddings->at<S64>(idx + 0)); // left
value += static_cast<int32_t>(paddings->at<S64>(idx + 1)); // right
}
output_shape.dim(ni) = value;
}

Expand Down
31 changes: 31 additions & 0 deletions res/TensorFlowLiteRecipes/Pad_001/test.recipe
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# padding with INT64, others same as Pad_000
operand {
name: "ifm"
type: FLOAT32
shape { dim: 1 dim: 3 dim: 3 dim: 2 }
}
operand {
name: "padding"
type: INT64
shape { dim: 4 dim: 2 }
filler {
tag: "explicit"
arg: "0" arg: "0"
arg: "1" arg: "1"
arg: "2" arg: "2"
arg: "0" arg: "0"
}
}
operand {
name: "ofm"
type: FLOAT32
shape { dim: 1 dim: 5 dim: 7 dim: 2 }
}
operation {
type: "Pad"
input: "ifm"
input: "padding"
output: "ofm"
}
input: "ifm"
output: "ofm"

0 comments on commit 3b04d5c

Please sign in to comment.