Fixed unreasonable parameter type in Broadcast
Change the type of shape from int32_t to uint32_t and add a unit test for Broadcast.
From GitHub issue #376.

Type: Code Improvement
Signed-off-by: Feiyue Chen <[email protected]>
chenfeiyue-cfy committed Oct 28, 2022
1 parent fde6d79 commit 1c9062c
Showing 3 changed files with 57 additions and 18 deletions.
6 changes: 3 additions & 3 deletions include/tim/vx/ops/broadcast.h
@@ -45,12 +45,12 @@ namespace ops {
 
 class Broadcast : public BuiltinOp {
  public:
-  Broadcast(Graph* graph, const std::vector<int32_t>& shape, const std::vector<int32_t>& dimensions = {});
+  Broadcast(Graph* graph, const std::vector<uint32_t>& shape, const std::vector<int32_t>& dimensions = {});
 
   std::shared_ptr<Operation> Clone(std::shared_ptr<Graph>& graph) const override;
 
- protected:
-  const std::vector<int32_t> shape_;
+ protected:
+  std::vector<uint32_t> shape_;
   std::vector<int32_t> dimensions_;
 };

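For context, a minimal caller-side sketch of the updated signature. It mirrors the setup in src/tim/vx/ops/broadcast_test.cc; the graph and tensor objects are assumed to already exist (created from a tim::vx::Context and TensorSpec as in those tests), so this is an illustrative fragment rather than the commit's own code:

#include <cstdint>
#include <vector>
#include "tim/vx/ops/broadcast.h"  // header changed by this commit

// ... graph, input_tensor and output_tensor set up as in broadcast_test.cc ...
std::vector<uint32_t> shape = {3, 2};   // output extents are now unsigned
std::vector<int32_t> dimensions = {0};  // broadcast axes stay signed (may be negative)
auto op = graph->CreateOperation<tim::vx::ops::Broadcast>(shape, dimensions);
(*op).BindInputs({input_tensor}).BindOutputs({output_tensor});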
13 changes: 7 additions & 6 deletions src/tim/vx/ops/broadcast.cc
@@ -30,23 +30,24 @@
 namespace tim {
 namespace vx {
 namespace ops {
-Broadcast::Broadcast(Graph* graph, const std::vector<int32_t>& shape,
+Broadcast::Broadcast(Graph* graph, const std::vector <uint32_t>& shape,
                      const std::vector<int32_t>& dimensions)
     : BuiltinOp(graph, VSI_NN_OP_EXPAND_BROADCAST),
       shape_(shape),
       dimensions_(dimensions) {
   this->impl()->node()->nn_param.expand_broadcast.dim_num = shape_.size();
-  this->impl()->node()->nn_param.expand_broadcast.shape = (uint32_t*)shape_.data();
+  this->impl()->node()->nn_param.expand_broadcast.shape = shape_.data();
 #ifdef VSI_EXPAND_BROADCAST_ENABLE_DIMENSIONS
   this->impl()->node()->nn_param.expand_broadcast.dimensions_num = dimensions_.size();
   if (dimensions.size() > 0)
   {
-    int dim_num = shape.size();
-    for (uint32_t i = 0; i < dimensions.size(); ++i) {
-      dimensions_[i] += (dimensions[i] < 0 ? dim_num : 0U);
+    for (uint32_t i = 0; i < dimensions.size(); i++)
+    {
+      dimensions_[i] += (dimensions[i] < 0 ? shape_.size() : 0U );
     }
     this->impl()->node()->nn_param.expand_broadcast.dimensions = (uint32_t*)dimensions_.data();
-  } else {
+  } else
+  {
     this->impl()->node()->nn_param.expand_broadcast.dimensions = nullptr;
   }
 #endif
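The loop guarded by VSI_EXPAND_BROADCAST_ENABLE_DIMENSIONS normalizes negative axis indices by adding the output rank (shape_.size()), so a negative dimension counts from the last axis. A small standalone sketch of that mapping, outside the TIM-VX API, using the values from the new 1DTo2D_WithDimsMinus2 test below:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<uint32_t> shape = {3, 2};    // output rank == shape.size() == 2
  std::vector<int32_t> dimensions = {-2};  // negative index counts from the back
  for (uint32_t i = 0; i < dimensions.size(); i++) {
    // Same normalization as the constructor: -2 + 2 -> axis 0.
    if (dimensions[i] < 0) dimensions[i] += static_cast<int32_t>(shape.size());
  }
  std::cout << "mapped axis: " << dimensions[0] << "\n";  // prints 0
  return 0;
}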
56 changes: 47 additions & 9 deletions src/tim/vx/ops/broadcast_test.cc
@@ -62,7 +62,7 @@ TEST(Broadcast, ScalarTo2D_2x3) {
   std::vector<float> golden = {
       2.25f, 2.25f, 2.25f, 2.25f, 2.25f, 2.25f,
   };
-  std::vector<int32_t> shape = {3, 2};
+  std::vector<uint32_t> shape = {3, 2};
 
   EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(),
                                              in_data.size() * sizeof(float)));
@@ -93,7 +93,7 @@ TEST(Broadcast, 1DTo2D) {
   std::vector<float> golden = {
       1.f, 2.f, 3.f, 1.f, 2.f, 3.f,
   };
-  std::vector<int32_t> shape = {3, 2};
+  std::vector<uint32_t> shape = {3, 2};
 
   EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(),
                                              in_data.size() * sizeof(float)));
@@ -125,7 +125,7 @@ TEST(Broadcast, 1DTo2D_WithDims0) {
       1.f, 2.f,
       1.f, 2.f,
   };
-  std::vector<int32_t> shape = {2, 2};
+  std::vector<uint32_t> shape = {2, 2};
   std::vector<int32_t> dimensions = {0};
 
   EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(),
@@ -158,7 +158,7 @@ TEST(Broadcast, 1DTo2D_WithDims1) {
       1.f, 1.f,
       2.f, 2.f,
   };
-  std::vector<int32_t> shape = {2, 2};
+  std::vector<uint32_t> shape = {2, 2};
   std::vector<int32_t> dimensions = {1};
 
   EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(),
@@ -170,6 +170,44 @@ TEST(Broadcast, 1DTo2D_WithDims1) {
   CheckResult(graph, golden, output_tensor);
 }
 
+TEST(Broadcast, 1DTo2D_WithDimsMinus2) {
+  auto ctx = tim::vx::Context::Create();
+  auto graph = ctx->CreateGraph();
+
+  tim::vx::ShapeType input_shape({3});
+  tim::vx::ShapeType output_shape({3, 2});
+  tim::vx::TensorSpec input_spec(tim::vx::DataType::FLOAT32, input_shape,
+                                 tim::vx::TensorAttribute::INPUT);
+  tim::vx::TensorSpec output_spec(tim::vx::DataType::FLOAT32, output_shape,
+                                  tim::vx::TensorAttribute::OUTPUT);
+
+  auto input_tensor = graph->CreateTensor(input_spec);
+  auto output_tensor = graph->CreateTensor(output_spec);
+
+  std::vector<float> in_data = {
+      1.f, 2.f, 3.f
+  };
+  std::vector<float> golden = {
+      1.f, 2.f, 3.f,
+      1.f, 2.f, 3.f
+  };
+  std::vector<uint32_t> shape = {3, 2};
+  std::vector<int32_t> dimensions = {-2};
+  EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(),
+                                             in_data.size() * sizeof(float)));
+
+  auto op = graph->CreateOperation<tim::vx::ops::Broadcast>(shape, dimensions);
+  (*op).BindInputs({input_tensor}).BindOutputs({output_tensor});
+
+
+  EXPECT_TRUE(graph->Compile());
+  EXPECT_TRUE(graph->Run());
+
+  std::vector<float> output(golden.size());
+  EXPECT_TRUE(output_tensor->CopyDataFromTensor(output.data()));
+  EXPECT_EQ(golden, output);
+}
+
 TEST(Broadcast, 1DTo3D_WithDims0) {
   auto ctx = tim::vx::Context::Create();
   auto graph = ctx->CreateGraph();
Expand All @@ -190,7 +228,7 @@ TEST(Broadcast, 1DTo3D_WithDims0) {
std::vector<float> golden = {
1.f, 2.f, 1.f, 2.f, 1.f, 2.f, 1.f, 2.f,
};
std::vector<int32_t> shape = {2, 2, 2};
std::vector<uint32_t> shape = {2, 2, 2};
std::vector<int32_t> dimensions = {0};

EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(),
@@ -222,7 +260,7 @@ TEST(Broadcast, 1DTo3D_WithDims1) {
   std::vector<float> golden = {
       1.f, 1.f, 2.f, 2.f, 1.f, 1.f, 2.f, 2.f,
   };
-  std::vector<int32_t> shape = {2, 2, 2};
+  std::vector<uint32_t> shape = {2, 2, 2};
   std::vector<int32_t> dimensions = {1};
 
   EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(),
@@ -254,7 +292,7 @@ TEST(Broadcast, 1DTo3D_WithDims2) {
   std::vector<float> golden = {
       1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f,
   };
-  std::vector<int32_t> shape = {2, 2, 2};
+  std::vector<uint32_t> shape = {2, 2, 2};
   std::vector<int32_t> dimensions = {2};
 
   EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(),
@@ -286,7 +324,7 @@ TEST(Broadcast, 2DTo3D_WithDims02) {
   std::vector<float> golden = {
       1.f, 5.f, 1.f, 5.f, 2.f, 6.f, 2.f, 6.f,
   };
-  std::vector<int32_t> shape = {2, 2, 2};
+  std::vector<uint32_t> shape = {2, 2, 2};
   std::vector<int32_t> dimensions = {0, 2};
 
   EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(),
@@ -318,7 +356,7 @@ TEST(Broadcast, 2DTo3D_WithDims12) {
   std::vector<float> golden = {
       1.f, 1.f, 5.f, 5.f, 2.f, 2.f, 6.f, 6.f,
   };
-  std::vector<int32_t> shape = {2, 2, 2};
+  std::vector<uint32_t> shape = {2, 2, 2};
   std::vector<int32_t> dimensions = {1, 2};
 
   EXPECT_TRUE(input_tensor->CopyDataToTensor(in_data.data(),
