feat: initialize the new fusion algorithm
Signed-off-by: vividf <yihsiang.fang@tier4.jp>
vividf committed Jan 24, 2025
1 parent 2169394 commit 68e1474
Showing 22 changed files with 1,113 additions and 297 deletions.
@@ -17,6 +17,8 @@ endif()
# Build non-CUDA dependent nodes
ament_auto_add_library(${PROJECT_NAME} SHARED
src/camera_projection.cpp
src/fusion_collector.cpp
src/fusion_matching_strategy.cpp
src/fusion_node.cpp
src/debugger.cpp
src/utils/geometry.cpp
@@ -1,8 +1,8 @@
/**:
  ros__parameters:
    input_offset_ms: [61.67, 111.67, 45.0, 28.33, 78.33, 95.0]
    timeout_ms: 70.0
    match_threshold_ms: 50.0
    rois_timestamp_offsets: [0.06167, 0.11167, 0.045, 0.02833, 0.07833, 0.095]
    timeout_sec: 0.070
    # match_threshold_ms: 50.0
    image_buffer_size: 15
    # projection setting for each ROI: whether to project onto the unrectified image
    point_project_to_unrectified_image: [false, false, false, false, false, false]
@@ -22,3 +22,7 @@

    # debug parameters
    publish_processing_time_detail: false
    matching_strategy:
      type: advanced
      det3d_noise_window: 0.01
      rois_timestamp_noise_window: [0.005, 0.005, 0.005, 0.005, 0.005, 0.005]
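The timing parameters move from milliseconds (input_offset_ms, timeout_ms, match_threshold_ms) to seconds (rois_timestamp_offsets, timeout_sec), and the per-strategy tuning moves under the new matching_strategy block. A minimal sketch of the unit migration, assuming the new offsets are simply the old millisecond defaults divided by 1000:

// Sketch only: converts the old input_offset_ms defaults to the new
// rois_timestamp_offsets values in seconds.
#include <cstdio>
#include <vector>

int main()
{
  const std::vector<double> input_offset_ms{61.67, 111.67, 45.0, 28.33, 78.33, 95.0};
  for (const double offset_ms : input_offset_ms) {
    std::printf("%.5f\n", offset_ms / 1000.0);  // 0.06167, 0.11167, 0.04500, ...
  }
  return 0;
}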
@@ -0,0 +1,106 @@
// Copyright 2024 TIER IV, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "autoware/image_projection_based_fusion/camera_projection.hpp"

#include <rclcpp/rclcpp.hpp>

#include <cstddef>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>

namespace autoware::image_projection_based_fusion
{
using autoware::image_projection_based_fusion::CameraProjection;

template <class Msg3D, class Msg2D, class ExportObj>
class FusionNode;

template <class Msg2D>
struct Det2dStatus
{
// camera index
std::size_t id = 0;
// camera projection
std::unique_ptr<CameraProjection> camera_projector_ptr;
bool project_to_unrectified_image = false;
bool approximate_camera_projection = false;
};

struct FusionCollectorInfoBase
{
virtual ~FusionCollectorInfoBase() = default;
};

struct NaiveCollectorInfo : public FusionCollectorInfoBase
{
double timestamp;
double threshold;

explicit NaiveCollectorInfo(double timestamp = 0.0, double threshold = 0.0)
: timestamp(timestamp), threshold(threshold)
{
}
};

struct AdvancedCollectorInfo : public FusionCollectorInfoBase
{
double timestamp;
double noise_window;

explicit AdvancedCollectorInfo(double timestamp = 0.0, double noise_window = 0.0)
: timestamp(timestamp), noise_window(noise_window)
{
}
};

template <class Msg3D, class Msg2D, class ExportObj>
class FusionCollector
{
public:
FusionCollector(
std::shared_ptr<FusionNode<Msg3D, Msg2D, ExportObj>> && ros2_parent_node, double timeout_sec,
bool debug_mode);
bool process_msg_3d(const typename Msg3D::ConstSharedPtr msg_3d);
bool process_rois(const std::size_t & roi_id, const typename Msg2D::ConstSharedPtr det2d_msg);
void fusion_callback();

[[nodiscard]] bool fusion_finished() const;

void set_info(std::shared_ptr<FusionCollectorInfoBase> collector_info);
[[nodiscard]] std::shared_ptr<FusionCollectorInfoBase> get_info() const;
// void show_debug_message();
bool ready_to_fuse();
bool rois_exists(const std::size_t & rois_id);
bool det3d_exists();

private:
std::shared_ptr<FusionNode<Msg3D, Msg2D, ExportObj>> ros2_parent_node_;
rclcpp::TimerBase::SharedPtr timer_;
typename Msg3D::ConstSharedPtr det3d_msg_{nullptr};
std::vector<Det2dStatus<Msg2D>> det2d_list_;
std::unordered_map<std::size_t, typename Msg2D::ConstSharedPtr> id_to_roi_map_;
double timeout_sec_;
double rois_number_;
bool debug_mode_;
bool fusion_finished_{false};
std::mutex fusion_mutex_;
std::shared_ptr<FusionCollectorInfoBase> fusion_collector_info_;
};

} // namespace autoware::image_projection_based_fusion
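FusionCollector gathers one 3D detection plus one RoI message per camera, then fuses either when everything has arrived or when its per-collector timer expires. A minimal usage sketch, assuming a parent FusionNode held as node_ptr, a LiDAR message det3d_msg, and a vector rois of per-camera RoI messages (all hypothetical names; the real call sites live in the non-rendered fusion_node.cpp):

// Sketch only; types follow the pointpainting instantiation.
auto collector = std::make_shared<FusionCollector<PointCloudMsgType, RoiMsgType, DetectedObjects>>(
  std::move(node_ptr), /*timeout_sec=*/0.07, /*debug_mode=*/false);

collector->process_msg_3d(det3d_msg);  // returns false if this collector already fused
for (std::size_t roi_id = 0; roi_id < rois.size(); ++roi_id) {
  collector->process_rois(roi_id, rois[roi_id]);
}
// Once every RoI and the 3D detection are present (or the timer fires), the
// collector calls preprocess(), fuse_on_single_image() per camera, and
// export_process() on the parent node, then marks itself finished.
if (collector->fusion_finished()) {
  // safe to drop this collector from the node's collector list
}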
@@ -0,0 +1,141 @@
// Copyright 2025 TIER IV, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "autoware/image_projection_based_fusion/fusion_collector.hpp"

#include <rclcpp/rclcpp.hpp>

#include <cstddef>
#include <list>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>

namespace autoware::image_projection_based_fusion
{

template <class Msg3D, class Msg2D, class ExportObj>
class FusionNode;

struct MatchingParamsBase
{
virtual ~MatchingParamsBase() = default;
};

struct Det3dMatchingParams : public MatchingParamsBase
{
double det3d_timestamp;

explicit Det3dMatchingParams(double det3d_timestamp = 0.0) : det3d_timestamp(det3d_timestamp) {}
};

struct RoisMatchingParams : public MatchingParamsBase
{
double rois_timestamp;
std::size_t rois_id;

explicit RoisMatchingParams(double rois_timestamp = 0.0, std::size_t rois_id = 0)
: rois_timestamp(rois_timestamp), rois_id(rois_id)
{
}
};

template <class Msg3D, class Msg2D, class ExportObj>
class FusionMatchingStrategy
{
public:
virtual ~FusionMatchingStrategy() = default;

[[nodiscard]] virtual std::optional<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>>
match_rois_to_collector(
const std::list<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>> & fusion_collectors,
const std::shared_ptr<RoisMatchingParams> & params) const = 0;

[[nodiscard]] virtual std::optional<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>>
match_det3d_to_collector(
const std::list<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>> & fusion_collectors,
const std::shared_ptr<Det3dMatchingParams> & params) const = 0;
virtual void set_collector_info(
std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>> & collector,
const std::shared_ptr<MatchingParamsBase> & matching_params) = 0;
};

template <class Msg3D, class Msg2D, class ExportObj>
class NaiveMatchingStrategy : public FusionMatchingStrategy<Msg3D, Msg2D, ExportObj>
{
public:
explicit NaiveMatchingStrategy(
std::shared_ptr<FusionNode<Msg3D, Msg2D, ExportObj>> && ros2_parent_node,
std::size_t rois_number);

[[nodiscard]] std::optional<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>>
match_rois_to_collector(
const std::list<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>> & fusion_collectors,
const std::shared_ptr<RoisMatchingParams> & params) const override;

[[nodiscard]] std::optional<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>>
match_det3d_to_collector(
const std::list<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>> & fusion_collectors,
const std::shared_ptr<Det3dMatchingParams> & params) const override;

void set_collector_info(
std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>> & collector,
const std::shared_ptr<MatchingParamsBase> & matching_params) override;

private:
std::unordered_map<std::size_t, double> id_to_offset_map_;
double threshold_;
std::shared_ptr<FusionNode<Msg3D, Msg2D, ExportObj>> ros2_parent_node_;
};

template <class Msg3D, class Msg2D, class ExportObj>
class AdvancedMatchingStrategy : public FusionMatchingStrategy<Msg3D, Msg2D, ExportObj>
{
public:
explicit AdvancedMatchingStrategy(
std::shared_ptr<FusionNode<Msg3D, Msg2D, ExportObj>> && ros2_parent_node,
std::size_t rois_number);

[[nodiscard]] std::optional<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>>
match_rois_to_collector(
const std::list<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>> & fusion_collectors,
const std::shared_ptr<RoisMatchingParams> & params) const override;

[[nodiscard]] std::optional<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>>
match_det3d_to_collector(
const std::list<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>> & fusion_collectors,
const std::shared_ptr<Det3dMatchingParams> & params) const override;
void set_collector_info(
std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>> & collector,
const std::shared_ptr<MatchingParamsBase> & matching_params) override;

double get_offset(
const double & det3d_timestamp,
const std::optional<std::unordered_map<std::string, std::string>> & concatenated_status) const;

private:
std::unordered_map<std::size_t, double> id_to_offset_map_;
std::unordered_map<std::size_t, double> id_to_noise_window_map_;
double det3d_noise_window_;
std::shared_ptr<FusionNode<Msg3D, Msg2D, ExportObj>> ros2_parent_node_;
};

template <class Msg3D, class Msg2D, class ExportObj>
std::shared_ptr<FusionMatchingStrategy<Msg3D, Msg2D, ExportObj>> parse_matching_strategy(
rclcpp::Node & node);

} // namespace autoware::image_projection_based_fusion
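The strategy interface decides whether an incoming message joins an existing FusionCollector or spawns a new one. A hypothetical sketch of how an RoI callback could drive it — member names such as fusion_collectors_, fusion_collectors_mutex_, fusion_matching_strategy_, and the helper create_collector() are assumptions here; the real wiring is in the non-rendered fusion_node.cpp:

// Hypothetical sketch; types follow the pointpainting instantiation.
void on_rois(const std::size_t rois_id, const RoiMsgType::ConstSharedPtr det2d_msg)
{
  const auto params = std::make_shared<RoisMatchingParams>(
    rclcpp::Time(det2d_msg->header.stamp).seconds(), rois_id);

  std::lock_guard<std::mutex> lock(fusion_collectors_mutex_);
  auto matched =
    fusion_matching_strategy_->match_rois_to_collector(fusion_collectors_, params);
  if (matched) {
    (*matched)->process_rois(rois_id, det2d_msg);
  } else {
    auto collector = create_collector();  // assumed helper returning a new FusionCollector
    fusion_matching_strategy_->set_collector_info(collector, params);
    collector->process_rois(rois_id, det2d_msg);
    fusion_collectors_.push_back(collector);
  }
}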
@@ -1,4 +1,4 @@
// Copyright 2022 TIER IV, Inc.
// Copyright 2025 TIER IV, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,8 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__FUSION_NODE_HPP_
#define AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__FUSION_NODE_HPP_
#pragma once

#include "autoware/image_projection_based_fusion/fusion_collector.hpp"
#include "autoware/image_projection_based_fusion/fusion_matching_strategy.hpp"
// TODO(vivid): maybe we should move this to a different node.hpp
#include "autoware/image_projection_based_fusion/fusion_types.hpp"

#include <autoware/image_projection_based_fusion/camera_projection.hpp>
#include <autoware/image_projection_based_fusion/debugger.hpp>
@@ -23,6 +27,8 @@
#include <rclcpp/rclcpp.hpp>

#include <autoware_perception_msgs/msg/detected_objects.hpp>
#include <diagnostic_msgs/msg/detail/diagnostic_array__struct.hpp>
#include <diagnostic_msgs/msg/diagnostic_status.hpp>
#include <sensor_msgs/msg/camera_info.hpp>
#include <sensor_msgs/msg/image.hpp>
#include <sensor_msgs/point_cloud2_iterator.hpp>
@@ -38,44 +44,16 @@
#include <tf2_ros/buffer.h>
#include <tf2_ros/transform_listener.h>

#include <map>
#include <cstddef>
#include <list>
#include <memory>
#include <set>
#include <optional>
#include <string>
#include <utility>
#include <unordered_map>
#include <vector>

namespace autoware::image_projection_based_fusion
{
using autoware_perception_msgs::msg::DetectedObject;
using autoware_perception_msgs::msg::DetectedObjects;
using sensor_msgs::msg::CameraInfo;
using sensor_msgs::msg::Image;
using PointCloudMsgType = sensor_msgs::msg::PointCloud2;
using RoiMsgType = tier4_perception_msgs::msg::DetectedObjectsWithFeature;
using ClusterMsgType = tier4_perception_msgs::msg::DetectedObjectsWithFeature;
using ClusterObjType = tier4_perception_msgs::msg::DetectedObjectWithFeature;
using tier4_perception_msgs::msg::DetectedObjectWithFeature;
using PointCloud = pcl::PointCloud<pcl::PointXYZ>;
using autoware::image_projection_based_fusion::CameraProjection;
using autoware_perception_msgs::msg::ObjectClassification;

template <class Msg2D>
struct Det2dStatus
{
// camera index
std::size_t id = 0;
// camera projection
std::unique_ptr<CameraProjection> camera_projector_ptr;
bool project_to_unrectified_image = false;
bool approximate_camera_projection = false;
// process flags
bool is_fused = false;
// timing
double input_offset_ms = 0.0;
// cache
std::map<int64_t, typename Msg2D::ConstSharedPtr> cached_det2d_msgs;
};

template <class Msg3D, class Msg2D, class ExportObj>
class FusionNode : public rclcpp::Node
@@ -89,57 +67,57 @@
explicit FusionNode(
const std::string & node_name, const rclcpp::NodeOptions & options, int queue_size);

virtual void preprocess(Msg3D & output_msg);
virtual void fuse_on_single_image(
const Msg3D & input_msg, const Det2dStatus<Msg2D> & det2d, const Msg2D & input_roi_msg,
Msg3D & output_msg) = 0;
void export_process(typename Msg3D::SharedPtr & output_det3d_msg);
std::optional<std::unordered_map<std::string, std::string>> find_concatenation_status(
double timestamp);

private:
// Common process methods
void cameraInfoCallback(
void camera_info_callback(
const sensor_msgs::msg::CameraInfo::ConstSharedPtr input_camera_info_msg,
const std::size_t camera_id);

// callback for timer
void timer_callback();
void setPeriod(const int64_t new_period);
void exportProcess();

// 2d detection management methods
bool checkAllDet2dFused()
{
for (const auto & det2d : det2d_list_) {
if (!det2d.is_fused) {
return false;
}
}
return true;
}
void manage_collector_list();
void manage_concatenated_status_map(const double & current_timestamp);

// camera projection
float approx_grid_cell_w_size_;
float approx_grid_cell_h_size_;

bool debug_mode_{false};

// timer
rclcpp::TimerBase::SharedPtr timer_;
double timeout_ms_{};
double match_threshold_ms_{};
double timeout_sec_{};

std::vector<typename rclcpp::Subscription<Msg2D>::SharedPtr> rois_subs_;
std::vector<rclcpp::Subscription<sensor_msgs::msg::CameraInfo>::SharedPtr> camera_info_subs_;

// cache for fusion
int64_t cached_det3d_msg_timestamp_;
typename Msg3D::SharedPtr cached_det3d_msg_ptr_;
std::unique_ptr<FusionMatchingStrategy<Msg3D, Msg2D, ExportObj>> fusion_matching_strategy_;
std::mutex fusion_collectors_mutex_;
std::list<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>> fusion_collectors_;

std::unordered_map<std::size_t, double> id_to_noise_window_map_;
std::unordered_map<std::size_t, double> id_to_offset_map_;
double cloud_noise_window_;

// timestamp: (key, value)
std::unordered_map<double, std::unordered_map<std::string, std::string>> concatenated_status_map_;

protected:
void setDet2DStatus(std::size_t rois_number);
void set_det2d_status(std::size_t rois_number);

// callback for main subscription
void subCallback(const typename Msg3D::ConstSharedPtr input_msg);
void sub_callback(const typename Msg3D::ConstSharedPtr det3d_msg);
// callback for roi subscription
void roiCallback(const typename Msg2D::ConstSharedPtr input_roi_msg, const std::size_t roi_i);
void roi_callback(const typename Msg2D::ConstSharedPtr det2d_msg, const std::size_t roi_i);

void diagnostic_callback(const diagnostic_msgs::msg::DiagnosticArray::SharedPtr diagnostic_msg);

// Custom process methods
virtual void preprocess(Msg3D & output_msg);
virtual void fuseOnSingleImage(
const Msg3D & input_msg, const Det2dStatus<Msg2D> & det2d, const Msg2D & input_roi_msg,
Msg3D & output_msg) = 0;
virtual void postprocess(const Msg3D & processing_msg, ExportObj & output_msg);
virtual void publish(const ExportObj & output_msg);

@@ -152,6 +130,7 @@

// 3d detection subscription
typename rclcpp::Subscription<Msg3D>::SharedPtr det3d_sub_;
rclcpp::Subscription<diagnostic_msgs::msg::DiagnosticArray>::SharedPtr sub_diag_;

// parameters for out_of_scope filter
float filter_scope_min_x_;
@@ -161,6 +140,8 @@
float filter_scope_min_z_;
float filter_scope_max_z_;

std::string matching_strategy_;

// output publisher
typename rclcpp::Publisher<ExportObj>::SharedPtr pub_ptr_;

@@ -179,5 +160,3 @@
};

} // namespace autoware::image_projection_based_fusion

#endif // AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__FUSION_NODE_HPP_
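The node now subscribes to a DiagnosticArray (sub_diag_) and keeps concatenated_status_map_, which AdvancedMatchingStrategy later queries through find_concatenation_status(). A hypothetical sketch of how diagnostic_callback might fill that map from the pointcloud-concatenation diagnostics; the key name concatenated_cloud_timestamp is an assumption, while cloud_concatenation_success and reference_timestamp_min/max mirror the keys read in get_offset():

// Hypothetical sketch; not the committed implementation.
void diagnostic_callback(const diagnostic_msgs::msg::DiagnosticArray::SharedPtr diagnostic_msg)
{
  for (const auto & status : diagnostic_msg->status) {
    std::unordered_map<std::string, std::string> key_value_map;
    for (const auto & key_value : status.values) {
      key_value_map[key_value.key] = key_value.value;
    }
    // Index the status by the concatenated cloud's timestamp (assumed key name)
    // so that find_concatenation_status(timestamp) can retrieve it later.
    const auto it = key_value_map.find("concatenated_cloud_timestamp");
    if (it != key_value_map.end()) {
      concatenated_status_map_[std::stod(it->second)] = std::move(key_value_map);
    }
  }
}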
@@ -0,0 +1,38 @@
// Copyright 2025 TIER IV, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <autoware_perception_msgs/msg/detected_objects.hpp>
#include <sensor_msgs/msg/camera_info.hpp>
#include <sensor_msgs/msg/image.hpp>
#include <tier4_perception_msgs/msg/detected_objects_with_feature.hpp>

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

namespace autoware::image_projection_based_fusion
{
using autoware_perception_msgs::msg::DetectedObject;
using autoware_perception_msgs::msg::DetectedObjects;
using sensor_msgs::msg::CameraInfo;
using sensor_msgs::msg::Image;
using PointCloudMsgType = sensor_msgs::msg::PointCloud2;
using RoiMsgType = tier4_perception_msgs::msg::DetectedObjectsWithFeature;
using ClusterMsgType = tier4_perception_msgs::msg::DetectedObjectsWithFeature;
using ClusterObjType = tier4_perception_msgs::msg::DetectedObjectWithFeature;
using tier4_perception_msgs::msg::DetectedObjectWithFeature;
using PointCloud = pcl::PointCloud<pcl::PointXYZ>;
using autoware_perception_msgs::msg::ObjectClassification;
} // namespace autoware::image_projection_based_fusion
@@ -50,7 +50,7 @@ class PointPaintingFusionNode : public FusionNode<PointCloudMsgType, RoiMsgType,
private:
void preprocess(PointCloudMsgType & pointcloud_msg) override;

void fuseOnSingleImage(
void fuse_on_single_image(
const PointCloudMsgType & input_pointcloud_msg, const Det2dStatus<RoiMsgType> & det2d,
const RoiMsgType & input_roi_msg, PointCloudMsgType & painted_pointcloud_msg) override;

@@ -32,7 +32,7 @@ class RoiClusterFusionNode : public FusionNode<ClusterMsgType, RoiMsgType, Clust
private:
void preprocess(ClusterMsgType & output_cluster_msg) override;

void fuseOnSingleImage(
void fuse_on_single_image(
const ClusterMsgType & input_cluster_msg, const Det2dStatus<RoiMsgType> & det2d,
const RoiMsgType & input_roi_msg, ClusterMsgType & output_cluster_msg) override;

@@ -39,7 +39,7 @@ class RoiDetectedObjectFusionNode : public FusionNode<DetectedObjects, RoiMsgTyp
private:
void preprocess(DetectedObjects & output_msg) override;

void fuseOnSingleImage(
void fuse_on_single_image(
const DetectedObjects & input_object_msg, const Det2dStatus<RoiMsgType> & det2d,
const RoiMsgType & input_roi_msg, DetectedObjects & output_object_msg) override;

@@ -34,7 +34,7 @@ class RoiPointCloudFusionNode : public FusionNode<PointCloudMsgType, RoiMsgType,
rclcpp::Publisher<PointCloudMsgType>::SharedPtr point_pub_ptr_;
rclcpp::Publisher<PointCloudMsgType>::SharedPtr cluster_debug_pub_;

void fuseOnSingleImage(
void fuse_on_single_image(
const PointCloudMsgType & input_pointcloud_msg, const Det2dStatus<RoiMsgType> & det2d,
const RoiMsgType & input_roi_msg, PointCloudMsgType & output_pointcloud_msg) override;

@@ -42,7 +42,7 @@ class SegmentPointCloudFusionNode : public FusionNode<PointCloudMsgType, Image,
private:
void preprocess(PointCloudMsgType & pointcloud_msg) override;

void fuseOnSingleImage(
void fuse_on_single_image(
const PointCloudMsgType & input_pointcloud_msg, const Det2dStatus<Image> & det2d,
const Image & input_mask, PointCloudMsgType & output_pointcloud_msg) override;

@@ -63,7 +63,7 @@ struct PointData
size_t orig_index;
};

bool checkCameraInfo(const sensor_msgs::msg::CameraInfo & camera_info);
bool check_camera_info(const sensor_msgs::msg::CameraInfo & camera_info);

std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
@@ -6,24 +6,22 @@
"fusion_common": {
"type": "object",
"properties": {
"input_offset_ms": {
"rois_timestamp_offsets": {
"type": "array",
"description": "An array of timestamp offsets for each camera [ms].",
"default": [61.67, 111.67, 45.0, 28.33, 78.33, 95.0]
"items": {
"type": "number",
"minimum": 0.0
},
"description": "An array of timestamp offsets for each camera [s].",
"minItems": 2,
"default": [0.06167, 0.011167, 0.045, 0.02833, 0.07833, 0.095]
},
"timeout_ms": {
"type": "number",
"description": "A timeout value can be assigned within a single frame [ms].",
"default": 70.0,
"minimum": 1.0,
"maximum": 100.0
},
"match_threshold_ms": {
"type": "number",
"description": "A maximum threshold value to synchronize RoIs from multiple cameras [ms].",
"default": 50.0,
"minimum": 0.0,
"maximum": 100.0
"description": "A timeout value can be assigned within a single frame [s].",
"default": 0.07,
"minimum": 0.001,
"maximum": 0.1
},
"image_buffer_size": {
"type": "integer",
@@ -85,6 +83,61 @@
"type": "number",
"description": "The height of grid cell used in approximated projection [pixel].",
"default": 1.0
},
"matching_strategy": {
"type": "object",
"properties": {
"type": {
"type": "string",
"enum": ["naive", "advanced"],
"default": "advanced",
"description": "Set it to `advanced` if you want to use more accurate and complicated logic for matching LiDAR and camera; otherwise, set it to `naive`."
},
"threshold": {
"type": "number",
"description": "A maximum threshold value to synchronize RoIs from multiple cameras [s].",
"default": 0.05,
"minimum": 0.0,
"maximum": 0.1
},
"det3d_noise_window": {
"type": "number",
"description": "det3d noise window in seconds.",
"default": 0.001,
"minimum": 0.0
},
"rois_timestamp_noise_window": {
"type": "array",
"items": {
"type": "number",
"minimum": 0.0
},
"default": [0.005, 0.005, 0.005, 0.005, 0.005, 0.005],
"minItems": 2,
"description": "List of camera timestamp noise windows in seconds. The noise values should be specified in the same order as the input_topics."
}
},
"required": ["type"],
"dependencies": {
"type": {
"oneOf": [
{
"properties": { "type": { "const": "naive" } },
"required": ["threshold"],
"not": {
"required": ["det3d_noise_window", "rois_timestamp_noise_window"]
}
},
{
"properties": { "type": { "const": "advanced" } },
"required": ["det3d_noise_window", "rois_timestamp_noise_window"],
"not": {
"required": ["threshold"]
}
}
]
}
}
}
}
}
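With the advanced strategy, an RoI joins a collector when its offset-corrected timestamp lands inside the collector's reference window widened by both noise windows. A small worked example using the schema defaults above (the timestamps themselves are made up):

#include <cstdio>

int main()
{
  const double collector_reference = 100.000;  // det3d timestamp minus its offset [s]
  const double det3d_noise_window = 0.001;     // matching_strategy.det3d_noise_window default
  const double rois_offset = 0.06167;          // rois_timestamp_offsets[0] default
  const double rois_noise_window = 0.005;      // rois_timestamp_noise_window[0] default

  const double rois_timestamp = 100.063;       // hypothetical camera 0 header stamp
  const double corrected = rois_timestamp - rois_offset;

  // Same window test as AdvancedMatchingStrategy::match_rois_to_collector.
  const bool matched =
    corrected < collector_reference + det3d_noise_window + rois_noise_window &&
    corrected > collector_reference - det3d_noise_window - rois_noise_window;

  std::printf(
    "corrected = %.5f s, window = +/-%.3f s, matched = %s\n", corrected,
    det3d_noise_window + rois_noise_window, matched ? "true" : "false");
  return 0;
}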
@@ -0,0 +1,200 @@
// Copyright 2024 TIER IV, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "autoware/image_projection_based_fusion/fusion_collector.hpp"

#include "autoware/image_projection_based_fusion/fusion_node.hpp"
#include "autoware/image_projection_based_fusion/fusion_types.hpp"

#include <rclcpp/rclcpp.hpp>

#include <memory>
#include <string>
#include <unordered_map>
#include <utility>

namespace autoware::image_projection_based_fusion
{

template <class Msg3D, class Msg2D, class ExportObj>
FusionCollector<Msg3D, Msg2D, ExportObj>::FusionCollector(
std::shared_ptr<FusionNode<Msg3D, Msg2D, ExportObj>> && ros2_parent_node, double timeout_sec,
bool debug_mode)
: ros2_parent_node_(std::move(ros2_parent_node)), timeout_sec_(timeout_sec), debug_mode_(debug_mode)
{
const auto period_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
std::chrono::duration<double>(timeout_sec_));

timer_ =
rclcpp::create_timer(ros2_parent_node_, ros2_parent_node_->get_clock(), period_ns, [this]() {
std::lock_guard<std::mutex> fusion_lock(fusion_mutex_);
if (fusion_finished_) return;
fusion_callback();
});
}

template <class Msg3D, class Msg2D, class ExportObj>
void FusionCollector<Msg3D, Msg2D, ExportObj>::set_info(
std::shared_ptr<FusionCollectorInfoBase> fusion_collector_info)
{
fusion_collector_info_ = std::move(fusion_collector_info);
}

template <class Msg3D, class Msg2D, class ExportObj>
std::shared_ptr<FusionCollectorInfoBase> FusionCollector<Msg3D, Msg2D, ExportObj>::get_info() const
{
return fusion_collector_info_;
}

template <class Msg3D, class Msg2D, class ExportObj>
bool FusionCollector<Msg3D, Msg2D, ExportObj>::process_msg_3d(
const typename Msg3D::ConstSharedPtr msg_3d)
{
std::lock_guard<std::mutex> fusion_lock(fusion_mutex_);
if (fusion_finished_) return false;

if (det3d_msg_ != nullptr) {
RCLCPP_WARN_STREAM_THROTTLE(
ros2_parent_node_->get_logger(), *ros2_parent_node_->get_clock(),
std::chrono::milliseconds(10000).count(),
"Pointcloud already exists in the collector. Check the timestamp of the pointcloud.");
}

det3d_msg_ = msg_3d;
if (ready_to_fuse()) {
fusion_callback();
}

return true;
}

template <class Msg3D, class Msg2D, class ExportObj>
bool FusionCollector<Msg3D, Msg2D, ExportObj>::process_rois(
const std::size_t & roi_id, const typename Msg2D::ConstSharedPtr det2d_msg)
{
std::lock_guard<std::mutex> fusion_lock(fusion_mutex_);
if (fusion_finished_) return false;

if (id_to_roi_map_.find(roi_id) != id_to_roi_map_.end()) {
RCLCPP_WARN_STREAM_THROTTLE(
ros2_parent_node_->get_logger(), *ros2_parent_node_->get_clock(),
std::chrono::milliseconds(10000).count(),
"ROIS '" << roi_id << "' already exists in the collector. Check the timestamp of the rois.");
}
id_to_roi_map_[roi_id] = det2d_msg;
if (ready_to_fuse()) {
fusion_callback();
}

return true;
}

template <class Msg3D, class Msg2D, class ExportObj>
bool FusionCollector<Msg3D, Msg2D, ExportObj>::ready_to_fuse()
{
return id_to_roi_map_.size() == rois_number_ && det3d_msg_ != nullptr;
}

template <class Msg3D, class Msg2D, class ExportObj>
bool FusionCollector<Msg3D, Msg2D, ExportObj>::fusion_finished() const
{
return fusion_finished_;
}

template <class Msg3D, class Msg2D, class ExportObj>
void FusionCollector<Msg3D, Msg2D, ExportObj>::fusion_callback()
{
// if (debug_mode_) {
// show_debug_message();
// }

// Either all pointclouds and RoIs have been received or the timer has timed out; cancel the
// timer and fuse them.
timer_->cancel();

typename Msg3D::SharedPtr output_det3d_msg = std::make_shared<Msg3D>(*det3d_msg_);
ros2_parent_node_->preprocess(*output_det3d_msg);

for (const auto & [roi_id, roi] : id_to_roi_map_) {
ros2_parent_node_->fuse_on_single_image(
*det3d_msg_, det2d_list_[roi_id], *roi, *output_det3d_msg);
}

ros2_parent_node_->export_process(output_det3d_msg);
fusion_finished_ = true;
}

template <class Msg3D, class Msg2D, class ExportObj>
bool FusionCollector<Msg3D, Msg2D, ExportObj>::rois_exists(const std::size_t & rois_id)
{
return id_to_roi_map_.find(rois_id) != id_to_roi_map_.end();
}

template <class Msg3D, class Msg2D, class ExportObj>
bool FusionCollector<Msg3D, Msg2D, ExportObj>::det3d_exists()
{
return det3d_msg_ != nullptr;
}

// void CloudCollector::show_debug_message()
// {
// auto time_until_trigger = timer_->time_until_trigger();
// std::stringstream log_stream;
// log_stream << std::fixed << std::setprecision(6);
// log_stream << "Collector's fusion callback time: "
// << ros2_parent_node_->get_clock()->now().seconds() << " seconds\n";

// if (auto advanced_info = std::dynamic_pointer_cast<AdvancedCollectorInfo>(collector_info_)) {
// log_stream << "Advanced strategy:\n Collector's reference time min: "
// << advanced_info->timestamp - advanced_info->noise_window
// << " to max: " << advanced_info->timestamp + advanced_info->noise_window
// << " seconds\n";
// } else if (auto naive_info = std::dynamic_pointer_cast<NaiveCollectorInfo>(collector_info_)) {
// log_stream << "Naive strategy:\n Collector's timestamp: " << naive_info->timestamp
// << " seconds\n";
// }

// log_stream << "Time until trigger: " << (time_until_trigger.count() / 1e9) << " seconds\n";

// log_stream << "Pointclouds: [";
// std::string separator = "";
// for (const auto & [topic, cloud] : topic_to_cloud_map_) {
// log_stream << separator;
// log_stream << "[" << topic << ", " << rclcpp::Time(cloud->header.stamp).seconds() << "]";
// separator = ", ";
// }

// log_stream << "]\n";

// RCLCPP_INFO(ros2_parent_node_->get_logger(), "%s", log_stream.str().c_str());
// }

// Explicit instantiation for the supported types

// pointpainting fusion
template class FusionCollector<PointCloudMsgType, RoiMsgType, DetectedObjects>;

// roi cluster fusion
template class FusionCollector<ClusterMsgType, RoiMsgType, ClusterMsgType>;

// roi detected-object fusion
template class FusionCollector<DetectedObjects, RoiMsgType, DetectedObjects>;

// roi pointcloud fusion
template class FusionCollector<PointCloudMsgType, RoiMsgType, ClusterMsgType>;

// segment pointcloud fusion
template class FusionCollector<PointCloudMsgType, Image, PointCloudMsgType>;

} // namespace autoware::image_projection_based_fusion
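The disabled debug helper above was carried over from the pointcloud CloudCollector and still iterates topic_to_cloud_map_. A sketch of what an RoI-based version might look like, assuming the show_debug_message() declaration is re-enabled in the header and <sstream>/<iomanip> are included (hypothetical, since the committed code keeps it commented out):

// Hypothetical RoI-based variant of the disabled debug helper.
template <class Msg3D, class Msg2D, class ExportObj>
void FusionCollector<Msg3D, Msg2D, ExportObj>::show_debug_message()
{
  std::stringstream log_stream;
  log_stream << std::fixed << std::setprecision(6);
  log_stream << "Collector's fusion callback time: "
             << ros2_parent_node_->get_clock()->now().seconds() << " seconds\n";
  log_stream << "RoIs: [";
  std::string separator = "";
  for (const auto & [roi_id, roi] : id_to_roi_map_) {
    log_stream << separator << "[" << roi_id << ", "
               << rclcpp::Time(roi->header.stamp).seconds() << "]";
    separator = ", ";
  }
  log_stream << "]";
  RCLCPP_INFO(ros2_parent_node_->get_logger(), "%s", log_stream.str().c_str());
}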
@@ -0,0 +1,287 @@
// Copyright 2025 TIER IV, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "autoware/image_projection_based_fusion/fusion_matching_strategy.hpp"

#include "autoware/image_projection_based_fusion/fusion_collector.hpp"
#include "autoware/image_projection_based_fusion/fusion_node.hpp"
#include "autoware/image_projection_based_fusion/fusion_types.hpp"

#include <rclcpp/rclcpp.hpp>

#include <cmath>
#include <cstddef>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace autoware::image_projection_based_fusion
{

template <class Msg3D, class Msg2D, class ExportObj>
NaiveMatchingStrategy<Msg3D, Msg2D, ExportObj>::NaiveMatchingStrategy(
std::shared_ptr<FusionNode<Msg3D, Msg2D, ExportObj>> && ros2_parent_node, std::size_t rois_number)
: ros2_parent_node_(std::move(ros2_parent_node))
{
auto rois_timestamp_offsets =
ros2_parent_node_->template declare_parameter<std::vector<double>>("rois_timestamp_offsets");
threshold_ = ros2_parent_node_->template declare_parameter<double>("matching_strategy.threshold");

if (rois_timestamp_offsets.size() != rois_number) {
throw std::runtime_error("The number of rois does not match the number of timestamp offsets.");
}

for (size_t i = 0; i < rois_number; i++) {
id_to_offset_map_[i] = rois_timestamp_offsets[i];
}

RCLCPP_INFO(
ros2_parent_node_->get_logger(), "Utilize naive matching strategy for fusion nodes.");
}

template <class Msg3D, class Msg2D, class ExportObj>
std::optional<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>>
NaiveMatchingStrategy<Msg3D, Msg2D, ExportObj>::match_rois_to_collector(
const std::list<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>> & fusion_collectors,
const std::shared_ptr<RoisMatchingParams> & params) const
{
std::optional<double> smallest_time_difference = std::nullopt;
std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>> closest_collector = nullptr;

for (const auto & fusion_collector : fusion_collectors) {
if (!fusion_collector->rois_exists(params->rois_id)) {
auto info = fusion_collector->get_info();
if (auto naive_info = std::dynamic_pointer_cast<NaiveCollectorInfo>(info)) {
double time_difference = std::abs(params->rois_timestamp - naive_info->timestamp);
if (
!smallest_time_difference ||
(time_difference < smallest_time_difference && time_difference < naive_info->threshold)) {

smallest_time_difference = time_difference;
closest_collector = fusion_collector;
}
}
}
}

if (closest_collector != nullptr) {
return closest_collector;
}
return std::nullopt;
}

template <class Msg3D, class Msg2D, class ExportObj>
std::optional<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>>
NaiveMatchingStrategy<Msg3D, Msg2D, ExportObj>::match_det3d_to_collector(
const std::list<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>> & fusion_collectors,
const std::shared_ptr<Det3dMatchingParams> & params) const
{
std::optional<double> smallest_time_difference = std::nullopt;
std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>> closest_collector = nullptr;

for (const auto & fusion_collector : fusion_collectors) {
if (!fusion_collector->det3d_exists()) {
auto info = fusion_collector->get_info();
if (auto naive_info = std::dynamic_pointer_cast<NaiveCollectorInfo>(info)) {
double time_difference = std::abs(params->det3d_timestamp - naive_info->timestamp);
if (
!smallest_time_difference ||
(time_difference < smallest_time_difference && time_difference < naive_info->threshold)) {

smallest_time_difference = time_difference;
closest_collector = fusion_collector;
}
}
}
}

if (closest_collector != nullptr) {
return closest_collector;
}
return std::nullopt;
}

template <class Msg3D, class Msg2D, class ExportObj>
void NaiveMatchingStrategy<Msg3D, Msg2D, ExportObj>::set_collector_info(
std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>> & collector,
const std::shared_ptr<MatchingParamsBase> & matching_params)
{
if (
auto det3d_matching_params = std::dynamic_pointer_cast<Det3dMatchingParams>(matching_params)) {
auto info =
std::make_shared<NaiveCollectorInfo>(det3d_matching_params->det3d_timestamp, threshold_);
collector->set_info(info);
} else if (
auto rois_matching_params = std::dynamic_pointer_cast<RoisMatchingParams>(matching_params)) {
auto info = std::make_shared<NaiveCollectorInfo>(
rois_matching_params->rois_timestamp - id_to_offset_map_.at(rois_matching_params->rois_id),
threshold_);
collector->set_info(info);
}
}

template <class Msg3D, class Msg2D, class ExportObj>
AdvancedMatchingStrategy<Msg3D, Msg2D, ExportObj>::AdvancedMatchingStrategy(
std::shared_ptr<FusionNode<Msg3D, Msg2D, ExportObj>> && ros2_parent_node, std::size_t rois_number)
: ros2_parent_node_(std::move(ros2_parent_node))
{
auto rois_timestamp_offsets =
ros2_parent_node_->template declare_parameter<std::vector<double>>("rois_timestamp_offsets");
det3d_noise_window_ =
ros2_parent_node_->template declare_parameter<double>("matching_strategy.det3d_noise_window");
auto rois_timestamp_noise_window =
ros2_parent_node_->template declare_parameter<std::vector<double>>(
"matching_strategy.rois_timestamp_noise_window");

if (rois_timestamp_offsets.size() != rois_number) {
throw std::runtime_error("The number of rois does not match the number of timestamp offsets.");
}
if (rois_timestamp_noise_window.size() != rois_number) {
throw std::runtime_error(
"The number of rois does not match the number of timestamp noise window.");
}

for (size_t i = 0; i < rois_number; i++) {
id_to_offset_map_[i] = rois_timestamp_offsets[i];
id_to_noise_window_map_[i] = rois_timestamp_noise_window[i];
}

RCLCPP_INFO(
ros2_parent_node_->get_logger(), "Utilize advanced matching strategy for fusion nodes.");
}

template <class Msg3D, class Msg2D, class ExportObj>
std::optional<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>>
AdvancedMatchingStrategy<Msg3D, Msg2D, ExportObj>::match_rois_to_collector(
const std::list<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>> & fusion_collectors,
const std::shared_ptr<RoisMatchingParams> & params) const
{
for (const auto & fusion_collector : fusion_collectors) {
auto info = fusion_collector->get_info();
if (auto advanced_info = std::dynamic_pointer_cast<AdvancedCollectorInfo>(info)) {
auto reference_timestamp_min = advanced_info->timestamp - advanced_info->noise_window;
auto reference_timestamp_max = advanced_info->timestamp + advanced_info->noise_window;
double time = params->rois_timestamp - id_to_offset_map_.at(params->rois_id);
if (
time < reference_timestamp_max + id_to_noise_window_map_.at(params->rois_id) &&
time > reference_timestamp_min - id_to_noise_window_map_.at(params->rois_id)) {
return fusion_collector;
}
}
}
return std::nullopt;
}

template <class Msg3D, class Msg2D, class ExportObj>
std::optional<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>>
AdvancedMatchingStrategy<Msg3D, Msg2D, ExportObj>::match_det3d_to_collector(
const std::list<std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>>> & fusion_collectors,
const std::shared_ptr<Det3dMatchingParams> & params) const
{
auto concatenated_status = ros2_parent_node_->find_concatenation_status(params->det3d_timestamp);

// TODO(vivid): double check this logic and fix the name
double offset = get_offset(params->det3d_timestamp, concatenated_status);

for (const auto & fusion_collector : fusion_collectors) {
auto info = fusion_collector->get_info();
if (auto advanced_info = std::dynamic_pointer_cast<AdvancedCollectorInfo>(info)) {
auto reference_timestamp_min = advanced_info->timestamp - advanced_info->noise_window;
auto reference_timestamp_max = advanced_info->timestamp + advanced_info->noise_window;
if (
params->det3d_timestamp - offset < reference_timestamp_max + det3d_noise_window_ &&
params->det3d_timestamp - offset > reference_timestamp_min - det3d_noise_window_) {
return fusion_collector;
}
}
}
return std::nullopt;
}

template <class Msg3D, class Msg2D, class ExportObj>
void AdvancedMatchingStrategy<Msg3D, Msg2D, ExportObj>::set_collector_info(
std::shared_ptr<FusionCollector<Msg3D, Msg2D, ExportObj>> & collector,
const std::shared_ptr<MatchingParamsBase> & matching_params)
{
if (
auto det3d_matching_params = std::dynamic_pointer_cast<Det3dMatchingParams>(matching_params)) {
auto concatenated_status =
ros2_parent_node_->find_concatenation_status(det3d_matching_params->det3d_timestamp);
double offset = get_offset(det3d_matching_params->det3d_timestamp, concatenated_status);

auto info = std::make_shared<AdvancedCollectorInfo>(
det3d_matching_params->det3d_timestamp - offset, det3d_noise_window_);
collector->set_info(info);
} else if (
auto rois_matching_params = std::dynamic_pointer_cast<RoisMatchingParams>(matching_params)) {
auto info = std::make_shared<AdvancedCollectorInfo>(
rois_matching_params->rois_timestamp - id_to_offset_map_[rois_matching_params->rois_id],
id_to_noise_window_map_[rois_matching_params->rois_id]);
collector->set_info(info);
}
}

template <class Msg3D, class Msg2D, class ExportObj>
double AdvancedMatchingStrategy<Msg3D, Msg2D, ExportObj>::get_offset(
const double & det3d_timestamp,
const std::optional<std::unordered_map<std::string, std::string>> & concatenated_status) const
{
double offset = 0.0;
if (concatenated_status) {
auto status_map = concatenated_status.value(); // Retrieve the inner map
if (
status_map["cloud_concatenation_success"] == "False" &&
det3d_timestamp > std::stod(status_map["reference_timestamp_max"])) {
// The defined earliest pointcloud is missed in the concatenation of pointcloud
offset = det3d_timestamp - (std::stod(status_map["reference_timestamp_min"]) +
(std::stod(status_map["reference_timestamp_max"]) -
std::stod(status_map["reference_timestamp_min"])) /
2);
}
}

return offset;
}

// pointpainting fusion
template class NaiveMatchingStrategy<PointCloudMsgType, RoiMsgType, DetectedObjects>;

// roi cluster fusion
template class NaiveMatchingStrategy<ClusterMsgType, RoiMsgType, ClusterMsgType>;

// roi detected-object fusion
template class NaiveMatchingStrategy<DetectedObjects, RoiMsgType, DetectedObjects>;

// roi pointcloud fusion
template class NaiveMatchingStrategy<PointCloudMsgType, RoiMsgType, ClusterMsgType>;

// segment pointcloud fusion
template class NaiveMatchingStrategy<PointCloudMsgType, Image, PointCloudMsgType>;

// pointpainting fusion
template class AdvancedMatchingStrategy<PointCloudMsgType, RoiMsgType, DetectedObjects>;

// roi cluster fusion
template class AdvancedMatchingStrategy<ClusterMsgType, RoiMsgType, ClusterMsgType>;

// roi detected-object fusion
template class AdvancedMatchingStrategy<DetectedObjects, RoiMsgType, DetectedObjects>;

// roi pointcloud fusion
template class AdvancedMatchingStrategy<PointCloudMsgType, RoiMsgType, ClusterMsgType>;

// segment pointcloud fusion
template class AdvancedMatchingStrategy<PointCloudMsgType, Image, PointCloudMsgType>;

} // namespace autoware::image_projection_based_fusion
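For reference, get_offset() only produces a non-zero offset when the concatenation diagnostics report a failure and the pointcloud stamp lies past reference_timestamp_max; the offset then re-centers the stamp on the reference window midpoint. A worked example with made-up timestamps:

#include <cstdio>

int main()
{
  // Values a failed concatenation status might carry (hypothetical numbers).
  const double reference_timestamp_min = 100.000;
  const double reference_timestamp_max = 100.050;
  const double det3d_timestamp = 100.080;  // later than reference_timestamp_max

  const double midpoint =
    reference_timestamp_min + (reference_timestamp_max - reference_timestamp_min) / 2.0;
  const double offset = det3d_timestamp - midpoint;  // 100.080 - 100.025 = 0.055 s

  std::printf(
    "offset = %.3f s, matching timestamp = %.3f s\n", offset, det3d_timestamp - offset);
  return 0;
}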
404 changes: 205 additions & 199 deletions perception/autoware_image_projection_based_fusion/src/fusion_node.cpp

Large diffs are not rendered by default.

@@ -157,7 +157,7 @@ PointPaintingFusionNode::PointPaintingFusionNode(const rclcpp::NodeOptions & opt

// subscriber
std::function<void(const PointCloudMsgType::ConstSharedPtr msg)> sub_callback =
std::bind(&PointPaintingFusionNode::subCallback, this, std::placeholders::_1);
std::bind(&PointPaintingFusionNode::sub_callback, this, std::placeholders::_1);
det3d_sub_ = this->create_subscription<PointCloudMsgType>(
"~/input/pointcloud", rclcpp::SensorDataQoS().keep_last(3), sub_callback);

@@ -256,7 +256,7 @@ void PointPaintingFusionNode::preprocess(PointCloudMsgType & painted_pointcloud_
static_cast<uint32_t>(painted_pointcloud_msg.data.size() / painted_pointcloud_msg.height);
}

void PointPaintingFusionNode::fuseOnSingleImage(
void PointPaintingFusionNode::fuse_on_single_image(
__attribute__((unused)) const PointCloudMsgType & input_pointcloud_msg,
const Det2dStatus<RoiMsgType> & det2d, const RoiMsgType & input_roi_msg,
PointCloudMsgType & painted_pointcloud_msg)
@@ -73,7 +73,7 @@
}
}

void RoiClusterFusionNode::fuseOnSingleImage(
void RoiClusterFusionNode::fuse_on_single_image(
const ClusterMsgType & input_cluster_msg, const Det2dStatus<RoiMsgType> & det2d,
const RoiMsgType & input_roi_msg, ClusterMsgType & output_cluster_msg)
{
@@ -83,7 +83,7 @@ void RoiDetectedObjectFusionNode::preprocess(DetectedObjects & output_msg)
ignored_object_flags_map_.insert(std::make_pair(timestamp_nsec, ignored_object_flags));
}

void RoiDetectedObjectFusionNode::fuseOnSingleImage(
void RoiDetectedObjectFusionNode::fuse_on_single_image(
const DetectedObjects & input_object_msg, const Det2dStatus<RoiMsgType> & det2d,
const RoiMsgType & input_roi_msg, DetectedObjects & output_object_msg __attribute__((unused)))
{
@@ -49,7 +49,7 @@ RoiPointCloudFusionNode::RoiPointCloudFusionNode(const rclcpp::NodeOptions & opt
cluster_debug_pub_ = this->create_publisher<PointCloudMsgType>("debug/clusters", 1);
}

void RoiPointCloudFusionNode::fuseOnSingleImage(
void RoiPointCloudFusionNode::fuse_on_single_image(
const PointCloudMsgType & input_pointcloud_msg, const Det2dStatus<RoiMsgType> & det2d,
const RoiMsgType & input_roi_msg,
__attribute__((unused)) PointCloudMsgType & output_pointcloud_msg)
@@ -62,7 +62,7 @@ void SegmentPointCloudFusionNode::preprocess(__attribute__((unused))
return;
}

void SegmentPointCloudFusionNode::fuseOnSingleImage(
void SegmentPointCloudFusionNode::fuse_on_single_image(
const PointCloudMsgType & input_pointcloud_msg, const Det2dStatus<Image> & det2d,
[[maybe_unused]] const Image & input_mask,
__attribute__((unused)) PointCloudMsgType & output_cloud)
@@ -22,23 +22,23 @@

namespace autoware::image_projection_based_fusion
{
bool checkCameraInfo(const sensor_msgs::msg::CameraInfo & camera_info)
bool check_camera_info(const sensor_msgs::msg::CameraInfo & camera_info)
{
const bool is_supported_model =
(camera_info.distortion_model == sensor_msgs::distortion_models::PLUMB_BOB ||
camera_info.distortion_model == sensor_msgs::distortion_models::RATIONAL_POLYNOMIAL);
if (!is_supported_model) {
RCLCPP_ERROR_STREAM(
rclcpp::get_logger("image_projection_based_fusion"),
"checkCameraInfo: Unsupported distortion model: " << camera_info.distortion_model);
"check_camera_info: Unsupported distortion model: " << camera_info.distortion_model);
return false;
}
const bool is_supported_distortion_param =
(camera_info.d.size() == 5 || camera_info.d.size() == 8);
if (!is_supported_distortion_param) {
RCLCPP_ERROR_STREAM(
rclcpp::get_logger("image_projection_based_fusion"),
"checkCameraInfo: Unsupported distortion coefficients size: " << camera_info.d.size());
"check_camera_info: Unsupported distortion coefficients size: " << camera_info.d.size());
return false;
}
return true;
