diff --git a/launch/tier4_perception_launch/launch/object_recognition/detection/detector/camera_lidar_detector.launch.xml b/launch/tier4_perception_launch/launch/object_recognition/detection/detector/camera_lidar_detector.launch.xml
index ddcbbbaeae107..758918e32f1d5 100644
--- a/launch/tier4_perception_launch/launch/object_recognition/detection/detector/camera_lidar_detector.launch.xml
+++ b/launch/tier4_perception_launch/launch/object_recognition/detection/detector/camera_lidar_detector.launch.xml
@@ -82,8 +82,9 @@
-
-
+
+
+
diff --git a/launch/tier4_perception_launch/launch/object_recognition/detection/detector/lidar_dnn_detector.launch.xml b/launch/tier4_perception_launch/launch/object_recognition/detection/detector/lidar_dnn_detector.launch.xml
index ab89da7112b66..1563942d706af 100644
--- a/launch/tier4_perception_launch/launch/object_recognition/detection/detector/lidar_dnn_detector.launch.xml
+++ b/launch/tier4_perception_launch/launch/object_recognition/detection/detector/lidar_dnn_detector.launch.xml
@@ -18,8 +18,9 @@
-
-
+
+
+
diff --git a/perception/image_projection_based_fusion/config/pointpainting.param.yaml b/perception/image_projection_based_fusion/config/pointpainting.param.yaml
old mode 100755
new mode 100644
index 53ac6f4cafc28..30136bc8f47d0
--- a/perception/image_projection_based_fusion/config/pointpainting.param.yaml
+++ b/perception/image_projection_based_fusion/config/pointpainting.param.yaml
@@ -1,26 +1,10 @@
/**:
ros__parameters:
- trt_precision: fp16
encoder_onnx_path: "$(var model_path)/pts_voxel_encoder_$(var model_name).onnx"
encoder_engine_path: "$(var model_path)/pts_voxel_encoder_$(var model_name).engine"
head_onnx_path: "$(var model_path)/pts_backbone_neck_head_$(var model_name).onnx"
head_engine_path: "$(var model_path)/pts_backbone_neck_head_$(var model_name).engine"
-
- model_params:
- class_names: ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"]
- paint_class_names: ["CAR", "BICYCLE", "PEDESTRIAN"]
- point_feature_size: 7 # x, y, z, time-lag and car, pedestrian, bicycle
- max_voxel_size: 40000
- point_cloud_range: [-121.6, -76.8, -3.0, 121.6, 76.8, 5.0]
- voxel_size: [0.32, 0.32, 8.0]
- downsample_factor: 1
- encoder_in_feature_size: 12
- yaw_norm_thresholds: [0.3, 0.3, 0.3, 0.3, 0.0]
- has_variance: false
- has_twist: false
- densification_params:
- world_frame_id: "map"
- num_past_frames: 0
+ trt_precision: fp16
post_process_params:
# post-process params
circle_nms_dist_threshold: 0.3
@@ -28,6 +12,10 @@
iou_nms_search_distance_2d: 10.0
iou_nms_threshold: 0.1
score_threshold: 0.35
+ yaw_norm_thresholds: [0.3, 0.3, 0.3, 0.3, 0.0]
+ densification_params:
+ world_frame_id: "map"
+ num_past_frames: 1
omp_params:
# omp params
num_threads: 1
diff --git a/perception/image_projection_based_fusion/config/pointpainting_ml_package.param.yaml b/perception/image_projection_based_fusion/config/pointpainting_ml_package.param.yaml
new file mode 100755
index 0000000000000..ca76a8ecf2dac
--- /dev/null
+++ b/perception/image_projection_based_fusion/config/pointpainting_ml_package.param.yaml
@@ -0,0 +1,13 @@
+/**:
+ ros__parameters:
+ model_params:
+ class_names: ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"]
+ paint_class_names: ["CAR", "BICYCLE", "PEDESTRIAN"]
+ point_feature_size: 7 # x, y, z, time-lag and car, pedestrian, bicycle
+ max_voxel_size: 40000
+ point_cloud_range: [-121.6, -76.8, -3.0, 121.6, 76.8, 5.0]
+ voxel_size: [0.32, 0.32, 8.0]
+ downsample_factor: 1
+ encoder_in_feature_size: 12
+ has_twist: false
+ has_variance: false
diff --git a/perception/image_projection_based_fusion/docs/pointpainting-fusion.md b/perception/image_projection_based_fusion/docs/pointpainting-fusion.md
index d50a3f97390ca..85e740d95ebd0 100644
--- a/perception/image_projection_based_fusion/docs/pointpainting-fusion.md
+++ b/perception/image_projection_based_fusion/docs/pointpainting-fusion.md
@@ -33,17 +33,22 @@ The lidar points are projected onto the output of an image-only 2d object detect
### Core Parameters
-| Name | Type | Default Value | Description |
-| ------------------------------- | ------ | ------------- | ----------------------------------------------------------- |
-| `score_threshold` | float | `0.4` | detected objects with score less than threshold are ignored |
-| `densification_world_frame_id` | string | `map` | the world frame id to fuse multi-frame pointcloud |
-| `densification_num_past_frames` | int | `0` | the number of past frames to fuse with the current frame |
-| `trt_precision` | string | `fp16` | TensorRT inference precision: `fp32` or `fp16` |
-| `encoder_onnx_path` | string | `""` | path to VoxelFeatureEncoder ONNX file |
-| `encoder_engine_path` | string | `""` | path to VoxelFeatureEncoder TensorRT Engine file |
-| `head_onnx_path` | string | `""` | path to DetectionHead ONNX file |
-| `head_engine_path` | string | `""` | path to DetectionHead TensorRT Engine file |
-| `build_only` | bool | `false` | shutdown the node after TensorRT engine file is built |
+| Name | Type | Default Value | Description |
+| ------------------------------------------------ | ------------ | ------------------------- | ----------------------------------------------------------- |
+| `encoder_onnx_path` | string | `""` | path to VoxelFeatureEncoder ONNX file |
+| `encoder_engine_path` | string | `""` | path to VoxelFeatureEncoder TensorRT Engine file |
+| `head_onnx_path` | string | `""` | path to DetectionHead ONNX file |
+| `head_engine_path` | string | `""` | path to DetectionHead TensorRT Engine file |
+| `build_only` | bool | `false` | shutdown the node after TensorRT engine file is built |
+| `trt_precision` | string | `fp16` | TensorRT inference precision: `fp32` or `fp16` |
+| `post_process_params.score_threshold` | double | `0.4` | detected objects with score less than threshold are ignored |
+| `post_process_params.yaw_norm_thresholds` | list[double] | [0.3, 0.3, 0.3, 0.3, 0.0] | An array of distance threshold values of norm of yaw [rad]. |
+| `post_process_params.iou_nms_target_class_names` | list[string] | ["CAR"] | An array of class names to be target in NMS. |
+| `post_process_params.iou_nms_search_distance_2d` | double | 10.0 | A maximum distance value to search the nearest objects. |
+| `post_process_params.iou_nms_threshold` | double | 0.1 | A threshold value of NMS using IoU score. |
+| `post_process_params.has_twist` | boolean | false | Indicates whether the model outputs twist value. |
+| `densification_params.world_frame_id` | string | `map` | the world frame id to fuse multi-frame pointcloud |
+| `densification_params.num_past_frames` | int | `1` | the number of past frames to fuse with the current frame |
## Assumptions / Known limits
diff --git a/perception/image_projection_based_fusion/launch/pointpainting_fusion.launch.xml b/perception/image_projection_based_fusion/launch/pointpainting_fusion.launch.xml
index 9236d7d789ed1..a22b2c2a13bda 100644
--- a/perception/image_projection_based_fusion/launch/pointpainting_fusion.launch.xml
+++ b/perception/image_projection_based_fusion/launch/pointpainting_fusion.launch.xml
@@ -21,8 +21,9 @@
-
-
+
+
+
@@ -48,6 +49,7 @@
+
@@ -89,6 +91,7 @@
+
diff --git a/perception/image_projection_based_fusion/schema/pointpainting.schema.json b/perception/image_projection_based_fusion/schema/pointpainting.schema.json
index bc8f0512263b6..57d8bc500f3c6 100644
--- a/perception/image_projection_based_fusion/schema/pointpainting.schema.json
+++ b/perception/image_projection_based_fusion/schema/pointpainting.schema.json
@@ -6,71 +6,11 @@
"pointpainting": {
"type": "object",
"properties": {
- "model_params": {
- "type": "object",
- "description": "Parameters for model configuration.",
- "properties": {
- "class_names": {
- "type": "array",
- "description": "An array of class names will be predicted.",
- "default": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"],
- "uniqueItems": true
- },
- "paint_class_names": {
- "type": "array",
- "description": "An array of class names will be painted by PointPainting",
- "default": ["CAR", "BICYCLE", "PEDESTRIAN"],
- "uniqueItems": true
- },
- "point_feature_size": {
- "type": "integer",
- "description": "A number of channels of point feature layer.",
- "default": 7
- },
- "max_voxel_size": {
- "type": "integer",
- "description": "A maximum size of voxel grid.",
- "default": 40000
- },
- "point_cloud_range": {
- "type": "array",
- "description": "An array of distance ranges of each class, this must have same length with `class_names`.",
- "default": [-121.6, -76.8, -3.0, 121.6, 76.8, 5.0]
- },
- "voxel_size": {
- "type": "array",
- "description": "An array of voxel grid sizes for PointPainting, this must have same length with `paint_class_names`.",
- "default": [0.32, 0.32, 8.0]
- },
- "down_sample_factor": {
- "type": "integer",
- "description": "A scale factor of downsampling points",
- "default": 1,
- "minimum": 1
- },
- "encoder_in_feature_size": {
- "type": "integer",
- "description": "A size of encoder input feature channels.",
- "default": 12
- },
- "yaw_norm_thresholds": {
- "type": "array",
- "description": "An array of distance threshold values of norm of yaw [rad].",
- "default": [0.3, 0.3, 0.3, 0.3, 0.0],
- "minimum": 0.0,
- "maximum": 1.0
- },
- "has_variance": {
- "type": "boolean",
- "description": "Indicates whether the model outputs variance value.",
- "default": false
- },
- "has_twist": {
- "type": "boolean",
- "description": "Indicates whether the model outputs twist value.",
- "default": false
- }
- }
+ "trt_precision": {
+ "type": "string",
+ "description": "TensorRT inference precision.",
+ "default": "fp16",
+ "enum": ["fp32", "fp16"]
},
"densification_params": {
"type": "object",
@@ -112,7 +52,7 @@
"default": ["CAR"],
"uniqueItems": true
},
- "iou_search_distance_2d": {
+ "iou_nms_search_distance_2d": {
"type": "number",
"description": "A maximum distance value to search the nearest objects.",
"default": 10.0,
@@ -124,6 +64,23 @@
"default": 0.1,
"minimum": 0.0,
"maximum": 1.0
+ },
+ "yaw_norm_thresholds": {
+ "type": "array",
+ "description": "An array of distance threshold values of norm of yaw [rad].",
+ "default": [0.3, 0.3, 0.3, 0.3, 0.0],
+ "minimum": 0.0,
+ "maximum": 1.0
+ },
+ "has_variance": {
+ "type": "boolean",
+ "description": "Indicates whether the model outputs variance value.",
+ "default": false
+ },
+ "has_twist": {
+ "type": "boolean",
+ "description": "Indicates whether the model outputs twist value.",
+ "default": false
}
}
},
@@ -139,7 +96,7 @@
}
}
},
- "required": ["model_params", "densification_params", "post_process_params", "omp_params"]
+ "required": []
}
},
"properties": {
diff --git a/perception/image_projection_based_fusion/schema/pointpainting_ml_package.schema.json b/perception/image_projection_based_fusion/schema/pointpainting_ml_package.schema.json
new file mode 100644
index 0000000000000..0f89d050c1828
--- /dev/null
+++ b/perception/image_projection_based_fusion/schema/pointpainting_ml_package.schema.json
@@ -0,0 +1,74 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Parameters for Point Painting ML model",
+ "type": "object",
+ "definitions": {
+ "pointpainting_ml_package": {
+ "type": "object",
+ "properties": {
+ "model_params": {
+ "type": "object",
+ "description": "Parameters for model configuration.",
+ "properties": {
+ "class_names": {
+ "type": "array",
+          "description": "An array of class names to be predicted.",
+ "default": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"],
+ "uniqueItems": true
+ },
+ "paint_class_names": {
+ "type": "array",
+          "description": "An array of class names to be painted by PointPainting",
+ "default": ["CAR", "BICYCLE", "PEDESTRIAN"],
+ "uniqueItems": true
+ },
+ "point_feature_size": {
+ "type": "integer",
+ "description": "A number of channels of point feature layer.",
+ "default": 7
+ },
+ "max_voxel_size": {
+ "type": "integer",
+ "description": "A maximum size of voxel grid.",
+ "default": 40000
+ },
+ "point_cloud_range": {
+ "type": "array",
+          "description": "An array describing the detection range of the point cloud: [min_x, min_y, min_z, max_x, max_y, max_z].",
+ "default": [-121.6, -76.8, -3.0, 121.6, 76.8, 5.0]
+ },
+ "voxel_size": {
+ "type": "array",
+          "description": "An array of voxel grid sizes.",
+ "default": [0.32, 0.32, 8.0]
+ },
+ "down_sample_factor": {
+ "type": "integer",
+ "description": "A scale factor of downsampling points",
+ "default": 1,
+ "minimum": 1
+ },
+ "encoder_in_feature_size": {
+ "type": "integer",
+ "description": "A size of encoder input feature channels.",
+ "default": 12
+ }
+ }
+ }
+ },
+ "required": []
+ }
+ },
+ "properties": {
+ "/**": {
+ "type": "object",
+ "properties": {
+ "ros__parameters": {
+ "$ref": "#/definitions/pointpainting_ml_package"
+ }
+ },
+ "required": ["ros__parameters"]
+ }
+ },
+ "required": ["/**"]
+}
diff --git a/perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp b/perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp
index c4c5941be5fec..2c33df0b65afc 100644
--- a/perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp
+++ b/perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp
@@ -102,7 +102,7 @@ PointPaintingFusionNode::PointPaintingFusionNode(const rclcpp::NodeOptions & opt
const float circle_nms_dist_threshold = static_cast(
this->declare_parameter("post_process_params.circle_nms_dist_threshold"));
const auto yaw_norm_thresholds =
- this->declare_parameter>("model_params.yaw_norm_thresholds");
+ this->declare_parameter>("post_process_params.yaw_norm_thresholds");
// densification param
const std::string densification_world_frame_id =
this->declare_parameter("densification_params.world_frame_id");
@@ -140,8 +140,8 @@ PointPaintingFusionNode::PointPaintingFusionNode(const rclcpp::NodeOptions & opt
isClassTable_.erase(cls);
}
}
- has_variance_ = this->declare_parameter("has_variance");
has_twist_ = this->declare_parameter("model_params.has_twist");
+ has_variance_ = this->declare_parameter("model_params.has_variance");
const std::size_t point_feature_size = static_cast(
this->declare_parameter("model_params.point_feature_size"));
const std::size_t max_voxel_size =
diff --git a/perception/lidar_centerpoint/README.md b/perception/lidar_centerpoint/README.md
index ed71349d5bd7f..09cee5266760c 100644
--- a/perception/lidar_centerpoint/README.md
+++ b/perception/lidar_centerpoint/README.md
@@ -30,20 +30,22 @@ We trained the models using .
### Core Parameters
-| Name | Type | Default Value | Description |
-| ------------------------------- | ------------ | ------------- | ------------------------------------------------------------- |
-| `score_threshold` | float | `0.4` | detected objects with score less than threshold are ignored |
-| `densification_world_frame_id` | string | `map` | the world frame id to fuse multi-frame pointcloud |
-| `densification_num_past_frames` | int | `1` | the number of past frames to fuse with the current frame |
-| `trt_precision` | string | `fp16` | TensorRT inference precision: `fp32` or `fp16` |
-| `encoder_onnx_path` | string | `""` | path to VoxelFeatureEncoder ONNX file |
-| `encoder_engine_path` | string | `""` | path to VoxelFeatureEncoder TensorRT Engine file |
-| `head_onnx_path` | string | `""` | path to DetectionHead ONNX file |
-| `head_engine_path` | string | `""` | path to DetectionHead TensorRT Engine file |
-| `nms_iou_target_class_names` | list[string] | - | target classes for IoU-based Non Maximum Suppression |
-| `nms_iou_search_distance_2d` | double | - | If two objects are farther than the value, NMS isn't applied. |
-| `nms_iou_threshold` | double | - | IoU threshold for the IoU-based Non Maximum Suppression |
-| `build_only` | bool | `false` | shutdown the node after TensorRT engine file is built |
+| Name | Type | Default Value | Description |
+| ------------------------------------------------ | ------------ | ------------------------- | ------------------------------------------------------------- |
+| `encoder_onnx_path` | string | `""` | path to VoxelFeatureEncoder ONNX file |
+| `encoder_engine_path` | string | `""` | path to VoxelFeatureEncoder TensorRT Engine file |
+| `head_onnx_path` | string | `""` | path to DetectionHead ONNX file |
+| `head_engine_path` | string | `""` | path to DetectionHead TensorRT Engine file |
+| `build_only` | bool | `false` | shutdown the node after TensorRT engine file is built |
+| `trt_precision` | string | `fp16` | TensorRT inference precision: `fp32` or `fp16` |
+| `post_process_params.score_threshold` | double | `0.4` | detected objects with score less than threshold are ignored |
+| `post_process_params.yaw_norm_thresholds` | list[double] | [0.3, 0.3, 0.3, 0.3, 0.0] | An array of distance threshold values of norm of yaw [rad]. |
+| `post_process_params.iou_nms_target_class_names` | list[string] | - | target classes for IoU-based Non Maximum Suppression |
+| `post_process_params.iou_nms_search_distance_2d` | double | - | If two objects are farther than the value, NMS isn't applied. |
+| `post_process_params.iou_nms_threshold` | double | - | IoU threshold for the IoU-based Non Maximum Suppression |
+| `post_process_params.has_twist` | boolean | false | Indicates whether the model outputs twist value. |
+| `densification_params.world_frame_id` | string | `map` | the world frame id to fuse multi-frame pointcloud |
+| `densification_params.num_past_frames` | int | `1` | the number of past frames to fuse with the current frame |
### The `build_only` option
diff --git a/perception/lidar_centerpoint/config/centerpoint.param.yaml b/perception/lidar_centerpoint/config/centerpoint.param.yaml
index 38c57285e552d..17ed9a447cdb8 100644
--- a/perception/lidar_centerpoint/config/centerpoint.param.yaml
+++ b/perception/lidar_centerpoint/config/centerpoint.param.yaml
@@ -1,27 +1,19 @@
/**:
ros__parameters:
- class_names: ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"]
- point_feature_size: 4
- max_voxel_size: 40000
- point_cloud_range: [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0]
- voxel_size: [0.32, 0.32, 10.0]
- downsample_factor: 1
- encoder_in_feature_size: 9
- # post-process params
- circle_nms_dist_threshold: 0.5
- iou_nms_target_class_names: ["CAR"]
- iou_nms_search_distance_2d: 10.0
- iou_nms_threshold: 0.1
- yaw_norm_thresholds: [0.3, 0.3, 0.3, 0.3, 0.0]
- score_threshold: 0.35
- has_variance: false
- has_twist: false
- trt_precision: fp16
- densification_num_past_frames: 1
- densification_world_frame_id: map
-
# weight files
encoder_onnx_path: "$(var model_path)/pts_voxel_encoder_$(var model_name).onnx"
encoder_engine_path: "$(var model_path)/pts_voxel_encoder_$(var model_name).engine"
head_onnx_path: "$(var model_path)/pts_backbone_neck_head_$(var model_name).onnx"
head_engine_path: "$(var model_path)/pts_backbone_neck_head_$(var model_name).engine"
+ trt_precision: fp16
+ post_process_params:
+ # post-process params
+ circle_nms_dist_threshold: 0.5
+ iou_nms_target_class_names: ["CAR"]
+ iou_nms_search_distance_2d: 10.0
+ iou_nms_threshold: 0.1
+ score_threshold: 0.35
+ yaw_norm_thresholds: [0.3, 0.3, 0.3, 0.3, 0.0]
+ densification_params:
+ world_frame_id: map
+ num_past_frames: 1
diff --git a/perception/lidar_centerpoint/config/centerpoint_ml_package.param.yaml b/perception/lidar_centerpoint/config/centerpoint_ml_package.param.yaml
new file mode 100644
index 0000000000000..c8d17890e33be
--- /dev/null
+++ b/perception/lidar_centerpoint/config/centerpoint_ml_package.param.yaml
@@ -0,0 +1,12 @@
+/**:
+ ros__parameters:
+ model_params:
+ class_names: ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"]
+ point_feature_size: 4
+ max_voxel_size: 40000
+ point_cloud_range: [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0]
+ voxel_size: [0.32, 0.32, 10.0]
+ downsample_factor: 1
+ encoder_in_feature_size: 9
+ has_variance: false
+ has_twist: false
diff --git a/perception/lidar_centerpoint/config/centerpoint_sigma.param.yaml b/perception/lidar_centerpoint/config/centerpoint_sigma.param.yaml
deleted file mode 100644
index c217f6321381a..0000000000000
--- a/perception/lidar_centerpoint/config/centerpoint_sigma.param.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-/**:
- ros__parameters:
- class_names: ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"]
- point_feature_size: 4
- max_voxel_size: 40000
- point_cloud_range: [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0]
- voxel_size: [0.32, 0.32, 10.0]
- downsample_factor: 1
- encoder_in_feature_size: 9
- # post-process params
- circle_nms_dist_threshold: 0.5
- iou_nms_target_class_names: ["CAR"]
- iou_nms_search_distance_2d: 10.0
- iou_nms_threshold: 0.1
- yaw_norm_thresholds: [0.3, 0.3, 0.3, 0.3, 0.0]
- score_threshold: 0.35
- has_variance: true
- has_twist: true
- trt_precision: fp16
- densification_num_past_frames: 1
- densification_world_frame_id: map
-
- # weight files
- encoder_onnx_path: "$(var model_path)/pts_voxel_encoder_$(var model_name).onnx"
- encoder_engine_path: "$(var model_path)/pts_voxel_encoder_$(var model_name).engine"
- head_onnx_path: "$(var model_path)/pts_backbone_neck_head_$(var model_name).onnx"
- head_engine_path: "$(var model_path)/pts_backbone_neck_head_$(var model_name).engine"
diff --git a/perception/lidar_centerpoint/config/centerpoint_sigma_ml_package.param.yaml b/perception/lidar_centerpoint/config/centerpoint_sigma_ml_package.param.yaml
new file mode 100644
index 0000000000000..c8d17890e33be
--- /dev/null
+++ b/perception/lidar_centerpoint/config/centerpoint_sigma_ml_package.param.yaml
@@ -0,0 +1,12 @@
+/**:
+ ros__parameters:
+ model_params:
+ class_names: ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"]
+ point_feature_size: 4
+ max_voxel_size: 40000
+ point_cloud_range: [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0]
+ voxel_size: [0.32, 0.32, 10.0]
+ downsample_factor: 1
+ encoder_in_feature_size: 9
+ has_variance: false
+ has_twist: false
diff --git a/perception/lidar_centerpoint/config/centerpoint_tiny.param.yaml b/perception/lidar_centerpoint/config/centerpoint_tiny.param.yaml
deleted file mode 100644
index 35e11e61e9634..0000000000000
--- a/perception/lidar_centerpoint/config/centerpoint_tiny.param.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-/**:
- ros__parameters:
- class_names: ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"]
- point_feature_size: 4
- max_voxel_size: 40000
- point_cloud_range: [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0]
- voxel_size: [0.32, 0.32, 10.0]
- downsample_factor: 2
- encoder_in_feature_size: 9
- # post-process params
- circle_nms_dist_threshold: 0.5
- iou_nms_target_class_names: ["CAR"]
- iou_nms_search_distance_2d: 10.0
- iou_nms_threshold: 0.1
- yaw_norm_thresholds: [0.3, 0.3, 0.3, 0.3, 0.0]
- score_threshold: 0.35
- has_variance: false
- has_twist: false
- trt_precision: fp16
- densification_num_past_frames: 1
- densification_world_frame_id: map
-
- # weight files
- encoder_onnx_path: "$(var model_path)/pts_voxel_encoder_$(var model_name).onnx"
- encoder_engine_path: "$(var model_path)/pts_voxel_encoder_$(var model_name).engine"
- head_onnx_path: "$(var model_path)/pts_backbone_neck_head_$(var model_name).onnx"
- head_engine_path: "$(var model_path)/pts_backbone_neck_head_$(var model_name).engine"
diff --git a/perception/lidar_centerpoint/config/centerpoint_tiny_ml_package.param.yaml b/perception/lidar_centerpoint/config/centerpoint_tiny_ml_package.param.yaml
new file mode 100644
index 0000000000000..2a3f9c8e75290
--- /dev/null
+++ b/perception/lidar_centerpoint/config/centerpoint_tiny_ml_package.param.yaml
@@ -0,0 +1,12 @@
+/**:
+ ros__parameters:
+ model_params:
+ class_names: ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"]
+ point_feature_size: 4
+ max_voxel_size: 40000
+ point_cloud_range: [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0]
+ voxel_size: [0.32, 0.32, 10.0]
+ downsample_factor: 2
+ encoder_in_feature_size: 9
+ has_variance: false
+ has_twist: false
diff --git a/perception/lidar_centerpoint/launch/lidar_centerpoint.launch.xml b/perception/lidar_centerpoint/launch/lidar_centerpoint.launch.xml
index 5489af2c8fb60..7b2441578a14c 100644
--- a/perception/lidar_centerpoint/launch/lidar_centerpoint.launch.xml
+++ b/perception/lidar_centerpoint/launch/lidar_centerpoint.launch.xml
@@ -5,8 +5,9 @@
-
-
+
+
+
@@ -18,6 +19,7 @@
+
@@ -30,6 +32,7 @@
+
diff --git a/perception/lidar_centerpoint/schema/centerpoint.schema.json b/perception/lidar_centerpoint/schema/centerpoint.schema.json
new file mode 100644
index 0000000000000..b11c115cc2466
--- /dev/null
+++ b/perception/lidar_centerpoint/schema/centerpoint.schema.json
@@ -0,0 +1,98 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Parameters for Lidar Centerpoint Node",
+ "type": "object",
+ "definitions": {
+ "centerpoint": {
+ "type": "object",
+ "properties": {
+ "trt_precision": {
+ "type": "string",
+ "description": "TensorRT inference precision.",
+ "default": "fp16",
+ "enum": ["fp32", "fp16"]
+ },
+ "post_process_params": {
+ "type": "object",
+ "properties": {
+ "score_threshold": {
+ "type": "number",
+ "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored.",
+ "default": 0.35,
+ "minimum": 0.0,
+ "maximum": 1.0
+ },
+ "yaw_norm_thresholds": {
+ "type": "array",
+ "description": "An array of distance threshold values of norm of yaw [rad].",
+ "default": [0.3, 0.3, 0.3, 0.3, 0.0],
+ "minimum": 0.0,
+ "maximum": 1.0
+ },
+ "circle_nms_dist_threshold": {
+ "type": "number",
+            "description": "A distance threshold value of circle NMS [m].",
+ "default": 0.5,
+ "minimum": 0.0,
+ "maximum": 1.0
+ },
+ "iou_nms_target_class_names": {
+ "type": "array",
+ "description": "An array of class names to be target in NMS.",
+ "default": ["CAR"],
+ "uniqueItems": true
+ },
+ "iou_nms_search_distance_2d": {
+ "type": "number",
+ "description": "A maximum distance value to search the nearest objects.",
+ "default": 10.0,
+ "minimum": 0.0
+ },
+ "iou_nms_threshold": {
+ "type": "number",
+ "description": "A threshold value of NMS using IoU score.",
+ "default": 0.1,
+ "minimum": 0.0,
+ "maximum": 1.0
+ },
+ "has_twist": {
+ "type": "boolean",
+ "description": "Indicates whether the model outputs twist value.",
+ "default": false
+ }
+ }
+ },
+ "densification_params": {
+ "type": "object",
+ "description": "Parameters for pointcloud densification.",
+ "properties": {
+ "world_frame_id": {
+ "type": "string",
+ "description": "A name of frame id where world coordinates system is defined with respect to.",
+ "default": "map"
+ },
+ "num_past_frames": {
+ "type": "integer",
+            "description": "The number of past frames to fuse with the current frame.",
+ "default": 1,
+ "minimum": 0
+ }
+ }
+ }
+ },
+ "required": ["post_process_params", "densification_params"]
+ }
+ },
+ "properties": {
+ "/**": {
+ "type": "object",
+ "properties": {
+ "ros__parameters": {
+ "$ref": "#/definitions/centerpoint"
+ }
+ },
+ "required": ["ros__parameters"]
+ }
+ },
+ "required": ["/**"]
+}
diff --git a/perception/lidar_centerpoint/schema/centerpoint_ml_package.schema.json b/perception/lidar_centerpoint/schema/centerpoint_ml_package.schema.json
new file mode 100644
index 0000000000000..2c5655c6201ed
--- /dev/null
+++ b/perception/lidar_centerpoint/schema/centerpoint_ml_package.schema.json
@@ -0,0 +1,68 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Parameters for Centerpoint ML model",
+ "type": "object",
+ "definitions": {
+ "centerpoint_ml_package": {
+ "type": "object",
+ "properties": {
+ "model_params": {
+ "type": "object",
+ "description": "Parameters for model configuration.",
+ "properties": {
+ "class_names": {
+ "type": "array",
+            "description": "An array of class names to be predicted.",
+ "default": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"],
+ "uniqueItems": true
+ },
+ "point_feature_size": {
+ "type": "integer",
+ "description": "A number of channels of point feature layer.",
+ "default": 4
+ },
+ "max_voxel_size": {
+ "type": "integer",
+ "description": "A maximum size of voxel grid.",
+ "default": 40000
+ },
+ "point_cloud_range": {
+ "type": "array",
+            "description": "An array describing the detection range of the point cloud: [min_x, min_y, min_z, max_x, max_y, max_z].",
+ "default": [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0]
+ },
+ "voxel_size": {
+ "type": "array",
+            "description": "An array of voxel grid sizes.",
+ "default": [0.32, 0.32, 10.0]
+ },
+ "down_sample_factor": {
+ "type": "integer",
+ "description": "A scale factor of downsampling points",
+ "default": 1,
+ "minimum": 1
+ },
+ "encoder_in_feature_size": {
+ "type": "integer",
+ "description": "A size of encoder input feature channels.",
+ "default": 9
+ }
+ }
+ }
+ },
+ "required": ["model_params"]
+ }
+ },
+ "properties": {
+ "/**": {
+ "type": "object",
+ "properties": {
+ "ros__parameters": {
+ "$ref": "#/definitions/centerpoint_ml_package"
+ }
+ },
+ "required": ["ros__parameters"]
+ }
+ },
+ "required": ["/**"]
+}
diff --git a/perception/lidar_centerpoint/schema/centerpoint_tiny_ml_package.schema.json b/perception/lidar_centerpoint/schema/centerpoint_tiny_ml_package.schema.json
new file mode 100644
index 0000000000000..55329e21ea8ab
--- /dev/null
+++ b/perception/lidar_centerpoint/schema/centerpoint_tiny_ml_package.schema.json
@@ -0,0 +1,68 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Parameters for Centerpoint Tiny ML model",
+ "type": "object",
+ "definitions": {
+ "centerpoint_tiny_ml_package": {
+ "type": "object",
+ "properties": {
+ "model_params": {
+ "type": "object",
+ "description": "Parameters for model configuration.",
+ "properties": {
+ "class_names": {
+ "type": "array",
+            "description": "An array of class names to be predicted.",
+ "default": ["CAR", "TRUCK", "BUS", "BICYCLE", "PEDESTRIAN"],
+ "uniqueItems": true
+ },
+ "point_feature_size": {
+ "type": "integer",
+ "description": "A number of channels of point feature layer.",
+ "default": 4
+ },
+ "max_voxel_size": {
+ "type": "integer",
+ "description": "A maximum size of voxel grid.",
+ "default": 40000
+ },
+ "point_cloud_range": {
+ "type": "array",
+            "description": "An array describing the detection range of the point cloud: [min_x, min_y, min_z, max_x, max_y, max_z].",
+ "default": [-76.8, -76.8, -4.0, 76.8, 76.8, 6.0]
+ },
+ "voxel_size": {
+ "type": "array",
+            "description": "An array of voxel grid sizes.",
+ "default": [0.32, 0.32, 10.0]
+ },
+ "down_sample_factor": {
+ "type": "integer",
+ "description": "A scale factor of downsampling points",
+ "default": 2,
+ "minimum": 1
+ },
+ "encoder_in_feature_size": {
+ "type": "integer",
+ "description": "A size of encoder input feature channels.",
+ "default": 9
+ }
+ }
+ }
+ },
+ "required": ["model_params"]
+ }
+ },
+ "properties": {
+ "/**": {
+ "type": "object",
+ "properties": {
+ "ros__parameters": {
+ "$ref": "#/definitions/centerpoint_tiny_ml_package"
+ }
+ },
+ "required": ["ros__parameters"]
+ }
+ },
+ "required": ["/**"]
+}
diff --git a/perception/lidar_centerpoint/src/node.cpp b/perception/lidar_centerpoint/src/node.cpp
index 0606a2ec55c9e..d0abf72d888d3 100644
--- a/perception/lidar_centerpoint/src/node.cpp
+++ b/perception/lidar_centerpoint/src/node.cpp
@@ -39,34 +39,35 @@ LidarCenterPointNode::LidarCenterPointNode(const rclcpp::NodeOptions & node_opti
: Node("lidar_center_point", node_options), tf_buffer_(this->get_clock())
{
const float score_threshold =
- static_cast(this->declare_parameter("score_threshold", 0.35));
- const float circle_nms_dist_threshold =
- static_cast(this->declare_parameter("circle_nms_dist_threshold"));
+ static_cast(this->declare_parameter("post_process_params.score_threshold"));
+ const float circle_nms_dist_threshold = static_cast(
+ this->declare_parameter("post_process_params.circle_nms_dist_threshold"));
const auto yaw_norm_thresholds =
- this->declare_parameter>("yaw_norm_thresholds");
+ this->declare_parameter>("post_process_params.yaw_norm_thresholds");
const std::string densification_world_frame_id =
- this->declare_parameter("densification_world_frame_id", "map");
+ this->declare_parameter("densification_params.world_frame_id");
const int densification_num_past_frames =
- this->declare_parameter("densification_num_past_frames", 1);
- const std::string trt_precision = this->declare_parameter("trt_precision", "fp16");
+ this->declare_parameter("densification_params.num_past_frames");
+ const std::string trt_precision = this->declare_parameter("trt_precision");
const std::string encoder_onnx_path = this->declare_parameter("encoder_onnx_path");
const std::string encoder_engine_path =
this->declare_parameter("encoder_engine_path");
const std::string head_onnx_path = this->declare_parameter("head_onnx_path");
const std::string head_engine_path = this->declare_parameter("head_engine_path");
- class_names_ = this->declare_parameter>("class_names");
- has_variance_ = this->declare_parameter("has_variance");
- has_twist_ = this->declare_parameter("has_twist");
- const std::size_t point_feature_size =
- static_cast(this->declare_parameter("point_feature_size"));
+ class_names_ = this->declare_parameter>("model_params.class_names");
+ has_twist_ = this->declare_parameter("model_params.has_twist");
+ const std::size_t point_feature_size = static_cast(
+ this->declare_parameter("model_params.point_feature_size"));
+ has_variance_ = this->declare_parameter("model_params.has_variance");
const std::size_t max_voxel_size =
- static_cast(this->declare_parameter("max_voxel_size"));
- const auto point_cloud_range = this->declare_parameter>("point_cloud_range");
- const auto voxel_size = this->declare_parameter>("voxel_size");
- const std::size_t downsample_factor =
- static_cast(this->declare_parameter("downsample_factor"));
- const std::size_t encoder_in_feature_size =
- static_cast(this->declare_parameter("encoder_in_feature_size"));
+ static_cast(this->declare_parameter("model_params.max_voxel_size"));
+ const auto point_cloud_range =
+ this->declare_parameter>("model_params.point_cloud_range");
+ const auto voxel_size = this->declare_parameter>("model_params.voxel_size");
+ const std::size_t downsample_factor = static_cast(
+ this->declare_parameter("model_params.downsample_factor"));
+ const std::size_t encoder_in_feature_size = static_cast(
+ this->declare_parameter("model_params.encoder_in_feature_size"));
const auto allow_remapping_by_area_matrix =
this->declare_parameter>("allow_remapping_by_area_matrix");
const auto min_area_matrix = this->declare_parameter>("min_area_matrix");
@@ -78,10 +79,11 @@ LidarCenterPointNode::LidarCenterPointNode(const rclcpp::NodeOptions & node_opti
{
NMSParams p;
p.nms_type_ = NMS_TYPE::IoU_BEV;
- p.target_class_names_ =
- this->declare_parameter>("iou_nms_target_class_names");
- p.search_distance_2d_ = this->declare_parameter("iou_nms_search_distance_2d");
- p.iou_threshold_ = this->declare_parameter("iou_nms_threshold");
+ p.target_class_names_ = this->declare_parameter>(
+ "post_process_params.iou_nms_target_class_names");
+ p.search_distance_2d_ =
+ this->declare_parameter("post_process_params.iou_nms_search_distance_2d");
+ p.iou_threshold_ = this->declare_parameter("post_process_params.iou_nms_threshold");
iou_bev_nms_.setParameters(p);
}