From aaed54421889392d832581a6b0e2d3014aa19ab8 Mon Sep 17 00:00:00 2001
From: zhaoxi <535394140@qq.com>
Date: Tue, 4 Jun 2024 01:02:53 +0800
Subject: [PATCH] New ort polymorphic deployment library, supporting provider selection among CUDA, TensorRT and OpenVINO
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
CMakeLists.txt | 6 +-
README.md | 12 +-
cmake/RMVLModule.cmake | 3 -
doc/tutorials/introduction/linux/install.md | 13 +-
doc/tutorials/modules/algorithm/lsqnonlin.md | 4 +-
doc/tutorials/modules/tools/opcua.md | 14 +-
extra/detector/CMakeLists.txt | 2 +-
.../include/rmvl/detector/armor_detector.h | 15 +-
.../detector/include/rmvl/detector/detector.h | 2 +-
.../include/rmvl/detector/gyro_detector.h | 17 +-
extra/detector/src/armor_detector/detect.cpp | 2 +-
extra/detector/src/armor_detector/find.cpp | 7 +-
extra/detector/src/gyro_detector/find.cpp | 9 +-
extra/detector/src/gyro_detector/match.cpp | 8 +-
include/rmvl/rmvl.hpp | 4 -
.../camera/include/rmvl/camera/camutils.hpp | 2 +-
modules/core/CMakeLists.txt | 2 +-
.../include/rmvl/core/pretreat.hpp} | 22 +-
.../{imgproc => core}/perf/perf_pretreat.cpp | 14 +-
modules/{imgproc => core}/src/pretreat.cpp | 2 +-
modules/core/test/test_optimal.cpp | 2 +-
.../{imgproc => core}/test/test_pretreat.cpp | 14 +-
modules/imgproc/CMakeLists.txt | 23 --
modules/imgproc/include/rmvl/imgproc.hpp | 19 --
.../imgproc/include/rmvl/imgproc/paint.hpp | 175 --------------
modules/imgproc/test/test_paint.cpp | 65 ------
modules/ml/include/rmvl/ml.hpp | 10 +-
modules/ml/include/rmvl/ml/ort.h | 141 +++++++++--
modules/ml/src/ort/classification.cpp | 110 +++++++++
modules/ml/src/ort/ort.cpp | 220 +++++++++---------
modules/ml/src/ort/ort_impl.h | 66 ------
modules/ml/src/ort/ort_print.cpp | 43 ----
modules/opcua/src/client.cpp | 18 +-
modules/opcua/src/helper.cpp | 1 +
modules/opcua/src/server.cpp | 27 ++-
modules/opcua/test/test_opcua_client.cpp | 12 +-
samples/opcua/opcua_client.cpp | 2 +-
37 files changed, 474 insertions(+), 634 deletions(-)
rename modules/{imgproc/include/rmvl/imgproc/pretreat.h => core/include/rmvl/core/pretreat.hpp} (73%)
rename modules/{imgproc => core}/perf/perf_pretreat.cpp (93%)
rename modules/{imgproc => core}/src/pretreat.cpp (97%)
rename modules/{imgproc => core}/test/test_pretreat.cpp (91%)
delete mode 100644 modules/imgproc/CMakeLists.txt
delete mode 100644 modules/imgproc/include/rmvl/imgproc.hpp
delete mode 100644 modules/imgproc/include/rmvl/imgproc/paint.hpp
delete mode 100644 modules/imgproc/test/test_paint.cpp
create mode 100644 modules/ml/src/ort/classification.cpp
delete mode 100644 modules/ml/src/ort/ort_impl.h
delete mode 100755 modules/ml/src/ort/ort_print.cpp
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5000586b..b1f0b677 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -195,7 +195,11 @@ foreach(m ${RMVL_EXTRA_MODULE})
if("${m}" STREQUAL "types")
continue()
endif()
- string(REGEX REPLACE "rmvl_" " " ${m}_modules ${RMVL_${mUP}_MODULES_BUILD})
+ if("${RMVL_${mUP}_MODULES_BUILD}" STREQUAL "")
+ set(${m}_modules "")
+ else()
+ string(REGEX REPLACE "rmvl_" " " ${m}_modules ${RMVL_${mUP}_MODULES_BUILD})
+ endif()
endforeach()
string(REGEX REPLACE "rmvl_" " " main_modules ${RMVL_MAIN_MODULES_BUILD})
diff --git a/README.md b/README.md
index d174c6ea..33de3b11 100755
--- a/README.md
+++ b/README.md
@@ -2,17 +2,17 @@
**机器人控制与视觉库**
-| 构建配置 | 编译器/环境 | Github Actions 工作流状态 |
-| :------: | :-------------------------------: | :--------------------------------------: |
-| CMake | GCC 7.5.0 x86_64-linux-gnu <br> GCC 9.4.0 x86_64-linux-gnu <br> GCC 12.3.0 x86_64-linux-gnu | [](https://github.com/cv-rmvl/rmvl/actions/workflows/linux-1.x.yml) |
+| 构建配置 | 编译器/环境 | Github Actions 工作流状态 |
+| :------: | :----------------------------------------------------------: | :----------------------------------------------------------: |
+| CMake | GCC 7.5.0 x86_64-linux-gnu <br> GCC 9.4.0 x86_64-linux-gnu <br> GCC 12.3.0 x86_64-linux-gnu <br> GCC 14.0.1 x86_64-linux-gnu | [](https://github.com/cv-rmvl/rmvl/actions/workflows/linux-1.x.yml) |
-RMVL 最初是面向 RoboMaster 赛事的视觉库,因此称为 RoboMaster Vision Library,现计划并逐步完善与机器人相关的视觉、控制、通信的功能,旨在打造适用范围广、使用简洁、架构统一、功能强大的视觉控制一体库。
+RMVL 最初是面向 RoboMaster 赛事的视觉库,现在此之上逐步完善有关基础算法、机器视觉、通信的功能,旨在打造适用范围广、使用简洁、架构统一、功能强大的视觉控制一体库。
### 项目资源
-* 说明文档:
+* 用户手册:
-* 工单:
+* 问题:
* VSCode 扩展插件
diff --git a/cmake/RMVLModule.cmake b/cmake/RMVLModule.cmake
index ef89b1b5..295ffdf0 100644
--- a/cmake/RMVLModule.cmake
+++ b/cmake/RMVLModule.cmake
@@ -100,9 +100,6 @@ macro(rmvl_add_module _name)
if(NOT DEFINED BUILD_${the_module}_INIT)
set(BUILD_${the_module}_INIT ON)
endif()
- if(DEFINED BUILD_${the_module})
- unset(BUILD_${the_module} CACHE)
- endif()
option(BUILD_${the_module} "Include ${the_module} module into the RMVL build" ${BUILD_${the_module}_INIT}) # create option to enable/disable this module
if(BUILD_${the_module})
diff --git a/doc/tutorials/introduction/linux/install.md b/doc/tutorials/introduction/linux/install.md
index a19ddc98..c48645d9 100644
--- a/doc/tutorials/introduction/linux/install.md
+++ b/doc/tutorials/introduction/linux/install.md
@@ -130,9 +130,7 @@ sudo apt install libeigen3-dev
#### 1.4 onnxruntime{#install_onnxruntime}
-onnxruntime 库是目前数字识别所依赖的第三方库,如果有需要开启此功能,则需要安装 onnxruntime
-
-- 获取压缩包
+- 获取压缩包,可选择对应的发行版,也可以是 gpu、cuda12 或 cpu 版本,下面以 1.12.0 的 cpu 版本为例
```shell
curl -SL https://github.com/microsoft/onnxruntime/releases/download/v1.12.0/onnxruntime-linux-x64-1.12.0.tgz -o onnxruntime-linux-x64-1.12.0.tgz
```
@@ -143,12 +141,9 @@ onnxruntime 库是目前数字识别所依赖的第三方库,如果有需要
- 安装(复制头文件与库文件)
```shell
sudo mkdir /usr/local/include/onnxruntime
- sudo cp onnxruntime-linux-x64-1.12.0/include/* /usr/local/include/onnxruntime
- sudo cp -r onnxruntime-linux-x64-1.12.0/lib /usr/local
- ```
-- 移除中间文件
- ```shell
- rm -r onnxruntime-linux-x64-1.12.0 && rm onnxruntime-linux-x64-1.12.0.tgz
+ cd onnxruntime-linux-x64-1.12.0
+ sudo cp -r include/* /usr/local/include/onnxruntime
+ sudo cp -r lib /usr/local
```
至此 onnxruntime 安装完成。
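Once the headers and libraries are in place, the provider that matches the installed onnxruntime build can be selected through the `OrtProvider` enum introduced later in this patch. A minimal sketch (the model path is a placeholder, not part of the install steps):

```cpp
#include "rmvl/ml/ort.h"

int main()
{
    // Pick the provider matching the installed onnxruntime build:
    // OrtProvider::CPU (default), CUDA, TensorRT or OpenVINO.
    rm::ClassificationNet net("path/to/model.onnx", rm::OrtProvider::CUDA);
    net.printModelInfo();
}
```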
diff --git a/doc/tutorials/modules/algorithm/lsqnonlin.md b/doc/tutorials/modules/algorithm/lsqnonlin.md
index 258625a3..31c7b9df 100644
--- a/doc/tutorials/modules/algorithm/lsqnonlin.md
+++ b/doc/tutorials/modules/algorithm/lsqnonlin.md
@@ -72,7 +72,7 @@
1. 选择一初始点 \f$\pmb x_0=(x_{1,0},x_{2,0},\cdots,x_{n,0})^T\f$;
2. 算出 \f[\Delta\pmb x_0=-\pmb H_0^{-1}\pmb J_0^T\pmb\varphi(\pmb x_k)\tag{1-6}\f]
3. 令 \f$\pmb x_1\f$ 为函数 \f$f(\pmb x)\f$ 的极小点的第 1 次近似,则有 \f[\pmb x_1=\pmb x_0+\Delta\pmb x_0\tag{1-7}\f]
-4. 以 \f$\pmb x_1\f$ 代替前面的 \f$\pmb x_0\f$,\f$\Delta\pmb x_1\f$ 代替 \f$\Delta\pmb x_0\f$,重复上述计算过程,直到 \f[\|\pmb\varphi(\pmb x_k)\|<\epsilon'\tag{1-8a}\f] 或 \f[\|\nabla f(\pmb x_k)\|<\epsilon"\tag{1-8b}\f] 为止。\f$\epsilon'\f$ 和 \f$\epsilon"\f$ 是预先给定的精度。
+4. 以 \f$\pmb x_1\f$ 代替前面的 \f$\pmb x_0\f$,\f$\Delta\pmb x_1\f$ 代替 \f$\Delta\pmb x_0\f$,重复上述计算过程,直到 \f[\|\pmb\varphi(\pmb x_k)\|<\epsilon'\tag{1-8a}\f] 或 \f[\|\nabla f(\pmb x_k)\|<\epsilon''\tag{1-8b}\f] 为止。\f$\epsilon'\f$ 和 \f$\epsilon''\f$ 是预先给定的精度。
#### 1.3 改进
@@ -85,7 +85,7 @@
#### 1.4 如何使用
-可参考 `rm::lsqnonlin` 函数,例如,我们需要拟合一个正弦函数,下面的 `obtain` 函数就是观测每一帧的数据。
+RMVL 提供了改进的 Gauss-Newton 迭代算法,可参考 `rm::lsqnonlin` 函数。例如,我们需要拟合一个正弦函数\f[y=A\sin(\omega t+\varphi_0)+b\f]其中,\f$A,\omega,\varphi_0,b\f$ 是待拟合的参数,不妨统一写为 \f$\pmb x=(A,\omega,\varphi_0,b)\f$,也就是说我们需要拟合的函数是\f[\green y=x_1\sin(x_2\green t+x_3)+x_4\f]其中 \f$t\f$ 和 \f$y\f$ 是可以观测到的数据,我们需要通过观测的数据来拟合 \f$\pmb x\f$ 的值。比方说,下面的 `obtain` 函数就可以观测每一帧的数据。
```cpp
double obtain();
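// Editor's sketch (not part of the patch): each frame's observation (t, y = obtain())
// contributes one residual of the fitted model, e.g.
//     auto residual = [t, y](const std::vector<double> &x) {
//         return x[0] * std::sin(x[1] * t + x[2]) + x[3] - y; // x = (A, ω, φ0, b)
//     };
// a collection of such residuals is what rm::lsqnonlin minimizes; see its reference
// documentation for the exact call signature.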
diff --git a/doc/tutorials/modules/tools/opcua.md b/doc/tutorials/modules/tools/opcua.md
index b0c63aa0..ea8eb654 100644
--- a/doc/tutorials/modules/tools/opcua.md
+++ b/doc/tutorials/modules/tools/opcua.md
@@ -97,8 +97,8 @@ int main()
int main()
{
- // 创建 OPC UA 客户端,连接到 localhost:4840
- rm::Client clt("opc.tcp://localhost:4840");
+ // 创建 OPC UA 客户端,连接到 127.0.0.1:4840
+ rm::Client clt("opc.tcp://127.0.0.1:4840");
/* other code */
}
@@ -141,7 +141,7 @@ int main()
int main()
{
- rm::Client clt("opc.tcp://localhost:4840");
+ rm::Client clt("opc.tcp://127.0.0.1:4840");
// 使用管道运算符 "|" 进行路径搜索,寻找待读取的变量
auto node = rm::nodeObjectsFolder | clt.find("number");
@@ -210,7 +210,7 @@ int main()
int main()
{
- rm::Client clt("opc.tcp://localhost:4840");
+ rm::Client clt("opc.tcp://127.0.0.1:4840");
// 设置输入参数,1 和 2 是 Int32 类型的,因此可以直接隐式构造
std::vector input = {1, 2};
@@ -287,7 +287,7 @@ int main()
int main()
{
- rm::Client clt("opc.tcp://localhost:4840");
+ rm::Client clt("opc.tcp://127.0.0.1:4840");
// 路径搜索寻找 C2
auto node_c2 = rm::nodeObjectsFolder | clt.find("A") | clt.find("B1") | clt.find("C2");
@@ -369,7 +369,7 @@ int main()
int main()
{
- rm::Client clt("opc.tcp://localhost:4840");
+ rm::Client clt("opc.tcp://127.0.0.1:4840");
auto node = rm::nodeObjectsFolder | clt.find("number");
for (int i = 0; i < 100; ++i)
{
@@ -396,7 +396,7 @@ void onChange(UA_Client *, UA_UInt32, void *, UA_UInt32, void *, UA_DataValue *v
int main()
{
- rm::Client clt("opc.tcp://localhost:4840");
+ rm::Client clt("opc.tcp://127.0.0.1:4840");
auto node = rm::nodeObjectsFolder | clt.find("number");
// 监视变量,这里的 onChange 同样可以写成无捕获列表的 lambda 表达式,因为存在隐式转换
client.monitor(node_id, onChange, 5);
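// Editor's note (not part of the patch): the capture-less lambda form mentioned in the
// comment above relies on the implicit conversion to UA_Client_DataChangeNotificationCallback:
//     client.monitor(node_id, [](UA_Client *, UA_UInt32, void *, UA_UInt32, void *, UA_DataValue *value) {
//         /* handle the new value */
//     }, 5);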
diff --git a/extra/detector/CMakeLists.txt b/extra/detector/CMakeLists.txt
index a306fd74..20aff70f 100755
--- a/extra/detector/CMakeLists.txt
+++ b/extra/detector/CMakeLists.txt
@@ -3,7 +3,7 @@
# ----------------------------------------------------------------------------
rmvl_add_module(
detector INTERFACE
- DEPENDS imgproc group
+ DEPENDS group
)
# armor_detector
diff --git a/extra/detector/include/rmvl/detector/armor_detector.h b/extra/detector/include/rmvl/detector/armor_detector.h
index 4a2206c2..c115b6c1 100755
--- a/extra/detector/include/rmvl/detector/armor_detector.h
+++ b/extra/detector/include/rmvl/detector/armor_detector.h
@@ -28,7 +28,7 @@ namespace rm
//! 装甲板识别模块
class ArmorDetector final : public detector
{
- std::unique_ptr<OnnxRT> _ort;
+ std::unique_ptr<ClassificationNet> _ort;
std::unordered_map _robot_t;
public:
@@ -37,16 +37,9 @@ class ArmorDetector final : public detector
explicit ArmorDetector(const std::string &model)
{
- _ort = std::make_unique<OnnxRT>(model);
- _robot_t[0] = RobotType::UNKNOWN;
- _robot_t[1] = RobotType::HERO;
- _robot_t[2] = RobotType::ENGINEER;
- _robot_t[3] = RobotType::INFANTRY_3;
- _robot_t[4] = RobotType::INFANTRY_4;
- _robot_t[5] = RobotType::INFANTRY_5;
- _robot_t[6] = RobotType::OUTPOST;
- _robot_t[7] = RobotType::BASE;
- _robot_t[8] = RobotType::SENTRY;
+ _ort = std::make_unique<ClassificationNet>(model);
+ for (int i = 0; i < 9; ++i)
+ _robot_t[i] = static_cast<RobotType>(i);
}
/**
diff --git a/extra/detector/include/rmvl/detector/detector.h b/extra/detector/include/rmvl/detector/detector.h
index 918ced81..c9532e8e 100755
--- a/extra/detector/include/rmvl/detector/detector.h
+++ b/extra/detector/include/rmvl/detector/detector.h
@@ -13,7 +13,7 @@
#include "rmvl/group/group.h"
-#include "rmvl/imgproc/pretreat.h"
+#include "rmvl/core/pretreat.hpp"
namespace rm
{
diff --git a/extra/detector/include/rmvl/detector/gyro_detector.h b/extra/detector/include/rmvl/detector/gyro_detector.h
index 6c58f82f..4d91d75e 100644
--- a/extra/detector/include/rmvl/detector/gyro_detector.h
+++ b/extra/detector/include/rmvl/detector/gyro_detector.h
@@ -29,25 +29,18 @@ class GyroDetector final : public detector
{
int _armor_num; //!< 默认装甲板数目
- std::unique_ptr<OnnxRT> _ort;
+ std::unique_ptr<ClassificationNet> _ort;
std::unordered_map _robot_t;
public:
GyroDetector(int armor_num) : _armor_num(armor_num) {}
~GyroDetector() = default;
- explicit GyroDetector(const std::string &model, int armor_num) : _armor_num(armor_num)
+ GyroDetector(const std::string &model, int armor_num) : _armor_num(armor_num)
{
- _ort = std::make_unique<OnnxRT>(model);
- _robot_t[0] = RobotType::UNKNOWN;
- _robot_t[1] = RobotType::HERO;
- _robot_t[2] = RobotType::ENGINEER;
- _robot_t[3] = RobotType::INFANTRY_3;
- _robot_t[4] = RobotType::INFANTRY_4;
- _robot_t[5] = RobotType::INFANTRY_5;
- _robot_t[6] = RobotType::OUTPOST;
- _robot_t[7] = RobotType::BASE;
- _robot_t[8] = RobotType::SENTRY;
+ _ort = std::make_unique<ClassificationNet>(model);
+ for (int i = 0; i < 9; ++i)
+ _robot_t[i] = static_cast<RobotType>(i);
}
/**
diff --git a/extra/detector/src/armor_detector/detect.cpp b/extra/detector/src/armor_detector/detect.cpp
index caace367..3229c819 100755
--- a/extra/detector/src/armor_detector/detect.cpp
+++ b/extra/detector/src/armor_detector/detect.cpp
@@ -29,7 +29,7 @@ DetectInfo ArmorDetector::detect(std::vector &groups, cv::Mat &src,
// 二值化处理图像
PixChannel ch_minus = color == RED ? BLUE : RED;
int thesh = color == RED ? para::armor_detector_param.GRAY_THRESHOLD_RED : para::armor_detector_param.GRAY_THRESHOLD_BLUE;
- info.bin = rm::binary(src, color, ch_minus, thesh);
+ info.bin = binary(src, color, ch_minus, thesh);
// 找到所有的灯条和装甲板
find(info.bin, info.features, info.combos, info.rois);
diff --git a/extra/detector/src/armor_detector/find.cpp b/extra/detector/src/armor_detector/find.cpp
index 6a054da2..0a145d3f 100755
--- a/extra/detector/src/armor_detector/find.cpp
+++ b/extra/detector/src/armor_detector/find.cpp
@@ -38,8 +38,11 @@ void ArmorDetector::find(cv::Mat &src, std::vector &features, std:
for (const auto &armor : armors)
{
cv::Mat roi = Armor::getNumberROI(src, armor);
- auto type = _ort->inference({roi}, {para::armor_detector_param.MODEL_MEAN}, {para::armor_detector_param.MODEL_STD})[0];
- armor->setType(_robot_t[type]);
+ PreprocessOptions preop;
+ preop.means = {para::armor_detector_param.MODEL_MEAN};
+ preop.stds = {para::armor_detector_param.MODEL_STD};
+ int idx = ClassificationNet::cast(_ort->inference({roi}, preop, {})).first;
+ armor->setType(_robot_t[idx]);
rois.emplace_back(roi);
}
// eraseFakeArmors(armors);
diff --git a/extra/detector/src/gyro_detector/find.cpp b/extra/detector/src/gyro_detector/find.cpp
index 6e91f703..3d92739e 100755
--- a/extra/detector/src/gyro_detector/find.cpp
+++ b/extra/detector/src/gyro_detector/find.cpp
@@ -9,8 +9,6 @@
*
*/
-#include
-
#include
#include "rmvl/detector/gyro_detector.h"
@@ -40,8 +38,11 @@ void GyroDetector::find(cv::Mat &src, std::vector &features, std::
for (const auto &armor : armors)
{
cv::Mat roi = Armor::getNumberROI(src, armor);
- auto type = _ort->inference({roi}, {para::gyro_detector_param.MODEL_MEAN}, {para::gyro_detector_param.MODEL_STD})[0];
- armor->setType(_robot_t[type]);
+ PreprocessOptions preop;
+ preop.means = {para::gyro_detector_param.MODEL_MEAN};
+ preop.stds = {para::gyro_detector_param.MODEL_STD};
+ int idx = ClassificationNet::cast(_ort->inference({roi}, preop, {})).first;
+ armor->setType(_robot_t[idx]);
rois.emplace_back(roi);
}
// eraseFakeArmors(armors);
diff --git a/extra/detector/src/gyro_detector/match.cpp b/extra/detector/src/gyro_detector/match.cpp
index 9284a25c..871cdf6f 100755
--- a/extra/detector/src/gyro_detector/match.cpp
+++ b/extra/detector/src/gyro_detector/match.cpp
@@ -140,8 +140,8 @@ void GyroDetector::match(std::vector &groups, std::vector represent_set;
- for (const auto &[represent, combo_set] : combo_maps)
- represent_set.insert(represent);
+ for (const auto &p : combo_maps)
+ represent_set.insert(p.first); // represent
// 距离最近的装甲板组匹配到相应的序列组中
for (auto &p_group : groups)
{
@@ -188,8 +188,8 @@ void GyroDetector::match(std::vector &groups, std::vector represent_set;
- for (const auto &[represent, combo_set] : combo_maps)
- represent_set.insert(represent);
+ for (const auto &p : combo_maps)
+ represent_set.insert(p.first); // represent
size_t before_size = groups.size();
for (size_t i = 0; i < before_size; i++)
{
diff --git a/include/rmvl/rmvl.hpp b/include/rmvl/rmvl.hpp
index 1f3c23a0..bd0ae2d0 100644
--- a/include/rmvl/rmvl.hpp
+++ b/include/rmvl/rmvl.hpp
@@ -30,10 +30,6 @@
#include "rmvl/light.hpp"
#endif // HAVE_RMVL_LIGHT
-#ifdef HAVE_RMVL_IMGPROC
-#include "rmvl/imgproc.hpp"
-#endif // HAVE_RMVL_IMGPROC
-
#ifdef HAVE_RMVL_OPCUA
#include "rmvl/opcua.hpp"
#endif // HAVE_RMVL_OPCUA
diff --git a/modules/camera/include/rmvl/camera/camutils.hpp b/modules/camera/include/rmvl/camera/camutils.hpp
index 864b8f66..8980078b 100644
--- a/modules/camera/include/rmvl/camera/camutils.hpp
+++ b/modules/camera/include/rmvl/camera/camutils.hpp
@@ -80,7 +80,7 @@ struct CameraConfig
* @param[in] modes 配置模式参数包
*/
template <typename... Args>
- inline void set(Args... modes) { [[maybe_unused]] int _[] = {(conf(modes), 0)...}; }
+ inline void set(Args... modes) { (..., conf(modes)); }
private:
inline void conf(TriggerChannel chn) { trigger_channel = chn; }
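The replaced line swaps the pre-C++17 pack-expansion idiom (a dummy `int` array whose initializers call `conf` for their side effect) for a C++17 comma fold, which calls `conf(modes)` once per argument, left to right, with no dummy object. A standalone sketch of the same idiom (names are illustrative only):

```cpp
#include <iostream>

template <typename... Args>
void apply_all(Args... args)
{
    // comma fold: evaluates the expression once per pack element, left to right
    (..., (std::cout << args << ' '));
}

int main() { apply_all(1, 2.5, "three"); } // prints: 1 2.5 three
```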
diff --git a/modules/core/CMakeLists.txt b/modules/core/CMakeLists.txt
index 30215d24..5cc8919a 100644
--- a/modules/core/CMakeLists.txt
+++ b/modules/core/CMakeLists.txt
@@ -16,7 +16,7 @@ rmvl_generate_para(core)
rmvl_add_module(
core
EXTRA_HEADER ${EIGEN3_INCLUDE_DIR}
- EXTERNAL ${CMAKE_THREAD_LIBS_INIT} opencv_core
+ EXTERNAL ${CMAKE_THREAD_LIBS_INIT} opencv_imgproc
)
rmvl_compile_definitions(
diff --git a/modules/imgproc/include/rmvl/imgproc/pretreat.h b/modules/core/include/rmvl/core/pretreat.hpp
similarity index 73%
rename from modules/imgproc/include/rmvl/imgproc/pretreat.h
rename to modules/core/include/rmvl/core/pretreat.hpp
index 5949c44c..0b311bed 100644
--- a/modules/imgproc/include/rmvl/imgproc/pretreat.h
+++ b/modules/core/include/rmvl/core/pretreat.hpp
@@ -1,11 +1,11 @@
/**
- * @file pretreat.h
- * @author RoboMaster Vision Community
- * @brief Header of the image pretreating module
+ * @file pretreat.hpp
+ * @author zhaoxi (535394140@qq.com)
+ * @brief 图像预处理模块
* @version 1.0
- * @date 2022-11-23
+ * @date 2024-06-05
*
- * @copyright Copyright 2023 (c), RoboMaster Vision Community
+ * @copyright Copyright 2024 (c), zhaoxi
*
*/
@@ -16,7 +16,15 @@
namespace rm
{
-//! @addtogroup imgproc
+//! @addtogroup core
+//! @{
+//! @defgroup core_pretreat 图像预处理模块
+//! @{
+//! @brief 提供了二值化等图像预处理功能
+//! @} core_pretreat
+//! @} core
+
+//! @addtogroup core_pretreat
//! @{
//! 像素通道枚举
@@ -52,6 +60,6 @@ cv::Mat binary(cv::Mat src, PixChannel ch1, PixChannel ch2, uint8_t threshold);
*/
cv::Mat binary(cv::Mat src, uint8_t threshold);
-//! @} imgproc
+//! @} core_pretreat
} // namespace rm
diff --git a/modules/imgproc/perf/perf_pretreat.cpp b/modules/core/perf/perf_pretreat.cpp
similarity index 93%
rename from modules/imgproc/perf/perf_pretreat.cpp
rename to modules/core/perf/perf_pretreat.cpp
index d2987ba9..46452c2d 100644
--- a/modules/imgproc/perf/perf_pretreat.cpp
+++ b/modules/core/perf/perf_pretreat.cpp
@@ -1,19 +1,19 @@
/**
* @file perf_pretreat.cpp
- * @author RoboMaster Vision Community
- * @brief
+ * @author zhaoxi (535394140@qq.com)
+ * @brief
* @version 1.0
- * @date 2022-11-24
- *
- * @copyright Copyright 2023 (c), RoboMaster Vision Community
- *
+ * @date 2024-06-05
+ *
+ * @copyright Copyright 2024 (c), zhaoxi
+ *
*/
#include
#include
-#include "rmvl/imgproc/pretreat.h"
+#include "rmvl/core/pretreat.hpp"
namespace rm_test
{
diff --git a/modules/imgproc/src/pretreat.cpp b/modules/core/src/pretreat.cpp
similarity index 97%
rename from modules/imgproc/src/pretreat.cpp
rename to modules/core/src/pretreat.cpp
index 4b7b90a1..c18db54c 100644
--- a/modules/imgproc/src/pretreat.cpp
+++ b/modules/core/src/pretreat.cpp
@@ -12,7 +12,7 @@
#include
#include "rmvl/core/util.hpp"
-#include "rmvl/imgproc/pretreat.h"
+#include "rmvl/core/pretreat.hpp"
namespace rm
{
diff --git a/modules/core/test/test_optimal.cpp b/modules/core/test/test_optimal.cpp
index 22f0ace3..3fcdaf6c 100644
--- a/modules/core/test/test_optimal.cpp
+++ b/modules/core/test/test_optimal.cpp
@@ -114,7 +114,7 @@ static inline double real_f(double x)
{
constexpr double FPS = 100;
return 0.8 * std::sin(1.9 / FPS * x - 0.2) + 1.29;
-};
+}
TEST(Optimal, lsqnonlin_sine)
{
diff --git a/modules/imgproc/test/test_pretreat.cpp b/modules/core/test/test_pretreat.cpp
similarity index 91%
rename from modules/imgproc/test/test_pretreat.cpp
rename to modules/core/test/test_pretreat.cpp
index 271e90aa..38373901 100644
--- a/modules/imgproc/test/test_pretreat.cpp
+++ b/modules/core/test/test_pretreat.cpp
@@ -1,17 +1,17 @@
/**
* @file test_pretreat.cpp
- * @author RoboMaster Vision Community
- * @brief
+ * @author zhaoxi (535394140@qq.com)
+ * @brief
* @version 1.0
- * @date 2022-11-23
- *
- * @copyright Copyright 2023 (c), RoboMaster Vision Community
- *
+ * @date 2024-06-05
+ *
+ * @copyright Copyright 2024 (c), zhaoxi
+ *
*/
#include
-#include "rmvl/imgproc/pretreat.h"
+#include "rmvl/core/pretreat.hpp"
using namespace cv;
using namespace rm;
diff --git a/modules/imgproc/CMakeLists.txt b/modules/imgproc/CMakeLists.txt
deleted file mode 100644
index cf1aeeb0..00000000
--- a/modules/imgproc/CMakeLists.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-rmvl_add_module(
- imgproc
- DEPENDS core
- EXTERNAL opencv_imgproc
-)
-
-# Build the test program
-if(BUILD_TESTS)
- rmvl_add_test(
- imgproc Unit
- DEPENDS imgproc
- DEPEND_TESTS GTest::gtest_main
- )
-endif(BUILD_TESTS)
-
-# Build the perf test program
-if(BUILD_PERF_TESTS)
- rmvl_add_test(
- imgproc Performance
- DEPENDS imgproc
- DEPEND_TESTS benchmark::benchmark_main
- )
-endif(BUILD_PERF_TESTS)
diff --git a/modules/imgproc/include/rmvl/imgproc.hpp b/modules/imgproc/include/rmvl/imgproc.hpp
deleted file mode 100644
index 45b35f3a..00000000
--- a/modules/imgproc/include/rmvl/imgproc.hpp
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * @file imgproc.hpp
- * @author RoboMaster Vision Community
- * @brief
- * @version 1.0
- * @date 2022-11-23
- *
- * @copyright Copyright 2023 (c), RoboMaster Vision Community
- *
- */
-
-//! @defgroup imgproc 图像处理
-
-#include
-
-#ifdef HAVE_RMVL_IMGPROC
-#include "imgproc/pretreat.h"
-#include "imgproc/paint.hpp"
-#endif // HAVE_RMVL_IMGPROC
diff --git a/modules/imgproc/include/rmvl/imgproc/paint.hpp b/modules/imgproc/include/rmvl/imgproc/paint.hpp
deleted file mode 100644
index 915972b1..00000000
--- a/modules/imgproc/include/rmvl/imgproc/paint.hpp
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * @file paint.hpp
- * @author RoboMaster Vision Community
- * @brief 绘画辅助工具
- * @version 1.0
- * @date 2023-01-17
- *
- * @copyright Copyright 2023 (c), RoboMaster Vision Community
- *
- */
-
-#pragma once
-
-#include
-
-#include
-
-namespace rm
-{
-
-//! @addtogroup imgproc
-//! @{
-
-//! 阵列工具
-class ArrayTool
-{
-public:
- /**
- * @brief 平面线性阵列
- *
- * @param[in] sketch 阵列特征
- * @param[in] spacing 阵列间距(Δx, Δy)
- * @param[in] times 阵列个数(包括原特征)
- * @return 阵列结果
- */
- template
- static std::vector>> linear2D(const std::vector> &sketch,
- const cv::Point_ &spacing, std::size_t times)
- {
- // 数据准备
- std::vector>> sketchs;
- sketchs.reserve(times);
- sketchs.push_back(sketch);
- // 循环阵列
- for (std::size_t i = 1; i < times; ++i)
- {
- std::vector> tmp;
- tmp.reserve(sketch.size());
- for (auto &point : sketch)
- tmp.emplace_back(point.x + spacing.x * i, point.y + spacing.y * i);
- sketchs.emplace_back(tmp);
- }
- return sketchs;
- }
-
- /**
- * @brief 平面圆周阵列
- *
- * @param[in] sketch 阵列特征
- * @param[in] center 阵列中心
- * @param[in] spacing 阵列间距(Δθ,角度制,像素坐标系)
- * @param[in] times 阵列个数(包括原特征)
- * @return 阵列结果
- */
- template
- static std::vector>> circular2D(const std::vector> &sketch,
- const cv::Point_ ¢er, double spacing, std::size_t times)
- {
- // 数据准备
- std::vector>> sketchs;
- sketchs.reserve(times);
- sketchs.push_back(sketch);
- // 循环阵列
- for (size_t i = 1; i < times; ++i)
- {
- // 计算旋转矩阵,左手系
- double spacing_rad = spacing * static_cast(i) * M_PI / 180.0;
- double c_r = cos(spacing_rad);
- double s_r = sin(spacing_rad);
- cv::Matx22d R = {c_r, -s_r,
- s_r, c_r};
- std::vector> tmp;
- tmp.reserve(sketch.size());
- for (auto &point : sketch)
- {
- cv::Vec2d vec = {static_cast(point.x - center.x),
- static_cast(point.y - center.y)};
- vec = R * vec;
- tmp.emplace_back(static_cast(center.x + vec(0)),
- static_cast(center.y + vec(1)));
- }
- sketchs.emplace_back(tmp);
- }
- return sketchs;
- }
-
- /**
- * @brief 空间线性阵列
- *
- * @param[in] sketch 阵列特征
- * @param[in] spacing 阵列间距(Δx, Δy, Δz)
- * @param[in] times 阵列个数(包括原特征)
- * @return 阵列结果
- */
- template
- static std::vector>> linear3D(const std::vector> &sketch,
- const cv::Point3_ &spacing, std::size_t times)
- {
- std::vector>> sketchs;
- sketchs.reserve(times);
- sketchs.push_back(sketch);
- for (std::size_t i = 1; i < times; ++i)
- {
- std::vector> tmp;
- tmp.reserve(sketch.size());
- for (auto &point : sketch)
- tmp.emplace_back(point.x + spacing.x * i,
- point.y + spacing.y * i,
- point.z + spacing.z * i);
- sketchs.emplace_back(tmp);
- }
- return sketchs;
- }
-
- /**
- * @brief 空间圆周阵列
- *
- * @param[in] sketch 阵列特征
- * @param[in] center 阵列中心
- * @param[in] axis 阵列转轴
- * @param[in] spacing 阵列间距(Δθ,角度制)
- * @param[in] times 阵列个数(包括原特征)
- * @return 阵列结果
- */
- template
- static std::vector>> circular3D(const std::vector> &sketch,
- const cv::Point3_ ¢er, const cv::Vec3d &axis,
- double spacing, std::size_t times)
- {
- // 数据准备
- std::vector>> sketchs;
- sketchs.reserve(times);
- sketchs.push_back(sketch);
- // 循环阵列
- for (size_t i = 1; i < times; ++i)
- {
- // 计算旋转矩阵
- double spacing_rad = spacing * static_cast(i) * M_PI / 180.0;
- double c_r = cos(spacing_rad);
- double s_r = sin(spacing_rad);
- auto &a = axis;
- cv::Matx33d R = {c_r + (1 - c_r) * a(0) * a(0), (1 - c_r) * a(0) * a(1) - s_r * a(2), (1 - c_r) * a(0) * a(2) + s_r * a(1),
- (1 - c_r) * a(0) * a(1) + s_r * a(2), c_r + (1 - c_r) * a(1) * a(1), (1 - c_r) * a(1) * a(2) - s_r * a(0),
- (1 - c_r) * a(0) * a(2) - s_r * a(1), (1 - c_r) * a(1) * a(2) + s_r * a(0), c_r + (1 - c_r) * a(2) * a(2)};
- std::vector> tmp;
- tmp.reserve(sketch.size());
- for (auto &point : sketch)
- {
- cv::Vec3d vec = {static_cast(point.x - center.x),
- static_cast(point.y - center.y),
- static_cast(point.z - center.z)};
- vec = R * vec;
- tmp.emplace_back(static_cast(center.x + vec(0)),
- static_cast(center.y + vec(1)),
- static_cast(center.z + vec(2)));
- }
- sketchs.emplace_back(tmp);
- }
- return sketchs;
- }
-};
-
-//! @} imgproc
-
-} // namespace rm
diff --git a/modules/imgproc/test/test_paint.cpp b/modules/imgproc/test/test_paint.cpp
deleted file mode 100644
index acd33d77..00000000
--- a/modules/imgproc/test/test_paint.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * @file test_paint.cpp
- * @author RoboMaster Vision Community
- * @brief
- * @version 1.0
- * @date 2023-01-18
- *
- * @copyright Copyright 2023 (c), RoboMaster Vision Community
- *
- */
-
-#include "rmvl/imgproc/paint.hpp"
-
-#include
-
-namespace rm_test
-{
-
-using rm::ArrayTool;
-
-TEST(ArrayToolTest, linear2D)
-{
- std::vector pts = {{1, 1}, {1, 2}, {1, 3}};
-
- auto ptss = ArrayTool::linear2D(pts, {3, 1}, 3);
- EXPECT_EQ(ptss.size(), 3);
- for (size_t i = 0; i < ptss.size(); ++i)
- for (size_t j = 0; j < pts.size(); ++j)
- EXPECT_EQ(ptss[i][j], (pts[j] + cv::Point(3 * i, 1 * i)));
-}
-
-TEST(ArrayToolTest, circular2D)
-{
- std::vector pts = {{20.f, 20.f}, {-10.f, -10.f}};
- auto ptss = ArrayTool::circular2D(pts, {0, 0}, 90.0, 2);
- EXPECT_EQ(ptss.size(), 2);
- EXPECT_EQ(ptss[1][0], cv::Point2f(-20, 20));
- EXPECT_EQ(ptss[1][1], cv::Point2f(10, -10));
-}
-
-TEST(ArrayToolTest, linear3D)
-{
- std::vector pts = {{1, 1, 2},
- {1, 2, 4},
- {1, 3, 6}};
-
- auto ptss = ArrayTool::linear3D(pts, {3, 1, 2}, 3);
- EXPECT_EQ(ptss.size(), 3);
- for (size_t i = 0; i < ptss.size(); ++i)
- for (size_t j = 0; j < pts.size(); ++j)
- EXPECT_EQ(ptss[i][j], (pts[j] + cv::Point3i(3 * i, 1 * i, 2 * i)));
-}
-
-TEST(ArrayToolTest, circular3D)
-{
- std::vector pt = {cv::Point3f(10, 20, 30)};
- auto pts1 = ArrayTool::circular3D(pt, {50, 50, 50}, {0, 0, 1}, 90.0, 2);
- EXPECT_EQ(pts1.size(), 2);
- EXPECT_EQ(pts1[1].front(), cv::Point3f(80, 10, 30));
-
- auto pts2 = ArrayTool::circular3D(pt, {50, 50, 50}, {0, 1, 0}, 90.0, 2);
- EXPECT_EQ(pts2[1].front(), cv::Point3f(30, 20, 90));
-}
-
-} // namespace rm_test
diff --git a/modules/ml/include/rmvl/ml.hpp b/modules/ml/include/rmvl/ml.hpp
index a779457c..845a044b 100644
--- a/modules/ml/include/rmvl/ml.hpp
+++ b/modules/ml/include/rmvl/ml.hpp
@@ -1,11 +1,11 @@
/**
* @file ml.hpp
- * @author RoboMaster Vision Community
+ * @author RMVL Community
* @brief
- * @version 1.0
- * @date 2023-05-20
+ * @version 2.0
+ * @date 2024-06-02
*
- * @copyright Copyright 2023 (c), RoboMaster Vision Community
+ * @copyright Copyright 2024 (c), RMVL Community
*
*/
@@ -14,7 +14,7 @@
/**
* @defgroup ml 机器学习与深度学习支持库
* @{
- * @defgroup ml_ort ONNX-Runtime 分类网络部署库
+ * @defgroup ml_ort onnxruntime 多态部署库
* @}
*/
diff --git a/modules/ml/include/rmvl/ml/ort.h b/modules/ml/include/rmvl/ml/ort.h
index 98a23994..54a5bc76 100755
--- a/modules/ml/include/rmvl/ml/ort.h
+++ b/modules/ml/include/rmvl/ml/ort.h
@@ -1,17 +1,20 @@
/**
* @file ort.h
- * @author RoboMaster Vision Community
+ * @author RMVL Community
* @brief the deployment library header file of the ONNXruntime (Ort)
* @version 1.0
* @date 2022-02-04
*
- * @copyright Copyright 2023 (c), RoboMaster Vision Community
+ * @copyright Copyright 2023 (c), RMVL Community
*
*/
#pragma once
-#include
+#include <any>
+#include <onnxruntime_cxx_api.h>
+
+#include "rmvl/core/pretreat.hpp"
namespace rm
{
@@ -19,33 +22,137 @@ namespace rm
//! @addtogroup ml_ort
//! @{
-//! ONNX-Runtime (Ort) 部署库 \cite onnx-rt
-class OnnxRT
+//! Ort 提供者
+enum class OrtProvider : uint8_t
+{
+ CPU, //!< 由 `CPU` 执行
+ CUDA, //!< 由 `CUDA` 执行
+ TensorRT, //!< 由 `TensorRT` 执行
+ OpenVINO //!< 由 `OpenVINO` 执行
+};
+
+//! 预处理选项
+struct PreprocessOptions
{
+ std::vector<float> means; //!< 均值
+ std::vector<float> stds;  //!< 标准差
+};
+
+//! 后处理选项
+struct PostprocessOptions
+{
+ PixChannel color{}; //!< 颜色通道
+ std::vector thresh{}; //!< 阈值向量
+};
+
+//! ONNX-Runtime (Ort) 部署库基类 \cite onnx-rt
+class OnnxNet
+{
+protected:
+ Ort::MemoryInfo _memory_info; //!< 内存分配信息
+ Ort::Env _env; //!< 环境配置
+ Ort::SessionOptions _session_options; //!< 会话选项
+ std::unique_ptr<Ort::Session> _session; //!< 会话
+#if ORT_API_VERSION < 12
+ std::vector<char *> _inames; //!< 输入名称
+ std::vector<char *> _onames; //!< 输出名称
+#else
+ std::vector<Ort::AllocatedStringPtr> _inames; //!< 输入名称
+ std::vector<Ort::AllocatedStringPtr> _onames; //!< 输出名称
+#endif
+
public:
/**
- * @brief 创建 OnnxRT 对象
+ * @brief 创建 OnnxNet 对象
*
* @param[in] model_path 模型路径,如果该路径不存在,则程序将因错误而退出
+ * @param[in] prov Ort 提供者
+ */
+ OnnxNet(std::string_view model_path, OrtProvider prov);
+
+ //! 打印环境信息
+ static void printEnvInfo() noexcept;
+ //! 打印模型信息
+ void printModelInfo() noexcept;
+
+ /**
+ * @brief 推理
+ *
+ * @param[in] images 所有输入图像
+ * @param[in] preop 预处理选项,不同网络有不同的预处理选项
+ * @param[in] postop 后处理选项,不同网络有不同的后处理选项
+ * @return 使用 `std::any` 表示的所有推理结果,需要根据具体的网络进行解析
+ * @note 可使用 `::cast` 函数对返回类型进行转换
+ */
+ std::any inference(const std::vector<cv::Mat> &images, const PreprocessOptions &preop, const PostprocessOptions &postop);
+
+private:
+ /**
+ * @brief 预处理
+ *
+ * @param[in] images 所有输入图像
+ * @param[in] preop 预处理选项,不同网络有不同的预处理选项
+ * @return 模型的输入 Tensors
*/
- OnnxRT(std::string_view model_path);
- ~OnnxRT();
+ virtual std::vector<Ort::Value> preProcess(const std::vector<cv::Mat> &images, const PreprocessOptions &preop) = 0;
- void printModelInfo();
+ /**
+ * @brief 后处理
+ *
+ * @param[in] output_tensors 模型的输出 Tensors
+ * @param[in] postop 后处理选项,不同网络有不同的后处理选项
+ * @return 使用 `std::any` 表示的所有推理结果,需要根据具体的网络进行解析
+ */
+ virtual std::any postProcess(const std::vector<Ort::Value> &output_tensors, const PostprocessOptions &postop) = 0;
+};
+/**
+ * @brief 分类网络推理类
+ * @note 需满足
+ * @note
+ * - 输入层为 `[1, c, h, w]`,其中 `c` 为输入图像的通道数,可以是 `1` 或者 `3`,`h` 为高度,`w` 为宽度
+ * @note
+ * - 输出层为 `[1, n]`,其中 `n` 为类别数
+ */
+class ClassificationNet : public OnnxNet
+{
+ std::vector<std::vector<float>> _iarrays; //!< 输入数组
+
+public:
/**
- * @brief 预处理,推理和后处理
+ * @brief 推理结果转换
*
- * @param[in] images 所有的输入图像
- * @param[in] means 网络模型各通道的均值
- * @param[in] stds 网络模型各通道的标准差
- * @return 与概率最高的值对应的索引向量
+ * @param[in] result 使用 `std::any` 表示的推理结果
+ * @return 转换后的推理结果,为 `std::pair` 类型,表示分类结果及其置信度
*/
- std::vector inference(const std::vector &images, const std::vector &means, const std::vector &stds);
+ static std::pair cast(const std::any &result) { return std::any_cast>(result); }
+
+ /**
+ * @brief 创建分类网络对象
+ *
+ * @param[in] model_path 模型路径,如果该路径不存在,则程序将因错误而退出
+ * @param[in] prov Ort 提供者,默认为 `OrtProvider::CPU`
+ */
+ ClassificationNet(std::string_view model_path, OrtProvider prov = OrtProvider::CPU);
private:
- class Impl;
- Impl *_pimpl;
+ /**
+ * @brief 预处理
+ *
+ * @param[in] images 所有输入图像
+ * @param[in] options 预处理选项,包含各通道的均值和标准差
+ * @return 模型的输入 Tensors
+ */
+ std::vector<Ort::Value> preProcess(const std::vector<cv::Mat> &images, const PreprocessOptions &options) override;
+
+ /**
+ * @brief 后处理
+ *
+ * @param[in] output_tensors 模型的输出 Tensors
+ * @param[in] postop 无需后处理选项,传入 `{}` 即可
+ * @return 用 `std::any` 表示的分类结果及其置信度,可使用 `rm::ClassificationNet::cast` 函数对返回类型进行转换
+ */
+ std::any postProcess(const std::vector<Ort::Value> &output_tensors, const PostprocessOptions &postop) override;
};
//! @} ml_ort
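Taken together, the header replaces the old index-vector `OnnxRT::inference` with a polymorphic `OnnxNet::inference` returning `std::any`, which each concrete network unpacks through its `cast` helper. A minimal end-to-end sketch against this header (model path, image path and normalization values are placeholders):

```cpp
#include <cstdio>

#include <opencv2/imgcodecs.hpp>

#include "rmvl/ml/ort.h"

int main()
{
    rm::ClassificationNet net("path/to/model.onnx", rm::OrtProvider::CPU);

    cv::Mat roi = cv::imread("path/to/roi.png");
    rm::PreprocessOptions preop;
    preop.means = {0.485f, 0.456f, 0.406f}; // per-channel mean (illustrative)
    preop.stds = {0.229f, 0.224f, 0.225f};  // per-channel std (illustrative)

    // the classification network ignores the postprocess options, so pass {}
    auto [index, conf] = rm::ClassificationNet::cast(net.inference({roi}, preop, {}));
    std::printf("class index = %d, confidence = %.3f\n", static_cast<int>(index), static_cast<double>(conf));
}
```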
diff --git a/modules/ml/src/ort/classification.cpp b/modules/ml/src/ort/classification.cpp
new file mode 100644
index 00000000..e750976b
--- /dev/null
+++ b/modules/ml/src/ort/classification.cpp
@@ -0,0 +1,110 @@
+/**
+ * @file classification.cpp
+ * @author zhaoxi (535394140@qq.com)
+ * @brief
+ * @version 1.0
+ * @date 2024-06-03
+ *
+ * @copyright Copyright 2024 (c), zhaoxi
+ *
+ */
+
+#include <algorithm>
+#include <opencv2/core.hpp>
+
+#include "rmvl/core/util.hpp"
+#include "rmvl/ml/ort.h"
+
+namespace rm
+{
+
+ClassificationNet::ClassificationNet(std::string_view model_path, OrtProvider prov) : OnnxNet(model_path, prov)
+{
+ // 初始化输入数组
+ _iarrays.resize(_session->GetInputCount());
+ for (size_t i = 0; i < _session->GetInputCount(); i++)
+ {
+ auto shape = _session->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
+ _iarrays[i].resize(shape[1] * shape[2] * shape[3]);
+ }
+}
+
+/**
+ * @brief 从 `NHWC` 格式的 `BGR` 图像生成 `NCHW` 格式的输入 `Tensor`
+ *
+ * @param[in] input_image 输入图像
+ * @param[in] means 3 通道各自的均值
+ * @param[in] stds 3 通道各自的标准差
+ * @param[out] iarray `NCHW` 格式的输入 `Tensor`
+ */
+static void imageToVector(const cv::Mat &input_image, const std::vector<float> &means, const std::vector<float> &stds, std::vector<float> &iarray)
+{
+ int H{input_image.rows}, W{input_image.cols};
+ RMVL_DbgAssert(static_cast<std::size_t>(3 * H * W) == iarray.size());
+ RMVL_Assert(means.size() == 3 && stds.size() == 3);
+ // 转 Tensor 的 NCHW 格式,做归一化和标准化
+ float *p_input_array = iarray.data();
+ for (int c = 0; c < 3; c++)
+ for (int h = 0; h < H; h++)
+ for (int w = 0; w < W; w++)
+ p_input_array[c * H * W + h * W + w] = (input_image.at<uchar>(h, w * 3 + 2 - c) / 255.f - means[c]) / stds[c];
+}
+
+/**
+ * @brief 从 `NHWC` 格式的灰度图像生成 `NCHW` 格式的输入 `Tensor`
+ *
+ * @param[in] input_image 输入图像
+ * @param[in] mean 均值
+ * @param[in] std 标准差
+ * @param[out] iarray `NCHW` 格式的输入 `Tensor`
+ */
+static void imageToVector(const cv::Mat &input_image, float mean, float std, std::vector<float> &iarray)
+{
+ int H{input_image.rows}, W{input_image.cols};
+ RMVL_DbgAssert(static_cast<std::size_t>(H * W) == iarray.size());
+ // 转 Tensor 的 NCHW 格式,做归一化和标准化
+ float *p_input_array = iarray.data();
+ for (int h = 0; h < H; h++)
+ for (int w = 0; w < W; w++)
+ p_input_array[h * W + w] = (input_image.at<uchar>(h, w) / 255.f - mean) / std;
+}
+
+std::vector<Ort::Value> ClassificationNet::preProcess(const std::vector<cv::Mat> &images, const PreprocessOptions &options)
+{
+ std::size_t input_count = _session->GetInputCount();
+ RMVL_Assert(input_count == 1 && images.size() == 1);
+ // 获取输入层 Tensor
+ std::vector<Ort::Value> input_tensors;
+ const cv::Mat &img = images.front();
+ // 合法性检查
+ auto shape = _session->GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
+ RMVL_Assert(shape.size() == 4);
+ if (shape[1] != img.channels() || shape[2] != img.rows || shape[3] != img.cols)
+ RMVL_Error_(RMVL_StsBadSize, "image (%d, %d, %d) unequal to shape (%d, %d, %d).",
+ img.channels(), img.rows, img.cols, shape[1], shape[2], shape[3]);
+ RMVL_Assert(shape[1] == 3 || shape[1] == 1);
+ shape[0] = 1;
+ // img -> iarray
+ RMVL_Assert(!options.means.empty() && !options.stds.empty());
+ if (shape[1] == 3)
+ imageToVector(img, options.means, options.stds, _iarrays.front());
+ else
+ imageToVector(img, options.means.front(), options.stds.front(), _iarrays.front());
+ // 更新每个输入层的数据
+ input_tensors.emplace_back(Ort::Value::CreateTensor<float>(
+ _memory_info, _iarrays.front().data(), _iarrays.front().size(), shape.data(), shape.size()));
+
+ return input_tensors;
+}
+
+std::any ClassificationNet::postProcess(const std::vector<Ort::Value> &output_tensors, const PostprocessOptions &)
+{
+ RMVL_Assert(output_tensors.size() == 1);
+ auto &output_tensor = output_tensors.front();
+ const float *output = output_tensor.GetTensorData<float>();
+ std::size_t size{output_tensor.GetTensorTypeAndShapeInfo().GetElementCount()};
+ auto maxit = std::max_element(output, output + size);
+ return std::make_pair(maxit - output, *maxit);
+}
+
+} // namespace rm
diff --git a/modules/ml/src/ort/ort.cpp b/modules/ml/src/ort/ort.cpp
index de04020a..cf9723a9 100755
--- a/modules/ml/src/ort/ort.cpp
+++ b/modules/ml/src/ort/ort.cpp
@@ -14,144 +14,132 @@
#include
-#include "ort_impl.h"
#include "rmvl/core/util.hpp"
+#include "rmvl/ml/ort.h"
-rm::OnnxRT::OnnxRT(std::string_view model_path) : _pimpl(new Impl(model_path)) {}
-rm::OnnxRT::~OnnxRT() { delete _pimpl; }
-void rm::OnnxRT::printModelInfo() { _pimpl->printModelInfo(); }
-std::vector rm::OnnxRT::inference(const std::vector &images, const std::vector &means,
- const std::vector &stds)
+namespace rm
{
- try
- {
- return _pimpl->inference(images, means, stds);
- }
- catch (const rm::Exception &e)
- {
- delete _pimpl;
- throw e;
- }
-}
-rm::OnnxRT::Impl::Impl(std::string_view model_path)
- : _env(OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING, "OnnxDeployment"),
- _memory_info(Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault))
-{
- if (model_path.empty())
- RMVL_Error(RMVL_StsBadArg, "Model path is empty!");
- setupEngine(model_path);
-}
+// 默认配置的内存分配器
+static Ort::AllocatorWithDefaultOptions alloc;
-void rm::OnnxRT::Impl::setupEngine(std::string_view model_path) noexcept
+OnnxNet::OnnxNet(std::string_view model_path, OrtProvider prov) : _memory_info(Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault))
{
-#ifdef WITH_ORT_CUDA
- OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0);
-#endif // WITH_ORT_CUDA
+ if (model_path.empty())
+ RMVL_Error(RMVL_StsBadArg, "Model path empty!");
-#ifdef WITH_ORT_TensorRT
- OrtSessionOptionsAppendExecutionProvider_Tensorrt(session_options, 0);
-#endif // WITH_ORT_TensorRT
+ switch (prov)
+ {
+ case OrtProvider::CUDA:
+ _session_options.AppendExecutionProvider_CUDA({});
+ break;
+ case OrtProvider::TensorRT:
+ _session_options.AppendExecutionProvider_TensorRT({});
+ break;
+ case OrtProvider::OpenVINO: {
+ OrtOpenVINOProviderOptions options;
+ options.device_type = "CPU_FP32";
+ _session_options.AppendExecutionProvider_OpenVINO(options);
+ break;
+ }
+ default:
+ break;
+ }
_session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
- _p_session = std::make_unique(_env, model_path.data(), _session_options);
+ _session = std::make_unique<Ort::Session>(_env, model_path.data(), _session_options);
- // define the names of the I/O nodes
- for (size_t i = 0; i < _p_session->GetInputCount(); i++)
- _input_names.emplace_back(_p_session->GetInputName(i, _allocator));
- for (size_t i = 0; i < _p_session->GetOutputCount(); i++)
- _output_names.emplace_back(_p_session->GetOutputName(i, _allocator));
- // setup input array
- _input_arrays.resize(_p_session->GetInputCount());
- for (size_t i = 0; i < _p_session->GetInputCount(); i++)
- {
- auto shape = _p_session->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
- _input_arrays[i].resize(shape[1] * shape[2] * shape[3]);
- }
+ // 定义输入输出节点的名称
+#if ORT_API_VERSION < 12
+ for (std::size_t i = 0; i < _session->GetInputCount(); i++)
+ _inames.emplace_back(_session->GetInputName(i, alloc));
+ for (std::size_t i = 0; i < _session->GetOutputCount(); i++)
+ _onames.emplace_back(_session->GetOutputName(i, alloc));
+#else
+ for (std::size_t i = 0; i < _session->GetInputCount(); i++)
+ _inames.emplace_back(_session->GetInputNameAllocated(i, alloc));
+ for (std::size_t i = 0; i < _session->GetOutputCount(); i++)
+ _onames.emplace_back(_session->GetOutputNameAllocated(i, alloc));
+#endif
}
-std::vector rm::OnnxRT::Impl::inference(const std::vector &images,
- const std::vector &means,
- const std::vector &stds)
+std::any OnnxNet::inference(const std::vector &images, const PreprocessOptions &preop, const PostprocessOptions &postop)
{
- std::vector input_tensors = preProcess(images, means, stds);
- std::vector output_tensors = doInference(input_tensors);
- return postProcess(output_tensors);
+ auto itensors = preProcess(images, preop);
+#if ORT_API_VERSION < 12
+ return postProcess(_session->Run(Ort::RunOptions{nullptr}, _inames.data(), itensors.data(), itensors.size(), _onames.data(), _onames.size()), postop);
+#else
+ std::vector<const char *> input_names(_inames.size());
+ for (std::size_t i = 0; i < _inames.size(); i++)
+ input_names[i] = _inames[i].get();
+ std::vector<const char *> output_names(_onames.size());
+ for (std::size_t i = 0; i < _onames.size(); i++)
+ output_names[i] = _onames[i].get();
+ return postProcess(_session->Run(Ort::RunOptions{nullptr}, input_names.data(), itensors.data(), itensors.size(), output_names.data(), output_names.size()), postop);
+#endif
}
-std::vector rm::OnnxRT::Impl::preProcess(const std::vector &images,
- const std::vector &means,
- const std::vector &stds)
+void OnnxNet::printEnvInfo() noexcept
{
- size_t input_count = _p_session->GetInputCount();
- if (input_count != images.size())
- RMVL_Error(RMVL_StsBadArg, "Size of the \"images\" are not equal to the model input_count.");
- // get the correct data of each input layer
- std::vector input_tensors;
- for (size_t i = 0; i < input_count; i++)
- {
- auto input_shape = _p_session->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
- // [2], [3] are the correct size of the image
- if (input_shape.size() != 4)
- RMVL_Error(RMVL_StsBadSize, "Size of the input_shape of the model is not equal to \'4\'");
- if (input_shape[2] != input_shape[3])
- RMVL_Error(RMVL_StsError, "Shape of the input_shape[2] and input_shape[3] of the model is unequal");
- input_shape[0] = 1;
- // update the size of each input layer
- cv::Mat input_image;
- resize(images[i], input_image, cv::Size(input_shape[2], input_shape[3]));
- // allocate memory and normalization
- imageToVector(input_image, _input_arrays[i], means, stds);
- input_tensors.emplace_back(Ort::Value::CreateTensor(_memory_info,
- _input_arrays[i].data(), _input_arrays[i].size(),
- input_shape.data(), input_shape.size()));
- }
- return input_tensors;
+ printf("----------------- Build -----------------\n");
+#if ORT_API_VERSION < 15
+ printf("version: 1.%d (< 1.15.0)\n", ORT_API_VERSION);
+#else
+ printf("version: %s\n", Ort::GetVersionString().c_str());
+#endif
+ auto providers = Ort::GetAvailableProviders();
+ printf("\n--------------- Providers ---------------\n");
+ for (std::size_t i = 0; i < providers.size(); i++)
+ printf(" [%zu] %s\n", i, providers[i].c_str());
}
-std::vector rm::OnnxRT::Impl::postProcess(const std::vector &output_tensors) noexcept
+constexpr const char *element_data_type[] = {
+ "undefined", "float", "uint8", "int8", "uint16", "int16",
+ "int32", "int64", "string", "bool", "float16", "double",
+ "uint32", "uint64", "complex64", "complex128", "bfloat16"};
+
+void OnnxNet::printModelInfo() noexcept
{
- // 所有输出对应的置信度最高的索引
- std::vector output_indexs;
- for (auto &output_tensor : output_tensors)
+ if (_session == nullptr)
{
- // 获取置信度最高的索引
- const float *output = output_tensor.GetTensorData();
- std::vector indexs(output_tensor.GetTensorTypeAndShapeInfo().GetElementCount());
- iota(indexs.begin(), indexs.end(), 0);
- auto it = max_element(indexs.begin(), indexs.end(), [&output](size_t lhs, size_t rhs) {
- return output[lhs] < output[rhs];
- });
- output_indexs.emplace_back(*it);
+ printf("the model is empty.\n");
+ return;
+ }
+ printf("-------------- Input Layer --------------\n");
+ std::size_t input_node = _session->GetInputCount();
+ printf("input node: n = %zu\n", input_node);
+ for (size_t i = 0; i < input_node; i++)
+ {
+#if ORT_API_VERSION < 12
+ printf("[%zu]\t┬ name: %s\n", i, _session->GetInputName(i, alloc));
+#else
+ printf("[%zu]\t┬ name: %s\n", i, _session->GetInputNameAllocated(i, alloc).get());
+#endif
+ auto input_dims = _session->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
+ printf("\t│ dim: [");
+ for (auto dim : input_dims)
+ printf("%ld, ", dim);
+ printf("\b\b]\n");
+ printf("\t└ element type: %s\n", element_data_type[_session->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetElementType()]);
}
- return output_indexs;
-}
-void rm::OnnxRT::Impl::imageToVector(cv::Mat &input_image, std::vector &input_array,
- const std::vector &means, const std::vector &stds)
-{
- // CHW
- int C = input_image.channels();
- if (C != 1 && C != 3)
- RMVL_Error_(RMVL_StsBadArg, "Bad channel of the input argument: \"input_image\", chn = %d", C);
- int H = input_image.rows;
- int W = input_image.cols;
- size_t pixels = C * H * W;
- if (pixels != input_array.size())
- RMVL_Error(RMVL_StsBadArg, "The size of the arguments: \"input_image\" and \"input_array\" are not equal");
- if (static_cast(means.size()) != C || static_cast(stds.size()) != C)
- RMVL_Error_(RMVL_StsBadArg, "Bad size of the input argument: the size of \"means\" or \"stds\" must be %d", C);
- // 转 Tensor 的 NCHW 格式,做归一化和标准化
- float *p_input_array = input_array.data();
- for (int c = 0; c < C; c++)
+ printf("\n------------- Output Layer -------------\n");
+ std::size_t output_node = _session->GetOutputCount();
+ std::printf("output node: n = %zu\n", _session->GetOutputCount());
+ for (std::size_t i = 0; i < output_node; i++)
{
- for (int h = 0; h < H; h++)
- {
- for (int w = 0; w < W; w++)
- {
- p_input_array[c * H * W + h * W + w] = input_image.ptr(h)[w * C + 2 - c] / 255.f;
- p_input_array[c * H * W + h * W + w] = (p_input_array[c * H * W + h * W + w] - means[c]) / stds[c];
- }
- }
+#if ORT_API_VERSION < 12
+ printf("[%zu]\t┬ name: %s\n", i, _session->GetInputName(i, alloc));
+#else
+ printf("[%zu]\t┬ name: %s\n", i, _session->GetOutputNameAllocated(i, alloc).get());
+#endif
+ auto output_dims = _session->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
+ printf("\t│ dim: [");
+ for (auto dim : output_dims)
+ printf("%ld, ", dim);
+ printf("\b\b]\n");
+ printf("\t└ element type: %s\n", element_data_type[_session->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetElementType()]);
}
}
+
+} // namespace rm
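A note on the `ORT_API_VERSION` branches above: before 1.12, `GetInputName`/`GetOutputName` return raw `char *` owned by the allocator, so the cached names can be passed straight to `Session::Run`; from 1.12 on, `GetInputNameAllocated`/`GetOutputNameAllocated` return owning `Ort::AllocatedStringPtr` smart pointers, which is why `inference` first copies the `.get()` pointers into temporary `std::vector<const char *>`s before calling `Run`.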
diff --git a/modules/ml/src/ort/ort_impl.h b/modules/ml/src/ort/ort_impl.h
deleted file mode 100644
index 447165c7..00000000
--- a/modules/ml/src/ort/ort_impl.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * @file ort_impl.h
- * @author zhaoxi (535394140@qq.com)
- * @brief the deployment library of the ONNXruntime (Ort)
- * @version 1.0
- * @date 2024-01-25
- *
- * @copyright Copyright 2024 (c), zhaoxi
- *
- */
-
-#pragma once
-
-#include
-
-#include "rmvl/ml/ort.h"
-
-namespace rm
-{
-
-//! ONNX-Runtime (Ort) 部署库 \cite onnx-rt
-class OnnxRT::Impl
-{
- using session_ptr = std::unique_ptr;
-
- Ort::Env _env; //!< 环境配置
- Ort::SessionOptions _session_options; //!< Session 配置
- Ort::MemoryInfo _memory_info; //!< Tensor 内存分配信息
- Ort::AllocatorWithDefaultOptions _allocator; //!< 默认配置的内存分配器
- session_ptr _p_session;
-
- std::vector> _input_arrays; //!< 输入数组
- std::vector _input_names; //!< 输入名
- std::vector _output_names; //!< 输出名
-
-public:
- Impl(std::string_view model_path);
- ~Impl() = default;
-
- void printModelInfo() noexcept;
-
- std::vector inference(const std::vector &images, const std::vector &means, const std::vector &stds);
-
-private:
- //! 初始化 Ort 引擎
- void setupEngine(std::string_view model_path) noexcept;
-
- //! 分配内存,将图像平展为 NCHW 格式的一维数组,同时将数组归一化
- void imageToVector(cv::Mat &input_image, std::vector &input_array,
- const std::vector &means, const std::vector &stds);
-
- //! 预处理
- std::vector preProcess(const std::vector &images, const std::vector &means, const std::vector &stds);
-
- //! 后处理
- std::vector postProcess(const std::vector &output_tensors) noexcept;
-
- //! 推理并返回输出 Tensors
- inline std::vector doInference(const std::vector &input_tensors) noexcept
- {
- return _p_session->Run(Ort::RunOptions{nullptr}, _input_names.data(), input_tensors.data(),
- input_tensors.size(), _output_names.data(), _output_names.size());
- }
-};
-
-} // namespace rm
diff --git a/modules/ml/src/ort/ort_print.cpp b/modules/ml/src/ort/ort_print.cpp
deleted file mode 100755
index 28b7d151..00000000
--- a/modules/ml/src/ort/ort_print.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * @file ort_print.cpp
- * @author RoboMaster Vision Community
- * @brief
- * @version 1.0
- * @date 2022-02-04
- *
- * @copyright Copyright 2023 (c), RoboMaster Vision Community
- *
- */
-
-#include "ort_impl.h"
-
-void rm::OnnxRT::Impl::printModelInfo() noexcept
-{
- printf("-------------- Input Layer --------------\n");
- std::size_t input_node = _p_session->GetInputCount();
- printf("the number of input node is: %zu\n", input_node);
- for (size_t i = 0; i < input_node; i++)
- {
- printf("[%zu]\t┬ name is: %s\n", i, _p_session->GetInputName(i, _allocator));
- auto input_dims = _p_session->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
- printf("\t│ dim is: [\n");
- for (auto dim : input_dims)
- printf("%ld, ", dim);
- printf("\b\b]\n");
- printf("\t└ type of each element is: %d\n", _p_session->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetElementType());
- }
-
- printf("\n------------- Output Layer -------------\n");
- std::size_t output_node = _p_session->GetOutputCount();
- std::printf("the number of output node is: %zu\n", _p_session->GetOutputCount());
- for (std::size_t i = 0; i < output_node; i++)
- {
- printf("[%zu]\t┬ name is: %s\n", i, _p_session->GetOutputName(i, _allocator));
- std::vector output_dims = _p_session->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
- printf("\t│ dim is: [");
- for (auto dim : output_dims)
- printf("%ld, ", dim);
- printf("\b\b]\n");
- printf("\t└ type of each element is: %d\n", _p_session->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetElementType());
- }
-}
diff --git a/modules/opcua/src/client.cpp b/modules/opcua/src/client.cpp
index 6d3e2f61..a33bfaee 100644
--- a/modules/opcua/src/client.cpp
+++ b/modules/opcua/src/client.cpp
@@ -47,7 +47,7 @@ Client::Client(std::string_view address, UserConfig usr)
}
if (status != UA_STATUSCODE_GOOD)
{
- ERROR_("Failed to create client");
+ ERROR_("Failed to create client: %s", UA_StatusCode_name(status));
UA_Client_delete(_client);
_client = nullptr;
}
@@ -86,6 +86,8 @@ void Client::spinOnce() { UA_Client_run_iterate(_client, para::opcua_param.SPIN_
Variable Client::read(const NodeId &node)
{
+ RMVL_DbgAssert(_client != nullptr);
+
UA_Variant p_val;
UA_Variant_init(&p_val);
UA_StatusCode status = UA_Client_readValueAttribute(_client, node, &p_val);
@@ -98,6 +100,8 @@ Variable Client::read(const NodeId &node)
bool Client::write(const NodeId &node, const Variable &val)
{
+ RMVL_DbgAssert(_client != nullptr);
+
UA_Variant new_variant = helper::cvtVariable(val);
auto status = UA_Client_writeValueAttribute(_client, node, &new_variant);
UA_Variant_clear(&new_variant);
@@ -111,6 +115,8 @@ bool Client::write(const NodeId &node, const Variable &val)
bool Client::call(const NodeId &obj_node, const std::string &name, const std::vector &inputs, std::vector &outputs)
{
+ RMVL_DbgAssert(_client != nullptr);
+
// 初始化输入、输出参数
std::vector input_variants;
input_variants.reserve(inputs.size());
@@ -133,7 +139,7 @@ bool Client::call(const NodeId &obj_node, const std::string &name, const std::ve
if (status != UA_STATUSCODE_GOOD)
{
ERROR_("Failed to call the method, node id: %d, error code: %s",
- method_node.nid.identifier.numeric, UA_StatusCode_name(status));
+ method_node.nid.identifier.numeric, UA_StatusCode_name(status));
return false;
}
outputs.reserve(output_size);
@@ -146,6 +152,8 @@ bool Client::call(const NodeId &obj_node, const std::string &name, const std::ve
NodeId Client::addViewNode(const View &view)
{
+ RMVL_DbgAssert(_client != nullptr);
+
// 准备数据
NodeId retval;
UA_ViewAttributes attr = UA_ViewAttributes_default;
@@ -178,6 +186,8 @@ NodeId Client::addViewNode(const View &view)
bool Client::monitor(NodeId node, UA_Client_DataChangeNotificationCallback on_change, uint32_t queue_size)
{
+ RMVL_DbgAssert(_client != nullptr);
+
// 创建订阅
UA_CreateSubscriptionResponse resp;
auto status = createSubscription(resp);
@@ -201,6 +211,7 @@ bool Client::monitor(NodeId node, UA_Client_DataChangeNotificationCallback on_ch
bool Client::monitor(NodeId node, const std::vector &names, UA_Client_EventNotificationCallback on_event)
{
+ RMVL_DbgAssert(_client != nullptr);
// 创建订阅
UA_CreateSubscriptionResponse sub_resp;
if (!createSubscription(sub_resp))
@@ -266,8 +277,7 @@ bool Client::createSubscription(UA_CreateSubscriptionResponse &response)
response = UA_Client_Subscriptions_create(_client, request, nullptr, nullptr, nullptr);
if (response.responseHeader.serviceResult != UA_STATUSCODE_GOOD)
{
- ERROR_("Failed to create subscription, error: %s",
- UA_StatusCode_name(response.responseHeader.serviceResult));
+ ERROR_("Failed to create subscription, error: %s", UA_StatusCode_name(response.responseHeader.serviceResult));
return false;
}
return true;
diff --git a/modules/opcua/src/helper.cpp b/modules/opcua/src/helper.cpp
index fa7c6ad6..a9530833 100644
--- a/modules/opcua/src/helper.cpp
+++ b/modules/opcua/src/helper.cpp
@@ -52,6 +52,7 @@ NodeId operator|(NodeId origin, FindNodeInClient &&fnic)
request.browsePaths = &browse_path;
request.browsePathsSize = 1;
+ RMVL_Assert(p_client != nullptr);
auto response = UA_Client_Service_translateBrowsePathsToNodeIds(p_client, request);
if (response.responseHeader.serviceResult == UA_STATUSCODE_GOOD)
if (response.resultsSize == 1 && response.results[0].targetsSize == 1)
diff --git a/modules/opcua/src/server.cpp b/modules/opcua/src/server.cpp
index 78c5c112..b510d979 100644
--- a/modules/opcua/src/server.cpp
+++ b/modules/opcua/src/server.cpp
@@ -31,7 +31,7 @@ Server::Server(uint16_t port, std::string_view name, const std::vectorlogger = UA_Log_Stdout_withLevel(UA_LOGLEVEL_ERROR);
+ config->logger = UA_Log_Stdout_withLevel(UA_LOGLEVEL_INFO);
#else
config->logging = UA_Log_Stdout_new(UA_LOGLEVEL_ERROR);
#endif
@@ -103,6 +103,8 @@ void Server::deleteServer()
NodeId Server::addVariableTypeNode(const VariableType &vtype)
{
+ RMVL_DbgAssert(_server != nullptr);
+
UA_VariableTypeAttributes attr = UA_VariableTypeAttributes_default;
UA_Variant variant = helper::cvtVariable(vtype);
// 设置属性
@@ -132,6 +134,8 @@ NodeId Server::addVariableTypeNode(const VariableType &vtype)
NodeId Server::addVariableNode(const Variable &val, const NodeId &parent_id)
{
+ RMVL_DbgAssert(_server != nullptr);
+
// 变量节点属性 `UA_VariableAttributes`
UA_VariableAttributes attr = UA_VariableAttributes_default;
UA_Variant variant = helper::cvtVariable(val);
@@ -177,6 +181,8 @@ NodeId Server::addVariableNode(const Variable &val, const NodeId &parent_id)
Variable Server::read(const NodeId &node)
{
+ RMVL_DbgAssert(_server != nullptr);
+
UA_Variant p_val;
UA_Variant_init(&p_val);
auto status = UA_Server_readValue(_server, node, &p_val);
@@ -189,6 +195,8 @@ Variable Server::read(const NodeId &node)
bool Server::write(const NodeId &node, const Variable &val)
{
+ RMVL_DbgAssert(_server != nullptr);
+
auto variant = helper::cvtVariable(val);
auto status = UA_Server_writeValue(_server, node, variant);
UA_Variant_clear(&variant);
@@ -199,6 +207,8 @@ bool Server::write(const NodeId &node, const Variable &val)
bool Server::addVariableNodeValueCallBack(NodeId id, ValueCallBackBeforeRead before_read, ValueCallBackAfterWrite after_write)
{
+ RMVL_DbgAssert(_server != nullptr);
+
UA_ValueCallback callback{before_read, after_write};
auto status = UA_Server_setVariableNode_valueCallback(_server, id, callback);
if (status != UA_STATUSCODE_GOOD)
@@ -208,6 +218,8 @@ bool Server::addVariableNodeValueCallBack(NodeId id, ValueCallBackBeforeRead bef
NodeId Server::addDataSourceVariableNode(const Variable &val, DataSourceRead on_read, DataSourceWrite on_write, NodeId parent_id)
{
+ RMVL_DbgAssert(_server != nullptr);
+
// 变量节点属性 `UA_VariableAttributes`
UA_VariableAttributes attr = UA_VariableAttributes_default;
// 设置属性
@@ -245,6 +257,8 @@ NodeId Server::addDataSourceVariableNode(const Variable &val, DataSourceRead on_
NodeId Server::addMethodNode(const Method &method, const NodeId &parent_id)
{
+ RMVL_DbgAssert(_server != nullptr);
+
UA_MethodAttributes attr = UA_MethodAttributes_default;
attr.displayName = UA_LOCALIZEDTEXT(helper::en_US(), helper::to_char(method.display_name));
attr.description = UA_LOCALIZEDTEXT(helper::zh_CN(), helper::to_char(method.description));
@@ -282,11 +296,14 @@ NodeId Server::addMethodNode(const Method &method, const NodeId &parent_id)
void Server::setMethodNodeCallBack(const NodeId &id, UA_MethodCallback on_method)
{
+ RMVL_DbgAssert(_server != nullptr);
UA_Server_setMethodNodeCallback(_server, id, on_method);
}
NodeId Server::addObjectTypeNode(const ObjectType &otype)
{
+ RMVL_DbgAssert(_server != nullptr);
+
// 定义对象类型节点
UA_ObjectTypeAttributes attr = UA_ObjectTypeAttributes_default;
attr.displayName = UA_LOCALIZEDTEXT(helper::en_US(), helper::to_char(otype.display_name));
@@ -346,6 +363,8 @@ NodeId Server::addObjectTypeNode(const ObjectType &otype)
NodeId Server::addObjectNode(const Object &obj, NodeId parent_id)
{
+ RMVL_DbgAssert(_server != nullptr);
+
UA_ObjectAttributes attr{UA_ObjectAttributes_default};
attr.displayName = UA_LOCALIZEDTEXT(helper::en_US(), helper::to_char(obj.display_name));
attr.description = UA_LOCALIZEDTEXT(helper::zh_CN(), helper::to_char(obj.description));
@@ -402,6 +421,8 @@ NodeId Server::addObjectNode(const Object &obj, NodeId parent_id)
NodeId Server::addViewNode(const View &view)
{
+ RMVL_DbgAssert(_server != nullptr);
+
// 准备数据
NodeId retval;
UA_ViewAttributes attr = UA_ViewAttributes_default;
@@ -434,6 +455,8 @@ NodeId Server::addViewNode(const View &view)
NodeId Server::addEventTypeNode(const EventType &etype)
{
+ RMVL_DbgAssert(_server != nullptr);
+
NodeId retval;
UA_ObjectTypeAttributes attr = UA_ObjectTypeAttributes_default;
attr.displayName = UA_LOCALIZEDTEXT(helper::en_US(), helper::to_char(etype.display_name));
@@ -482,6 +505,8 @@ NodeId Server::addEventTypeNode(const EventType &etype)
bool Server::triggerEvent(const NodeId &node_id, const Event &event)
{
+ RMVL_DbgAssert(_server != nullptr);
+
NodeId type_id = nodeBaseEventType | find(event.type()->browse_name);
if (type_id.empty())
{
diff --git a/modules/opcua/test/test_opcua_client.cpp b/modules/opcua/test/test_opcua_client.cpp
index d5b10630..748c0893 100644
--- a/modules/opcua/test/test_opcua_client.cpp
+++ b/modules/opcua/test/test_opcua_client.cpp
@@ -50,7 +50,7 @@ void setSvr(rm::Server &svr)
svr.addMethodNode(method);
// 添加对象节点,包含字符串变量和乘法方法
svr.start();
- std::this_thread::sleep_for(std::chrono::milliseconds(20));
+ std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
// 路径搜索
@@ -58,7 +58,7 @@ TEST(OPC_UA_ClientTest, read_variable)
{
rm::Server svr(5000);
setSvr(svr);
- rm::Client client("opc.tcp://localhost:5000");
+ rm::Client client("opc.tcp://127.0.0.1:5000");
// 读取测试服务器上的变量值
auto id = rm::nodeObjectsFolder | client.find("array");
rm::Variable variable = client.read(id);
@@ -75,7 +75,7 @@ TEST(OPC_UA_ClientTest, variable_IO)
{
rm::Server svr(5001);
setSvr(svr);
- rm::Client client("opc.tcp://localhost:5001");
+ rm::Client client("opc.tcp://127.0.0.1:5001");
// 读取测试服务器上的变量值
auto id = rm::nodeObjectsFolder | client.find("single");
EXPECT_TRUE(client.write(id, 99));
@@ -92,7 +92,7 @@ TEST(OPC_UA_ClientTest, call)
{
rm::Server svr(5002);
setSvr(svr);
- rm::Client client("opc.tcp://localhost:5002");
+ rm::Client client("opc.tcp://127.0.0.1:5002");
// 调用测试服务器上的方法
std::vector input = {1, 2};
std::vector output;
@@ -116,7 +116,7 @@ TEST(OPC_UA_ClientTest, variable_monitor)
{
rm::Server svr(5003);
setSvr(svr);
- rm::Client client("opc.tcp://localhost:5003");
+ rm::Client client("opc.tcp://127.0.0.1:5003");
// 订阅测试服务器上的变量
auto node_id = rm::nodeObjectsFolder | client.find("single");
EXPECT_TRUE(client.monitor(node_id, onChange, 5));
@@ -157,7 +157,7 @@ TEST(OPC_UA_ClientTest, event_monitor)
etype.description = "测试事件类型";
etype.add("aaa", 3);
svr.addEventTypeNode(etype);
- rm::Client client("opc.tcp://localhost:5004");
+ rm::Client client("opc.tcp://127.0.0.1:5004");
client.monitor(rm::nodeServer, {"SourceName", "aaa"}, onEvent);
// 触发事件
rm::Event event(etype);
diff --git a/samples/opcua/opcua_client.cpp b/samples/opcua/opcua_client.cpp
index 5a38f053..e9a9d2c4 100644
--- a/samples/opcua/opcua_client.cpp
+++ b/samples/opcua/opcua_client.cpp
@@ -2,7 +2,7 @@
int main()
{
- rm::Client client("opc.tcp://localhost:4840");
+ rm::Client client("opc.tcp://127.0.0.1:4840");
if (!client.ok())
return -1;
auto position_id = rm::nodeObjectsFolder | client.find("position");