Skip to content

Commit

Permalink
🔧 重新设计 rm::FuncNds,完成 #213
Browse files Browse the repository at this point in the history
  • Loading branch information
zhaoxi-scut committed Jan 14, 2025
1 parent 254fbc1 commit 2d93f5c
Show file tree
Hide file tree
Showing 6 changed files with 127 additions and 121 deletions.
4 changes: 2 additions & 2 deletions doc/tools/pyrmvl_fns.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,8 @@ rm region func,x0[,delta] a,b
rm fminbnd func,x1,x2[,options] x,fval
rm fminunc func,x0[,options] x,fval
rm fmincon func,x0,c,ceq[,options] x,fval
rm lsqnonlin funcs,x0[,options] x
rm lsqnonlinRKF funcs,x0,rb[,options] x
rm lsqnonlin func,x0[,options] x
rm lsqnonlinRKF func,x0,rb[,options] x
rm dft xt Xf
rm idft Xf xt
rm Gx x,type G
Expand Down
6 changes: 3 additions & 3 deletions extra/combo/include/rmvl/combo/armor.h
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ const char *to_string(ArmorSizeType armor_size);
/**
* @brief StateType 转为装甲板大小类型
*
* @param[in] str 字符串
* @param[in] tp StateType 类型
*/
ArmorSizeType to_armor_size_type(const StateType &tp);

Expand All @@ -192,9 +192,9 @@ const char *to_string(RobotType robot);
/**
* @brief StateType 转为机器人类型
*
* @param[in] str 字符串
* @param[in] tp StateType 类型
*/
RobotType to_robot_type(const StateType &type);
RobotType to_robot_type(const StateType &tp);

//! @} combo_armor

Expand Down
16 changes: 8 additions & 8 deletions modules/algorithm/include/rmvl/algorithm/numcal.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -307,7 +307,7 @@ using Func1ds = std::vector<std::function<double(double)>>;
//! 多元函数
using FuncNd = std::function<double(const std::valarray<double> &)>;
//! 多元函数组
using FuncNds = std::vector<std::function<double(const std::valarray<double> &)>>;
using FuncNds = std::function<std::valarray<double>(const std::valarray<double> &)>;

//! 梯度/导数计算模式
enum class DiffMode : uint8_t
Expand Down Expand Up @@ -348,7 +348,7 @@ struct RMVL_EXPORTS_W_AG OptimalOptions
*
* @param[in] func 一元函数
* @param[in] x 指定位置的自变量
* @param[in] mode 导数计算模式,默认为中心差商 `Diff_Central`
* @param[in] mode 导数计算模式,默认为中心差商 `DiffMode::Central`
* @param[in] dx 坐标的微小增量,默认为 `1e-3`
* @return 函数在指定点的导数
*/
Expand All @@ -359,7 +359,7 @@ RMVL_EXPORTS_W double derivative(Func1d func, double x, DiffMode mode = DiffMode
*
* @param[in] func 多元函数
* @param[in] x 指定位置的自变量
* @param[in] mode 梯度计算模式,默认为中心差商 `Diff_Central`
* @param[in] mode 梯度计算模式,默认为中心差商 `DiffMode::Central`
* @param[in] dx 计算偏导数时,坐标的微小增量,默认为 `1e-3`
* @return 函数在指定点的梯度向量
*/
Expand Down Expand Up @@ -410,13 +410,13 @@ RMVL_EXPORTS_W std::pair<std::valarray<double>, double> fmincon(FuncNd func, con
/**
* @brief 非线性最小二乘求解,实现与 \cite Agarwal23 类似的算法
*
* @param[in] funcs 最小二乘目标函数,满足 \f[F(\pmb x_k)=\frac12\|\pmb f(\pmb x_k)\|_2^2=\frac12
* \left(\texttt{funcs}[0]^2+\texttt{funcs}[1]^2+\cdots+\texttt{funcs}[n]^2\right)\f]
* @param[in] func 最小二乘目标函数,满足 \f[F(\pmb x_k)=\frac12\|\pmb f(\pmb x_k)\|_2^2=\frac12
* \left(\texttt{func}[0]^2+\texttt{func}[1]^2+\cdots+\texttt{func}[n]^2\right)\f]
* @param[in] x0 初始点
* @param[in] options 优化选项,可供设置的有 `lsq_mode`、`max_iter`、`tol` 和 `dx`
* @return 最小二乘解
*/
RMVL_EXPORTS_W std::valarray<double> lsqnonlin(const FuncNds &funcs, const std::valarray<double> &x0, const OptimalOptions &options = {});
RMVL_EXPORTS_W std::valarray<double> lsqnonlin(const FuncNds &func, const std::valarray<double> &x0, const OptimalOptions &options = {});

//! Robust 核函数
enum class RobustMode : uint8_t
Expand All @@ -431,13 +431,13 @@ enum class RobustMode : uint8_t
/**
* @brief 带 Robust 核函数的非线性最小二乘求解
*
* @param[in] funcs 最小二乘目标函数,参考 rm::lsqnonlin
* @param[in] func 最小二乘目标函数,参考 rm::lsqnonlin
* @param[in] x0 初始点
* @param[in] rb Robust 核函数模式,参考 rm::RobustMode ,选择 `rm::RobustMode::L2` 时退化为 `rm::lsqnonlin`
* @param[in] options 优化选项,可供设置的有 `lsq_mode`、`max_iter`、`tol` 和 `dx`
* @return 最小二乘解
*/
RMVL_EXPORTS_W std::valarray<double> lsqnonlinRKF(const FuncNds &funcs, const std::valarray<double> &x0, RobustMode rb, const OptimalOptions &options = {});
RMVL_EXPORTS_W std::valarray<double> lsqnonlinRKF(const FuncNds &func, const std::valarray<double> &x0, RobustMode rb, const OptimalOptions &options = {});

//! @} algorithm_optimal

Expand Down
22 changes: 16 additions & 6 deletions modules/algorithm/perf/perf_optimal.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -64,14 +64,17 @@ static void cg_quadratic_cv(benchmark::State &state)
BENCHMARK(cg_quadratic_rmvl)->Name("fminunc (conj_grad, quadratic) - by rmvl ")->Iterations(50);
BENCHMARK(cg_quadratic_cv)->Name("fminunc (conj_grad, quadratic) - by opencv")->Iterations(50);

//! Linear inequality constraint #1 (form c(x) <= 0): feasible when x0 + x1 >= 10
static inline double cle1(const std::valarray<double> &x)
{
    const double lhs = -x[0] - x[1];
    return lhs + 10;
}
//! Linear inequality constraint #2 (form c(x) <= 0): feasible when 2*x0 + x1 <= 30
static inline double cle2(const std::valarray<double> &x)
{
    const double lhs = 2 * x[0] + x[1];
    return lhs - 30;
}
//! Linear inequality constraint #3 (form c(x) <= 0): feasible when x1 - x0 <= 5
static inline double cle3(const std::valarray<double> &x)
{
    const double lhs = -x[0] + x[1];
    return lhs - 5;
}
//! All three linear inequality constraints packed into one vector-valued
//! callable (form c_i(x) <= 0), matching the new rm::FuncNds signature.
static inline std::valarray<double> cle(const std::valarray<double> &x)
{
    std::valarray<double> c(3);
    c[0] = -x[0] - x[1] + 10; // feasible when x0 + x1 >= 10
    c[1] = 2 * x[0] + x[1] - 30; // feasible when 2*x0 + x1 <= 30
    c[2] = -x[0] + x[1] - 5; // feasible when x1 - x0 <= 5
    return c;
}

/**
 * @brief Benchmark rm::fmincon (constrained optimization) on the quadratic
 *        objective with the packed linear inequality constraints `cle`.
 *
 * NOTE(review): the original span carried two consecutive fmincon calls —
 * the stale pre-change one taking `{cle1, cle2, cle3}` and the current one
 * taking `cle`. Because the `for` had no braces, only the first call was
 * timed and the second ran once outside the loop. Keep only the current
 * call inside the benchmark loop.
 */
static void com_quadratic_rmvl(benchmark::State &state)
{
    for (auto _ : state)
        rm::fmincon(quadraticFunc, {0, 0}, cle, {});
}

BENCHMARK(com_quadratic_rmvl)->Name("fmincon (conj_grad, quadratic) - by rmvl ")->Iterations(50);
Expand Down Expand Up @@ -135,11 +138,18 @@ void lsqnonlin_rmvl(benchmark::State &state)
{
// NOTE(review): this span is a unified diff rendered without +/- markers —
// it holds both the pre-change and post-change statements side by side and
// does not compile as-is. Comments below mark which lines belong to which
// version of commit 2d93f5c ("redesign rm::FuncNds").
for (auto _ : state)
{
// OLD (removed by the commit): rm::FuncNds used to be a vector of scalar functions.
rm::FuncNds lsq_sine(20);
// NEW: rm::FuncNds is now a single vector-valued function, so the 20 scalar
// residuals are kept in a std::array of rm::FuncNd instead.
std::array<rm::FuncNd, 20> lsq_sine;
// Residual i: x0*sin(x1*i + x2) + x3 - real_f(i); the lambda captures i by value.
for (std::size_t i = 0; i < lsq_sine.size(); ++i)
lsq_sine[i] = [=](const std::valarray<double> &x) { return x[0] * std::sin(x[1] * i + x[2]) + x[3] - real_f(i); };

// OLD (removed): lsqnonlin consumed the vector-of-functions directly.
auto x = rm::lsqnonlin(lsq_sine, {1, 0.02, 0, 1.09});
// NEW: adapt the scalar residuals into one vector-valued FuncNds callable
// that evaluates all 20 residuals at x and returns them as a valarray.
rm::FuncNds lsq_sine_f = [&](const std::valarray<double> &x) {
std::valarray<double> ret(lsq_sine.size());
for (std::size_t i = 0; i < lsq_sine.size(); ++i)
ret[i] = lsq_sine[i](x);
return ret;
};

// Solve from the initial guess and keep the result observable so the
// optimizer cannot discard the benchmarked work.
auto x = rm::lsqnonlin(lsq_sine_f, {1, 0.02, 0, 1.09});
benchmark::DoNotOptimize(x);
}
}
Expand Down
Loading

0 comments on commit 2d93f5c

Please sign in to comment.