Merge pull request #82 from babreufig/feature/scipy-opt
SciPy algorithms in Xsuite
giadarol authored Nov 9, 2024
2 parents faf276c + 297231a commit 121e199
Showing 2 changed files with 166 additions and 1 deletion.
36 changes: 36 additions & 0 deletions examples/optimize/002_scipy_algorithms.py
@@ -0,0 +1,36 @@
import xobjects as xo
import xdeps as xd
import numpy as np

# Vector-valued test function; the optimizer drives each output to the
# corresponding entry of `targets` below.
def my_function(x):
return [(x[0]-0.0001)**2, (x[1]-0.0003)**2, (x[2]+0.0005)**2, 3.0]

# Scalar variant of my_function (defined for reference; not used below).
def scalar_func(x):
return np.sum(np.array(my_function(x))**2)

x0 = [0., 0., 0.]
limits = [[-1, 1], [-1, 1], [-1, 1]]
targets = [0., 0., 0., 3.]
steps = [1e-6, 1e-6, 1e-6, 1e-6]
tols = [1e-12, 1e-12, 1e-12, 1e-12]

opt = xd.Optimize.from_callable(my_function, x0=x0, steps=steps, tar=targets, tols=tols, limits=limits)
opt.run_bfgs()
xo.assert_allclose(opt.get_merit_function().get_x(), [0.0001, 0.0003, -0.0005], atol=1e-6, rtol=0)

opt.reload(0)
opt.run_l_bfgs_b()
xo.assert_allclose(opt.get_merit_function().get_x(), [0.0001, 0.0003, -0.0005], atol=1e-6, rtol=0)

opt.reload(0)
opt.run_ls_trf()
xo.assert_allclose(opt.get_merit_function().get_x(), [0.0001, 0.0003, -0.0005], atol=1e-6, rtol=0)

opt.reload(0)
opt.run_ls_dogbox()
xo.assert_allclose(opt.get_merit_function().get_x(), [0.0001, 0.0003, -0.0005], atol=1e-6, rtol=0)

assert 'bfgs' in opt.log()['tag']
assert 'l-bfgs-b' in opt.log()['tag']
assert 'trf' in opt.log()['tag']
assert 'dogbox' in opt.log()['tag']
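
The example exercises the four new runners; the run_simplex method shown at the end of the optimize.py diff below could be checked in the same way. A minimal sketch, assuming Nelder-Mead converges on this problem (the looser tolerance, and the omission of a tag assert, are assumptions, not taken from the commit):

opt.reload(0)
opt.run_simplex()
xo.assert_allclose(opt.get_merit_function().get_x(), [0.0001, 0.0003, -0.0005], atol=1e-5, rtol=0)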
131 changes: 130 additions & 1 deletion xdeps/optimize/optimize.py
@@ -3,6 +3,8 @@
import logging

import numpy as np
from scipy.optimize import least_squares
from scipy.optimize import minimize
from ..general import _print

from .jacobian import JacobianSolver
@@ -721,11 +723,138 @@ def from_callable(cls, function, x0, tar, steps=None, tols=None,

return opt


def run_ls_trf(self, n_steps=1000, ftol=1e-12, gtol=None, xtol=1e-12, verbose=0):
"""
Perform a least-squares optimization using the Trust Region Reflective algorithm.

Parameters
----------
n_steps : int, optional
Maximum number of function evaluations (passed to scipy's max_nfev). Defaults to 1000.
ftol : float, optional
Tolerance for the change of the cost function. Defaults to 1e-12.
gtol : float, optional
Tolerance for the gradient norm. Defaults to None (criterion disabled).
xtol : float, optional
Tolerance for the step. Defaults to 1e-12.
verbose : int, optional
Verbosity level. Defaults to 0.
"""
merit_function = self.get_merit_function(return_scalar=False)
bounds = merit_function.get_x_limits()
res = least_squares(merit_function, merit_function.get_x(), method="trf",
bounds=bounds.T, ftol=ftol, gtol=gtol, xtol=xtol,
jac=merit_function.get_jacobian, max_nfev=n_steps,
verbose=verbose)
merit_function.set_x(res.x)
self.tag('trf')

def run_ls_dogbox(self, n_steps=1000, ftol=1e-12, gtol=None, xtol=1e-12, verbose=0):
"""
Perform a least-squares optimization using the dogbox algorithm.

Parameters
----------
n_steps : int, optional
Maximum number of function evaluations (passed to scipy's max_nfev). Defaults to 1000.
ftol : float, optional
Tolerance for the change of the cost function. Defaults to 1e-12.
gtol : float, optional
Tolerance for the gradient norm. Defaults to None (criterion disabled).
xtol : float, optional
Tolerance for the step. Defaults to 1e-12.
verbose : int, optional
Verbosity level. Defaults to 0.
"""

merit_function = self.get_merit_function(return_scalar=False)
bounds = merit_function.get_x_limits()
res = least_squares(merit_function, merit_function.get_x(), method="dogbox",
bounds=bounds.T, ftol=ftol, gtol=gtol, xtol=xtol,
jac=merit_function.get_jacobian, max_nfev=n_steps,
verbose=verbose)
merit_function.set_x(res.x)
self.tag('dogbox')
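
# Note on the two least-squares runners above: both use the vector (residual)
# form of the merit function and pass the x limits as scipy-style bounds;
# with the default gtol=None, scipy's least_squares disables its
# gradient-norm termination criterion.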

def run_l_bfgs_b(self, n_steps=1000, ftol=1e-24, gtol=1e-24, disp=False):
"""
Perform the optimization using the L-BFGS-B algorithm.

Parameters
----------
n_steps : int, optional
Maximum number of iterations (passed to scipy's maxiter). Defaults to 1000.
ftol : float, optional
Tolerance for the cost function. Defaults to 1e-24.
gtol : float, optional
Tolerance for the gradient. Defaults to 1e-24.
disp : bool, optional
If True, display convergence messages. Defaults to False.
"""

merit_function = self.get_merit_function(return_scalar=True)
bounds = merit_function.get_x_limits()
res = minimize(merit_function, merit_function.get_x(), method='L-BFGS-B',
jac=merit_function.get_jacobian,
bounds=bounds,
options=dict(
maxiter=n_steps,
ftol=ftol,
gtol=gtol,
disp=disp,
))
merit_function.set_x(res.x)
self.tag('l-bfgs-b')
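
# Note: in scipy's L-BFGS-B, ftol is the relative reduction criterion
# (f^k - f^{k+1}) / max(|f^k|, |f^{k+1}|, 1) <= ftol; the very small defaults
# used here effectively leave termination to gtol and the iteration limit.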

def run_bfgs(self, n_steps=1000, xrtol=1e-10, gtol=1e-18, disp=False):
"""
Perform the optimization using the BFGS algorithm.

Parameters
----------
n_steps : int, optional
Maximum number of iterations (passed to scipy's maxiter). Defaults to 1000.
xrtol : float, optional
Relative tolerance for the step. Defaults to 1e-10.
gtol : float, optional
Tolerance for the gradient. Defaults to 1e-18.
disp : bool, optional
If True, display convergence messages. Defaults to False.
"""

merit_function = self.get_merit_function(return_scalar=True)
res = minimize(merit_function, merit_function.get_x(), method='BFGS',
jac=merit_function.get_jacobian,
options=dict(
maxiter=n_steps,
xrtol=xrtol,
gtol=gtol,
disp=disp,
))
merit_function.set_x(res.x)
self.tag('bfgs')
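
# Note: BFGS is an unconstrained method, so unlike the runners above no bounds
# are passed to scipy here and any x limits are not enforced by the solver.
# The xrtol option is only accepted by recent SciPy versions.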

def run_simplex(self, n_steps=1000, fatol=1e-11, xatol=1e-11,
adaptive=True, disp=False):
"""
Perform the optimization using the Nelder-Mead simplex algorithm.

Parameters
----------
n_steps : int, optional
Maximum number of steps to perform. Defaults to 1000.
fatol : float, optional
Absolute tolerance for the cost function. Defaults to 1e-11.
xatol : float, optional
Absolute tolerance for the step. Defaults to 1e-11.
adaptive : bool, optional
If True, adapt algorithm parameters to dimensionality of problem. Defaults to True.
disp : bool, optional
If True, display convergence messages. Defaults to False.
"""

merit_function = self.get_merit_function(return_scalar=True)
bounds = merit_function.get_x_limits()
res = minimize(merit_function, merit_function.get_x(), method='Nelder-Mead',
bounds=bounds,
options=dict(
(remaining lines of run_simplex collapsed in the diff view)
