diff --git a/CHANGELOG.md b/CHANGELOG.md
index 854fbfc09f..39ae70677d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@
   - Deprecated `norms` and `prob` in the `SPDHG` algorithm to be set in the `BlockOperator` and `Sampler` respectively (#1644)
   - The `run` method in the cil algorithm class will no longer run if a number of iterations is not passed (#1940)
   - Paganin processor now requires the CIL data order (#1920)
+  - The gradient descent algorithm now takes `f` instead of `objective_function`, to match ISTA and FISTA (#2006)
 - Testing
   - Added a new test file `test_algorithm_convergence` that will hold our algorithm tests that run to convergence (#2019)
diff --git a/Wrappers/Python/cil/optimisation/algorithms/GD.py b/Wrappers/Python/cil/optimisation/algorithms/GD.py
index 9d3fdbee70..74ccf06a38 100644
--- a/Wrappers/Python/cil/optimisation/algorithms/GD.py
+++ b/Wrappers/Python/cil/optimisation/algorithms/GD.py
@@ -33,7 +33,7 @@ class GD(Algorithm):
     ----------
     initial: DataContainer (e.g. ImageData)
         The initial point for the optimisation
-    objective_function: CIL function (:meth:`~cil.optimisation.functions.Function`. ) with a defined gradient method
+    f: CIL function (:meth:`~cil.optimisation.functions.Function`) with a defined gradient method
         The function to be minimised.
     step_size: positive real float or subclass of :meth:`~cil.optimisation.utilities.StepSizeRule`, default = None
         If you pass a float this will be used as a constant step size. If left as None and you do not pass a step_size_rule then the Armijo rule will be used to perform backtracking to choose a step size at each iteration. If a child class of :meth:`cil.optimisation.utilities.StepSizeRule` is passed then its method `get_step_size` is called for each update.
@@ -47,12 +47,19 @@
     """

-    def __init__(self, initial=None, objective_function=None, step_size=None, rtol=1e-5, atol=1e-8, preconditioner=None, **kwargs):
+    def __init__(self, initial=None, f=None, step_size=None, rtol=1e-5, atol=1e-8, preconditioner=None, **kwargs):
         '''GD algorithm creator
         '''
         self.alpha = kwargs.pop('alpha', None)
         self.beta = kwargs.pop('beta', None)
+
+        if kwargs.get('objective_function') is not None:
+            warn('The argument `objective_function` is deprecated and will be removed in a future release. Please use `f` instead.', DeprecationWarning, stacklevel=2)
+            if f is not None:
+                raise ValueError('The argument `objective_function` is deprecated, replaced by `f`. Please pass only `f`, not both.')
+            f = kwargs.pop('objective_function')
+
         super().__init__(**kwargs)
@@ -61,18 +68,18 @@ def __init__(self, initial=None, objective_function=None, step_size=None, rtol=1
         self.rtol = rtol
         self.atol = atol

-        if initial is not None and objective_function is not None:
-            self.set_up(initial=initial, objective_function=objective_function,
+        if initial is not None and f is not None:
+            self.set_up(initial=initial, f=f,
                         step_size=step_size, preconditioner=preconditioner)

-    def set_up(self, initial, objective_function, step_size, preconditioner):
+    def set_up(self, initial, f, step_size, preconditioner):
         '''initialisation of the algorithm

         Parameters
         ----------
         initial: DataContainer (e.g. ImageData)
             The initial point for the optimisation
-        objective_function: CIL function with a defined gradient
+        f: CIL function with a defined gradient
             The function to be minimised.
         step_size: positive real float or subclass of :meth:`~cil.optimisation.utilities.StepSizeRule`, default = None
             If you pass a float this will be used as a constant step size. If left as None and you do not pass a step_size_rule then the Armijo rule will be used to perform backtracking to choose a step size at each iteration. If a child class of :meth:`cil.optimisation.utilities.StepSizeRule` is passed then its method `get_step_size` is called for each update.
@@ -84,7 +91,7 @@ def set_up(self, initial, objective_function, step_size, preconditioner):
         log.info("%s setting up", self.__class__.__name__)
         self.x = initial.copy()
-        self._objective_function = objective_function
+        self._objective_function = f

         if step_size is None:
             self.step_size_rule = ArmijoStepSizeRule(
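Note: the shim above keeps the old keyword working, it is simply remapped to `f`. A minimal sketch of both call styles (illustrative only, not part of this diff; the toy geometry, data and step size are assumptions):

    import warnings
    from cil.framework import ImageGeometry
    from cil.optimisation.algorithms import GD
    from cil.optimisation.functions import LeastSquares
    from cil.optimisation.operators import IdentityOperator

    ig = ImageGeometry(voxel_num_x=4, voxel_num_y=4)
    b = ig.allocate('random', seed=2)
    norm2sq = LeastSquares(IdentityOperator(ig), b)

    # New spelling, consistent with ISTA and FISTA
    alg = GD(initial=ig.allocate(0), f=norm2sq, step_size=1 / norm2sq.L)
    alg.run(100, verbose=0)

    # Old spelling still runs, but emits a DeprecationWarning
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        alg = GD(initial=ig.allocate(0), objective_function=norm2sq, step_size=1 / norm2sq.L)
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

Passing both `objective_function` and `f` raises the ValueError from the shim.
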
diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py
index 17c443fa0e..19c081c134 100644
--- a/Wrappers/Python/test/test_algorithms.py
+++ b/Wrappers/Python/test/test_algorithms.py
@@ -88,12 +88,12 @@ def test_GD(self):
         step_size = norm2sq.L / 3.

-        alg = GD(initial=initial, objective_function=norm2sq, step_size=step_size,
+        alg = GD(initial=initial, f=norm2sq, step_size=step_size,
                  atol=1e-9, rtol=1e-6)
         alg.run(1000, verbose=0)
         self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())

-        alg = GD(initial=initial, objective_function=norm2sq, step_size=step_size,
+        alg = GD(initial=initial, f=norm2sq, step_size=step_size,
                  atol=1e-9, rtol=1e-6, update_objective_interval=2)
         self.assertTrue(alg.update_objective_interval == 2)
         alg.run(20, verbose=0)
@@ -114,7 +114,7 @@ def test_update_interval_0(self):
         identity = IdentityOperator(ig)
         norm2sq = LeastSquares(identity, b)
         alg = GD(initial=initial,
-                 objective_function=norm2sq,
+                 f=norm2sq,
                  update_objective_interval=0, atol=1e-9, rtol=1e-6)
         self.assertTrue(alg.update_objective_interval == 0)
@@ -124,11 +124,11 @@
         self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())

     def test_gd_step_size_init(self):
-        gd = GD(initial=self.initial, objective_function=self.f, step_size=0.002)
+        gd = GD(initial=self.initial, f=self.f, step_size=0.002)
         self.assertEqual(gd.step_size_rule.step_size, 0.002)
         self.assertEqual(gd.step_size, 0.002)

-        gd = GD(initial=self.initial, objective_function=self.f)
+        gd = GD(initial=self.initial, f=self.f)
         self.assertEqual(gd.step_size_rule.alpha_orig, 1e6)
         self.assertEqual(gd.step_size_rule.beta, 0.5)
         self.assertEqual(gd.step_size_rule.max_iterations, np.ceil(
@@ -137,27 +137,27 @@ def test_gd_step_size_init(self):
             gd.step_size

         gd = GD(initial=self.initial,
-                objective_function=self.f, alpha=1e2, beta=0.25)
+                f=self.f, alpha=1e2, beta=0.25)
         self.assertEqual(gd.step_size_rule.alpha_orig, 1e2)
         self.assertEqual(gd.step_size_rule.beta, 0.25)
         self.assertEqual(gd.step_size_rule.max_iterations, np.ceil(
             2 * np.log10(1e2) / np.log10(2)))

         with self.assertRaises(TypeError):
-            gd = GD(initial=self.initial, objective_function=self.f,
+            gd = GD(initial=self.initial, f=self.f,
                     step_size=0.1, step_size_rule=ConstantStepSize(0.5))

     def test_gd_constant_step_size_init(self):
         rule = ConstantStepSize(0.4)
         self.assertEqual(rule.step_size, 0.4)
         gd = GD(initial=self.initial,
-                objective_function=self.f, step_size=rule)
+                f=self.f, step_size=rule)
         self.assertEqual(gd.step_size_rule.step_size, 0.4)
         self.assertEqual(gd.step_size, 0.4)

     def test_gd_fixed_step_size_rosen(self):
-        gd = GD(initial=self.initial, objective_function=self.f, step_size=0.002,
+        gd = GD(initial=self.initial, f=self.f, step_size=0.002,
                 update_objective_interval=500)
         gd.run(3000, verbose=0)
         np.testing.assert_allclose(
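The tests above exercise the default documented in the GD docstring: with no step_size, GD builds an ArmijoStepSizeRule with alpha=1e6 and beta=0.5, while a plain float is shorthand for a ConstantStepSize. A short sketch of passing the rules explicitly (illustrative only; reuses `ig` and `norm2sq` from the sketch above, import path as referenced in the docstring):

    from cil.optimisation.utilities import ArmijoStepSizeRule, ConstantStepSize

    # Explicit backtracking: start from alpha and multiply by beta (here, halve)
    # until the Armijo sufficient-decrease condition holds, for at most
    # max_iterations trials per update.
    rule = ArmijoStepSizeRule(alpha=1e6, beta=0.5, max_iterations=40)
    gd = GD(initial=ig.allocate(0), f=norm2sq, step_size=rule)
    gd.run(100, verbose=0)

    # A StepSizeRule instance can be passed to step_size directly.
    gd = GD(initial=ig.allocate(0), f=norm2sq, step_size=ConstantStepSize(0.4))
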
@@ -174,7 +174,7 @@ def test_armijo_step_size_init(self):
             2 * np.log10(1e6) / np.log10(2)))
         gd = GD(initial=self.initial,
-                objective_function=self.f, step_size=rule)
+                f=self.f, step_size=rule)
         self.assertEqual(gd.step_size_rule.alpha_orig, 1e6)
         self.assertEqual(gd.step_size_rule.beta, 0.5)
         self.assertEqual(gd.step_size_rule.max_iterations, np.ceil(
@@ -186,7 +186,7 @@ def test_armijo_step_size_init(self):
         self.assertEqual(rule.max_iterations, 5)
         gd = GD(initial=self.initial,
-                objective_function=self.f, step_size=rule)
+                f=self.f, step_size=rule)
         self.assertEqual(gd.step_size_rule.alpha_orig, 5e5)
         self.assertEqual(gd.step_size_rule.beta, 0.2)
         self.assertEqual(gd.step_size_rule.max_iterations, 5)
@@ -205,18 +205,19 @@ def test_GDArmijo(self):
         norm2sq = LeastSquares(identity, b)

-        alg = GD(initial=initial, objective_function=norm2sq)
+        alg = GD(initial=initial, f=norm2sq)
         alg.run(100, verbose=0)
         self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())

-        alg = GD(initial=initial, objective_function=norm2sq,
+        alg = GD(initial=initial, f=norm2sq,
                  update_objective_interval=2)
         self.assertTrue(alg.update_objective_interval == 2)
+        alg.run(20, verbose=0)
         self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())

     def test_gd_armijo_rosen(self):
         armj = ArmijoStepSizeRule(alpha=50, max_iterations=50, warmstart=False)
-        gd = GD(initial=self.initial, objective_function=self.f, step_size=armj,
+        gd = GD(initial=self.initial, f=self.f, step_size=armj,
                 update_objective_interval=500)
         gd.run(3500, verbose=0)
         np.testing.assert_allclose(
@@ -225,12 +226,12 @@
             gd.solution.array[1], self.scipy_opt_high.x[1], atol=1e-2)

     def test_gd_run_no_iterations(self):
-        gd = GD(initial=self.initial, objective_function=self.f, step_size=0.002)
+        gd = GD(initial=self.initial, f=self.f, step_size=0.002)
         with self.assertRaises(ValueError):
             gd.run()

     def test_gd_run_infinite(self):
-        gd = GD(initial=self.initial, objective_function=self.f, step_size=0.002)
+        gd = GD(initial=self.initial, f=self.f, step_size=0.002)
         with self.assertRaises(ValueError):
             gd.run(np.inf)
diff --git a/Wrappers/Python/test/test_approximate_gradient.py b/Wrappers/Python/test/test_approximate_gradient.py
index 86a963910d..934db489e7 100644
--- a/Wrappers/Python/test/test_approximate_gradient.py
+++ b/Wrappers/Python/test/test_approximate_gradient.py
@@ -159,7 +159,7 @@ def test_toy_example(self):
         stochastic_objective = self.stochastic_estimator(functions, sampler)

         alg_stochastic = GD(initial=initial,
-                            objective_function=stochastic_objective, update_objective_interval=1000,
+                            f=stochastic_objective, update_objective_interval=1000,
                             step_size=0.05)
         alg_stochastic.run(600, verbose=0)
@@ -246,7 +246,7 @@ def test_SAG_toy_example_warm_start(self):
         stochastic_objective.warm_start_approximate_gradients(initial)

         alg_stochastic = GD(initial=initial,
-                            objective_function=stochastic_objective, update_objective_interval=1000,
+                            f=stochastic_objective, update_objective_interval=1000,
                             step_size=0.05, max_iteration =5000)
         alg_stochastic.run( 80, verbose=0)
         np.testing.assert_allclose(p.value ,stochastic_objective(alg_stochastic.x) , atol=1e-1)
@@ -307,7 +307,7 @@ def test_SAGA_toy_example_warm_start(self):
         stochastic_objective.warm_start_approximate_gradients(initial)

         alg_stochastic = GD(initial=initial,
-                            objective_function=stochastic_objective, update_objective_interval=1000,
+                            f=stochastic_objective, update_objective_interval=1000,
                             step_size=0.05, max_iteration =5000)
         alg_stochastic.run( 100, verbose=0)
        np.testing.assert_allclose(p.value ,stochastic_objective(alg_stochastic.x) , atol=1e-1)
@@ -343,7 +343,7 @@ def test_SVRG_snapshot_update_interval_and_data_passes(self):
         sampler = Sampler.random_with_replacement(self.n_subsets)
         objective = SVRGFunction(self.f_subsets, sampler)
         alg_stochastic = GD(initial=self.initial,
-                            objective_function=objective, update_objective_interval=500,
+                            f=objective, update_objective_interval=500,
                             step_size=5e-8, max_iteration=5000)
         alg_stochastic.run(2, verbose=0)
         self.assertNumpyArrayAlmostEqual(
@@ -366,7 +366,7 @@ def test_SVRG_snapshot_update_interval_and_data_passes(self):
         objective = SVRGFunction(
             self.f_subsets, self.sampler, snapshot_update_interval=3)
         alg_stochastic = GD(initial=self.initial,
-                            objective_function=objective, update_objective_interval=500,
+                            f=objective, update_objective_interval=500,
                             step_size=5e-8, max_iteration=5000)
         alg_stochastic.run(2, verbose=0)
         self.assertNumpyArrayAlmostEqual(
@@ -418,7 +418,7 @@ def test_SVRG_toy_example_store_gradients(self):
         stochastic_objective = SVRGFunction(functions, sampler, store_gradients=True)

         alg_stochastic = GD(initial=initial,
-                            objective_function=stochastic_objective, update_objective_interval=1000,
+                            f=stochastic_objective, update_objective_interval=1000,
                             step_size=1/stochastic_objective.L)
         alg_stochastic.run(10, verbose=0)
@@ -463,7 +463,7 @@ def test_LSVRG_init(self):

     def test_LSVRG_data_passes_and_snapshot_update_probability_and_seed(self):
         objective = LSVRGFunction(self.f_subsets, self.sampler, snapshot_update_probability=1)
         alg_stochastic = GD(initial=self.initial, update_objective_interval=500,
-                            objective_function=objective, step_size=5e-8, max_iteration=5000)
+                            f=objective, step_size=5e-8, max_iteration=5000)
         alg_stochastic.run(2, verbose=0)
         self.assertNumpyArrayAlmostEqual(
             np.array(objective.data_passes), np.array([1., 2,]))
@@ -474,7 +474,7 @@ def test_LSVRG_data_passes_and_snapshot_update_probability_and_seed(self):
         self.assertNumpyArrayAlmostEqual(np.array(objective.data_passes_indices[-1]), np.array(list(range(self.n_subsets))))

         objective = LSVRGFunction(self.f_subsets, self.sampler, seed=3)
         alg_stochastic = GD(initial=self.initial,
-                            objective_function=objective, update_objective_interval=500,
+                            f=objective, update_objective_interval=500,
                             step_size=5e-8, max_iteration=5000)
         alg_stochastic.run(10, verbose=0)
         self.assertNumpyArrayAlmostEqual(np.array(objective.data_passes),
@@ -522,7 +522,7 @@ def test_LSVRG_toy_example_store_gradients(self):
         stochastic_objective = LSVRGFunction(functions, sampler, store_gradients=True)

         alg_stochastic = GD(initial=initial,
-                            objective_function=stochastic_objective, update_objective_interval=1000,
+                            f=stochastic_objective, update_objective_interval=1000,
                             step_size=1/stochastic_objective.L)
diff --git a/Wrappers/Python/test/test_preconditioners.py b/Wrappers/Python/test/test_preconditioners.py
index 207a92f1ae..38de5f066f 100644
--- a/Wrappers/Python/test/test_preconditioners.py
+++ b/Wrappers/Python/test/test_preconditioners.py
@@ -20,7 +20,7 @@ def test_preconditioner_called(self):
         A = IdentityOperator(ig)
         test_precon = MagicMock(None)
         f = LeastSquares(A=A, b=data, c=0.5)
-        alg = GD(initial=ig.allocate('random', seed=10), objective_function=f, preconditioner=test_precon,
+        alg = GD(initial=ig.allocate('random', seed=10), f=f, preconditioner=test_precon,
                  max_iteration=100, update_objective_interval=1, step_size=0.0000001)
         alg.run(5)
         self.assertEqual(len(test_precon.apply.mock_calls), 5)
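The preconditioner hook shown here rescales the gradient before each step. A rough sketch of the call pattern (illustrative only; assumes `Sensitivity` is importable from `cil.optimisation.utilities` as in these tests, and reuses `ig`, `A` and `f` in the spirit of the hunk above):

    from cil.optimisation.utilities import Sensitivity

    # Sensitivity preconditioning roughly divides the gradient by A^T(1),
    # which is what makes GD on (weighted) least squares behave like SIRT
    # in the comparison tests below.
    precond = Sensitivity(A)
    alg = GD(initial=ig.allocate(0), f=f, step_size=1., preconditioner=precond)
    alg.run(10, verbose=0)
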
@@ -63,7 +63,7 @@ def test_sensitivity_calculation(self):
             2., 2., 2., 2., 0, 0, 0, 0, 0, 0]))
         f = LeastSquares(A, data)
-        alg = GD(initial=ig.allocate(0), objective_function=f,
+        alg = GD(initial=ig.allocate(0), f=f,
                  max_iteration=100, update_objective_interval=1, step_size=1.)
         alg.gradient_update = ig.allocate(2)
         preconditioner.apply(alg, alg.gradient_update, out=alg.gradient_update)
@@ -84,17 +84,17 @@ def test_sensitivity_gd_against_sirt(self):
         step_size = 1.
         preconditioner = Sensitivity(A)
-        alg = GD(initial=ig.allocate(0), objective_function=f,
+        alg = GD(initial=ig.allocate(0), f=f,
                  max_iteration=100, update_objective_interval=1, step_size=step_size)
         self.assertEqual(alg.preconditioner, None)

-        precond_pwls = GD(initial=ig.allocate(0), objective_function=f, preconditioner=preconditioner,
+        precond_pwls = GD(initial=ig.allocate(0), f=f, preconditioner=preconditioner,
                           max_iteration=100, update_objective_interval=1, step_size=step_size)

         def correct_update_objective(alg):
             # SIRT computes |Ax_{k} - b|_2^2
             # GD with weighted LeastSquares computes the objective including the weight, so we remove the weight
-            return 0.5*(alg.objective_function.A.direct(alg.x) - alg.objective_function.b).squared_norm()
+            return 0.5*(alg._objective_function.A.direct(alg.x) - alg._objective_function.b).squared_norm()

         precond_pwls.run(10)
         np.testing.assert_allclose(
@@ -122,13 +122,13 @@ def test_sensitivity_ista_against_sirt(self):
                        max_iteration=100, update_objective_interval=1, step_size=step_size)
         self.assertEqual(alg.preconditioner, None)

-        precond_pwls = GD(initial=ig.allocate(0), objective_function=f, preconditioner=preconditioner,
+        precond_pwls = GD(initial=ig.allocate(0), f=f, preconditioner=preconditioner,
                           max_iteration=100, update_objective_interval=1, step_size=step_size)

         def correct_update_objective(alg):
             # SIRT computes |Ax_{k} - b|_2^2
             # GD with weighted LeastSquares computes the objective including the weight, so we remove the weight
-            return 0.5*(alg.objective_function.A.direct(alg.x) - alg.objective_function.b).squared_norm()
+            return 0.5*(alg._objective_function.A.direct(alg.x) - alg._objective_function.b).squared_norm()

         precond_pwls.run(10)
         np.testing.assert_allclose(
@@ -185,7 +185,7 @@ def test_adaptive_sensitivity_calculations(self):
             2., 2., 2., 2., 0, 0, 0, 0, 0, 0]))
         f = LeastSquares(A, data)
-        alg = GD(initial=ig.allocate(0), objective_function=f,
+        alg = GD(initial=ig.allocate(0), f=f,
                  max_iteration=100, update_objective_interval=1, step_size=1.)
         alg.gradient_update = ig.allocate(1)
         preconditioner.apply(alg, alg.gradient_update, out=alg.gradient_update)
@@ -242,7 +242,7 @@ def test_adaptive_sensitivity_gd_converges(self):
         step_size = 1.
         preconditioner = AdaptiveSensitivity(A, max_iterations=3000, delta=1e-8)

-        precond_pwls = GD(initial=initial, objective_function=f,
+        precond_pwls = GD(initial=initial, f=f,
                           preconditioner=preconditioner, update_objective_interval=1, step_size=step_size)

         precond_pwls.run(3000)
diff --git a/Wrappers/Python/test/test_stepsizes.py b/Wrappers/Python/test/test_stepsizes.py
index 1ef0fc145c..221a1dc448 100644
--- a/Wrappers/Python/test/test_stepsizes.py
+++ b/Wrappers/Python/test/test_stepsizes.py
@@ -20,7 +20,7 @@ def test_step_sizes_called(self):
         step_size_test = ConstantStepSize(3)
         step_size_test.get_step_size = MagicMock(return_value=.1)
         f = LeastSquares(A=A, b=data, c=0.5)
-        alg = GD(initial=ig.allocate('random', seed=10), objective_function=f, step_size=step_size_test,
+        alg = GD(initial=ig.allocate('random', seed=10), f=f, step_size=step_size_test,
                  update_objective_interval=1)
         alg.run(5)
@@ -73,7 +73,7 @@ def test_armijo_init(self):

     def test_armijo_calculation(self):
         test_stepsize = ArmijoStepSizeRule(alpha=8, beta=0.5, max_iterations=100, warmstart=False)
-        alg = GD(initial=self.ig.allocate(0), objective_function=self.f,
+        alg = GD(initial=self.ig.allocate(0), f=self.f,
                  update_objective_interval=1, step_size=test_stepsize)
         alg.gradient_update = self.ig.allocate(-1)
         step_size = test_stepsize.get_step_size(alg)
@@ -123,7 +123,7 @@ def test_warmstart_true(self):
         rule = ArmijoStepSizeRule(warmstart=True, alpha=5000)
         self.assertTrue(rule.warmstart)
         self.assertTrue(rule.alpha_orig == 5000)
-        alg = GD(initial=self.ig.allocate(0), objective_function=self.f,
+        alg = GD(initial=self.ig.allocate(0), f=self.f,
                  update_objective_interval=1, step_size=rule)
         alg.update()
         self.assertFalse(rule.alpha == 5000)
@@ -132,7 +132,7 @@ def test_warmstart_false(self):
         rule = ArmijoStepSizeRule(warmstart=False, alpha=5000)
         self.assertFalse(rule.warmstart)
         self.assertTrue(rule.alpha_orig == 5000)
-        alg = GD(initial=self.ig.allocate(0), objective_function=self.f,
+        alg = GD(initial=self.ig.allocate(0), f=self.f,
                  update_objective_interval=1, step_size=rule)
         alg.update()
         self.assertTrue(rule.alpha_orig == 5000)
@@ -173,7 +173,7 @@ def test_bb(self):
         ss_rule=BarzilaiBorweinStepSizeRule(2, 'long',3 )
         self.assertEqual(ss_rule.mode, 'long')
         self.assertFalse(ss_rule.adaptive)
-        alg = GD(initial=initial, objective_function=f, step_size=ss_rule)
+        alg = GD(initial=initial, f=f, step_size=ss_rule)
         self.assertEqual(ss_rule.stabilisation_param,3)
         alg.run(2)
         self.assertEqual(ss_rule.stabilisation_param,3)
@@ -183,7 +183,7 @@
         self.assertEqual(ss_rule.mode, 'short')
         self.assertFalse(ss_rule.adaptive)
         self.assertEqual(ss_rule.stabilisation_param,np.inf)
-        alg = GD(initial=initial, objective_function=f, step_size=ss_rule)
+        alg = GD(initial=initial, f=f, step_size=ss_rule)
         alg.run(2)

         n = 5
@@ -198,7 +198,7 @@
         initial = ig.allocate(0)
         f = LeastSquares(Aop, b=bop, c=0.5)
         ss_rule=BarzilaiBorweinStepSizeRule(0.22, 'long',np.inf )
-        alg = GD(initial=initial, objective_function=f, step_size=ss_rule)
+        alg = GD(initial=initial, f=f, step_size=ss_rule)
         self.assertFalse(ss_rule.is_short)
         #Check the initial step size was used
         alg.run(1)
@@ -213,7 +213,7 @@
         self.assertFalse(ss_rule.is_short)

         ss_rule=BarzilaiBorweinStepSizeRule(0.22, 'short',np.inf )
-        alg = GD(initial=initial, objective_function=f, step_size=ss_rule)
+        alg = GD(initial=initial, f=f, step_size=ss_rule)
         self.assertTrue(ss_rule.is_short)
         #Check the initial step size was used
         alg.run(1)
@@ -229,13 +229,13 @@ def test_bb(self):
         #check stop iteration
         ss_rule=BarzilaiBorweinStepSizeRule(1, 'long',np.inf )
-        alg = GD(initial=initial, objective_function=f, step_size=ss_rule)
+        alg = GD(initial=initial, f=f, step_size=ss_rule)
         alg.run(500)
         self.assertEqual(alg.iteration, 1)

         #check adaptive
         ss_rule=BarzilaiBorweinStepSizeRule(0.001, 'long',"auto")
-        alg = GD(initial=initial, objective_function=f, step_size=ss_rule)
+        alg = GD(initial=initial, f=f, step_size=ss_rule)
         self.assertEqual(ss_rule.stabilisation_param, np.inf)
         alg.run(2)
         self.assertNotEqual(ss_rule.stabilisation_param, np.inf)
@@ -243,7 +243,7 @@ def test_bb(self):

         #check stops being adaptive
         ss_rule=BarzilaiBorweinStepSizeRule(0.0000001, 'long',"auto" )
-        alg = GD(initial=initial, objective_function=f, step_size=ss_rule)
+        alg = GD(initial=initial, f=f, step_size=ss_rule)
         self.assertEqual(ss_rule.stabilisation_param, np.inf)
         alg.run(4)
         self.assertNotEqual(ss_rule.stabilisation_param, np.inf)
@@ -253,7 +253,7 @@

         #Test alternating
         ss_rule=BarzilaiBorweinStepSizeRule(0.0000001, 'alternate',"auto" )
-        alg = GD(initial=initial, objective_function=f, step_size=ss_rule)
+        alg = GD(initial=initial, f=f, step_size=ss_rule)
         self.assertFalse(ss_rule.is_short)
         alg.run(2)
         self.assertTrue(ss_rule.is_short)
@@ -280,25 +280,25 @@ def test_bb_converge(self):
         f = LeastSquares(Aop, b=bop, c=2)

         ss_rule=ArmijoStepSizeRule(max_iterations=40, warmstart=False)
-        alg_true = GD(initial=initial, objective_function=f, step_size=ss_rule)
+        alg_true = GD(initial=initial, f=f, step_size=ss_rule)
         alg_true .run(300, verbose=0)

         ss_rule=BarzilaiBorweinStepSizeRule(1/f.L, 'short')
-        alg = GD(initial=initial, objective_function=f, step_size=ss_rule)
+        alg = GD(initial=initial, f=f, step_size=ss_rule)
         alg.run(80, verbose=0)
         self.assertNumpyArrayAlmostEqual(alg.x.as_array(), alg_true.x.as_array(), decimal=3)

         ss_rule=BarzilaiBorweinStepSizeRule(1/f.L, 'long')
-        alg = GD(initial=initial, objective_function=f, step_size=ss_rule)
+        alg = GD(initial=initial, f=f, step_size=ss_rule)
         alg.run(80, verbose=0)
         self.assertNumpyArrayAlmostEqual(alg.x.as_array(), alg_true.x.as_array(), decimal=3)

         ss_rule=BarzilaiBorweinStepSizeRule(1/f.L, 'alternate')
-        alg = GD(initial=initial, objective_function=f, step_size=ss_rule)
+        alg = GD(initial=initial, f=f, step_size=ss_rule)
         alg.run(80, verbose=0)
         self.assertNumpyArrayAlmostEqual(alg.x.as_array(), alg_true.x.as_array(), decimal=3)
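For context on the rule exercised above: with s_k = x_k - x_{k-1} and y_k = grad_k - grad_{k-1}, the Barzilai-Borwein 'long' step is (s.s)/(s.y), the 'short' step is (s.y)/(y.y), and 'alternate' switches between them each iteration; the first positional argument is the step used on the very first iteration. A minimal sketch mirroring test_bb_converge (illustrative only; reuses `ig` and `norm2sq` from the first sketch, import path assumed to match the other step-size rules):

    from cil.optimisation.utilities import BarzilaiBorweinStepSizeRule

    # Start from 1/L, then let the BB 'alternate' mode pick subsequent steps.
    ss_rule = BarzilaiBorweinStepSizeRule(1 / norm2sq.L, 'alternate')
    alg = GD(initial=ig.allocate(0), f=norm2sq, step_size=ss_rule)
    alg.run(80, verbose=0)

The optional third argument is the stabilisation parameter (default np.inf, i.e. unused; "auto" lets the rule choose it adaptively, as the adaptive branches of test_bb check).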