diff --git a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
index 1ba8456d2..dcf187d45 100644
--- a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
+++ b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
@@ -95,7 +95,7 @@ function SciMLBase.__init(prob::OptimizationProblem,
             if prob.f isa OptimizationFunction && (!(prob.f.adtype isa SciMLBase.NoAD) || !isnothing(prob.f.grad))
                 opt = Optim.Fminbox(opt)
             else
-                throw(ArgumentError("Fminbox($opt) requires gradients, since you didn't use `OptimizationFunction` with a valid AD backend https://docs.sciml.ai/Optimization/stable/API/ad/ the lower and upper bounds thus will be ignored."))
+                throw(ArgumentError("Fminbox($opt) requires gradients; use `OptimizationFunction` with either a valid AD backend (https://docs.sciml.ai/Optimization/stable/API/ad/) or a user-provided `grad` function."))
             end
         end
     end
diff --git a/lib/OptimizationOptimJL/test/runtests.jl b/lib/OptimizationOptimJL/test/runtests.jl
index 138519700..4dc4aac66 100644
--- a/lib/OptimizationOptimJL/test/runtests.jl
+++ b/lib/OptimizationOptimJL/test/runtests.jl
@@ -174,6 +174,29 @@
     sol = solve(prob, BFGS())
     @test 10 * sol.objective < l1
 
+    function rosenbrock_grad!(dx, x, p)
+        dx[1] = -2 * (p[1] - x[1]) - 4 * p[2] * (x[2] - x[1]^2) * x[1]
+        dx[2] = 2 * p[2] * (x[2] - x[1]^2)
+        return nothing
+    end
+
+    # https://github.com/SciML/Optimization.jl/issues/754: Optim.BFGS() with an explicit gradient function
+    optprob = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!)
+    prob = OptimizationProblem(optprob, x0, _p)
+    sol = solve(prob, Optim.BFGS()) # must not throw
+    @test 10 * sol.objective < l1
+
+    # https://github.com/SciML/Optimization.jl/issues/754: Optim.BFGS() with bounds and an explicit gradient function
+    optprob = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!)
+    prob = OptimizationProblem(optprob, x0, _p; lb = [-1.0, -1.0], ub = [0.8, 0.8])
+    sol = solve(prob, Optim.BFGS()) # must not throw
+    @test 10 * sol.objective < l1
+
+    # Optim.BFGS() with bounds but neither an AD backend nor a user-supplied gradient must throw
+    optprob = OptimizationFunction(rosenbrock, SciMLBase.NoAD())
+    prob = OptimizationProblem(optprob, x0, _p; lb = [-1.0, -1.0], ub = [0.8, 0.8])
+    @test_throws ArgumentError solve(prob, Optim.BFGS())
+
     @testset "cache" begin
         objective(x, p) = (p[1] - x[1])^2
         x0 = zeros(1)
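
A minimal sketch of the user-facing behavior this change covers, assuming the usual `Optimization` and `OptimizationOptimJL` exports; the definitions mirror those in the test file:

using Optimization, OptimizationOptimJL
import SciMLBase

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
function rosenbrock_grad!(dx, x, p)
    dx[1] = -2 * (p[1] - x[1]) - 4 * p[2] * (x[2] - x[1]^2) * x[1]
    dx[2] = 2 * p[2] * (x[2] - x[1]^2)
    return nothing
end
x0, p = zeros(2), [1.0, 100.0]

# A user-supplied `grad` now satisfies the gradient check, so the bounded
# problem is wrapped in Optim.Fminbox and solves without error.
f = OptimizationFunction(rosenbrock; grad = rosenbrock_grad!)
prob = OptimizationProblem(f, x0, p; lb = [-1.0, -1.0], ub = [0.8, 0.8])
sol = solve(prob, Optim.BFGS())

# Without an AD backend and without `grad`, the clearer ArgumentError is thrown.
f_nograd = OptimizationFunction(rosenbrock, SciMLBase.NoAD())
prob_bad = OptimizationProblem(f_nograd, x0, p; lb = [-1.0, -1.0], ub = [0.8, 0.8])
# solve(prob_bad, Optim.BFGS())  # throws ArgumentError("Fminbox(...) requires gradients; ...")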