Skip to content

Commit

Permalink
Allow Vector{BigFloat} for parameters (#168)
Browse files Browse the repository at this point in the history
* Allow Vector{BigFloat} for parameters

When attempting to pass initial parameter configurations `p0` of type `Vector{BigFloat}` to `curve_fit`, one receives the error `UndefRefError: access to undefined reference`. The reason for this lies in the use of `similar`: 

For other types (e.g. `Vector{Float64}`), the method `similar(p0)` creates a new vector of the same type whose components are uninitialized but hold arbitrary garbage values that can still be read. In contrast, when applied to `Vector{BigFloat}`, a new vector of this type is created whose components are genuinely unassigned, i.e. they all have the value `#undef`! Therefore, when other methods from `NLSolversBase` attempt to access this newly created object further down the pipeline, one gets the error I described.

Ultimately, I think this should be fixed in `Base.similar` but for now, changing `similar` to `copy` (which essentially achieves the same goal) solves the problem. I believe that allowing for arbitrary precision is an important feature of any optimization library. Since my own package also relies on **LsqFit.jl** for optimization, I would be grateful if this issue could be fixed soon - either as proposed here or in some other way.

* Added BigFloat test for curve_fit()

* Update test/curve_fit.jl

Co-authored-by: Patrick Kofod Mogensen <[email protected]>
  • Loading branch information
RafaelArutjunjan and pkofod authored Dec 17, 2020
1 parent 3b693c5 commit 2d3611f
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 8 deletions.
10 changes: 5 additions & 5 deletions src/curve_fit.jl
Original file line number Diff line number Diff line change
Expand Up @@ -27,19 +27,19 @@ end
# provide a method for those who have their own Jacobian function
# Method for callers who supply their own Jacobian function `g`.
# Note: the residual cache is seeded with `copy(r)`, not `similar(r)` —
# `similar` returns an uninitialized array, and for non-isbits element
# types such as `BigFloat` its entries are `#undef`, which triggers
# `UndefRefError` inside NLSolversBase when the cache is first read (#168).
function lmfit(f, g, p0::AbstractArray, wt::AbstractArray; kwargs...)
    r = f(p0)
    R = OnceDifferentiable(f, g, p0, copy(r); inplace=false)
    lmfit(R, p0, wt; kwargs...)
end

# Method for in-place `f!` and in-place `g!`; the caller provides the
# residual buffer `r`. `copy(r)` (not `similar(r)`) seeds the cache so
# every element is initialized — `similar` leaves `BigFloat` entries
# `#undef` and causes `UndefRefError` downstream (#168).
function lmfit(f!, g!, p0::AbstractArray, wt::AbstractArray, r::AbstractArray; kwargs...)
    R = OnceDifferentiable(f!, g!, p0, copy(r); inplace = true)
    lmfit(R, p0, wt; kwargs...)
end

# Method for in-place `f` only; the Jacobian is produced via `autodiff`
# (finite differences by default). `copy(r)` rather than `similar(r)`
# guarantees an initialized residual cache, which matters for non-isbits
# element types such as `BigFloat` (#168).
function lmfit(f, p0::AbstractArray, wt::AbstractArray, r::AbstractArray; autodiff = :finite, kwargs...)
    R = OnceDifferentiable(f, p0, copy(r); inplace = true, autodiff = autodiff)
    lmfit(R, p0, wt; kwargs...)
end

Expand All @@ -60,7 +60,7 @@ function lmfit(f, p0::AbstractArray, wt::AbstractArray; autodiff = :finite, kwar
# construct Jacobian function, which uses finite difference method
r = f(p0)
autodiff = autodiff == :forwarddiff ? :forward : autodiff
R = OnceDifferentiable(f, p0, similar(r); inplace = false, autodiff = autodiff)
R = OnceDifferentiable(f, p0, copy(r); inplace = false, autodiff = autodiff)
lmfit(R, p0, wt; kwargs...)
end

Expand Down Expand Up @@ -125,7 +125,7 @@ function curve_fit(model, jacobian_model,
if inplace
f! = (F,p) -> (model(F,xdata,p); @. F = F - ydata)
g! = (G,p) -> jacobian_model(G, xdata, p)
lmfit(f!, g!, p0, T[], similar(ydata); kwargs...)
lmfit(f!, g!, p0, T[], copy(ydata); kwargs...)
else
f = (p) -> model(xdata, p) - ydata
g = (p) -> jacobian_model(xdata, p)
Expand Down
11 changes: 8 additions & 3 deletions test/curve_fit.jl
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ let
# to supply your own jacobian instead of using the finite difference
# Analytic Jacobian of the two-parameter exponential model
# `model(x, p) = p[1] * exp(-x * p[2])` with respect to `p`,
# supplied so `curve_fit` does not fall back to finite differences.
# Returns a `length(x) × length(p)` matrix.
function jacobian_model(x, p)
    J = Array{Float64}(undef, length(x), length(p))
    J[:, 1] = exp.(-x .* p[2])       # ∂model/∂p[1]
    J[:, 2] = -x .* p[1] .* J[:, 1]  # ∂model/∂p[2]
    return J
end
Expand All @@ -53,11 +53,16 @@ let
@assert norm(errors - [0.017, 0.075]) < 0.1

# test with user-supplied jacobian and weights
fit = curve_fit(model, jacobian_model, xdata, ydata, 1 ./ yvars, [0.5, 0.5])
fit = curve_fit(model, jacobian_model, xdata, ydata, 1 ./ yvars, p0)
println("norm(fit.param - [1.0, 2.0]) < 0.05 ? ", norm(fit.param - [1.0, 2.0]))
@assert norm(fit.param - [1.0, 2.0]) < 0.05
@test fit.converged


# Parameters can also be inferred using arbitrary precision
fit = curve_fit(model, xdata, ydata, 1 ./ yvars, BigFloat.(p0); x_tol=1e-20, g_tol=1e-20)
@test fit.converged
fit = curve_fit(model, jacobian_model, xdata, ydata, 1 ./ yvars, BigFloat.(p0); x_tol=1e-20, g_tol=1e-20)
@test fit.converged

curve_fit(model, jacobian_model, xdata, ydata, 1 ./ yvars, [0.5, 0.5]; tau=0.0001)
end

0 comments on commit 2d3611f

Please sign in to comment.