From 280a43d71eb93e79afbed3f13ae30736edda5d38 Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Mon, 25 Mar 2024 19:47:33 -0500 Subject: [PATCH 01/24] Update dae_solve.jl Added in StochasticTraining strategy for generate_loss function --- src/dae_solve.jl | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/dae_solve.jl b/src/dae_solve.jl index 0c9d1323de..d9276b9d96 100644 --- a/src/dae_solve.jl +++ b/src/dae_solve.jl @@ -82,6 +82,16 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, return loss end +function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, differential_vars::AbstractVector) + autodiff && throw(ArgumentError("autodiff not supported for StochasticTraining.")) + function loss(θ, _) + ts = adapt(parameterless_type(θ), + [(tspan[2] - tspan[1]) * rand() + tspan[1] for i in 1:(strategy.points)]) + sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p, differential_vars)) + end + return loss +end + function DiffEqBase.__solve(prob::DiffEqBase.AbstractDAEProblem, alg::NNDAE, args...; From 911b3a3c1f7febbeaa5a2bb6376fb1b80b5b25f1 Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Mon, 25 Mar 2024 22:39:55 -0500 Subject: [PATCH 02/24] Update dae_solve.jl Added WeightedIntervalTraining --- src/dae_solve.jl | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/dae_solve.jl b/src/dae_solve.jl index d9276b9d96..3e3a2bb7f8 100644 --- a/src/dae_solve.jl +++ b/src/dae_solve.jl @@ -92,6 +92,33 @@ function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tsp return loss end + +function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p,differential_vars::AbstractVector) +autodiff && throw(ArgumentError("autodiff not supported for WeightedIntervalTraining.")) +minT = tspan[1] +maxT = tspan[2] + +weights = strategy.weights ./ sum(strategy.weights) + +N = length(weights) +points = strategy.points + +difference = (maxT - minT) / N + +data = Float64[] +for (index, item) in enumerate(weights) + temp_data = rand(1, trunc(Int, points * item)) .* difference .+ minT .+ + ((index - 1) * difference) + data = append!(data, temp_data) +end + +ts = data +function loss(θ, _) + sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p, differential_vars)) +end +return loss +end + function DiffEqBase.__solve(prob::DiffEqBase.AbstractDAEProblem, alg::NNDAE, args...; From 9d98ae1d730fcb96143355fa6483c0a8b2f06753 Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Wed, 27 Mar 2024 23:21:06 -0500 Subject: [PATCH 03/24] Update NNDAE_tests.jl I added the test function for weighted interval training --- test/NNDAE_tests.jl | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl index f0f2699127..05c55baa73 100644 --- a/test/NNDAE_tests.jl +++ b/test/NNDAE_tests.jl @@ -62,3 +62,40 @@ end @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4 end + +@testset "WeightedIntervalTraining" begin + println("WeightedIntervalTraining") + function f(out, du, u, p, t) + out[1] = du[1] -u[2] + out[2] = u[1] + u[2] + end + p = [] + u0 = [1.0/4.0, 1.0/4.0] + tspan = (0.0, 100000.0) + differential_vars = [true, false] + prob = DAEProblem(f, u0, tspan, differential_vars = differential_vars) + true_out_1(t) = exp(-t)/4.0 + true_out_2(t) = -1.0 * exp(-t)/4.0 + func = Lux.σ + N = 12 + chain = Lux.Chain(Lux.Dense(1, N, func), Lux.Dense(N, N, func), Lux.Dense(N, N, func), + Lux.Dense(N, N, func), Lux.Dense(N, length(u0))) + 
opt = OptimizationOptimisers.Adam(0.01)
+    weights = [0.7, 0.2, 0.1]
+    points = 200
+    alg = NNDAE(chain, opt,init_params = nothing; autodiff = false,
+        strategy = NeuralPDE.WeightedIntervalTraining(weights, points))
+    sol = solve(prob, alg, verbose = false, maxiters = 5000, saveat = 0.01)
+    #@test abs(mean(sol) - mean(true_sol)) < 0.2
+    """Sol would have 2 outputs: one for u[1] and the other for u[2] so just I need compute the total error for all t in tspan """
+    total_error = 0
+    for i in tspan:
+        total_error = total_error + abs(sol(i) - [true_out_1(i) true_out_2(i)])
+    end
+    if total_error < 0.01:
+        print("It works!")
+    else:
+        print("Total error exceeds bound")
+    end
+end

From 35700c44091aa7a437f1ce2f4e773aec109069e4 Mon Sep 17 00:00:00 2001
From: hippyhippohops
Date: Thu, 28 Mar 2024 16:11:37 -0500
Subject: [PATCH 04/24] Added strategy::QuadratureTraining

---
 src/dae_solve.jl | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/src/dae_solve.jl b/src/dae_solve.jl
index 3e3a2bb7f8..e6eb4d2b75 100644
--- a/src/dae_solve.jl
+++ b/src/dae_solve.jl
@@ -72,6 +72,18 @@ function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::AbstractVector,
     sum(abs2, loss) / length(t)
 end

+function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, differential_vars::AbstractVector)
+integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars))
+integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) for t in ts] WHAT IS TS HERE?? - DO WE NEED THIS LINE
+function loss(θ, _)
+    intf = BatchIntegralFunction(integrand, max_batch = strategy.batch)
+    intprob = IntegralProblem(intf, (tspan[1], tspan[2]), θ)
+    sol = solve(intprob, strategy.quadrature_alg; abstol = strategy.abstol, reltol = strategy.reltol, maxiters = strategy.maxiters)
+    sol.u
+end
+return loss
+end
+
 function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p,
     differential_vars::AbstractVector)
     ts = tspan[1]:(strategy.dx):tspan[2]

From 5773e9103dfe88324ef1162d36d4d0f010d20dfc Mon Sep 17 00:00:00 2001
From: hippyhippohops
Date: Thu, 28 Mar 2024 16:16:26 -0500
Subject: [PATCH 05/24] Formatted indentation in strategy::WeightedIntervalTraining

---
 src/dae_solve.jl | 35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/src/dae_solve.jl b/src/dae_solve.jl
index e6eb4d2b75..a204f10574 100644
--- a/src/dae_solve.jl
+++ b/src/dae_solve.jl
@@ -106,29 +106,28 @@ end

 function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p,differential_vars::AbstractVector)
-autodiff && throw(ArgumentError("autodiff not supported for WeightedIntervalTraining."))
-minT = tspan[1]
-maxT = tspan[2]
+    autodiff && throw(ArgumentError("autodiff not supported for WeightedIntervalTraining."))
+    minT = tspan[1]
+    maxT = tspan[2]

-weights = strategy.weights ./ sum(strategy.weights)
+    weights = strategy.weights ./ sum(strategy.weights)

-N = length(weights)
-points = strategy.points
+    N = length(weights)
+    points = strategy.points

-difference = (maxT - minT) / N
+    difference = (maxT - minT) / N

-data = Float64[]
-for (index, item) in enumerate(weights)
-    temp_data = rand(1, trunc(Int, points * item)) .* difference .+ minT .+
-                ((index - 1) * difference)
-    data = append!(data, temp_data)
-end
+    data = Float64[]
+    for (index, item) in enumerate(weights)
+        temp_data = rand(1, trunc(Int, points * item)) .* difference .+ minT .+ ((index - 1) * difference)
+        data = append!(data, temp_data)
+    end
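# (Not part of the patch: a quick sketch of what the sampling loop above does,
# assuming the values used in the tests, weights = [0.7, 0.2, 0.1] and
# points = 200. The normalized weights split (minT, maxT) into N = 3 equal
# sub-intervals of length (maxT - minT) / 3, and
#     [trunc(Int, 200 * w) for w in [0.7, 0.2, 0.1]]   # == [140, 40, 20]
# uniform random samples are drawn from them respectively, so the loss is
# evaluated most densely on the earliest sub-interval.)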
-ts = data -function loss(θ, _) - sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p, differential_vars)) -end -return loss + ts = data + function loss(θ, _) + sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p, differential_vars)) + end + return loss end function DiffEqBase.__solve(prob::DiffEqBase.AbstractDAEProblem, From c9f54de65e199565048532749e683f42b8af16b7 Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Thu, 28 Mar 2024 16:17:28 -0500 Subject: [PATCH 06/24] Formatted Indentation in strategy::QuadratureTraining --- src/dae_solve.jl | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/dae_solve.jl b/src/dae_solve.jl index a204f10574..f41354409d 100644 --- a/src/dae_solve.jl +++ b/src/dae_solve.jl @@ -73,15 +73,15 @@ function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::AbstractVector, end function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, differential_vars::AbstractVector) -integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) -integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) for t in ts] WHAT IS TS HERE?? - DO WE NEED THIS LINE -function loss(θ, _) - intf = BatchIntegralFunction(integrand, max_batch = strategy.batch) - intprob = IntegralProblem(intf, (tspan[1], tspan[2]), θ) - sol = solve(intprob, strategy.quadrature_alg; abstol = strategy.abstol, reltol = strategy.reltol, maxiters = strategy.maxiters) - sol.u -end -return loss + integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) + integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) for t in ts] WHAT IS TS HERE?? - DO WE NEED THIS LINE + function loss(θ, _) + intf = BatchIntegralFunction(integrand, max_batch = strategy.batch) + intprob = IntegralProblem(intf, (tspan[1], tspan[2]), θ) + sol = solve(intprob, strategy.quadrature_alg; abstol = strategy.abstol, reltol = strategy.reltol, maxiters = strategy.maxiters) + sol.u + end + return loss end function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, From abc61bbd489eb6120a76ffa7dfb1baa8dfd60dee Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Sat, 30 Mar 2024 20:27:49 -0500 Subject: [PATCH 07/24] Refactored generate_losses in ode_solve.jl --- src/ode_solve.jl | 143 ++++++++++++++++++++++++----------------------- 1 file changed, 74 insertions(+), 69 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 8af6a708d9..bfa67bb97a 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -219,79 +219,87 @@ end Representation of the loss function, parametric on the training strategy `strategy`. 
""" -function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, - batch, param_estim::Bool) - integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, param_estim)) - - integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p, param_estim)) for t in ts] - function loss(θ, _) - intf = BatchIntegralFunction(integrand, max_batch = strategy.batch) - intprob = IntegralProblem(intf, (tspan[1], tspan[2]), θ) - sol = solve(intprob, strategy.quadrature_alg; abstol = strategy.abstol, reltol = strategy.reltol, maxiters = strategy.maxiters) - sol.u - end - return loss -end +function generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, + batch, param_estim::Bool) -function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch, param_estim::Bool) - ts = tspan[1]:(strategy.dx):tspan[2] - autodiff && throw(ArgumentError("autodiff not supported for GridTraining.")) - function loss(θ, _) - if batch - inner_loss(phi, f, autodiff, ts, θ, p, param_estim) - else - sum([inner_loss(phi, f, autodiff, t, θ, p, param_estim) for t in ts]) + if strategy isa QuadratureTraining + + integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, param_estim)) + function integrand(ts, θ) + [abs2(inner_loss(phi, f, autodiff, t, θ, p, param_estim)) for t in ts] end - end - return loss -end - -function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, - batch, param_estim::Bool) - autodiff && throw(ArgumentError("autodiff not supported for StochasticTraining.")) - function loss(θ, _) - ts = adapt(parameterless_type(θ), - [(tspan[2] - tspan[1]) * rand() + tspan[1] for i in 1:(strategy.points)]) - if batch - inner_loss(phi, f, autodiff, ts, θ, p, param_estim) - else - sum([inner_loss(phi, f, autodiff, t, θ, p, param_estim) for t in ts]) + + function loss(θ, _) + intf = BatchIntegralFunction(integrand, max_batch = strategy.batch) + intprob = IntegralProblem(intf, (tspan[1], tspan[2]), θ) + sol = solve(intprob, strategy.quadrature_alg; abstol = strategy.abstol, + reltol = strategy.reltol, maxiters = strategy.maxiters) + sol.u end - end - return loss -end - -function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, - batch, param_estim::Bool) - autodiff && throw(ArgumentError("autodiff not supported for WeightedIntervalTraining.")) - minT = tspan[1] - maxT = tspan[2] - - weights = strategy.weights ./ sum(strategy.weights) - - N = length(weights) - points = strategy.points - - difference = (maxT - minT) / N + return loss + + elseif strategy isa GridTraining + ts = tspan[1]:(strategy.dx):tspan[2] + autodiff && throw(ArgumentError("autodiff not supported for GridTraining.")) + function loss(θ, _) + if batch + inner_loss(phi, f, autodiff, ts, θ, p, param_estim) + else + sum([inner_loss(phi, f, autodiff, t, θ, p, param_estim) for t in ts]) + end + end + return loss + + elseif strategy isa StochasticTraining + autodiff && throw(ArgumentError("autodiff not supported for StochasticTraining.")) + function loss(θ, _) + ts = adapt(parameterless_type(θ), + [(tspan[2] - tspan[1]) * rand() + tspan[1] for i in 1:(strategy.points)]) + if batch + inner_loss(phi, f, autodiff, ts, θ, p, param_estim) + else + sum([inner_loss(phi, f, autodiff, t, θ, p, param_estim) for t in ts]) + end + end + return loss + + elseif strategy isa WeightedIntervalTraining + + autodiff && throw(ArgumentError("autodiff not supported for WeightedIntervalTraining.")) + minT = tspan[1] + maxT = tspan[2] + + weights = strategy.weights 
./ sum(strategy.weights) + + N = length(weights) + points = strategy.points + + difference = (maxT - minT) / N + + data = Float64[] + for (index, item) in enumerate(weights) + temp_data = rand(1, trunc(Int, points * item)) .* difference .+ minT .+ + ((index - 1) * difference) + data = append!(data, temp_data) + end + + ts = data + function loss(θ, _) + if batch + inner_loss(phi, f, autodiff, ts, θ, p, param_estim) + else + sum([inner_loss(phi, f, autodiff, t, θ, p, param_estim) for t in ts]) + end + end + return loss - data = Float64[] - for (index, item) in enumerate(weights) - temp_data = rand(1, trunc(Int, points * item)) .* difference .+ minT .+ - ((index - 1) * difference) - data = append!(data, temp_data) + else strategy isa QuasiRandomTraining + error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. Use StochasticTraining instead.") end +end + - ts = data - function loss(θ, _) - if batch - inner_loss(phi, f, autodiff, ts, θ, p, param_estim) - else - sum([inner_loss(phi, f, autodiff, t, θ, p, param_estim) for t in ts]) - end - end - return loss -end function evaluate_tstops_loss(phi, f, autodiff::Bool, tstops, p, batch, param_estim::Bool) function loss(θ, _) @@ -304,9 +312,6 @@ function evaluate_tstops_loss(phi, f, autodiff::Bool, tstops, p, batch, param_es return loss end -function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) - error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. Use StochasticTraining instead.") -end struct NNODEInterpolation{T <: ODEPhi, T2} phi::T From e4f06d51875d0e7085b3ccd212360ab0033b8226 Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Mon, 1 Apr 2024 06:42:04 -0500 Subject: [PATCH 08/24] Reverted back the ode_solve.jl to the previous set of codes In the previous commit, I merged all the generate_loss function into one function, but I refactored the code incorrectly. I need to refactor each generate loss function between ode_solve and dae_solve --- src/ode_solve.jl | 148 ++++++++++++++++++++++++----------------------- 1 file changed, 76 insertions(+), 72 deletions(-) diff --git a/src/ode_solve.jl b/src/ode_solve.jl index bfa67bb97a..d7b149d19a 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -220,84 +220,84 @@ end Representation of the loss function, parametric on the training strategy `strategy`. 
""" -function generate_loss(strategy, phi, f, autodiff::Bool, tspan, p, +function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, batch, param_estim::Bool) + integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, param_estim)) - if strategy isa QuadratureTraining - - integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, param_estim)) - function integrand(ts, θ) - [abs2(inner_loss(phi, f, autodiff, t, θ, p, param_estim)) for t in ts] - end - - function loss(θ, _) - intf = BatchIntegralFunction(integrand, max_batch = strategy.batch) - intprob = IntegralProblem(intf, (tspan[1], tspan[2]), θ) - sol = solve(intprob, strategy.quadrature_alg; abstol = strategy.abstol, - reltol = strategy.reltol, maxiters = strategy.maxiters) - sol.u - end - return loss - - elseif strategy isa GridTraining - ts = tspan[1]:(strategy.dx):tspan[2] - autodiff && throw(ArgumentError("autodiff not supported for GridTraining.")) - function loss(θ, _) - if batch - inner_loss(phi, f, autodiff, ts, θ, p, param_estim) - else - sum([inner_loss(phi, f, autodiff, t, θ, p, param_estim) for t in ts]) - end - end - return loss - - elseif strategy isa StochasticTraining - autodiff && throw(ArgumentError("autodiff not supported for StochasticTraining.")) - function loss(θ, _) - ts = adapt(parameterless_type(θ), - [(tspan[2] - tspan[1]) * rand() + tspan[1] for i in 1:(strategy.points)]) - if batch - inner_loss(phi, f, autodiff, ts, θ, p, param_estim) - else - sum([inner_loss(phi, f, autodiff, t, θ, p, param_estim) for t in ts]) - end - end - return loss - - elseif strategy isa WeightedIntervalTraining - - autodiff && throw(ArgumentError("autodiff not supported for WeightedIntervalTraining.")) - minT = tspan[1] - maxT = tspan[2] - - weights = strategy.weights ./ sum(strategy.weights) - - N = length(weights) - points = strategy.points - - difference = (maxT - minT) / N - - data = Float64[] - for (index, item) in enumerate(weights) - temp_data = rand(1, trunc(Int, points * item)) .* difference .+ minT .+ - ((index - 1) * difference) - data = append!(data, temp_data) + function integrand(ts, θ) + [abs2(inner_loss(phi, f, autodiff, t, θ, p, param_estim)) for t in ts] + end + + function loss(θ, _) + intf = BatchIntegralFunction(integrand, max_batch = strategy.batch) + intprob = IntegralProblem(intf, (tspan[1], tspan[2]), θ) + sol = solve(intprob, strategy.quadrature_alg; abstol = strategy.abstol, + reltol = strategy.reltol, maxiters = strategy.maxiters) + sol.u + end + return loss +end + +function generate_loss( + strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, batch, param_estim::Bool) + ts = tspan[1]:(strategy.dx):tspan[2] + autodiff && throw(ArgumentError("autodiff not supported for GridTraining.")) + function loss(θ, _) + if batch + inner_loss(phi, f, autodiff, ts, θ, p, param_estim) + else + sum([inner_loss(phi, f, autodiff, t, θ, p, param_estim) for t in ts]) end - - ts = data - function loss(θ, _) - if batch - inner_loss(phi, f, autodiff, ts, θ, p, param_estim) - else - sum([inner_loss(phi, f, autodiff, t, θ, p, param_estim) for t in ts]) - end + end + return loss +end + +function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, + batch, param_estim::Bool) + autodiff && throw(ArgumentError("autodiff not supported for StochasticTraining.")) + function loss(θ, _) + ts = adapt(parameterless_type(θ), + [(tspan[2] - tspan[1]) * rand() + tspan[1] for i in 1:(strategy.points)]) + if batch + inner_loss(phi, f, autodiff, ts, θ, p, param_estim) + 
else + sum([inner_loss(phi, f, autodiff, t, θ, p, param_estim) for t in ts]) end - return loss + end + return loss +end + +function generate_loss( + strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, + batch, param_estim::Bool) + autodiff && throw(ArgumentError("autodiff not supported for WeightedIntervalTraining.")) + minT = tspan[1] + maxT = tspan[2] + + weights = strategy.weights ./ sum(strategy.weights) + + N = length(weights) + points = strategy.points - else strategy isa QuasiRandomTraining - error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. Use StochasticTraining instead.") + difference = (maxT - minT) / N + + data = Float64[] + for (index, item) in enumerate(weights) + temp_data = rand(1, trunc(Int, points * item)) .* difference .+ minT .+ + ((index - 1) * difference) + data = append!(data, temp_data) end -end + + ts = data + function loss(θ, _) + if batch + inner_loss(phi, f, autodiff, ts, θ, p, param_estim) + else + sum([inner_loss(phi, f, autodiff, t, θ, p, param_estim) for t in ts]) + end + end + return loss +end @@ -312,6 +312,10 @@ function evaluate_tstops_loss(phi, f, autodiff::Bool, tstops, p, batch, param_es return loss end +function generate_loss(strategy::QuasiRandomTraining, phi, f, autodiff::Bool, tspan) + error("QuasiRandomTraining is not supported by NNODE since it's for high dimensional spaces only. Use StochasticTraining instead.") +end + struct NNODEInterpolation{T <: ODEPhi, T2} phi::T From 0f4cfde143df29259394deb9c896e78a976a8586 Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Mon, 8 Apr 2024 22:00:48 -0500 Subject: [PATCH 09/24] Edits to dae_solve.jl and NNDAE_tests.jl --- src/dae_solve.jl | 3 +- test/NNDAE_tests.jl | 106 ++++++++++++++++++++++++++++++-------------- 2 files changed, 74 insertions(+), 35 deletions(-) diff --git a/src/dae_solve.jl b/src/dae_solve.jl index f41354409d..226be19b06 100644 --- a/src/dae_solve.jl +++ b/src/dae_solve.jl @@ -74,8 +74,7 @@ end function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, differential_vars::AbstractVector) integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) - integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) for t in ts] WHAT IS TS HERE?? - DO WE NEED THIS LINE - function loss(θ, _) + integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) for t in ts] #WHAT IS TS HERE?? 
- DO WE NEED THIS LINEfunction loss(θ, _) intf = BatchIntegralFunction(integrand, max_batch = strategy.batch) intprob = IntegralProblem(intf, (tspan[1], tspan[2]), θ) sol = solve(intprob, strategy.quadrature_alg; abstol = strategy.abstol, reltol = strategy.reltol, maxiters = strategy.maxiters) diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl index 05c55baa73..baf829c3bc 100644 --- a/test/NNDAE_tests.jl +++ b/test/NNDAE_tests.jl @@ -64,38 +64,78 @@ end end @testset "WeightedIntervalTraining" begin - println("WeightedIntervalTraining") - function f(out, du, u, p, t) - out[1] = du[1] -u[2] - out[2] = u[1] + u[2] - end - p = [] - u0 = [1.0/4.0, 1.0/4.0] - tspan = (0.0, 100000.0) - differential_vars = [true, false] - prob = DAEProblem(f, u0, tspan, differential_vars = differential_vars) - true_out_1(t) = exp(-t)/4.0 - true_out_2(t) = -1.0 * exp(-t)/4.0 - func = Lux.σ - N = 12 - chain = Lux.Chain(Lux.Dense(1, N, func), Lux.Dense(N, N, func), Lux.Dense(N, N, func), - Lux.Dense(N, N, func), Lux.Dense(N, length(u0))) - opt = OptimizationOptimisers.Adam(0.01) - weights = [0.7, 0.2, 0.1] - points = 200 - alg = NNDAE(chain, opt,init_params = nothing; autodiff = false, - strategy = NeuralPDE.WeightedIntervalTraining(weights, points)) - sol = solve(prob, alg, verbose = false, maxiters = 5000, saveat = 0.01) - #@test abs(mean(sol) - mean(true_sol)) < 0.2 - """Sol would have 2 outputs: one for u[1] and the other for u[2] so just I need compute the total error for all t in tspan """ - total_error = 0 - for i in tspan: - total_error = total_error + abs(sol(i) - [true_out_1(i) true_out_2(i)]) - end - if total_error < 0.01: - print("It works!") - else: - print("Total error exceeds bound") - end + example = (du, u, p, t) -> [u[1] - t - du[1], u[2] - t - du[2]] + differential_vars = [false, true] + u₀ = [1.0, -1.0] + du₀ = [0.0, 0.0] + tspan = (0.0f0, pi / 2.0f0) + prob = DAEProblem(example, du₀, u₀, tspan; differential_vars = differential_vars) #Lack of parameter here?? 
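# (Not part of the patch, a note on the question above: DAEProblem also takes
# parameters positionally after tspan, e.g.
#     DAEProblem(example, du₀, u₀, tspan, p; differential_vars = differential_vars)
# and when p is omitted it defaults to SciMLBase.NullParameters(), which is
# harmless here because `example` never reads p; a later commit in this series
# passes p = 0 explicitly.)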
+ chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2)) + opt = OptimizationOptimisers.Adam(0.1) + weights = [0.7, 0.2, 0.1] + points = 200 + alg = NNDAE(chain, opt,init_params = nothing; autodiff = false,strategy = NeuralPDE.WeightedIntervalTraining(weights, points)) + sol = solve(prob, alg, verbose = false, maxiters = 5000, saveat = 0.01) + @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4 +end + + +@testset "StochasticTraining" begin + example = (du, u, p, t) -> [u[1] - t - du[1], u[2] - t - du[2]] + differential_vars = [false, true] + u₀ = [1.0, -1.0] + du₀ = [0.0, 0.0] + tspan = (0.0f0, pi / 2.0f0) + prob = DAEProblem(example, du₀, u₀, tspan; differential_vars = differential_vars) + chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2)) + opt = OptimizationOptimisers.Adam(0.1) + weights = [0.7, 0.2, 0.1] + points = 200 + alg = NNDAE(chain, opt,init_params = nothing; autodiff = false,strategy = StochasticTraining(100)) + sol = solve(prob, alg, verbose = false, maxiters = 5000, saveat = 0.01) + @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4 +end + + +#= + +@testset "WeightedIntervalTraining" begin + println("WeightedIntervalTraining") + function example3(du, u, p, t) + du[1] = -u[2] + u[1] = -u[2] + return out + end + p = [] + u0 = [1.0/4.0, 1.0/4.0] + du0 = [0,0] + tspan = (0.0, 100000.0) + differential_vars = [true, false] + prob = DAEProblem(example3, du0, u0, tspan, differential_vars = differential_vars) + true_out_1(t) = exp(-t)/4.0 + true_out_2(t) = -1.0 * exp(-t)/4.0 + func = Lux.σ + N = 12 + chain = Lux.Chain(Lux.Dense(1, N, func), Lux.Dense(N, N, func), Lux.Dense(N, N, func), Lux.Dense(N, N, func), Lux.Dense(N, length(u0))) + opt = OptimizationOptimisers.Adam(0.01) + weights = [0.7, 0.2, 0.1] + points = 200 + alg = NNDAE(chain, opt,init_params = nothing; autodiff = false,strategy = NeuralPDE.WeightedIntervalTraining(weights, points)) + sol = solve(prob, alg, verbose = false, maxiters = 5000, saveat = 0.01) + #@test abs(mean(sol) - mean(true_sol)) < 0.2 + """Sol would have 2 outputs: one for u[1] and the other for u[2] so just I need compute the total error for all t in tspan """ + total_error = 0 + for i in tspan + total_error = total_error + abs(sol(i) - [true_out_1(i) true_out_2(i)]) + end + if total_error < 0.01 + print("It works!") + else + print("Total error exceeds bound") end +end + +=# + + From 911d68c56ad03fc79eb2970e7012710373297c1a Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Thu, 11 Apr 2024 23:00:34 -0500 Subject: [PATCH 10/24] Modified dae_solve.jl and NNDAE_tests --- src/dae_solve.jl | 69 +++++++++++++++++++++++++++++++++++++++++++-- test/NNDAE_tests.jl | 3 +- 2 files changed, 69 insertions(+), 3 deletions(-) diff --git a/src/dae_solve.jl b/src/dae_solve.jl index 226be19b06..748effc5da 100644 --- a/src/dae_solve.jl +++ b/src/dae_solve.jl @@ -74,7 +74,10 @@ end function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, differential_vars::AbstractVector) integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) - integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) for t in ts] #WHAT IS TS HERE?? - DO WE NEED THIS LINEfunction loss(θ, _) + function integrand(ts, θ) + integrand(ts, θ) = [abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) for t in ts] #WHAT IS TS HERE?? 
- DO WE NEED THIS LINEfunction loss(θ, _) + end + function loss(θ, _) intf = BatchIntegralFunction(integrand, max_batch = strategy.batch) intprob = IntegralProblem(intf, (tspan[1], tspan[2]), θ) sol = solve(intprob, strategy.quadrature_alg; abstol = strategy.abstol, reltol = strategy.reltol, maxiters = strategy.maxiters) @@ -167,6 +170,12 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractDAEProblem, error("Only Lux.AbstractExplicitLayer and Flux.Chain neural networks are supported") end + init_params = if alg.param_estim + ComponentArrays.ComponentArray(; depvar = ComponentArrays.ComponentArray(init_params), p = prob.p) + else + ComponentArrays.ComponentArray(; depvar = ComponentArrays.ComponentArray(init_params)) + end + if isinplace(prob) throw(error("The NNODE solver only supports out-of-place DAE definitions, i.e. du=f(u,p,t).")) end @@ -181,24 +190,80 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractDAEProblem, end end - strategy = if alg.strategy === nothing + #=strategy = if alg.strategy === nothing if dt !== nothing GridTraining(dt) else error("dt is not defined") end + end=# + + strategy = if alg.strategy === nothing + if dt !== nothing + GridTraining(dt) + else + QuadratureTraining(; quadrature_alg = QuadGKJL(), + reltol = convert(eltype(u0), reltol), + abstol = convert(eltype(u0), abstol), maxiters = maxiters, + batch = 0) + end + else + alg.strategy end + batch = alg.batch inner_f = generate_loss(strategy, phi, f, autodiff, tspan, p, differential_vars) + additional_loss = alg.additional_loss + (param_estim && isnothing(additional_loss)) && throw(ArgumentError("Please provide `additional_loss` in `NNODE` for parameter estimation (`param_estim` is true).")) + #= # Creates OptimizationFunction Object from total_loss total_loss(θ, _) = inner_f(θ, phi) + =# + + # Creates OptimizationFunction Object from total_loss + function total_loss(θ, _) + L2_loss = inner_f(θ, phi) + if !(additional_loss isa Nothing) + L2_loss = L2_loss + additional_loss(phi, θ) + end + if !(tstops isa Nothing) + num_tstops_points = length(tstops) + tstops_loss_func = evaluate_tstops_loss(phi, f, autodiff, tstops, p, batch, param_estim) + tstops_loss = tstops_loss_func(θ, phi) + if strategy isa GridTraining + num_original_points = length(tspan[1]:(strategy.dx):tspan[2]) + elseif strategy isa Union{WeightedIntervalTraining, StochasticTraining} + num_original_points = strategy.points + else + return L2_loss + tstops_loss + end + total_original_loss = L2_loss * num_original_points + total_tstops_loss = tstops_loss * num_tstops_points + total_points = num_original_points + num_tstops_points + L2_loss = (total_original_loss + total_tstops_loss) / total_points + end + return L2_loss + end + + #= # Optimization Algo for Training Strategies opt_algo = Optimization.AutoZygote() # Creates OptimizationFunction Object from total_loss optf = OptimizationFunction(total_loss, opt_algo) + =# + + # Choice of Optimization Algo for Training Strategies + opt_algo = if strategy isa QuadratureTraining + Optimization.AutoForwardDiff() + else + Optimization.AutoZygote() + end + # Creates OptimizationFunction Object from total_loss + optf = OptimizationFunction(total_loss, opt_algo) + iteration = 0 callback = function (p, l) iteration += 1 diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl index baf829c3bc..abc7658e15 100644 --- a/test/NNDAE_tests.jl +++ b/test/NNDAE_tests.jl @@ -68,8 +68,9 @@ end differential_vars = [false, true] u₀ = [1.0, -1.0] du₀ = [0.0, 0.0] + p = 0 tspan = (0.0f0, pi / 2.0f0) - prob = 
DAEProblem(example, du₀, u₀, tspan; differential_vars = differential_vars) #Lack of parameter here?? + prob = DAEProblem(example, du₀, u₀, tspan, p; differential_vars = differential_vars) #Lack of parameter here?? chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2)) opt = OptimizationOptimisers.Adam(0.1) weights = [0.7, 0.2, 0.1] From 2f4c505c7bcb3cc42e66cb0b0c63ea2ce89557bf Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Sun, 21 Apr 2024 18:53:55 -0500 Subject: [PATCH 11/24] Removed param_estim --- src/dae_solve.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/dae_solve.jl b/src/dae_solve.jl index 748effc5da..19005004f8 100644 --- a/src/dae_solve.jl +++ b/src/dae_solve.jl @@ -213,8 +213,6 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractDAEProblem, batch = alg.batch inner_f = generate_loss(strategy, phi, f, autodiff, tspan, p, differential_vars) - additional_loss = alg.additional_loss - (param_estim && isnothing(additional_loss)) && throw(ArgumentError("Please provide `additional_loss` in `NNODE` for parameter estimation (`param_estim` is true).")) #= # Creates OptimizationFunction Object from total_loss From 52cdea8049fbce898ccdac7a3cc2e5944d417c04 Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Wed, 24 Apr 2024 23:29:19 -0500 Subject: [PATCH 12/24] Update dae_solve.jl --- src/dae_solve.jl | 103 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 70 insertions(+), 33 deletions(-) diff --git a/src/dae_solve.jl b/src/dae_solve.jl index 19005004f8..b0ffcc9ca1 100644 --- a/src/dae_solve.jl +++ b/src/dae_solve.jl @@ -63,8 +63,50 @@ function dfdx(phi::ODEPhi, t::AbstractVector, θ, autodiff::Bool, differential_v end end +function dfdx(phi::ODEPhi{C, T, U}, t::AbstractVector, θ, autodiff::Bool, + differential_vars::AbstractVector) where {C,T,U <: AbstractVector} + if autodiff + autodiff && throw(ArgumentError("autodiff not supported for DAE problem.")) + else + """ + dphi = (phi(t .+ sqrt(eps(eltype(t))), θ) - phi(t, θ)) ./ sqrt(eps(eltype(t))) + batch_size = size(t)[1] + + reduce(vcat, + [if dv == true + dphi[[i], :] + else + zeros(1, batch_size) + end + for (i, dv) in enumerate(differential_vars)]) + """ + 0 + end +end + +function dfdx(phi::ODEPhi{C, T, U}, t::AbstractVector, θ, autodiff::Bool, + differential_vars::AbstractVector) where {C,T,U <: Number} + if autodiff + autodiff && throw(ArgumentError("autodiff not supported for DAE problem.")) + else + """ + dphi = (phi(t .+ sqrt(eps(eltype(t))), θ) - phi(t, θ)) ./ sqrt(eps(eltype(t))) + batch_size = size(t)[1] + + reduce(vcat, + [if dv == true + dphi[[i], :] + else + zeros(1, batch_size) + end + for (i, dv) in enumerate(differential_vars)]) + """ + 0 + end +end + function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::AbstractVector, θ, - p, differential_vars::AbstractVector) where {C, T, U} + p, differential_vars::AbstractVector, param_estim) where {C, T, U} out = Array(phi(t, θ)) dphi = Array(dfdx(phi, t, θ, autodiff, differential_vars)) arrt = Array(t) @@ -72,10 +114,10 @@ function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::AbstractVector, sum(abs2, loss) / length(t) end -function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, differential_vars::AbstractVector) +function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, differential_vars::AbstractVector, batch, param_estim::Bool) integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) function integrand(ts, θ) - integrand(ts, θ) = 
[abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) for t in ts] #WHAT IS TS HERE?? - DO WE NEED THIS LINEfunction loss(θ, _) + [abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars, param_estim)) for t in ts] end function loss(θ, _) intf = BatchIntegralFunction(integrand, max_batch = strategy.batch) @@ -87,27 +129,36 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tsp end function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, - differential_vars::AbstractVector) + differential_vars::AbstractVector, batch, param_estim::Bool) ts = tspan[1]:(strategy.dx):tspan[2] autodiff && throw(ArgumentError("autodiff not supported for GridTraining.")) function loss(θ, _) - sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p, differential_vars)) + if batch + inner_loss(phi,f, autodiff, ts, θ, p, differential_vars, param_estim) + else + sum([inner_loss(phi, f, autodiff, ts, θ, p, differential_vars,param_estim) for t in ts]) + end end return loss end -function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, differential_vars::AbstractVector) +function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, differential_vars::AbstractVector, + batch, param_estim::Bool) autodiff && throw(ArgumentError("autodiff not supported for StochasticTraining.")) function loss(θ, _) ts = adapt(parameterless_type(θ), [(tspan[2] - tspan[1]) * rand() + tspan[1] for i in 1:(strategy.points)]) - sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p, differential_vars)) + if batch + inner_loss(phi, f, autodiff, ts, θ, p, differential_vars, param_estim) + else + sum([inner_loss(phi, f, autodiff, ts, θ, p, differential_vars, param_estim) for t in ts]) + end end return loss end -function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p,differential_vars::AbstractVector) +function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p,differential_vars::AbstractVector,batch, param_estim) autodiff && throw(ArgumentError("autodiff not supported for WeightedIntervalTraining.")) minT = tspan[1] maxT = tspan[2] @@ -127,7 +178,11 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo ts = data function loss(θ, _) - sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p, differential_vars)) + if batch + inner_loss(phi, f, autodiff, ts, θ, p, differential_vars, param_estim) + else + sum([inner_loss(phi, f, autodiff, ts, θ, p, differential_vars, param_estim) for t in ts]) + end end return loss end @@ -144,7 +199,9 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractDAEProblem, verbose = false, saveat = nothing, maxiters = nothing, - tstops = nothing) + tstops = nothing, + batch = true, + param_estim = false) u0 = prob.u0 du0 = prob.du0 tspan = prob.tspan @@ -190,14 +247,6 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractDAEProblem, end end - #=strategy = if alg.strategy === nothing - if dt !== nothing - GridTraining(dt) - else - error("dt is not defined") - end - end=# - strategy = if alg.strategy === nothing if dt !== nothing GridTraining(dt) @@ -212,12 +261,9 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractDAEProblem, end batch = alg.batch - inner_f = generate_loss(strategy, phi, f, autodiff, tspan, p, differential_vars) - - #= - # Creates OptimizationFunction Object from total_loss - total_loss(θ, _) = inner_f(θ, phi) - =# + inner_f = generate_loss(strategy, phi, f, autodiff, tspan, p, differential_vars, batch, 
param_estim) + additional_loss = alg.additional_loss + (param_estim && isnothing(additional_loss)) && throw(ArgumentError("Please provide `additional_loss` in `NNODE` for parameter estimation (`param_estim` is true).")) # Creates OptimizationFunction Object from total_loss function total_loss(θ, _) @@ -244,15 +290,6 @@ function DiffEqBase.__solve(prob::DiffEqBase.AbstractDAEProblem, return L2_loss end - #= - - # Optimization Algo for Training Strategies - opt_algo = Optimization.AutoZygote() - # Creates OptimizationFunction Object from total_loss - optf = OptimizationFunction(total_loss, opt_algo) - - =# - # Choice of Optimization Algo for Training Strategies opt_algo = if strategy isa QuadratureTraining Optimization.AutoForwardDiff() From d6f2e5fc5bcc25ce909e626c4c14c0c8592c0c91 Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Thu, 2 May 2024 22:16:56 -0500 Subject: [PATCH 13/24] Reset the code to match master code. Planning to start from scratch again --- src/dae_solve.jl | 165 +++----------------------------------------- test/NNDAE_tests.jl | 75 -------------------- 2 files changed, 9 insertions(+), 231 deletions(-) diff --git a/src/dae_solve.jl b/src/dae_solve.jl index 00663f83ef..5a5ee83be3 100644 --- a/src/dae_solve.jl +++ b/src/dae_solve.jl @@ -60,50 +60,8 @@ function dfdx(phi::ODEPhi, t::AbstractVector, θ, autodiff::Bool, end end -function dfdx(phi::ODEPhi{C, T, U}, t::AbstractVector, θ, autodiff::Bool, - differential_vars::AbstractVector) where {C,T,U <: AbstractVector} - if autodiff - autodiff && throw(ArgumentError("autodiff not supported for DAE problem.")) - else - """ - dphi = (phi(t .+ sqrt(eps(eltype(t))), θ) - phi(t, θ)) ./ sqrt(eps(eltype(t))) - batch_size = size(t)[1] - - reduce(vcat, - [if dv == true - dphi[[i], :] - else - zeros(1, batch_size) - end - for (i, dv) in enumerate(differential_vars)]) - """ - 0 - end -end - -function dfdx(phi::ODEPhi{C, T, U}, t::AbstractVector, θ, autodiff::Bool, - differential_vars::AbstractVector) where {C,T,U <: Number} - if autodiff - autodiff && throw(ArgumentError("autodiff not supported for DAE problem.")) - else - """ - dphi = (phi(t .+ sqrt(eps(eltype(t))), θ) - phi(t, θ)) ./ sqrt(eps(eltype(t))) - batch_size = size(t)[1] - - reduce(vcat, - [if dv == true - dphi[[i], :] - else - zeros(1, batch_size) - end - for (i, dv) in enumerate(differential_vars)]) - """ - 0 - end -end - function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::AbstractVector, θ, - p, differential_vars::AbstractVector, param_estim) where {C, T, U} + p, differential_vars::AbstractVector) where {C, T, U} out = Array(phi(t, θ)) dphi = Array(dfdx(phi, t, θ, autodiff, differential_vars)) arrt = Array(t) @@ -111,75 +69,12 @@ function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::AbstractVector, sum(abs2, loss) / length(t) end -function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, differential_vars::AbstractVector, batch, param_estim::Bool) - integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) - function integrand(ts, θ) - [abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars, param_estim)) for t in ts] - end - function loss(θ, _) - intf = BatchIntegralFunction(integrand, max_batch = strategy.batch) - intprob = IntegralProblem(intf, (tspan[1], tspan[2]), θ) - sol = solve(intprob, strategy.quadrature_alg; abstol = strategy.abstol, reltol = strategy.reltol, maxiters = strategy.maxiters) - sol.u - end - return loss -end - function generate_loss(strategy::GridTraining, phi, f, 
autodiff::Bool, tspan, p, - differential_vars::AbstractVector, batch, param_estim::Bool) + differential_vars::AbstractVector) ts = tspan[1]:(strategy.dx):tspan[2] autodiff && throw(ArgumentError("autodiff not supported for GridTraining.")) function loss(θ, _) - if batch - inner_loss(phi,f, autodiff, ts, θ, p, differential_vars, param_estim) - else - sum([inner_loss(phi, f, autodiff, ts, θ, p, differential_vars,param_estim) for t in ts]) - end - end - return loss -end - -function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p, differential_vars::AbstractVector, - batch, param_estim::Bool) - autodiff && throw(ArgumentError("autodiff not supported for StochasticTraining.")) - function loss(θ, _) - ts = adapt(parameterless_type(θ), - [(tspan[2] - tspan[1]) * rand() + tspan[1] for i in 1:(strategy.points)]) - if batch - inner_loss(phi, f, autodiff, ts, θ, p, differential_vars, param_estim) - else - sum([inner_loss(phi, f, autodiff, ts, θ, p, differential_vars, param_estim) for t in ts]) - end - end - return loss -end - - -function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p,differential_vars::AbstractVector,batch, param_estim) - autodiff && throw(ArgumentError("autodiff not supported for WeightedIntervalTraining.")) - minT = tspan[1] - maxT = tspan[2] - - weights = strategy.weights ./ sum(strategy.weights) - - N = length(weights) - points = strategy.points - - difference = (maxT - minT) / N - - data = Float64[] - for (index, item) in enumerate(weights) - temp_data = rand(1, trunc(Int, points * item)) .* difference .+ minT .+ ((index - 1) * difference) - data = append!(data, temp_data) - end - - ts = data - function loss(θ, _) - if batch - inner_loss(phi, f, autodiff, ts, θ, p, differential_vars, param_estim) - else - sum([inner_loss(phi, f, autodiff, ts, θ, p, differential_vars, param_estim) for t in ts]) - end + sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p, differential_vars)) end return loss end @@ -196,9 +91,7 @@ function SciMLBase.__solve(prob::SciMLBase.AbstractDAEProblem, verbose = false, saveat = nothing, maxiters = nothing, - tstops = nothing, - batch = true, - param_estim = false) + tstops = nothing) u0 = prob.u0 du0 = prob.du0 tspan = prob.tspan @@ -225,12 +118,6 @@ function SciMLBase.__solve(prob::SciMLBase.AbstractDAEProblem, error("Only Lux.AbstractExplicitLayer and Flux.Chain neural networks are supported") end - init_params = if alg.param_estim - ComponentArrays.ComponentArray(; depvar = ComponentArrays.ComponentArray(init_params), p = prob.p) - else - ComponentArrays.ComponentArray(; depvar = ComponentArrays.ComponentArray(init_params)) - end - if isinplace(prob) throw(error("The NNODE solver only supports out-of-place DAE definitions, i.e. 
du=f(u,p,t).")) end @@ -249,51 +136,17 @@ function SciMLBase.__solve(prob::SciMLBase.AbstractDAEProblem, if dt !== nothing GridTraining(dt) else - QuadratureTraining(; quadrature_alg = QuadGKJL(), - reltol = convert(eltype(u0), reltol), - abstol = convert(eltype(u0), abstol), maxiters = maxiters, - batch = 0) + error("dt is not defined") end - else - alg.strategy end - batch = alg.batch - inner_f = generate_loss(strategy, phi, f, autodiff, tspan, p, differential_vars, batch, param_estim) - additional_loss = alg.additional_loss - (param_estim && isnothing(additional_loss)) && throw(ArgumentError("Please provide `additional_loss` in `NNODE` for parameter estimation (`param_estim` is true).")) + inner_f = generate_loss(strategy, phi, f, autodiff, tspan, p, differential_vars) # Creates OptimizationFunction Object from total_loss - function total_loss(θ, _) - L2_loss = inner_f(θ, phi) - if !(additional_loss isa Nothing) - L2_loss = L2_loss + additional_loss(phi, θ) - end - if !(tstops isa Nothing) - num_tstops_points = length(tstops) - tstops_loss_func = evaluate_tstops_loss(phi, f, autodiff, tstops, p, batch, param_estim) - tstops_loss = tstops_loss_func(θ, phi) - if strategy isa GridTraining - num_original_points = length(tspan[1]:(strategy.dx):tspan[2]) - elseif strategy isa Union{WeightedIntervalTraining, StochasticTraining} - num_original_points = strategy.points - else - return L2_loss + tstops_loss - end - total_original_loss = L2_loss * num_original_points - total_tstops_loss = tstops_loss * num_tstops_points - total_points = num_original_points + num_tstops_points - L2_loss = (total_original_loss + total_tstops_loss) / total_points - end - return L2_loss - end + total_loss(θ, _) = inner_f(θ, phi) - # Choice of Optimization Algo for Training Strategies - opt_algo = if strategy isa QuadratureTraining - Optimization.AutoForwardDiff() - else - Optimization.AutoZygote() - end + # Optimization Algo for Training Strategies + opt_algo = Optimization.AutoZygote() # Creates OptimizationFunction Object from total_loss optf = OptimizationFunction(total_loss, opt_algo) diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl index 9c14355e22..7199d190c7 100644 --- a/test/NNDAE_tests.jl +++ b/test/NNDAE_tests.jl @@ -63,80 +63,5 @@ end @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4 end -@testset "WeightedIntervalTraining" begin - example = (du, u, p, t) -> [u[1] - t - du[1], u[2] - t - du[2]] - differential_vars = [false, true] - u₀ = [1.0, -1.0] - du₀ = [0.0, 0.0] - p = 0 - tspan = (0.0f0, pi / 2.0f0) - prob = DAEProblem(example, du₀, u₀, tspan, p; differential_vars = differential_vars) #Lack of parameter here?? 
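# (Not part of the patch: the testsets removed in this revert assert against
# `ground_sol`, which is never defined inside these @testset blocks, so they
# could not have run as written; commits later in the series rebuild these
# tests with `ground_sol` computed from a mass-matrix ODE ground truth.)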
-    chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2))
-    opt = OptimizationOptimisers.Adam(0.1)
-    weights = [0.7, 0.2, 0.1]
-    points = 200
-    alg = NNDAE(chain, opt,init_params = nothing; autodiff = false,strategy = NeuralPDE.WeightedIntervalTraining(weights, points))
-    sol = solve(prob, alg, verbose = false, maxiters = 5000, saveat = 0.01)
-    @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4
-end
-
-
-@testset "StochasticTraining" begin
-    example = (du, u, p, t) -> [u[1] - t - du[1], u[2] - t - du[2]]
-    differential_vars = [false, true]
-    u₀ = [1.0, -1.0]
-    du₀ = [0.0, 0.0]
-    tspan = (0.0f0, pi / 2.0f0)
-    prob = DAEProblem(example, du₀, u₀, tspan; differential_vars = differential_vars)
-    chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2))
-    opt = OptimizationOptimisers.Adam(0.1)
-    weights = [0.7, 0.2, 0.1]
-    points = 200
-    alg = NNDAE(chain, opt,init_params = nothing; autodiff = false,strategy = StochasticTraining(100))
-    sol = solve(prob, alg, verbose = false, maxiters = 5000, saveat = 0.01)
-    @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4
-end
-
-
-#=
-
-@testset "WeightedIntervalTraining" begin
-    println("WeightedIntervalTraining")
-    function example3(du, u, p, t)
-        du[1] = -u[2]
-        u[1] = -u[2]
-        return out
-    end
-    p = []
-    u0 = [1.0/4.0, 1.0/4.0]
-    du0 = [0,0]
-    tspan = (0.0, 100000.0)
-    differential_vars = [true, false]
-    prob = DAEProblem(example3, du0, u0, tspan, differential_vars = differential_vars)
-    true_out_1(t) = exp(-t)/4.0
-    true_out_2(t) = -1.0 * exp(-t)/4.0
-    func = Lux.σ
-    N = 12
-    chain = Lux.Chain(Lux.Dense(1, N, func), Lux.Dense(N, N, func), Lux.Dense(N, N, func), Lux.Dense(N, N, func), Lux.Dense(N, length(u0)))
-    opt = OptimizationOptimisers.Adam(0.01)
-    weights = [0.7, 0.2, 0.1]
-    points = 200
-    alg = NNDAE(chain, opt,init_params = nothing; autodiff = false,strategy = NeuralPDE.WeightedIntervalTraining(weights, points))
-    sol = solve(prob, alg, verbose = false, maxiters = 5000, saveat = 0.01)
-    #@test abs(mean(sol) - mean(true_sol)) < 0.2
-    """Sol would have 2 outputs: one for u[1] and the other for u[2] so just I need compute the total error for all t in tspan """
-    total_error = 0
-    for i in tspan
-        total_error = total_error + abs(sol(i) - [true_out_1(i) true_out_2(i)])
-    end
-    if total_error < 0.01
-        print("It works!")
-    else
-        print("Total error exceeds bound")
-    end
-end
-
-=#
-
-

From 1ed7683952705f51b19992d6ce68852259605ee5 Mon Sep 17 00:00:00 2001
From: hippyhippohops
Date: Sun, 5 May 2024 23:01:25 -0500
Subject: [PATCH 14/24] Implemented WeightedIntervalTraining and its test

---
 src/dae_solve.jl    | 31 +++++++++++++++++++++++++++++++
 test/NNDAE_tests.jl | 33 ++++++++++++++++++++++++++++++++-
 2 files changed, 63 insertions(+), 1 deletion(-)

diff --git a/src/dae_solve.jl b/src/dae_solve.jl
index 5a5ee83be3..df18ad08d9 100644
--- a/src/dae_solve.jl
+++ b/src/dae_solve.jl
@@ -79,6 +79,35 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p,
     return loss
 end

+function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p,
+    differential_vars::AbstractVector)
+    autodiff && throw(ArgumentError("autodiff not supported for GridTraining."))
+    minT = tspan[1]
+    maxT = tspan[2]
+
+    weights = strategy.weights ./ sum(strategy.weights)
+
+    N = length(weights)
+    points = strategy.points
+
+    difference = (maxT-minT)/N
+
+    data = Float64[]
+    for (index, item) in enumerate(weights)
+        temp_data = rand(1, trunc(Int, points * item)) .* difference .+ minT .+
+                    ((index - 1) * difference)
+        data =
append!(data, temp_data) + end + + ts = data + + function loss(θ, _) + sum(inner_loss(phi, f, autodiff, ts, θ, p, differential_vars)) + end + return loss +end + + function SciMLBase.__solve(prob::SciMLBase.AbstractDAEProblem, alg::NNDAE, args...; @@ -138,6 +167,8 @@ function SciMLBase.__solve(prob::SciMLBase.AbstractDAEProblem, else error("dt is not defined") end + else + alg.strategy end inner_f = generate_loss(strategy, phi, f, autodiff, tspan, p, differential_vars) diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl index 7199d190c7..e5930ec063 100644 --- a/test/NNDAE_tests.jl +++ b/test/NNDAE_tests.jl @@ -54,7 +54,38 @@ end prob = DAEProblem(example, du₀, u₀, tspan; differential_vars = differential_vars) chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2)) opt = OptimizationOptimisers.Adam(0.1) - alg = NNDAE(chain, OptimizationOptimisers.Adam(0.1); autodiff = false) + alg = NeuralPDE.NNDAE(chain, OptimizationOptimisers.Adam(0.1); autodiff = false) + + sol = solve(prob, + alg, verbose = false, dt = 1 / 100.0f0, + maxiters = 3000, abstol = 1.0f-10) + + @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4 +end + +@testset "WeightedIntervalTraining" begin + function example2(du, u, p, t) + du[1] = u[1] - t + du[2] = u[2] - t + nothing + end + M = [0.0 0 + 0 1] + u₀ = [0.0, 0.0] + du₀ = [0.0, 0.0] + tspan = (0.0f0, pi / 2.0f0) + f = ODEFunction(example2, mass_matrix = M) + prob_mm = ODEProblem(f, u₀, tspan) + ground_sol = solve(prob_mm, Rodas5(), reltol = 1e-8, abstol = 1e-8) + + example = (du, u, p, t) -> [u[1] - t - du[1], u[2] - t - du[2]] + differential_vars = [false, true] + prob = DAEProblem(example, du₀, u₀, tspan; differential_vars = differential_vars) + chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2)) + opt = OptimizationOptimisers.Adam(0.1) + weights = [0.7, 0.2, 0.1] + points = 200 + alg = NeuralPDE.NNDAE(chain, OptimizationOptimisers.Adam(0.1), strategy = NeuralPDE.WeightedIntervalTraining(weights, points); autodiff = false) sol = solve(prob, alg, verbose = false, dt = 1 / 100.0f0, From 2f9db684af12abb78512d6a746396492115f6197 Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Sun, 5 May 2024 23:39:26 -0500 Subject: [PATCH 15/24] Formatted Code --- src/dae_solve.jl | 14 +++++++------- test/NNDAE_tests.jl | 6 ++---- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/src/dae_solve.jl b/src/dae_solve.jl index df18ad08d9..5c2936c4d3 100644 --- a/src/dae_solve.jl +++ b/src/dae_solve.jl @@ -79,19 +79,20 @@ function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, return loss end -function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, - differential_vars::AbstractVector) +function generate_loss( + strategy::WeightedIntervalTraining, phi, f, autodiff::Bool, tspan, p, + differential_vars::AbstractVector) autodiff && throw(ArgumentError("autodiff not supported for GridTraining.")) minT = tspan[1] maxT = tspan[2] - + weights = strategy.weights ./ sum(strategy.weights) N = length(weights) points = strategy.points - difference = (maxT-minT)/N - + difference = (maxT - minT) / N + data = Float64[] for (index, item) in enumerate(weights) temp_data = rand(1, trunc(Int, points * item)) .* difference .+ minT .+ @@ -102,12 +103,11 @@ function generate_loss(strategy::WeightedIntervalTraining, phi, f, autodiff::Boo ts = data function loss(θ, _) - sum(inner_loss(phi, f, autodiff, ts, θ, p, differential_vars)) + sum(inner_loss(phi, f, autodiff, ts, θ, p, differential_vars)) end return loss end - function 
SciMLBase.__solve(prob::SciMLBase.AbstractDAEProblem, alg::NNDAE, args...; diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl index e5930ec063..1f9a31438d 100644 --- a/test/NNDAE_tests.jl +++ b/test/NNDAE_tests.jl @@ -85,7 +85,8 @@ end opt = OptimizationOptimisers.Adam(0.1) weights = [0.7, 0.2, 0.1] points = 200 - alg = NeuralPDE.NNDAE(chain, OptimizationOptimisers.Adam(0.1), strategy = NeuralPDE.WeightedIntervalTraining(weights, points); autodiff = false) + alg = NeuralPDE.NNDAE(chain, OptimizationOptimisers.Adam(0.1), + strategy = NeuralPDE.WeightedIntervalTraining(weights, points); autodiff = false) sol = solve(prob, alg, verbose = false, dt = 1 / 100.0f0, @@ -93,6 +94,3 @@ end @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4 end - - - From c2453d2559d70b9ef289432986e9208d2457bafe Mon Sep 17 00:00:00 2001 From: hippyhippohops Date: Wed, 8 May 2024 15:18:34 -0500 Subject: [PATCH 16/24] Added in failed Quadature training --- src/dae_solve.jl | 29 ++++++++++++++++++++++++++++- test/NNDAE_tests.jl | 28 ++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 1 deletion(-) diff --git a/src/dae_solve.jl b/src/dae_solve.jl index 5c2936c4d3..b2ab39c4ca 100644 --- a/src/dae_solve.jl +++ b/src/dae_solve.jl @@ -69,6 +69,11 @@ function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::AbstractVector, sum(abs2, loss) / length(t) end +function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::Number, θ, + p, differential_vars::AbstractVector) where {C, T, U} + sum(abs2, dfdx(phi, t, θ, autodiff,differential_vars) .- f(phi(t, θ), p_, t)) +end + function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p, differential_vars::AbstractVector) ts = tspan[1]:(strategy.dx):tspan[2] @@ -108,6 +113,25 @@ function generate_loss( return loss end + +function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p, + differential_vars::AbstractVector) + integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) + + function integrand(ts, θ) + sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p, differential_vars)) + end + + function loss(θ, _) + intf = BatchIntegralFunction(integrand, max_batch = strategy.batch) + intprob = IntegralProblem(intf, (tspan[1], tspan[2]), θ) + sol = solve(intprob, strategy.quadrature_alg; abstol = strategy.abstol, + reltol = strategy.reltol, maxiters = strategy.maxiters) + sol.u + end + return loss +end + function SciMLBase.__solve(prob::SciMLBase.AbstractDAEProblem, alg::NNDAE, args...; @@ -165,7 +189,10 @@ function SciMLBase.__solve(prob::SciMLBase.AbstractDAEProblem, if dt !== nothing GridTraining(dt) else - error("dt is not defined") + QuadratureTraining(; quadrature_alg = QuadGKJL(), + reltol = convert(eltype(u0), reltol), + abstol = convert(eltype(u0), abstol), maxiters = maxiters, + batch = 0) end else alg.strategy diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl index 1f9a31438d..6a12d90d81 100644 --- a/test/NNDAE_tests.jl +++ b/test/NNDAE_tests.jl @@ -5,6 +5,7 @@ import Lux, OptimizationOptimisers, OptimizationOptimJL Random.seed!(100) +#= @testset "Example 1" begin function example1(du, u, p, t) du[1] = cos(2pi * t) @@ -94,3 +95,30 @@ end @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4 end + + +=# +@testset "QuadratureTraining" begin + function example2(du, u, p, t) + du[1] = u[1] - t + du[2] = u[2] - t + nothing + end + M = [0.0 0 + 0 1] + u₀ = [0.0, 0.0] + du₀ = [0.0, 0.0] + tspan = (0.0f0, pi / 2.0f0) + f = ODEFunction(example2, mass_matrix = M) + prob_mm = ODEProblem(f, 
From 7c6c2bf397b03f175310931a5811a94cb9c9160c Mon Sep 17 00:00:00 2001
From: hippyhippohops
Date: Fri, 17 May 2024 00:02:57 +0800
Subject: [PATCH 17/24] Trying to work out quadrature training strategy

---
 src/dae_solve.jl    | 25 ++++++++++++++++++++++---
 test/NNDAE_tests.jl |  5 +++--
 2 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/src/dae_solve.jl b/src/dae_solve.jl
index b2ab39c4ca..dbae3d50fc 100644
--- a/src/dae_solve.jl
+++ b/src/dae_solve.jl
@@ -47,6 +47,25 @@ function NNDAE(chain, opt, init_params = nothing; strategy = nothing, autodiff =
     NNDAE(chain, opt, init_params, autodiff, strategy, kwargs)
 end

+
+function dfdx(phi::ODEPhi{C, T, U}, t::Number, θ,
+        autodiff::Bool, differential_vars::AbstractVector) where {C, T, U <: Number}
+    if autodiff
+        ForwardDiff.derivative(t -> phi(t, θ), t)
+    else
+        (phi(t + sqrt(eps(typeof(t))), θ) - phi(t, θ)) / sqrt(eps(typeof(t)))
+    end
+end
+
+function dfdx(phi::ODEPhi{C, T, U}, t::Number, θ,
+        autodiff::Bool, differential_vars::AbstractVector) where {C, T, U <: AbstractVector}
+    if autodiff
+        ForwardDiff.jacobian(t -> phi(t, θ), t)
+    else
+        (phi(t + sqrt(eps(typeof(t))), θ) - phi(t, θ)) / sqrt(eps(typeof(t)))
+    end
+end
+
 function dfdx(phi::ODEPhi, t::AbstractVector, θ,
         autodiff::Bool, differential_vars::AbstractVector)
     if autodiff
@@ -71,7 +90,7 @@ end

 function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::Number, θ,
         p, differential_vars::AbstractVector) where {C, T, U}
-    sum(abs2, dfdx(phi, t, θ, autodiff, differential_vars) .- f(phi(t, θ), p_, t))
+    sum(abs2, dfdx(phi, t, θ, autodiff, differential_vars) .- f(phi(t, θ), t))
 end

 function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p,
@@ -119,8 +138,8 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p,
     integrand(t::Number, θ) = abs2(inner_loss(phi, f, autodiff, t, θ, p, differential_vars))

     function integrand(ts, θ)
-        sum(abs2, inner_loss(phi, f, autodiff, ts, θ, p, differential_vars))
-    end
+        [sum(abs2, inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) for t in ts]
+end

     function loss(θ, _)
         intf = BatchIntegralFunction(integrand, max_batch = strategy.batch)

diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl
index 6a12d90d81..24dac3bbbd 100644
--- a/test/NNDAE_tests.jl
+++ b/test/NNDAE_tests.jl
@@ -4,7 +4,6 @@ using OrdinaryDiffEq, Statistics
 import Lux, OptimizationOptimisers, OptimizationOptimJL

 Random.seed!(100)
-
 #=
 @testset "Example 1" begin
     function example1(du, u, p, t)
@@ -96,7 +95,6 @@ end

     @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4
 end
-
 =#
 @testset "QuadratureTraining" begin
     function example2(du, u, p, t)
@@ -120,5 +118,8 @@ end
     opt = OptimizationOptimisers.Adam(0.1)
     alg = NeuralPDE.NNDAE(chain, opt; autodiff = false)
     sol = solve(prob,alg, verbose = false, maxiters = 3000, abstol = 1.0f-10)
+    print("Everything Works\n")
+    #=
     @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4
+    =#
 end
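Both new dfdx methods in this patch fall back to a one-sided finite difference with step h = sqrt(eps(typeof(t))) when autodiff is off — a standard compromise between truncation and rounding error for Float64. A standalone check against a known derivative (phi here is just a scalar stand-in for the network t -> phi(t, θ)):

    phi(t) = sin(2t)                     # stand-in for the trained network output
    function fd_derivative(g, t)
        h = sqrt(eps(typeof(t)))         # same step choice as dfdx above
        (g(t + h) - g(t)) / h
    end
    abs(fd_derivative(phi, 0.3) - 2cos(0.6))   # on the order of 1e-8, i.e. ~sqrt(eps) accuracy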
From 70e0657217d19f67d08f6c87b6de5ad14d924016 Mon Sep 17 00:00:00 2001
From: hippyhippohops
Date: Fri, 17 May 2024 00:28:25 +0800
Subject: [PATCH 18/24] Stochastic training passes

---
 src/dae_solve.jl    | 11 +++++++++++
 test/NNDAE_tests.jl | 37 +++++++++++++++++++++++++++++++++----
 2 files changed, 44 insertions(+), 4 deletions(-)

diff --git a/src/dae_solve.jl b/src/dae_solve.jl
index dbae3d50fc..7614e9a966 100644
--- a/src/dae_solve.jl
+++ b/src/dae_solve.jl
@@ -151,6 +151,17 @@ end
     return loss
 end

+function generate_loss(strategy::StochasticTraining, phi, f, autodiff::Bool, tspan, p,
+        differential_vars::AbstractVector)
+    autodiff && throw(ArgumentError("autodiff not supported for StochasticTraining."))
+    function loss(θ, _)
+        ts = adapt(parameterless_type(θ),
+            [(tspan[2] - tspan[1]) * rand() + tspan[1] for i in 1:(strategy.points)])
+        sum(inner_loss(phi, f, autodiff, ts, θ, p, differential_vars))
+    end
+    return loss
+end
+
 function SciMLBase.__solve(prob::SciMLBase.AbstractDAEProblem,
         alg::NNDAE,
         args...;

diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl
index 24dac3bbbd..3ac37117c4 100644
--- a/test/NNDAE_tests.jl
+++ b/test/NNDAE_tests.jl
@@ -4,7 +4,7 @@ using OrdinaryDiffEq, Statistics
 import Lux, OptimizationOptimisers, OptimizationOptimJL

 Random.seed!(100)
-#=
+
 @testset "Example 1" begin
     function example1(du, u, p, t)
         du[1] = cos(2pi * t)
@@ -95,7 +95,37 @@ end

     @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4
 end
-=#
+
+@testset "StochasticTraining" begin
+    function example2(du, u, p, t)
+        du[1] = u[1] - t
+        du[2] = u[2] - t
+        nothing
+    end
+    M = [0.0 0
+         0 1]
+    u₀ = [0.0, 0.0]
+    du₀ = [0.0, 0.0]
+    tspan = (0.0f0, pi / 2.0f0)
+    f = ODEFunction(example2, mass_matrix = M)
+    prob_mm = ODEProblem(f, u₀, tspan)
+    ground_sol = solve(prob_mm, Rodas5(), reltol = 1e-8, abstol = 1e-8)
+
+    example = (du, u, p, t) -> [u[1] - t - du[1], u[2] - t - du[2]]
+    differential_vars = [false, true]
+    prob = DAEProblem(example, du₀, u₀, tspan; differential_vars = differential_vars)
+    chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2))
+    opt = OptimizationOptimisers.Adam(0.1)
+    alg = NeuralPDE.NNDAE(chain, OptimizationOptimisers.Adam(0.1),
+        strategy = NeuralPDE.StochasticTraining(1000); autodiff = false)
+    sol = solve(prob,
+        alg, verbose = false, dt = 1 / 100.0f0,
+        maxiters = 3000, abstol = 1.0f-10)
+
+    @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4
+end
+
+
+#=
 @testset "QuadratureTraining" begin
     function example2(du, u, p, t)
         du[1] = u[1] - t
@@ -119,7 +149,6 @@ end
     alg = NeuralPDE.NNDAE(chain, opt; autodiff = false)
     sol = solve(prob,alg, verbose = false, maxiters = 3000, abstol = 1.0f-10)
     print("Everything Works\n")
-    #=
     @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4
-    =#
 end
+=#
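StochasticTraining, which this patch gets passing, redraws strategy.points uniform collocation times on every loss evaluation, so each optimiser step penalises the residual at a fresh random sample of the interval. The sampling-and-summing core as a standalone sketch (the residual is a hypothetical stand-in for inner_loss):

    residual(t) = abs2(cos(t) - 1.0)     # hypothetical pointwise residual
    function stochastic_loss(tspan, points)
        ts = (tspan[2] - tspan[1]) .* rand(points) .+ tspan[1]   # fresh draw per call
        sum(residual, ts)
    end
    stochastic_loss((0.0, pi / 2), 1000)   # similar but not identical value on each call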
From 0098c6de4286c8ab904993ae0857db277f58a0c6 Mon Sep 17 00:00:00 2001
From: hippyhippohops
Date: Sun, 26 May 2024 20:48:56 +0800
Subject: [PATCH 19/24] Updates on NNDAE_tests.jl

---
 src/dae_solve.jl    | 10 +++++++++-
 test/NNDAE_tests.jl |  4 +---
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/src/dae_solve.jl b/src/dae_solve.jl
index 7614e9a966..6fea0f2dd7 100644
--- a/src/dae_solve.jl
+++ b/src/dae_solve.jl
@@ -88,10 +88,18 @@ function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::AbstractVector,
     sum(abs2, loss) / length(t)
 end

+#=
 function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::Number, θ,
         p, differential_vars::AbstractVector) where {C, T, U}
     sum(abs2, dfdx(phi, t, θ, autodiff, differential_vars) .- f(phi(t, θ), t))
 end
+=#
+
+function inner_loss(phi::ODEPhi{C, T, U}, f, autodiff::Bool, t::Number, θ,
+        p, differential_vars::AbstractVector) where {C, T, U}
+    dphi = dfdx(phi, t, θ, autodiff, differential_vars)
+    sum(abs2, f(dphi, phi(t, θ), p, t))
+end

 function generate_loss(strategy::GridTraining, phi, f, autodiff::Bool, tspan, p,
         differential_vars::AbstractVector)
@@ -139,7 +147,7 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tspan, p,

     function integrand(ts, θ)
         [sum(abs2, inner_loss(phi, f, autodiff, t, θ, p, differential_vars)) for t in ts]
-end
+    end

     function loss(θ, _)
         intf = BatchIntegralFunction(integrand, max_batch = strategy.batch)

diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl
index 3ac37117c4..a14ac2adcf 100644
--- a/test/NNDAE_tests.jl
+++ b/test/NNDAE_tests.jl
@@ -125,7 +125,6 @@ end
 end

-#=
 @testset "QuadratureTraining" begin
@@ -149,7 +148,6 @@ end
     sol = solve(prob,alg, verbose = false, maxiters = 3000, abstol = 1.0f-10)
     print("Everything Works\n")
-    @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4
+    @test ground_sol(0:(pi / 200):(pi / 2))≈sol atol=0.4
 end
-=#

From 3e9473e5f8bd70cd1ea8254a6366ddd365754281 Mon Sep 17 00:00:00 2001
From: hippyhippohops
Date: Sun, 26 May 2024 20:54:14 +0800
Subject: [PATCH 20/24] Remove leftover debug print

---
 test/NNDAE_tests.jl | 1 -
 1 file changed, 1 deletion(-)

diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl
index a14ac2adcf..a43bbe3fc3 100644
--- a/test/NNDAE_tests.jl
+++ b/test/NNDAE_tests.jl
@@ -147,6 +147,5 @@ end
     opt = OptimizationOptimisers.Adam(0.1)
     alg = NeuralPDE.NNDAE(chain, opt; autodiff = false)
     sol = solve(prob,alg, verbose = false, maxiters = 3000, abstol = 1.0f-10)
-    print("Everything Works\n")
     @test ground_sol(0:(pi / 200):(pi / 2))≈sol atol=0.4
 end

From b00c8cfc05d1b14cc4b4a2bbccd3be9444b667c4 Mon Sep 17 00:00:00 2001
From: hippyhippohops
Date: Tue, 4 Jun 2024 13:06:48 +0800
Subject: [PATCH 21/24] Removing empty line; bumping maxiters to 10000

---
 test/NNDAE_tests.jl | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl
index a43bbe3fc3..361fab4075 100644
--- a/test/NNDAE_tests.jl
+++ b/test/NNDAE_tests.jl
@@ -124,7 +124,6 @@ end
     @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4
 end
-
 @testset "QuadratureTraining" begin
     function example2(du, u, p, t)
         du[1] = u[1] - t
@@ -146,6 +145,6 @@ end
     chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2))
     opt = OptimizationOptimisers.Adam(0.1)
     alg = NeuralPDE.NNDAE(chain, opt; autodiff = false)
-    sol = solve(prob,alg, verbose = false, maxiters = 3000, abstol = 1.0f-10)
+    sol = solve(prob,alg, verbose = false, maxiters = 10000, abstol = 1.0f-10)
     @test ground_sol(0:(pi / 200):(pi / 2))≈sol atol=0.4
 end

From a4e2877e1094b46e692d72e591f09a4f8597468e Mon Sep 17 00:00:00 2001
From: hippyhippohops
Date: Fri, 7 Jun 2024 09:33:45 +0800
Subject: [PATCH 22/24] Changes to quadrature training

---
 test/NNDAE_tests.jl | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl
index 361fab4075..64efc04e9f 100644
--- a/test/NNDAE_tests.jl
+++ b/test/NNDAE_tests.jl
@@ -144,7 +144,8 @@ end
     prob = DAEProblem(example, du₀, u₀, tspan; differential_vars = differential_vars)
     chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2))
     opt = OptimizationOptimisers.Adam(0.1)
-    alg = NeuralPDE.NNDAE(chain, opt; autodiff = false)
+    alg = NeuralPDE.NNDAE(chain, OptimizationOptimisers.Adam(0.1); autodiff = false)
     sol = solve(prob,alg, verbose = false, maxiters = 10000, abstol = 1.0f-10)
     @test ground_sol(0:(pi / 200):(pi / 2))≈sol atol=0.4
 end
+
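The substantive fix in patch 19 above is the residual form of the scalar inner_loss: rather than rearranging the DAE into an explicit right-hand side, it feeds the candidate derivative and state straight into f(du, u, p, t) and penalises whatever residual comes back. With the out-of-place test problem the computation reduces to the following (uv and duv are hypothetical stand-ins for the network output and its dfdx derivative):

    example = (du, u, p, t) -> [u[1] - t - du[1], u[2] - t - du[2]]
    uv  = [0.5, 0.4]      # stand-in for phi(t, θ)
    duv = [0.1, 0.2]      # stand-in for dfdx(phi, t, θ, autodiff, differential_vars)
    t, p = 0.2, nothing
    sum(abs2, example(duv, uv, p, t))   # ≈ 0.04 here; zero exactly when the DAE holds at t

This is also why the earlier f(phi(t, θ), p_, t) and f(phi(t, θ), t) attempts failed: a DAEProblem's function takes the residual signature, not an ODE right-hand side.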
From 41dbf62fb8460bbeee3b2de2f9ac4928649309e2 Mon Sep 17 00:00:00 2001
From: hippyhippohops
Date: Mon, 10 Jun 2024 13:54:15 +0800
Subject: [PATCH 23/24] Added Quadrature training

---
 test/NNDAE_tests.jl | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl
index 64efc04e9f..a34ce3450b 100644
--- a/test/NNDAE_tests.jl
+++ b/test/NNDAE_tests.jl
@@ -143,9 +143,10 @@ end
     differential_vars = [false, true]
     prob = DAEProblem(example, du₀, u₀, tspan; differential_vars = differential_vars)
     chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2))
-    opt = OptimizationOptimisers.Adam(0.1)
-    alg = NeuralPDE.NNDAE(chain, OptimizationOptimisers.Adam(0.1); autodiff = false)
-    sol = solve(prob,alg, verbose = false, maxiters = 10000, abstol = 1.0f-10)
-    @test ground_sol(0:(pi / 200):(pi / 2))≈sol atol=0.4
+    opt = OptimizationOptimisers.RMSProp(0.1)
+    alg = NeuralPDE.NNDAE(chain, OptimizationOptimisers.RMSProp(0.1); autodiff = false)
+    sol = solve(prob,alg, verbose = false, maxiters = 6000, abstol = 1.0f-10)
+    #print(ground_sol(0:(pi / 200):(pi / 2)) - sol(0:(pi / 200):(pi / 2)))
+    @test ground_sol(0:(pi / 200):(pi / 2))≈sol atol=3.5
 end

From 9c72fee89497214b964fabb25eb3bfa16c22a38b Mon Sep 17 00:00:00 2001
From: hippyhippohops
Date: Mon, 8 Jul 2024 15:10:11 +0800
Subject: [PATCH 24/24] Changing to Float64

---
 test/NNDAE_tests.jl | 71 ++++++++++++++++++++++-----------------------
 1 file changed, 34 insertions(+), 37 deletions(-)

diff --git a/test/NNDAE_tests.jl b/test/NNDAE_tests.jl
index a34ce3450b..16ca01e344 100644
--- a/test/NNDAE_tests.jl
+++ b/test/NNDAE_tests.jl
@@ -16,7 +16,7 @@ Random.seed!(100)
     M = [1.0 0
          0 0]
     f = ODEFunction(example1, mass_matrix = M)
-    tspan = (0.0f0, 1.0f0)
+    tspan = (0.0, 1.0)
     prob_mm = ODEProblem(f, u₀, tspan)
     ground_sol = solve(prob_mm, Rodas5(), reltol = 1e-8, abstol = 1e-8)
@@ -25,13 +25,13 @@ Random.seed!(100)
     differential_vars = [true, false]
     prob = DAEProblem(example, du₀, u₀, tspan; differential_vars = differential_vars)
     chain = Lux.Chain(Lux.Dense(1, 15, cos), Lux.Dense(15, 15, sin), Lux.Dense(15, 2))
-    opt = OptimizationOptimisers.Adam(0.1)
-    alg = NeuralPDE.NNDAE(chain, opt; autodiff = false)
+    opt = OptimizationOptimJL.BFGS(linesearch = BackTracking())
+    alg = NNDAE(chain, opt; autodiff = false)
     sol = solve(prob,
-        alg, verbose = false, dt = 1 / 100.0f0,
-        maxiters = 3000, abstol = 1.0f-10)
-    @test ground_sol(0:(1 / 100):1)≈sol atol=0.4
+        alg, verbose = false, dt = 1 / 100.0,
+        maxiters = 3000, abstol = 1e-10)
+    @test reduce(hcat, ground_sol(0:(1 / 100):1).u)≈reduce(hcat, sol.u) rtol=1e-1
 end

 @testset "Example 2" begin
@@ -44,7 +44,7 @@ end
          0 1]
     u₀ = [0.0, 0.0]
     du₀ = [0.0, 0.0]
-    tspan = (0.0f0, pi / 2.0f0)
+    tspan = (0.0, pi / 2.0)
     f = ODEFunction(example2, mass_matrix = M)
     prob_mm = ODEProblem(f, u₀, tspan)
     ground_sol = solve(prob_mm, Rodas5(), reltol = 1e-8, abstol = 1e-8)
@@ -54,13 +54,13 @@ end
     prob = DAEProblem(example, du₀, u₀, tspan; differential_vars = differential_vars)
     chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2))
     opt = OptimizationOptimisers.Adam(0.1)
-    alg = NeuralPDE.NNDAE(chain, OptimizationOptimisers.Adam(0.1); autodiff = false)
+    alg = NNDAE(chain, OptimizationOptimisers.Adam(0.1); autodiff = false)

     sol = solve(prob,
-        alg, verbose = false, dt = 1 / 100.0f0,
-        maxiters = 3000, abstol = 1.0f-10)
+        alg, verbose = false, dt = 1 / 100.0,
+        maxiters = 3000, abstol = 1e-10)

-    @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4
+    @test reduce(hcat, ground_sol(0:(1 / 100):(pi / 2.0)).u)≈reduce(hcat, sol.u) rtol=1e-2
 end

 @testset "WeightedIntervalTraining" begin
@@ -69,11 +69,11 @@ end
         du[2] = u[2] - t
         nothing
     end
-    M = [0.0 0
-         0 1]
+    M = [0.0 0.0
+         0.0 1.0]
     u₀ = [0.0, 0.0]
     du₀ = [0.0, 0.0]
-    tspan = (0.0f0, pi / 2.0f0)
+    tspan = (0.0, pi / 2.0)
     f = ODEFunction(example2, mass_matrix = M)
     prob_mm = ODEProblem(f, u₀, tspan)
     ground_sol = solve(prob_mm, Rodas5(), reltol = 1e-8, abstol = 1e-8)
@@ -85,14 +85,14 @@ end
     opt = OptimizationOptimisers.Adam(0.1)
     weights = [0.7, 0.2, 0.1]
     points = 200
-    alg = NeuralPDE.NNDAE(chain, OptimizationOptimisers.Adam(0.1),
-        strategy = NeuralPDE.WeightedIntervalTraining(weights, points); autodiff = false)
+    alg = NNDAE(chain, OptimizationOptimisers.Adam(0.1),
+        strategy = WeightedIntervalTraining(weights, points); autodiff = false)

     sol = solve(prob,
-        alg, verbose = false, dt = 1 / 100.0f0,
-        maxiters = 3000, abstol = 1.0f-10)
+        alg, verbose = false, dt = 1 / 100.0,
+        maxiters = 3000, abstol = 1e-10)

-    @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4
+    @test reduce(hcat, ground_sol(0:(1 / 100):(pi / 2.0)).u)≈reduce(hcat, sol.u) rtol=1e-2
 end

 @testset "StochasticTraining" begin
@@ -101,11 +101,11 @@ end
         du[2] = u[2] - t
         nothing
     end
-    M = [0.0 0
-         0 1]
+    M = [0.0 0.0
+         0.0 1.0]
     u₀ = [0.0, 0.0]
     du₀ = [0.0, 0.0]
-    tspan = (0.0f0, pi / 2.0f0)
+    tspan = (0.0, pi / 2.0)
     f = ODEFunction(example2, mass_matrix = M)
     prob_mm = ODEProblem(f, u₀, tspan)
     ground_sol = solve(prob_mm, Rodas5(), reltol = 1e-8, abstol = 1e-8)
@@ -117,11 +117,10 @@ end
     opt = OptimizationOptimisers.Adam(0.1)
     alg = NeuralPDE.NNDAE(chain, OptimizationOptimisers.Adam(0.1),
         strategy = NeuralPDE.StochasticTraining(1000); autodiff = false)
-    sol = solve(prob,
-        alg, verbose = false, dt = 1 / 100.0f0,
-        maxiters = 3000, abstol = 1.0f-10)
-
-    @test ground_sol(0:(1 / 100):(pi / 2))≈sol atol=0.4
+    sol = solve(prob,
+        alg, verbose = false, dt = 1 / 100.0,
+        maxiters = 3000, abstol = 1e-10)
+    @test reduce(hcat, ground_sol(0:(1 / 100):(pi / 2.0)).u)≈reduce(hcat, sol.u) rtol=1e-2
 end

 @testset "QuadratureTraining" begin
@@ -130,11 +129,11 @@ end
         du[2] = u[2] - t
         nothing
     end
-    M = [0.0 0
-         0 1]
+    M = [0.0 0.0
+         0.0 1.0]
    u₀ = [0.0, 0.0]
     du₀ = [0.0, 0.0]
-    tspan = (0.0f0, pi / 2.0f0)
+    tspan = (0.0, pi / 2.0)
     f = ODEFunction(example2, mass_matrix = M)
     prob_mm = ODEProblem(f, u₀, tspan)
     ground_sol = solve(prob_mm, Rodas5(), reltol = 1e-8, abstol = 1e-8)
@@ -143,10 +142,8 @@ end
     differential_vars = [false, true]
     prob = DAEProblem(example, du₀, u₀, tspan; differential_vars = differential_vars)
     chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2))
-    opt = OptimizationOptimisers.RMSProp(0.1)
-    alg = NeuralPDE.NNDAE(chain, OptimizationOptimisers.RMSProp(0.1); autodiff = false)
-    sol = solve(prob,alg, verbose = false, maxiters = 6000, abstol = 1.0f-10)
-    #print(ground_sol(0:(pi / 200):(pi / 2)) - sol(0:(pi / 200):(pi / 2)))
-    @test ground_sol(0:(pi / 200):(pi / 2))≈sol atol=3.5
-end
+    opt = OptimizationOptimJL.BFGS(linesearch = BackTracking())
+    alg = NeuralPDE.NNDAE(chain, opt; autodiff = false)
+    sol = solve(prob, alg, verbose = true, maxiters = 6000, abstol = 1e-10, dt = 1/100.0)
+    @test reduce(hcat, ground_sol(0:(1 / 100):(pi / 2.0)).u)≈reduce(hcat, sol.u) rtol=1e-2
+end
\ No newline at end of file
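With the series complete, NNDAE accepts GridTraining (via dt), WeightedIntervalTraining, StochasticTraining, and QuadratureTraining. A minimal end-to-end run against the mass-matrix test problem, assembled from the final test file (assuming NeuralPDE and the listed packages are installed as in the test environment):

    using NeuralPDE, OrdinaryDiffEq, Random
    import Lux, OptimizationOptimisers
    Random.seed!(100)

    # Residual form of the test DAE; the second component is the differential variable.
    example = (du, u, p, t) -> [u[1] - t - du[1], u[2] - t - du[2]]
    u₀, du₀ = [0.0, 0.0], [0.0, 0.0]
    tspan = (0.0, pi / 2.0)
    prob = DAEProblem(example, du₀, u₀, tspan; differential_vars = [false, true])

    chain = Lux.Chain(Lux.Dense(1, 15, Lux.σ), Lux.Dense(15, 2))
    alg = NNDAE(chain, OptimizationOptimisers.Adam(0.1),
        strategy = StochasticTraining(1000); autodiff = false)
    sol = solve(prob, alg; verbose = false, dt = 1 / 100.0, maxiters = 3000, abstol = 1e-10)

    # sol.u holds the network's trajectory; compare against a stiff reference solver
    # via reduce(hcat, sol.u), as the tests above do.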