From 08bd81a861030b6a85b48c40fdad1804e0faba5a Mon Sep 17 00:00:00 2001
From: Sathvik Bhagavan
Date: Mon, 4 Mar 2024 04:17:17 +0000
Subject: [PATCH] test: clean up NeuralAdapter tests and use res.u instead of
 res.minimizer

---
 test/IDE_tests.jl             | 12 ++++++------
 test/NNPDE_tests.jl           | 14 +++++++-------
 test/NNPDE_tests_gpu_Lux.jl   | 14 +++++++-------
 test/adaptive_loss_tests.jl   |  2 +-
 test/additional_loss_tests.jl | 12 ++++++------
 test/direct_function_tests.jl | 10 +++++-----
 test/neural_adapter_tests.jl  | 26 ++++++++------------------
 7 files changed, 40 insertions(+), 50 deletions(-)

diff --git a/test/IDE_tests.jl b/test/IDE_tests.jl
index 7fcb15604a..cb3f8a3023 100644
--- a/test/IDE_tests.jl
+++ b/test/IDE_tests.jl
@@ -30,7 +30,7 @@ end
     phi = discretization.phi
    analytic_sol_func(t) = 1 / 2 * (exp(-t)) * (sin(2 * t))
     u_real = [analytic_sol_func(t) for t in ts]
-    u_predict = [first(phi([t], res.minimizer)) for t in ts]
+    u_predict = [first(phi([t], res.u)) for t in ts]
     @test Flux.mse(u_real, u_predict) < 0.01
 end
 
@@ -54,7 +54,7 @@ eq = Ix(u(x) * cos(x)) ~ (x^3) / 3
         maxiters = 200)
     xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1]
     phi = discretization.phi
-    u_predict = [first(phi([x], res.minimizer)) for x in xs]
+    u_predict = [first(phi([x], res.u)) for x in xs]
     u_real = [x^2 / cos(x) for x in xs]
     @test Flux.mse(u_real, u_predict) < 0.001
 end
@@ -78,7 +78,7 @@ end
     ys = 0.00:0.01:1.00
     phi = discretization.phi
     u_real = collect(1 - x^2 - y^2 for y in ys, x in xs);
-    u_predict = collect(Array(phi([x, y], res.minimizer))[1] for y in ys, x in xs);
+    u_predict = collect(Array(phi([x, y], res.u))[1] for y in ys, x in xs);
     @test Flux.mse(u_real, u_predict) < 0.001
 end
 
@@ -101,7 +101,7 @@ end
     ys = 0.00:0.01:1.00
     phi = discretization.phi
     u_real = collect(x + y^2 for y in ys, x in xs);
-    u_predict = collect(Array(phi([x, y], res.minimizer))[1] for y in ys, x in xs);
+    u_predict = collect(Array(phi([x, y], res.u))[1] for y in ys, x in xs);
     @test Flux.mse(u_real, u_predict) < 0.01
 end
 
@@ -144,7 +144,7 @@ end
     res = solve(prob, OptimizationOptimJL.BFGS(); callback = callback, maxiters = 200)
     xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1]
     phi = discretization.phi
-    u_predict = [first(phi([x], res.minimizer)) for x in xs]
+    u_predict = [first(phi([x], res.u)) for x in xs]
     u_real = [1 / x^2 for x in xs]
     @test u_real≈u_predict rtol=10^-2
 end
@@ -163,7 +163,7 @@ end
     res = solve(prob, OptimizationOptimJL.BFGS(); callback = callback, maxiters = 300)
     xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1]
     phi = discretization.phi
-    u_predict = [first(phi([x], res.minimizer)) for x in xs]
+    u_predict = [first(phi([x], res.u)) for x in xs]
     u_real = [1 / x^2 for x in xs]
     @test u_real≈u_predict rtol=10^-2
 end
diff --git a/test/NNPDE_tests.jl b/test/NNPDE_tests.jl
index 5602096b40..be4b3152dd 100644
--- a/test/NNPDE_tests.jl
+++ b/test/NNPDE_tests.jl
@@ -40,15 +40,15 @@ function test_ode(strategy_)
 
     prob = discretize(pde_system, discretization)
     res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.1); maxiters = 1000)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 500)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.001); maxiters = 500)
     phi = discretization.phi
     analytic_sol_func(t) = exp(-(t^2) / 2) / (1 + t + t^3) + t^2
     ts = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1]
     u_real = [analytic_sol_func(t) for t in ts]
-    u_predict = [first(phi(t, res.minimizer)) for t in ts]
+    u_predict = [first(phi(t, res.u)) for t in ts]
     @test u_predict≈u_real atol=0.1
 end
 
@@ -183,7 +183,7 @@ function test_2d_poisson_equation(chain_, strategy_)
     xs, ys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
     analytic_sol_func(x, y) = (sin(pi * x) * sin(pi * y)) / (2pi^2)
 
-    u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys],
+    u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys],
         (length(xs), length(ys)))
     u_real = reshape([analytic_sol_func(x, y) for x in xs for y in ys],
         (length(xs), length(ys)))
@@ -431,14 +431,14 @@
     @named pde_system = PDESystem(eq, bcs, domains, [θ], [u])
     prob = discretize(pde_system, discretization)
     res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.1); maxiters = 1000)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 500)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.001); maxiters = 500)
     phi = discretization.phi
     analytic_sol_func(t) = exp(-(t^2) / 2) / (1 + t + t^3) + t^2
     ts = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1]
     u_real = [analytic_sol_func(t) for t in ts]
-    u_predict = [first(phi(t, res.minimizer)) for t in ts]
+    u_predict = [first(phi(t, res.u)) for t in ts]
     @test u_predict≈u_real atol=0.1
 end
\ No newline at end of file
diff --git a/test/NNPDE_tests_gpu_Lux.jl b/test/NNPDE_tests_gpu_Lux.jl
index b3123a30cd..01e63a1aa5 100644
--- a/test/NNPDE_tests_gpu_Lux.jl
+++ b/test/NNPDE_tests_gpu_Lux.jl
@@ -52,7 +52,7 @@ const gpud = gpu_device()
     analytic_sol_func(t) = exp(-(t^2) / 2) / (1 + t + t^3) + t^2
     ts = [infimum(d.domain):(dt / 10):supremum(d.domain) for d in domains][1]
     u_real = [analytic_sol_func(t) for t in ts]
-    u_predict = [first(Array(phi([t], res.minimizer))) for t in ts]
+    u_predict = [first(Array(phi([t], res.u))) for t in ts]
     @test u_predict≈u_real atol=0.2
 end
 
@@ -86,12 +86,12 @@ end
     discretization = PhysicsInformedNN(chain, strategy; init_params = ps)
     prob = discretize(pdesys, discretization)
     res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 1000)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.001); maxiters = 1000)
     phi = discretization.phi
     u_exact = (t, x) -> exp.(-t) * cos.(x)
     ts, xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
-    u_predict = reshape([first(Array(phi([t, x], res.minimizer))) for t in ts for x in xs],
+    u_predict = reshape([first(Array(phi([t, x], res.u))) for t in ts for x in xs],
         (length(ts), length(xs)))
     u_real = reshape([u_exact(t, x) for t in ts for x in xs], (length(ts), length(xs)))
     diff_u = abs.(u_predict .- u_real)
@@ -130,12 +130,12 @@ end
     discretization = PhysicsInformedNN(chain, strategy; init_params = ps)
     prob = discretize(pdesys, discretization)
     res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.1); maxiters = 2000)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 2000)
     phi = discretization.phi
     u_exact = (t, x) -> exp(-t) * cos(x)
     ts, xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
-    u_predict = reshape([first(Array(phi([t, x], res.minimizer))) for t in ts for x in xs],
+    u_predict = reshape([first(Array(phi([t, x], res.u))) for t in ts for x in xs],
         (length(ts), length(xs)))
     u_real = reshape([u_exact(t, x) for t in ts for x in xs], (length(ts), length(xs)))
     diff_u = abs.(u_predict .- u_real)
@@ -184,12 +184,12 @@ end
     @named pde_system = PDESystem(eq, bcs, domains, [t, x, y], [u(t, x, y)])
     prob = discretize(pde_system, discretization)
     res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 2500)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.001); maxiters = 2500)
     phi = discretization.phi
     ts, xs, ys = [infimum(d.domain):0.1:supremum(d.domain) for d in domains]
     u_real = [analytic_sol_func(t, x, y) for t in ts for x in xs for y in ys]
-    u_predict = [first(Array(phi([t, x, y], res.minimizer))) for t in ts for x in xs
+    u_predict = [first(Array(phi([t, x, y], res.u))) for t in ts for x in xs
                  for y in ys]
 
     @test u_predict≈u_real rtol=0.2
diff --git a/test/adaptive_loss_tests.jl b/test/adaptive_loss_tests.jl
index 72c0d78ab2..8180a6895d 100644
--- a/test/adaptive_loss_tests.jl
+++ b/test/adaptive_loss_tests.jl
@@ -60,7 +60,7 @@ function test_2d_poisson_equation_adaptive_loss(adaptive_loss; seed = 60, maxite
         return false
     end
     res = solve(prob, OptimizationOptimisers.Adam(0.03); maxiters = maxiters, callback = callback)
-    u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys],
+    u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys],
         (length(xs), length(ys)))
     diff_u = abs.(u_predict .- u_real)
     total_diff = sum(diff_u)
diff --git a/test/additional_loss_tests.jl b/test/additional_loss_tests.jl
index be72ed7859..4fdc0bc7d0 100644
--- a/test/additional_loss_tests.jl
+++ b/test/additional_loss_tests.jl
@@ -43,7 +43,7 @@ using ComponentArrays
         function inner_f(x, θ)
             dx * phi(x, θ) .- 1
         end
-        prob1 = IntegralProblem(inner_f, lb, ub, θ)
+        prob1 = IntegralProblem(inner_f, (lb, ub), θ)
        norm2 = solve(prob1, HCubatureJL(), reltol = 1e-8, abstol = 1e-8, maxiters = 10)
         abs(norm2[1])
     end
@@ -63,7 +63,7 @@ using ComponentArrays
         return false
     end
     res = solve(prob, OptimizationOptimJL.LBFGS(), maxiters = 400, callback = cb_)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = solve(prob, OptimizationOptimJL.BFGS(), maxiters = 2000, callback = cb_)
     C = 142.88418699042
     analytic_sol_func(x) = C * exp((1 / (2 * _σ^2)) * (2 * α * x^2 - β * x^4))
@@ -88,7 +88,7 @@ using ComponentArrays
         return false
     end
     res = solve(prob, OptimizationOptimJL.LBFGS(), maxiters = 400, callback = cb_)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = solve(prob, OptimizationOptimJL.BFGS(), maxiters = 2000, callback = cb_)
     C = 142.88418699042
     analytic_sol_func(x) = C * exp((1 / (2 * _σ^2)) * (2 * α * x^2 - β * x^4))
@@ -170,7 +170,7 @@ end
         Float64[])
 
     res = solve(prob, OptimizationOptimJL.BFGS(); maxiters = 6000)
-    p_ = res.minimizer[(end - 2):end]
+    p_ = res.u[(end - 2):end]
     @test sum(abs2, p_[1] - 10.00) < 0.1
     @test sum(abs2, p_[2] - 28.00) < 0.1
     @test sum(abs2, p_[3] - (8 / 3)) < 0.1
@@ -189,7 +189,7 @@ end
     sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization)
     sym_prob.loss_functions.full_loss_function(sym_prob.flat_init_params, nothing)
     res = solve(prob, OptimizationOptimJL.BFGS(); maxiters = 6000)
-    p_ = res.minimizer[(end - 2):end]
+    p_ = res.u[(end - 2):end]
     @test sum(abs2, p_[1] - 10.00) < 0.1
     @test sum(abs2, p_[2] - 28.00) < 0.1
     @test sum(abs2, p_[3] - (8 / 3)) < 0.1
@@ -225,7 +225,7 @@ end
     phi(xs, flat_init_params)
     additional_loss_(phi, flat_init_params, nothing)
     res = solve(prob, OptimizationOptimisers.Adam(0.01), maxiters = 500)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = solve(prob, OptimizationOptimJL.BFGS(), maxiters = 500)
     @test phi(xs, res.u)≈aproxf_(xs) rtol=0.01
 end
diff --git a/test/direct_function_tests.jl b/test/direct_function_tests.jl
index f128372f51..497583954a 100644
--- a/test/direct_function_tests.jl
+++ b/test/direct_function_tests.jl
@@ -35,7 +35,7 @@ Random.seed!(110)
     @named pde_system = PDESystem(eq, bc, domain, [x], [u(x)])
     prob = discretize(pde_system, discretization)
     res = solve(prob, OptimizationOptimisers.Adam(0.05), maxiters = 1000)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = solve(prob, OptimizationOptimJL.BFGS(initial_stepnorm = 0.01), maxiters = 500)
     @test discretization.phi(xs', res.u)≈func(xs') rtol=0.01
 end
@@ -62,7 +62,7 @@ end
     @named pde_system = PDESystem(eq, bc, domain, [x], [u(x)])
     prob = discretize(pde_system, discretization)
     res = solve(prob, OptimizationOptimisers.Adam(0.01), maxiters = 500)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = solve(prob, OptimizationOptimJL.BFGS(), maxiters = 1000)
     dx = 0.01
     xs = collect(x0:dx:x_end)
@@ -95,14 +95,14 @@ end
     symprob = NeuralPDE.symbolic_discretize(pde_system, discretization)
     symprob.loss_functions.full_loss_function(symprob.flat_init_params, nothing)
     res = solve(prob, OptimizationOptimisers.Adam(0.01), maxiters = 500)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = solve(prob, OptimizationOptimJL.BFGS(), maxiters = 1000)
-    prob = remake(prob, u0 = res.minimizer)
+    prob = remake(prob, u0 = res.u)
     res = solve(prob, OptimizationOptimJL.BFGS(), maxiters = 500)
     phi = discretization.phi
     xs = collect(x0:0.1:x_end)
     ys = collect(y0:0.1:y_end)
-    u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys],
+    u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys],
         (length(xs), length(ys)))
     u_real = reshape([func(x, y) for x in xs for y in ys], (length(xs), length(ys)))
     diff_u = abs.(u_predict .- u_real)
diff --git a/test/neural_adapter_tests.jl b/test/neural_adapter_tests.jl
index 801fecd0fe..55440a563c 100644
--- a/test/neural_adapter_tests.jl
+++ b/test/neural_adapter_tests.jl
@@ -8,19 +8,12 @@ using ComponentArrays
 using Random
 Random.seed!(100)
 
-global iter = 0
-
 callback = function (p, l)
-    global iter
-    iter += 1
-    if iter % 100 == 0
-        println("Current loss at iteration $iter is: $l")
-    end
+    println("Current loss is: $l")
     return false
 end
 
-# @testset "Example, 2D Poisson equation with Neural adapter" begin
-begin
+@testset "Example, 2D Poisson equation with Neural adapter" begin
     @parameters x y
     @variables u(..)
     Dxx = Differential(x)^2
@@ -50,7 +43,7 @@
     @named pde_system = PDESystem(eq, bcs, domains, [x, y], [u(x, y)])
     prob = NeuralPDE.discretize(pde_system, discretization)
     println("Poisson equation, strategy: $(nameof(typeof(quadrature_strategy)))")
-    @time res = solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000, callback)
+    @time res = solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000)
     phi = discretization.phi
 
     inner_ = 8
@@ -63,9 +56,8 @@
     init_params2 = Float64.(ComponentArrays.ComponentArray(initp))
 
     function loss(cord, θ)
-        global st
         ch2, st = chain2(cord, θ, st)
-        ch2 .- phi(cord, res.minimizer)
+        ch2 .- phi(cord, res.u)
     end
 
     grid_strategy = GridTraining(0.05)
@@ -77,16 +69,14 @@
     reses_1 = map(strategies1) do strategy_
         println("Neural adapter Poisson equation, strategy: $(nameof(typeof(strategy_)))")
         prob_ = NeuralPDE.neural_adapter(loss, init_params2, pde_system, strategy_)
-        global iter = 0
-        @time res_ = solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 10000, callback)
+        @time res_ = solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 10000)
     end
 
     strategies2 = [stochastic_strategy, quasirandom_strategy]
     reses_2 = map(strategies2) do strategy_
         println("Neural adapter Poisson equation, strategy: $(nameof(typeof(strategy_)))")
         prob_ = NeuralPDE.neural_adapter(loss, init_params2, pde_system, strategy_)
-        global iter = 0
-        @time res_ = solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 10000, callback)
+        @time res_ = solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 10000)
     end
 
     reses_ = [reses_1; reses_2]
@@ -97,11 +87,11 @@
     xs, ys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
     analytic_sol_func(x, y) = (sin(pi * x) * sin(pi * y)) / (2pi^2)
 
-    u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys],
+    u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys],
         (length(xs), length(ys)))
 
     u_predicts = map(zip(phis, reses_)) do (phi_, res_)
-        reshape([first(phi_([x, y], res_.minimizer)) for x in xs for y in ys],
+        reshape([first(phi_([x, y], res_.u)) for x in xs for y in ys],
             (length(xs), length(ys)))
     end