From a8401a4b613b02a893af398b73aa0818ecd43486 Mon Sep 17 00:00:00 2001 From: Sathvik Bhagavan Date: Mon, 4 Mar 2024 04:16:09 +0000 Subject: [PATCH 1/3] refactor: use latest API for IntegralProblem --- src/advancedHMC_MCMC.jl | 2 +- src/discretize.jl | 2 +- src/ode_solve.jl | 2 +- src/training_strategies.jl | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/advancedHMC_MCMC.jl b/src/advancedHMC_MCMC.jl index 348ed57a0d..c051d208ca 100644 --- a/src/advancedHMC_MCMC.jl +++ b/src/advancedHMC_MCMC.jl @@ -176,7 +176,7 @@ function getlogpdf(strategy::QuadratureTraining, Tar::LogTargetDensity, f, function integrand(t::Number, θ) innerdiff(Tar, f, autodiff, [t], θ, ode_params) end - intprob = IntegralProblem(integrand, tspan[1], tspan[2], θ; nout = length(Tar.prob.u0)) + intprob = IntegralProblem(integrand, (tspan[1], tspan[2]), θ; nout = length(Tar.prob.u0)) sol = solve(intprob, QuadGKJL(); abstol = strategy.abstol, reltol = strategy.reltol) sum(sol.u) end diff --git a/src/discretize.jl b/src/discretize.jl index af035980b3..9d42c7d1df 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -317,7 +317,7 @@ function get_numeric_integral(pinnrep::PINNRepresentation) ChainRulesCore.@ignore_derivatives @views(cord_[integrating_var_id]) .= x return integrand_func(cord_, p, phi, derivative, nothing, u, nothing) end - prob_ = IntegralProblem(integrand_, lb, ub, θ) + prob_ = IntegralProblem(integrand_, (lb, ub), θ) sol = solve(prob_, CubatureJLh(), reltol = 1e-3, abstol = 1e-3)[1] return sol diff --git a/src/ode_solve.jl b/src/ode_solve.jl index 5001d135f7..f93183d76f 100644 --- a/src/ode_solve.jl +++ b/src/ode_solve.jl @@ -232,7 +232,7 @@ function generate_loss(strategy::QuadratureTraining, phi, f, autodiff::Bool, tsp @assert batch == 0 # not implemented function loss(θ, _) - intprob = IntegralProblem(integrand, tspan[1], tspan[2], θ) + intprob = IntegralProblem(integrand, (tspan[1], tspan[2]), θ) sol = solve(intprob, QuadGKJL(); abstol = strategy.abstol, reltol = strategy.reltol) sol.u end diff --git a/src/training_strategies.jl b/src/training_strategies.jl index 0ed29fbf12..1db8780941 100644 --- a/src/training_strategies.jl +++ b/src/training_strategies.jl @@ -315,7 +315,7 @@ function get_loss_function(loss_function, lb, ub, eltypeθ, strategy::Quadrature sum(abs2, view(loss_(x, θ), 1, :), dims = 2) #./ size_x end integral_function = BatchIntegralFunction(integrand, max_batch = strategy.batch) - prob = IntegralProblem(integral_function, lb, ub, θ) + prob = IntegralProblem(integral_function, (lb, ub), θ) solve(prob, strategy.quadrature_alg, reltol = strategy.reltol, From 2cd59a3b99689b9ce9333f793ce72b178f4bc50f Mon Sep 17 00:00:00 2001 From: Sathvik Bhagavan Date: Mon, 4 Mar 2024 04:17:17 +0000 Subject: [PATCH 2/3] test: clean up NeuralAdapter tests and use res.u instead of res.minimizer --- .../test/adaptive_loss_log_tests.jl | 2 +- test/IDE_tests.jl | 12 +++--- test/NNPDE_tests.jl | 14 +++---- test/NNPDE_tests_gpu_Lux.jl | 14 +++---- test/adaptive_loss_tests.jl | 2 +- test/additional_loss_tests.jl | 12 +++--- test/direct_function_tests.jl | 10 ++--- test/neural_adapter_tests.jl | 42 +++++++------------ 8 files changed, 48 insertions(+), 60 deletions(-) diff --git a/lib/NeuralPDELogging/test/adaptive_loss_log_tests.jl b/lib/NeuralPDELogging/test/adaptive_loss_log_tests.jl index d47f80989c..1facaf0a7d 100644 --- a/lib/NeuralPDELogging/test/adaptive_loss_log_tests.jl +++ b/lib/NeuralPDELogging/test/adaptive_loss_log_tests.jl @@ -88,7 +88,7 @@ function 
test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, hasl res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.03); maxiters = maxiters, callback = callback) - u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys], + u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys], (length(xs), length(ys))) diff_u = abs.(u_predict .- u_real) total_diff = sum(diff_u) diff --git a/test/IDE_tests.jl b/test/IDE_tests.jl index 7fcb15604a..cb3f8a3023 100644 --- a/test/IDE_tests.jl +++ b/test/IDE_tests.jl @@ -30,7 +30,7 @@ end phi = discretization.phi analytic_sol_func(t) = 1 / 2 * (exp(-t)) * (sin(2 * t)) u_real = [analytic_sol_func(t) for t in ts] - u_predict = [first(phi([t], res.minimizer)) for t in ts] + u_predict = [first(phi([t], res.u)) for t in ts] @test Flux.mse(u_real, u_predict) < 0.01 end @@ -54,7 +54,7 @@ eq = Ix(u(x) * cos(x)) ~ (x^3) / 3 maxiters = 200) xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1] phi = discretization.phi - u_predict = [first(phi([x], res.minimizer)) for x in xs] + u_predict = [first(phi([x], res.u)) for x in xs] u_real = [x^2 / cos(x) for x in xs] @test Flux.mse(u_real, u_predict) < 0.001 end @@ -78,7 +78,7 @@ end ys = 0.00:0.01:1.00 phi = discretization.phi u_real = collect(1 - x^2 - y^2 for y in ys, x in xs); - u_predict = collect(Array(phi([x, y], res.minimizer))[1] for y in ys, x in xs); + u_predict = collect(Array(phi([x, y], res.u))[1] for y in ys, x in xs); @test Flux.mse(u_real, u_predict) < 0.001 end @@ -101,7 +101,7 @@ end ys = 0.00:0.01:1.00 phi = discretization.phi u_real = collect(x + y^2 for y in ys, x in xs); - u_predict = collect(Array(phi([x, y], res.minimizer))[1] for y in ys, x in xs); + u_predict = collect(Array(phi([x, y], res.u))[1] for y in ys, x in xs); @test Flux.mse(u_real, u_predict) < 0.01 end @@ -144,7 +144,7 @@ end res = solve(prob, OptimizationOptimJL.BFGS(); callback = callback, maxiters = 200) xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1] phi = discretization.phi - u_predict = [first(phi([x], res.minimizer)) for x in xs] + u_predict = [first(phi([x], res.u)) for x in xs] u_real = [1 / x^2 for x in xs] @test u_real≈u_predict rtol=10^-2 end @@ -163,7 +163,7 @@ end res = solve(prob, OptimizationOptimJL.BFGS(); callback = callback, maxiters = 300) xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1] phi = discretization.phi - u_predict = [first(phi([x], res.minimizer)) for x in xs] + u_predict = [first(phi([x], res.u)) for x in xs] u_real = [1 / x^2 for x in xs] @test u_real≈u_predict rtol=10^-2 end diff --git a/test/NNPDE_tests.jl b/test/NNPDE_tests.jl index 5602096b40..be4b3152dd 100644 --- a/test/NNPDE_tests.jl +++ b/test/NNPDE_tests.jl @@ -40,15 +40,15 @@ function test_ode(strategy_) prob = discretize(pde_system, discretization) res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.1); maxiters = 1000) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 500) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.001); maxiters = 500) phi = discretization.phi analytic_sol_func(t) = exp(-(t^2) / 2) / (1 + t + t^3) + t^2 ts = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1] u_real = [analytic_sol_func(t) for t in ts] - u_predict = [first(phi(t, res.minimizer)) for t in ts] + u_predict = [first(phi(t, res.u)) for t in 
ts] @test u_predict≈u_real atol=0.1 end @@ -183,7 +183,7 @@ function test_2d_poisson_equation(chain_, strategy_) xs, ys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains] analytic_sol_func(x, y) = (sin(pi * x) * sin(pi * y)) / (2pi^2) - u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys], + u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys], (length(xs), length(ys))) u_real = reshape([analytic_sol_func(x, y) for x in xs for y in ys], (length(xs), length(ys))) @@ -431,14 +431,14 @@ end @named pde_system = PDESystem(eq, bcs, domains, [θ], [u]) prob = discretize(pde_system, discretization) res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.1); maxiters = 1000) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 500) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.001); maxiters = 500) phi = discretization.phi analytic_sol_func(t) = exp(-(t^2) / 2) / (1 + t + t^3) + t^2 ts = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1] u_real = [analytic_sol_func(t) for t in ts] - u_predict = [first(phi(t, res.minimizer)) for t in ts] + u_predict = [first(phi(t, res.u)) for t in ts] @test u_predict≈u_real atol=0.1 end \ No newline at end of file diff --git a/test/NNPDE_tests_gpu_Lux.jl b/test/NNPDE_tests_gpu_Lux.jl index b3123a30cd..01e63a1aa5 100644 --- a/test/NNPDE_tests_gpu_Lux.jl +++ b/test/NNPDE_tests_gpu_Lux.jl @@ -52,7 +52,7 @@ const gpud = gpu_device() analytic_sol_func(t) = exp(-(t^2) / 2) / (1 + t + t^3) + t^2 ts = [infimum(d.domain):(dt / 10):supremum(d.domain) for d in domains][1] u_real = [analytic_sol_func(t) for t in ts] - u_predict = [first(Array(phi([t], res.minimizer))) for t in ts] + u_predict = [first(Array(phi([t], res.u))) for t in ts] @test u_predict≈u_real atol=0.2 end @@ -86,12 +86,12 @@ end discretization = PhysicsInformedNN(chain, strategy; init_params = ps) prob = discretize(pdesys, discretization) res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 1000) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.001); maxiters = 1000) phi = discretization.phi u_exact = (t, x) -> exp.(-t) * cos.(x) ts, xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains] - u_predict = reshape([first(Array(phi([t, x], res.minimizer))) for t in ts for x in xs], + u_predict = reshape([first(Array(phi([t, x], res.u))) for t in ts for x in xs], (length(ts), length(xs))) u_real = reshape([u_exact(t, x) for t in ts for x in xs], (length(ts), length(xs))) diff_u = abs.(u_predict .- u_real) @@ -130,12 +130,12 @@ end discretization = PhysicsInformedNN(chain, strategy; init_params = ps) prob = discretize(pdesys, discretization) res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.1); maxiters = 2000) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 2000) phi = discretization.phi u_exact = (t, x) -> exp(-t) * cos(x) ts, xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains] - u_predict = reshape([first(Array(phi([t, x], res.minimizer))) for t in ts for x in xs], + u_predict = reshape([first(Array(phi([t, x], res.u))) for t in ts for x in xs], (length(ts), length(xs))) u_real = reshape([u_exact(t, x) 
for t in ts for x in xs], (length(ts), length(xs))) diff_u = abs.(u_predict .- u_real) @@ -184,12 +184,12 @@ end @named pde_system = PDESystem(eq, bcs, domains, [t, x, y], [u(t, x, y)]) prob = discretize(pde_system, discretization) res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 2500) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.001); maxiters = 2500) phi = discretization.phi ts, xs, ys = [infimum(d.domain):0.1:supremum(d.domain) for d in domains] u_real = [analytic_sol_func(t, x, y) for t in ts for x in xs for y in ys] - u_predict = [first(Array(phi([t, x, y], res.minimizer))) for t in ts for x in xs + u_predict = [first(Array(phi([t, x, y], res.u))) for t in ts for x in xs for y in ys] @test u_predict≈u_real rtol=0.2 diff --git a/test/adaptive_loss_tests.jl b/test/adaptive_loss_tests.jl index 72c0d78ab2..8180a6895d 100644 --- a/test/adaptive_loss_tests.jl +++ b/test/adaptive_loss_tests.jl @@ -60,7 +60,7 @@ function test_2d_poisson_equation_adaptive_loss(adaptive_loss; seed = 60, maxite return false end res = solve(prob, OptimizationOptimisers.Adam(0.03); maxiters = maxiters, callback = callback) - u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys], + u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys], (length(xs), length(ys))) diff_u = abs.(u_predict .- u_real) total_diff = sum(diff_u) diff --git a/test/additional_loss_tests.jl b/test/additional_loss_tests.jl index be72ed7859..4fdc0bc7d0 100644 --- a/test/additional_loss_tests.jl +++ b/test/additional_loss_tests.jl @@ -43,7 +43,7 @@ using ComponentArrays function inner_f(x, θ) dx * phi(x, θ) .- 1 end - prob1 = IntegralProblem(inner_f, lb, ub, θ) + prob1 = IntegralProblem(inner_f, (lb, ub), θ) norm2 = solve(prob1, HCubatureJL(), reltol = 1e-8, abstol = 1e-8, maxiters = 10) abs(norm2[1]) end @@ -63,7 +63,7 @@ using ComponentArrays return false end res = solve(prob, OptimizationOptimJL.LBFGS(), maxiters = 400, callback = cb_) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = solve(prob, OptimizationOptimJL.BFGS(), maxiters = 2000, callback = cb_) C = 142.88418699042 analytic_sol_func(x) = C * exp((1 / (2 * _σ^2)) * (2 * α * x^2 - β * x^4)) @@ -88,7 +88,7 @@ using ComponentArrays return false end res = solve(prob, OptimizationOptimJL.LBFGS(), maxiters = 400, callback = cb_) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = solve(prob, OptimizationOptimJL.BFGS(), maxiters = 2000, callback = cb_) C = 142.88418699042 analytic_sol_func(x) = C * exp((1 / (2 * _σ^2)) * (2 * α * x^2 - β * x^4)) @@ -170,7 +170,7 @@ end Float64[]) res = solve(prob, OptimizationOptimJL.BFGS(); maxiters = 6000) - p_ = res.minimizer[(end - 2):end] + p_ = res.u[(end - 2):end] @test sum(abs2, p_[1] - 10.00) < 0.1 @test sum(abs2, p_[2] - 28.00) < 0.1 @test sum(abs2, p_[3] - (8 / 3)) < 0.1 @@ -189,7 +189,7 @@ end sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization) sym_prob.loss_functions.full_loss_function(sym_prob.flat_init_params, nothing) res = solve(prob, OptimizationOptimJL.BFGS(); maxiters = 6000) - p_ = res.minimizer[(end - 2):end] + p_ = res.u[(end - 2):end] @test sum(abs2, p_[1] - 10.00) < 0.1 @test sum(abs2, p_[2] - 28.00) < 0.1 @test sum(abs2, p_[3] - (8 / 3)) < 0.1 @@ -225,7 +225,7 @@ end phi(xs, flat_init_params) additional_loss_(phi, flat_init_params, nothing) res = solve(prob, OptimizationOptimisers.Adam(0.01), 
maxiters = 500) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = solve(prob, OptimizationOptimJL.BFGS(), maxiters = 500) @test phi(xs, res.u)≈aproxf_(xs) rtol=0.01 end diff --git a/test/direct_function_tests.jl b/test/direct_function_tests.jl index f128372f51..497583954a 100644 --- a/test/direct_function_tests.jl +++ b/test/direct_function_tests.jl @@ -35,7 +35,7 @@ Random.seed!(110) @named pde_system = PDESystem(eq, bc, domain, [x], [u(x)]) prob = discretize(pde_system, discretization) res = solve(prob, OptimizationOptimisers.Adam(0.05), maxiters = 1000) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = solve(prob, OptimizationOptimJL.BFGS(initial_stepnorm = 0.01), maxiters = 500) @test discretization.phi(xs', res.u)≈func(xs') rtol=0.01 end @@ -62,7 +62,7 @@ end @named pde_system = PDESystem(eq, bc, domain, [x], [u(x)]) prob = discretize(pde_system, discretization) res = solve(prob, OptimizationOptimisers.Adam(0.01), maxiters = 500) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = solve(prob, OptimizationOptimJL.BFGS(), maxiters = 1000) dx = 0.01 xs = collect(x0:dx:x_end) @@ -95,14 +95,14 @@ end symprob = NeuralPDE.symbolic_discretize(pde_system, discretization) symprob.loss_functions.full_loss_function(symprob.flat_init_params, nothing) res = solve(prob, OptimizationOptimisers.Adam(0.01), maxiters = 500) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = solve(prob, OptimizationOptimJL.BFGS(), maxiters = 1000) - prob = remake(prob, u0 = res.minimizer) + prob = remake(prob, u0 = res.u) res = solve(prob, OptimizationOptimJL.BFGS(), maxiters = 500) phi = discretization.phi xs = collect(x0:0.1:x_end) ys = collect(y0:0.1:y_end) - u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys], + u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys], (length(xs), length(ys))) u_real = reshape([func(x, y) for x in xs for y in ys], (length(xs), length(ys))) diff_u = abs.(u_predict .- u_real) diff --git a/test/neural_adapter_tests.jl b/test/neural_adapter_tests.jl index 801fecd0fe..cc977210da 100644 --- a/test/neural_adapter_tests.jl +++ b/test/neural_adapter_tests.jl @@ -8,19 +8,12 @@ using ComponentArrays using Random Random.seed!(100) -global iter = 0 - callback = function (p, l) - global iter - iter += 1 - if iter % 100 == 0 - println("Current loss at iteration $iter is: $l") - end + println("Current loss is: $l") return false end -# @testset "Example, 2D Poisson equation with Neural adapter" begin -begin +@testset "Example, 2D Poisson equation with Neural adapter" begin @parameters x y @variables u(..) 
Dxx = Differential(x)^2 @@ -50,7 +43,7 @@ begin @named pde_system = PDESystem(eq, bcs, domains, [x, y], [u(x, y)]) prob = NeuralPDE.discretize(pde_system, discretization) println("Poisson equation, strategy: $(nameof(typeof(quadrature_strategy)))") - @time res = solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000, callback) + @time res = solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000) phi = discretization.phi inner_ = 8 @@ -63,9 +56,8 @@ begin init_params2 = Float64.(ComponentArrays.ComponentArray(initp)) function loss(cord, θ) - global st ch2, st = chain2(cord, θ, st) - ch2 .- phi(cord, res.minimizer) + ch2 .- phi(cord, res.u) end grid_strategy = GridTraining(0.05) @@ -77,16 +69,14 @@ begin reses_1 = map(strategies1) do strategy_ println("Neural adapter Poisson equation, strategy: $(nameof(typeof(strategy_)))") prob_ = NeuralPDE.neural_adapter(loss, init_params2, pde_system, strategy_) - global iter = 0 - @time res_ = solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 10000, callback) + @time res_ = solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 10000) end strategies2 = [stochastic_strategy, quasirandom_strategy] reses_2 = map(strategies2) do strategy_ println("Neural adapter Poisson equation, strategy: $(nameof(typeof(strategy_)))") prob_ = NeuralPDE.neural_adapter(loss, init_params2, pde_system, strategy_) - global iter = 0 - @time res_ = solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 10000, callback) + @time res_ = solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 10000) end reses_ = [reses_1; reses_2] @@ -97,11 +87,11 @@ begin xs, ys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains] analytic_sol_func(x, y) = (sin(pi * x) * sin(pi * y)) / (2pi^2) - u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys], + u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys], (length(xs), length(ys))) u_predicts = map(zip(phis, reses_)) do (phi_, res_) - reshape([first(phi_([x, y], res_.minimizer)) for x in xs for y in ys], + reshape([first(phi_([x, y], res_.u)) for x in xs for y in ys], (length(xs), length(ys))) end @@ -174,11 +164,9 @@ end for i in 1:count_decomp println("decomposition $i") domains_ = domains_map[i] - phi_in(cord) = phis[i - 1](cord, reses[i - 1].minimizer) - # phi_bound(x,y) = if (x isa Matrix) phi_in(vcat(x, fill(y,size(x)))) else phi_in(vcat(fill(x,size(y)),y)) end + phi_in(cord) = phis[i - 1](cord, reses[i - 1].u) phi_bound(x, y) = phi_in(vcat(x, y)) @register_symbolic phi_bound(x, y) - global phi_bound Base.Broadcast.broadcasted(::typeof(phi_bound), x, y) = phi_bound(x, y) bcs_ = create_bcs(domains_[1].domain, phi_bound) @named pde_system_ = PDESystem(eq, bcs_, domains_, [x, y], [u(x, y)]) @@ -187,7 +175,7 @@ end discretization = PhysicsInformedNN(chains[i], strategy; init_params = init_params[i]) prob = discretize(pde_system_, discretization) @time res_ = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3), maxiters = 10000) - @show res_.minimum + @show res_.objective phi = discretization.phi push!(reses, res_) push!(phis, phi) @@ -208,7 +196,7 @@ end end for x_ in xs i = index_of_interval(x_) - u_predict_sub = [first(phis[i]([x_, y], reses[i].minimizer)) for y in ys] + u_predict_sub = [first(phis[i]([x_, y], reses[i].u)) for y in ys] u_real_sub = [analytic_sol_func(x_, y) for y in ys] diff_u_sub = u_predict_sub .- u_real_sub append!(u_predict_array, u_predict_sub) @@ -245,15 +233,15 @@ end prob_ = NeuralPDE.neural_adapter(losses, init_params2, 
pde_system_map, GridTraining([0.1 / count_decomp, 0.1])) @time res_ = solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 5000) - @show res_.minimum - prob_ = NeuralPDE.neural_adapter(losses, res_.minimizer, pde_system_map, + @show res_.objective + prob_ = NeuralPDE.neural_adapter(losses, res_.u, pde_system_map, GridTraining(0.01)) @time res_ = solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 5000) - @show res_.minimum + @show res_.objective phi_ = NeuralPDE.Phi(chain2) xs, ys = [infimum(d.domain):dx:supremum(d.domain) for d in domains] - u_predict_ = reshape([first(phi_([x, y], res_.minimizer)) for x in xs for y in ys], + u_predict_ = reshape([first(phi_([x, y], res_.u)) for x in xs for y in ys], (length(xs), length(ys))) u_real = reshape([analytic_sol_func(x, y) for x in xs for y in ys], (length(xs), length(ys))) From 84c50e199cda1e820952064f7f03c2112df090ad Mon Sep 17 00:00:00 2001 From: Sathvik Bhagavan Date: Mon, 4 Mar 2024 06:38:06 +0000 Subject: [PATCH 3/3] ci: remove NeuralAdapter from GHA as it is in buildkite --- .github/workflows/CI.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index fa24d90a88..8662940570 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -25,7 +25,6 @@ jobs: - AdaptiveLoss - Logging - Forward - - NeuralAdapter version: - "1" steps:
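
Note on the API changes this series tracks — a minimal runnable sketch, not code
from the diffs above; the integrand, domain, and solver choices here are
illustrative assumptions:

    using Integrals

    # Integrals.jl now takes the bounds as a single (lb, ub) domain tuple;
    # the old API took lb and ub as separate positional arguments, i.e.
    # IntegralProblem(f, lb, ub, p) became IntegralProblem(f, (lb, ub), p).
    f(u, p) = sum(sin, u)            # toy integrand with the f(u, p) signature
    domain = (zeros(2), ones(2))     # (lb, ub) tuple over [0, 1]^2
    prob = IntegralProblem(f, domain)
    sol = solve(prob, HCubatureJL(); reltol = 1e-3, abstol = 1e-3)
    sol.u                            # the value of the integral

    # Optimization.jl solutions: the minimizer is read via res.u and the final
    # objective via res.objective; res.minimizer and res.minimum are the older
    # accessors that patch 2 replaces throughout the tests.

The same (lb, ub) tuple convention applies to the BatchIntegralFunction-based
problem in src/training_strategies.jl.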