Merge branch 'master' into pino_ode
KirillZubov authored Mar 6, 2024
2 parents db50090 + b861b05 commit a08af21
Showing 25 changed files with 760 additions and 330 deletions.
6 changes: 5 additions & 1 deletion .github/workflows/CI.yml
@@ -25,6 +25,8 @@ jobs:
- AdaptiveLoss
- Logging
- Forward
- NeuralAdapter
- DGM
version:
- "1"
steps:
@@ -49,6 +51,8 @@
- uses: julia-actions/julia-processcoverage@v1
with:
directories: src,lib/NeuralPDELogging/src
- uses: codecov/codecov-action@v3
- uses: codecov/codecov-action@v4
with:
files: lcov.info
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: true
4 changes: 3 additions & 1 deletion .github/workflows/Downstream.yml
@@ -48,6 +48,8 @@ jobs:
exit(0) # Exit immediately, as a success
end
- uses: julia-actions/julia-processcoverage@v1
- uses: codecov/codecov-action@v3
- uses: codecov/codecov-action@v4
with:
file: lcov.info
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: true
2 changes: 1 addition & 1 deletion .github/workflows/SpellCheck.yml
@@ -10,4 +10,4 @@ jobs:
- name: Checkout Actions Repository
uses: actions/checkout@v4
- name: Check spelling
uses: crate-ci/typos@v1.18.0
uses: crate-ci/typos@v1.19.0
26 changes: 13 additions & 13 deletions Project.toml
@@ -1,7 +1,7 @@
name = "NeuralPDE"
uuid = "315f7962-48a3-4962-8226-d0f33b1235f0"
authors = ["Chris Rackauckas <[email protected]>"]
version = "5.11.0"
version = "5.13.0"

[deps]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
@@ -34,25 +34,24 @@ Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
RuntimeGeneratedFunctions = "7e49a35a-f44a-4d26-94aa-eba1b4ca6b47"
SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
StochasticDiffEq = "789caeaf-c7a9-5a7d-9973-96adeb23e2a0"
SymbolicUtils = "d1185830-fcd6-423d-90d6-eec64667417b"
Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7"
UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[compat]
Adapt = "3, 4"
Adapt = "4"
AdvancedHMC = "0.6"
ArrayInterface = "6, 7"
Aqua = "0.8"
CUDA = "4"
ArrayInterface = "7"
CUDA = "5.1"
ChainRulesCore = "1"
ComponentArrays = "0.13.2, 0.14, 0.15"
ComponentArrays = "0.15"
Cubature = "1.5"
DiffEqBase = "6"
DiffEqNoiseProcess = "5.1"
Distributions = "0.23, 0.24, 0.25"
DocStringExtensions = "0.8, 0.9"
Distributions = "0.25"
DocStringExtensions = "0.9"
DomainSets = "0.6, 0.7"
Flux = "0.14"
ForwardDiff = "0.10"
@@ -61,15 +60,15 @@ Integrals = "4"
LineSearches = "7.2"
LinearAlgebra = "1"
LogDensityProblems = "2"
Lux = "0.4, 0.5"
Lux = "0.5"
LuxCUDA = "0.3"
MCMCChains = "6"
ModelingToolkit = "8"
MonteCarloMeasurements = "1"
Optim = "1.7.8"
Optimization = "3"
OptimizationOptimJL = "0.1"
OptimizationOptimisers = "0.1"
OptimizationOptimJL = "0.2"
OptimizationOptimisers = "0.2"
OrdinaryDiffEq = "6"
Pkg = "1"
QuasiMonteCarlo = "0.3.2"
@@ -79,12 +78,12 @@ RuntimeGeneratedFunctions = "0.5"
SafeTestsets = "0.1"
SciMLBase = "2"
Statistics = "1"
StochasticDiffEq = "6.13"
SymbolicUtils = "1"
Symbolics = "5"
Test = "1"
UnPack = "1"
Zygote = "0.6"
MethodOfLines = "0.10.7"
julia = "1.6"

[extras]
@@ -98,6 +97,7 @@ OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
MethodOfLines = "94925ecb-adb7-4558-8ed8-f975c56a0bf4"

[targets]
test = ["Aqua", "Test", "CUDA", "SafeTestsets", "OptimizationOptimJL", "Pkg", "OrdinaryDiffEq", "LineSearches", "LuxCUDA", "Flux"]
test = ["Aqua", "Test", "CUDA", "SafeTestsets", "OptimizationOptimJL", "Pkg", "OrdinaryDiffEq", "LineSearches", "LuxCUDA", "Flux", "MethodOfLines"]
12 changes: 6 additions & 6 deletions docs/Project.toml
@@ -26,19 +26,19 @@ SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"
AdvancedHMC = "0.6"
Cubature = "1.5"
DiffEqBase = "6.106"
Distributions = "0.23, 0.24, 0.25"
Distributions = "0.25"
Documenter = "1"
DomainSets = "0.6, 0.7"
Flux = "0.13, 0.14"
Flux = "0.14"
Integrals = "4"
Lux = "0.4, 0.5"
Lux = "0.5"
ModelingToolkit = "8.33"
MonteCarloMeasurements = "1"
NeuralPDE = "5.3"
Optimization = "3.9"
OptimizationOptimJL = "0.1"
OptimizationOptimisers = "0.1"
OptimizationPolyalgorithms = "0.1"
OptimizationOptimJL = "0.2"
OptimizationOptimisers = "0.2"
OptimizationPolyalgorithms = "0.2"
OrdinaryDiffEq = "6.31"
Plots = "1.36"
QuasiMonteCarlo = "0.3"
3 changes: 2 additions & 1 deletion docs/pages.jl
@@ -3,7 +3,8 @@ pages = ["index.md",
"Bayesian PINNs for Coupled ODEs" => "tutorials/Lotka_Volterra_BPINNs.md",
"PINNs DAEs" => "tutorials/dae.md",
"Parameter Estimation with PINNs for ODEs" => "tutorials/ode_parameter_estimation.md",
"Physics informed Neural Opeator ODEs" => "tutorials/pino_ode.md"
"Physics informed Neural Opeator ODEs" => "tutorials/pino_ode.md",

[CI annotation — GitHub Actions / Spell Check with Typos, docs/pages.jl line 6: "Opeator" should be "Operator".]
"Deep Galerkin Method" => "tutorials/dgm.md"
#"examples/nnrode_example.md", # currently incorrect
],
"PDE PINN Tutorials" => Any["Introduction to NeuralPDE for PDEs" => "tutorials/pdesystem.md",
117 changes: 117 additions & 0 deletions docs/src/tutorials/dgm.md
@@ -0,0 +1,117 @@
## Solving PDEs using the Deep Galerkin Method

### Overview

The Deep Galerkin Method (DGM) is a meshless deep learning algorithm for solving high-dimensional PDEs. It approximates the solution of a PDE with a neural network whose loss function is defined in a similar spirit to PINNs, composed of a PDE residual loss and a boundary condition loss.

In the following example, we demonstrate computing the loss function using Quasi-Random Sampling, a technique that uses quasi-Monte Carlo sampling to generate low-discrepancy sequences in high-dimensional spaces.
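
To make the idea concrete, here is a minimal standalone sketch of drawing a low-discrepancy point set with QuasiMonteCarlo.jl (the package behind `QuasiRandomTraining`); the Sobol sampler below is one illustrative choice, not necessarily the strategy's default:

```julia
# Draw 256 quasi-random training points over (t, x) ∈ [0, 1] × [-1, 1].
using QuasiMonteCarlo

lb = [0.0, -1.0]   # lower bounds for (t, x)
ub = [1.0, 1.0]    # upper bounds for (t, x)

# Returns a 2×256 matrix; each column is one low-discrepancy sample point.
points = QuasiMonteCarlo.sample(256, lb, ub, SobolSample())
```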

### Algorithm

The authors of DGM suggest a network composed of LSTM-type layers, which works well for most parabolic and quasi-parabolic PDEs.

```math
\begin{align*}
S^1 &= \sigma_1(W^1 \vec{x} + b^1); \\
Z^l &= \sigma_1(U^{z,l} \vec{x} + W^{z,l} S^l + b^{z,l}); \quad l = 1, \ldots, L; \\
G^l &= \sigma_1(U^{g,l} \vec{x} + W^{g,l} S^l + b^{g,l}); \quad l = 1, \ldots, L; \\
R^l &= \sigma_1(U^{r,l} \vec{x} + W^{r,l} S^l + b^{r,l}); \quad l = 1, \ldots, L; \\
H^l &= \sigma_2(U^{h,l} \vec{x} + W^{h,l}(S^l \cdot R^l) + b^{h,l}); \quad l = 1, \ldots, L; \\
S^{l+1} &= (1 - G^l) \cdot H^l + Z^l \cdot S^{l}; \quad l = 1, \ldots, L; \\
f(t, x; \theta) &= \sigma_{out}(W S^{L+1} + b).
\end{align*}
```

where $\vec{x}$ is the concatenated vector of $(t, x)$ and $L$ is the number of LSTM-type layers in the network.
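
The recurrence above translates almost line-for-line into code. The following is a plain-Julia sketch of a single forward pass with random, untrained weights, purely to illustrate the structure (NeuralPDE's `DeepGalerkin` builds the equivalent trainable Lux network for you):

```julia
using Random

# One forward pass of the DGM recurrence, with random (untrained) weights.
# d: input dimension, m: hidden width, L: number of LSTM-type layers.
function dgm_forward(x; d = length(x), m = 50, L = 3, σ1 = tanh, σ2 = tanh, σout = identity)
    rng = Random.default_rng()
    W1, b1 = randn(rng, m, d), zeros(m)
    S = σ1.(W1 * x .+ b1)                    # S¹
    for l in 1:L
        Uz, Wz, bz = randn(rng, m, d), randn(rng, m, m), zeros(m)
        Ug, Wg, bg = randn(rng, m, d), randn(rng, m, m), zeros(m)
        Ur, Wr, br = randn(rng, m, d), randn(rng, m, m), zeros(m)
        Uh, Wh, bh = randn(rng, m, d), randn(rng, m, m), zeros(m)
        Z = σ1.(Uz * x .+ Wz * S .+ bz)
        G = σ1.(Ug * x .+ Wg * S .+ bg)
        R = σ1.(Ur * x .+ Wr * S .+ br)
        H = σ2.(Uh * x .+ Wh * (S .* R) .+ bh)
        S = (1 .- G) .* H .+ Z .* S          # Sˡ⁺¹
    end
    W, b = randn(rng, 1, m), zeros(1)
    return σout.(W * S .+ b)                 # f(t, x; θ)
end

dgm_forward([0.5, 0.0])                      # evaluate at (t, x) = (0.5, 0.0)
```

Each layer re-injects the original input $\vec{x}$, an LSTM-like gating design the authors credit with capturing sharp gradients in the solution.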

### Example

Let's solve the following Burgers' equation using the Deep Galerkin Method for $\alpha = 0.05$ and compare our solution with that of the finite difference method:

$$
\partial_t u(t, x) + u(t, x) \partial_x u(t, x) - \alpha \partial_{xx} u(t, x) = 0
$$

defined over

$$
t \in [0, 1], x \in [-1, 1]
$$

with initial and boundary conditions
```math
\begin{align*}
u(0, x) & = - \sin(\pi x), \\
u(t, -1) & = 0, \\
u(t, 1) & = 0
\end{align*}
```

### Copy-pasteable code
```@example dgm
using NeuralPDE
using ModelingToolkit, Optimization, OptimizationOptimisers
import Lux: tanh, identity
using Distributions
import ModelingToolkit: Interval, infimum, supremum
using MethodOfLines, OrdinaryDiffEq
@parameters x t
@variables u(..)
Dt = Differential(t)
Dx = Differential(x)
Dxx = Dx^2
α = 0.05
# Burgers' equation
eq = Dt(u(t,x)) + u(t,x) * Dx(u(t,x)) - α * Dxx(u(t,x)) ~ 0
# boundary conditions
bcs = [
    u(0.0, x) ~ -sin(π * x),
    u(t, -1.0) ~ 0.0,
    u(t, 1.0) ~ 0.0
]
domains = [t ∈ Interval(0.0, 1.0), x ∈ Interval(-1.0, 1.0)]
# MethodOfLines, for FD solution
dx = 0.01
order = 2
discretization = MOLFiniteDifference([x => dx], t, saveat = 0.01)
@named pde_system = PDESystem(eq, bcs, domains, [t, x], [u(t,x)])
prob = discretize(pde_system, discretization)
sol = solve(prob, Tsit5())
ts = sol[t]
xs = sol[x]
u_MOL = sol[u(t,x)]
# NeuralPDE, using Deep Galerkin Method
strategy = QuasiRandomTraining(256, minibatch = 32)
# DeepGalerkin(input dims, output dims, hidden width, number of LSTM-type layers,
#              σ1, σ2, output activation, training strategy)
discretization = DeepGalerkin(2, 1, 50, 5, tanh, tanh, identity, strategy)
@named pde_system = PDESystem(eq, bcs, domains, [t, x], [u(t,x)])
prob = discretize(pde_system, discretization)
global iter = 0
callback = function (p, l)
    global iter += 1
    if iter % 20 == 0
        println("$iter => $l")
    end
    return false
end
res = Optimization.solve(prob, Adam(0.1); callback = callback, maxiters = 100)
prob = remake(prob, u0 = res.u)
res = Optimization.solve(prob, Adam(0.01); callback = callback, maxiters = 500)
phi = discretization.phi
u_predict = [first(phi([t, x], res.u)) for t in ts, x in xs]
diff_u = abs.(u_predict .- u_MOL)
using Plots
p1 = plot(ts, xs, u_MOL', linetype = :contourf, title = "FD");
p2 = plot(ts, xs, u_predict', linetype = :contourf, title = "predict");
p3 = plot(ts, xs, diff_u', linetype = :contourf, title = "error");
plot(p1, p2, p3)
```
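
Once trained, `phi` is an ordinary callable, so the network can also be queried pointwise outside the plotting comprehension — a small usage sketch, assuming the example above has been run:

```julia
# Approximate u(t, x) at (t, x) = (0.5, 0.0) with the trained parameters.
first(phi([0.5, 0.0], res.u))
```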
5 changes: 2 additions & 3 deletions lib/NeuralPDELogging/test/adaptive_loss_log_tests.jl
@@ -3,7 +3,6 @@ using Test, NeuralPDE
using Optimization, OptimizationOptimisers
import ModelingToolkit: Interval, infimum, supremum
using Random, Lux
#using Plots
@info "Starting Soon!"

nonadaptive_loss = NeuralPDE.NonAdaptiveLoss(pde_loss_weights = 1, bc_loss_weights = 1)
@@ -70,7 +69,7 @@ function test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, hasl
if haslogger
log_value(logger, "outer_error/loss", l, step = iteration[1])
if iteration[1] % 30 == 0
u_predict = reshape([first(phi([x, y], p)) for x in xs for y in ys],
u_predict = reshape([first(phi([x, y], p.u)) for x in xs for y in ys],
(length(xs), length(ys)))
diff_u = abs.(u_predict .- u_real)
total_diff = sum(diff_u)
@@ -89,7 +88,7 @@ res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.03); maxiters = maxiters,
res = Optimization.solve(prob, OptimizationOptimisers.Adam(0.03); maxiters = maxiters,
callback = callback)

u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys],
u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys],
(length(xs), length(ys)))
diff_u = abs.(u_predict .- u_real)
total_diff = sum(diff_u)
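
These test changes track the newer Optimization.jl callback convention: the callback's first argument is now a state object whose `u` field holds the current parameters (and likewise `res.u` replaces the deprecated `res.minimizer`). A minimal standalone sketch of the convention, assuming a recent Optimization.jl:

```julia
using Optimization, OptimizationOptimisers

rosenbrock(u, p) = (1 - u[1])^2 + 100 * (u[2] - u[1]^2)^2
optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
prob = OptimizationProblem(optf, [0.0, 0.0])

# The first callback argument is an OptimizationState; state.u holds the parameters.
callback = function (state, loss)
    println("loss = $loss at u = $(state.u)")
    return false   # returning true would halt the solve
end

res = solve(prob, OptimizationOptimisers.Adam(0.05); maxiters = 100, callback)
res.u   # final parameters; res.minimizer is the deprecated accessor
```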
6 changes: 4 additions & 2 deletions src/NeuralPDE.jl
@@ -9,7 +9,7 @@ using Reexport, Statistics
@reexport using ModelingToolkit

using Zygote, ForwardDiff, Random, Distributions
using Adapt, DiffEqNoiseProcess, StochasticDiffEq
using Adapt, DiffEqNoiseProcess
using Optimization
using OptimizationOptimisers
using Integrals, Cubature
@@ -53,6 +53,7 @@ include("neural_adapter.jl")
include("advancedHMC_MCMC.jl")
include("BPINN_ode.jl")
include("PDE_BPINN.jl")
include("dgm.jl")

export NNODE, NNDAE, PINOODE,
PhysicsInformedNN, discretize,
@@ -64,6 +65,7 @@ export NNODE, NNDAE, PINOODE
AbstractAdaptiveLoss, NonAdaptiveLoss, GradientScaleAdaptiveLoss,
MiniMaxAdaptiveLoss, LogOptions,
ahmc_bayesian_pinn_ode, BNNODE, ahmc_bayesian_pinn_pde, vector_to_parameters,
BPINNsolution, BayesianPINN
BPINNsolution, BayesianPINN,
DeepGalerkin

end # module
2 changes: 1 addition & 1 deletion src/advancedHMC_MCMC.jl
@@ -176,7 +176,7 @@ function getlogpdf(strategy::QuadratureTraining, Tar::LogTargetDensity, f,
function integrand(t::Number, θ)
innerdiff(Tar, f, autodiff, [t], θ, ode_params)
end
intprob = IntegralProblem(integrand, tspan[1], tspan[2], θ; nout = length(Tar.prob.u0))
intprob = IntegralProblem(integrand, (tspan[1], tspan[2]), θ; nout = length(Tar.prob.u0))
sol = solve(intprob, QuadGKJL(); abstol = strategy.abstol, reltol = strategy.reltol)
sum(sol.u)
end