pde fixes
AstitvaAggarwal committed Oct 31, 2024
1 parent 72a7479 commit 6b03ea1
Showing 5 changed files with 191 additions and 16 deletions.
14 changes: 7 additions & 7 deletions src/PDE_BPINN.jl
@@ -15,7 +15,7 @@ end

function LogDensityProblems.logdensity(ltd::PDELogTargetDensity, θ)
# for parameter estimation it is necessary to use the multioutput case
if Tar.L2_loss2 === nothing
if ltd.L2_loss2 === nothing
return ltd.full_loglikelihood(setparameters(ltd, θ), ltd.allstd) +
priorlogpdf(ltd, θ) + L2LossData(ltd, θ)
else
@@ -24,7 +24,6 @@ function LogDensityProblems.logdensity(ltd::PDELogTargetDensity, θ)
end
end


# you get a vector of losses
function get_lossy(pinnrep, dataset, Dict_differentials)
eqs = pinnrep.eqs
@@ -43,15 +42,16 @@ function get_lossy(pinnrep, dataset, Dict_differentials)

# for each dataset point (eq_sub dictionary), substitute into the masked equations
# n_collocated_equations = n_rows_dataset(or n_indvar_coords_dataset)
masked_colloc_equations = [[substitute(eq, eq_sub) for eq in eqs_new]
masked_colloc_equations = [[Symbolics.substitute(eq, eq_sub) for eq in eqs_new]
for eq_sub in eq_subs]
# now we have a vector of collocated equations for the dataset's dependent variables

# reverse dict for re-substituting values of Differential(t)(u(t)) etc
rev_Dict_differentials = Dict(value => key for (key, value) in Dict_differentials)

# unmask Differential terms in masked_colloc_equations
colloc_equations = [substitute.(masked_colloc_equation, Ref(rev_Dict_differentials))
colloc_equations = [Symbolics.substitute.(
masked_colloc_equation, Ref(rev_Dict_differentials))
for masked_colloc_equation in masked_colloc_equations]

# nested vector of datafree_pde_loss_functions (as in discretize.jl)
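A minimal standalone sketch of the mask/substitute/unmask pattern used here (the variables u and t, the placeholder diff_1, and the toy expression are illustrative only, not part of this commit):

using Symbolics
@variables t
@variables u(t) diff_1
Dt = Differential(t)
eq = Dt(u) + u                                            # toy expression containing a differential term
Dict_differentials = Dict(Dt(u) => diff_1)                # mask the differential term with a plain variable
masked = Symbolics.substitute(eq, Dict_differentials)     # diff_1 + u
masked_sub = Symbolics.substitute(masked, Dict(u => 2.0)) # substitute a dataset value for u, leaving the mask untouched
rev_Dict_differentials = Dict(v => k for (k, v) in Dict_differentials)
unmasked = Symbolics.substitute(masked_sub, rev_Dict_differentials)  # Differential(t)(u(t)) + 2.0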
@@ -314,7 +314,7 @@ function ahmc_bayesian_pinn_pde(pde_system, discretization;
Kernel = HMC(0.1, 30), Adaptorkwargs = (Adaptor = StanHMCAdaptor,
Metric = DiagEuclideanMetric, targetacceptancerate = 0.8),
Integratorkwargs = (Integrator = Leapfrog,), saveats = [1 / 10.0],
numensemble = floor(Int, draw_samples / 3), progress = false, verbose = false)
numensemble = floor(Int, draw_samples / 3), Dict_differentials = nothing, progress = false, verbose = false)
pinnrep = symbolic_discretize(pde_system, discretization)
dataset_pde, dataset_bc = discretization.dataset

Expand Down Expand Up @@ -422,7 +422,7 @@ function ahmc_bayesian_pinn_pde(pde_system, discretization;
# dimensions would be the total number of params; initial_nnθ for Lux NamedTuples
ℓπ = PDELogTargetDensity(
nparameters, strategy, dataset, priors, [phystd, bcstd, l2std], phynewstd,
names, ninv, initial_nnθ, full_weighted_loglikelihood, Φ)
names, ninv, initial_nnθ, full_weighted_loglikelihood, newloss, Φ)

Adaptor, Metric, targetacceptancerate = Adaptorkwargs[:Adaptor],
Adaptorkwargs[:Metric], Adaptorkwargs[:targetacceptancerate]
@@ -496,7 +496,7 @@ function ahmc_bayesian_pinn_pde(pde_system, discretization;
@printf("Final MSE against dataset Log-likelihood : %g\n",
L2LossData(ℓπ, samples[end]))
if !(newloss isa Nothing)
@printf("Final L2_LOSSY : %g\n",
@printf("Final new loss : %g\n",
ℓπ.L2_loss2(setparameters(ℓπ, samples[end]),
ℓπ.phynewstd))
end
8 changes: 4 additions & 4 deletions src/discretize.jl
@@ -549,13 +549,13 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem, discretization::Ab
for (j, bc_loss_function) in enumerate(bc_loss_functions)]

if !(datapde_loss_functions isa Nothing)
pde_loglikelihoods += [logpdf(Normal(0, stdpdes[j]), pde_loss_function(θ))
for (j, pde_loss_function) in enumerate(datapde_loss_functions)]
pde_loglikelihoods += [pde_loglike_function(θ, allstd[1])
for (j, pde_loglike_function) in enumerate(datapde_loss_functions)]
end

if !(databc_loss_functions isa Nothing)
bc_loglikelihoods += [logpdf(Normal(0, stdbcs[j]), bc_loss_function(θ))
for (j, bc_loss_function) in enumerate(databc_loss_functions)]
bc_loglikelihoods += [bc_loglike_function(θ, allstd[2])
for (j, bc_loglike_function) in enumerate(databc_loss_functions)]
end

# this is kind of a hack, and means that whenever the outer function is evaluated the increment goes up, even if it's not being optimized
2 changes: 1 addition & 1 deletion src/training_strategies.jl
@@ -88,7 +88,7 @@ function get_points_loss_functions(loss_function, train_set, eltypeθ, strategy:
function loss(θ, std)
logpdf(
MvNormal(loss_function(train_set, θ)[1, :],
LinearAlgebra.Diagonal(abs2.(std .* ones(size(train_set)[2])))),
Diagonal(abs2.(std .* ones(size(train_set)[2])))),
zeros(size(train_set)[2]))
end
end
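As a quick standalone check of this diagonal-covariance Gaussian log-likelihood (the residual vector and std below are made-up values, not taken from the package):

using Distributions, LinearAlgebra
residuals = [0.1, -0.2, 0.05]   # stand-in for loss_function(train_set, θ)[1, :]
noise_std = 0.2
dist = MvNormal(residuals, Diagonal(abs2.(noise_std .* ones(length(residuals)))))
loglik = logpdf(dist, zeros(length(residuals)))
# equal to the sum of independent Normal(residual, noise_std) logpdfs evaluated at 0
loglik ≈ sum(logpdf.(Normal.(residuals, noise_std), 0.0))   # true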
175 changes: 175 additions & 0 deletions test/BPINN_PDE_tests.jl
@@ -351,3 +351,178 @@ end
@test sum(abs, pmean(p_) - 10.00) < 0.3 * idealp[1]
# @test sum(abs, pmean(p_[2]) - (8 / 3)) < 0.3 * idealp[2]
end

function recur_expression(exp, Dict_differentials)
for in_exp in exp.args
if !(in_exp isa Expr)
# skip +,== symbols, characters etc
continue

elseif in_exp.args[1] isa ModelingToolkit.Differential
# first symbol of differential term
# Dict_differentials for masking differential terms
# and resubstituting differentials in equations after putting in interpolations
# temp = in_exp.args[end]
Dict_differentials[eval(in_exp)] = Symbolics.variable("diff_$(length(Dict_differentials) + 1)")
return
else
recur_expression(in_exp, Dict_differentials)
end
end
end

@testitem "BPINN PDE Inv III: Improved Parametric Kuromo-Sivashinsky Equation solve" tags=[:pdebpinn] begin
using MCMCChains, Lux, ModelingToolkit, Distributions, OrdinaryDiffEq,
AdvancedHMC, Statistics, Random, Functors, NeuralPDE, MonteCarloMeasurements,
ComponentArrays
import ModelingToolkit: Interval, infimum, supremum

Random.seed!(100)

@parameters x, t, α
@variables u(..)
Dt = Differential(t)
Dx = Differential(x)
Dx2 = Differential(x)^2
Dx3 = Differential(x)^3
Dx4 = Differential(x)^4

# α = 1 (KS equation made parametric in α)
β = 4
γ = 1
eq = Dt(u(x, t)) + u(x, t) * Dx(u(x, t)) + α * Dx2(u(x, t)) + β * Dx3(u(x, t)) + γ * Dx4(u(x, t)) ~ 0

u_analytic(x, t; z = -x / 2 + t) = 11 + 15 * tanh(z) - 15 * tanh(z)^2 - 15 * tanh(z)^3
du(x, t; z = -x / 2 + t) = 15 / 2 * (tanh(z) + 1) * (3 * tanh(z) - 1) * sech(z)^2

bcs = [u(x, 0) ~ u_analytic(x, 0),
u(-10, t) ~ u_analytic(-10, t),
u(10, t) ~ u_analytic(10, t),
Dx(u(-10, t)) ~ du(-10, t),
Dx(u(10, t)) ~ du(10, t)]

# Space and time domains
domains = [x ∈ Interval(-10.0, 10.0),
    t ∈ Interval(0.0, 1.0)]

# Discretization
dx = 0.4
dt = 0.2

# Function to compute analytical solution at a specific point (x, t)
function u_analytic_point(x, t)
z = -x / 2 + t
return 11 + 15 * tanh(z) - 15 * tanh(z)^2 - 15 * tanh(z)^3
end

# Function to generate the dataset matrix
function generate_dataset_matrix(domains, dx, dt, xlim, tlim)
x_values = xlim[1]:dx:xlim[2]
t_values = tlim[1]:dt:tlim[2]

dataset = []

for t in t_values
for x in x_values
u_value = u_analytic_point(x, t)
push!(dataset, [u_value, x, t])
end
end

return vcat([data' for data in dataset]...)
end

# consider a sparse dataset taken from half of x's domain
datasetpde_new = [generate_dataset_matrix(domains, dx, dt, [-10, 0], [0.0, 1.0])]

# Adding Gaussian noise with a 0.8 std
noisydataset_new = deepcopy(datasetpde_new)
noisydataset_new[1][:, 1] = noisydataset_new[1][:, 1] .+
(randn(size(noisydataset_new[1][:, 1])) .* 0.8)

# Neural network
chain = Lux.Chain(Lux.Dense(2, 8, Lux.tanh),
Lux.Dense(8, 8, Lux.tanh),
Lux.Dense(8, 1))

# Discretization for old and new models
discretization = NeuralPDE.BayesianPINN([chain],
GridTraining([dx, dt]), param_estim = true, dataset = [noisydataset_new, nothing])

# let α default to 2.0
@named pde_system = PDESystem(eq,
bcs,
domains,
[x, t],
[u(x, t)],
[α],
defaults = Dict([α => 2.0]))

# necessary for loss function construction (involves operator masking)
eqs = pde_system.eqs
Dict_differentials = Dict()
exps = toexpr.(eqs)
nullobj = [recur_expression(exp, Dict_differentials) for exp in exps]

# Dict_differentials is now:
# Dict{Any, Any} with 5 entries:
# Differential(x)(Differential(x)(u(x, t))) => diff_5
# Differential(x)(Differential(x)(Differential(x)(u(x… => diff_1
# Differential(x)(Differential(x)(Differential(x)(Dif… => diff_2
# Differential(x)(u(x, t)) => diff_4
# Differential(t)(u(x, t)) => diff_3

# using the HMC algorithm for its convergence, stability, and training time (refer to the MCMC chain plots)
# the choice of std for each objective is very important
# pass the Dict_differentials and phystdnew arguments when using the new model

sol_new = ahmc_bayesian_pinn_pde(pde_system,
discretization;
draw_samples = 150,
bcstd = [0.1, 0.1, 0.1, 0.1, 0.1], phystdnew = [0.2],
phystd = [0.2], l2std = [0.5], param = [Distributions.Normal(2.0, 2)],
priorsNNw = (0.0, 1.0),
saveats = [1 / 100.0, 1 / 100.0],
Dict_differentials = Dict_differentials)

sol_old = ahmc_bayesian_pinn_pde(pde_system,
discretization;
draw_samples = 150,
bcstd = [0.1, 0.1, 0.1, 0.1, 0.1],
phystd = [0.2], l2std = [0.5], param = [Distributions.Normal(2.0, 2)],
priorsNNw = (0.0, 1.0),
saveats = [1 / 100.0, 1 / 100.0])

phi = discretization.phi[1]
xs, ts = [infimum(d.domain):dx:supremum(d.domain)
for (d, dx) in zip(domains, [dx / 10, dt])]
u_real = [[u_analytic(x, t) for x in xs] for t in ts]

u_predict_new = [[first(pmean(phi([x, t], sol_new.estimated_nn_params[1]))) for x in xs]
for t in ts]

diff_u_new = [[abs(u_analytic(x, t) -
first(pmean(phi([x, t], sol_new.estimated_nn_params[1]))))
for x in xs]
for t in ts]

u_predict_old = [[first(pmean(phi([x, t], sol_old.estimated_nn_params[1]))) for x in xs]
for t in ts]
diff_u_old = [[abs(u_analytic(x, t) -
first(pmean(phi([x, t], sol_old.estimated_nn_params[1]))))
for x in xs]
for t in ts]

@test all(all, [((diff_u_new[i]) .^ 2 .< 0.5) for i in 1:6]) == true
@test all(all, [((diff_u_old[i]) .^ 2 .< 0.5) for i in 1:6]) == false

MSE_new = [sum(abs2, diff_u_new[i]) for i in 1:6]
MSE_old = [sum(abs2, diff_u_old[i]) for i in 1:6]
@test (MSE_new .< MSE_old) == [1, 1, 1, 1, 1, 1]

param_new = sol_new.estimated_de_params[1]
param_old = sol_old.estimated_de_params[1]
α = 1
@test abs(param_new - α) < 0.2 * α
@test abs(param_new - α) < abs(param_old - α)
end
8 changes: 4 additions & 4 deletions test/BPINN_tests.jl
@@ -427,10 +427,10 @@ end
abs.(p .- sol_pestim2.estimated_de_params)
@test bitvec == ones(size(bitvec))

Loss_1 = mean(abs, u[1, :] .- pmean(sol_pestim1_1.ensemblesol[1])) +
mean(abs, u[2, :] .- pmean(sol_pestim1_1.ensemblesol[2]))
Loss_2 = mean(abs, u[1, :] .- pmean(sol_pestim2_1.ensemblesol[1])) +
mean(abs, u[2, :] .- pmean(sol_pestim2_1.ensemblesol[2]))
Loss_1 = mean(abs, u[1, :] .- pmean(sol_pestim1.ensemblesol[1])) +
mean(abs, u[2, :] .- pmean(sol_pestim1.ensemblesol[2]))
Loss_2 = mean(abs, u[1, :] .- pmean(sol_pestim2.ensemblesol[1])) +
mean(abs, u[2, :] .- pmean(sol_pestim2.ensemblesol[2]))

@test Loss_1 > Loss_2
end
