diff --git a/src/BPINN_ode.jl b/src/BPINN_ode.jl index b4622d6755..7a013371e7 100644 --- a/src/BPINN_ode.jl +++ b/src/BPINN_ode.jl @@ -148,7 +148,7 @@ end BPINN Solution contains the original solution from AdvancedHMC.jl sampling(BPINNstats contains fields related to that) > ensemblesol is the Probabilistic Estimate(MonteCarloMeasurements.jl Particles type) of Ensemble solution from All Neural Network's(made using all sampled parameters) output's. > estimated_nn_params - Probabilistic Estimate of NN params from sampled weights,biases -> estimated_de_params - Probabilistic Estimate of DE params from sampled unknown de paramters +> estimated_de_params - Probabilistic Estimate of DE params from sampled unknown DE parameters """ struct BPINNsolution{O <: BPINNstats, E, NP, OP, P} diff --git a/src/PDE_BPINN.jl b/src/PDE_BPINN.jl index 192d89eedb..3ce5db33f9 100644 --- a/src/PDE_BPINN.jl +++ b/src/PDE_BPINN.jl @@ -417,11 +417,11 @@ function ahmc_bayesian_pinn_pde(pde_system, discretization; metric = Metric(nparameters) hamiltonian = Hamiltonian(metric, ℓπ, ForwardDiff) - println("Current Physics Log-likelihood : ", + @info("Current Physics Log-likelihood : ", ℓπ.full_loglikelihood(setparameters(ℓπ, initial_θ), ℓπ.allstd)) - println("Current Prior Log-likelihood : ", priorlogpdf(ℓπ, initial_θ)) - println("Current MSE against dataset Log-likelihood : ", L2LossData(ℓπ, initial_θ)) + @info("Current Prior Log-likelihood : ", priorlogpdf(ℓπ, initial_θ)) + @info("Current MSE against dataset Log-likelihood : ", L2LossData(ℓπ, initial_θ)) # parallel sampling option if nchains != 1 @@ -476,12 +476,12 @@ function ahmc_bayesian_pinn_pde(pde_system, discretization; matrix_samples = hcat(samples...) 
mcmc_chain = MCMCChains.Chains(matrix_samples') - println("Sampling Complete.") - println("Current Physics Log-likelihood : ", + @info("Sampling Complete.") + @info("Current Physics Log-likelihood : ", ℓπ.full_loglikelihood(setparameters(ℓπ, samples[end]), ℓπ.allstd)) - println("Current Prior Log-likelihood : ", priorlogpdf(ℓπ, samples[end])) - println("Current MSE against dataset Log-likelihood : ", + @info("Current Prior Log-likelihood : ", priorlogpdf(ℓπ, samples[end])) + @info("Current MSE against dataset Log-likelihood : ", L2LossData(ℓπ, samples[end])) fullsolution = BPINNstats(mcmc_chain, samples, stats) diff --git a/src/advancedHMC_MCMC.jl b/src/advancedHMC_MCMC.jl index 4b680ce44a..506a38b674 100644 --- a/src/advancedHMC_MCMC.jl +++ b/src/advancedHMC_MCMC.jl @@ -65,7 +65,7 @@ mutable struct LogTargetDensity{C, S, ST <: AbstractTrainingStrategy, I, end """ -Cool function needed for converting vector of sampled parameters into ComponentVector in case of Lux chain output, derivatives +Function needed for converting vector of sampled parameters into ComponentVector in case of Lux chain output, derivatives the sampled parameters are of exotic type `Dual` due to ForwardDiff's autodiff tagging """ function vector_to_parameters(ps_new::AbstractVector, @@ -558,9 +558,9 @@ function ahmc_bayesian_pinn_ode(prob::DiffEqBase.ODEProblem, chain; end end - println("Current Physics Log-likelihood : ", physloglikelihood(ℓπ, initial_θ)) - println("Current Prior Log-likelihood : ", priorweights(ℓπ, initial_θ)) - println("Current MSE against dataset Log-likelihood : ", L2LossData(ℓπ, initial_θ)) + @info("Current Physics Log-likelihood : ", physloglikelihood(ℓπ, initial_θ)) + @info("Current Prior Log-likelihood : ", priorweights(ℓπ, initial_θ)) + @info("Current MSE against dataset Log-likelihood : ", L2LossData(ℓπ, initial_θ)) Adaptor, Metric, targetacceptancerate = Adaptorkwargs[:Adaptor], Adaptorkwargs[:Metric], Adaptorkwargs[:targetacceptancerate] @@ -608,10 +608,10 @@ 
function ahmc_bayesian_pinn_ode(prob::DiffEqBase.ODEProblem, chain; samples, stats = sample(hamiltonian, Kernel, initial_θ, draw_samples, adaptor; progress = progress, verbose = verbose) - println("Sampling Complete.") - println("Current Physics Log-likelihood : ", physloglikelihood(ℓπ, samples[end])) - println("Current Prior Log-likelihood : ", priorweights(ℓπ, samples[end])) - println("Current MSE against dataset Log-likelihood : ", + @info("Sampling Complete.") + @info("Current Physics Log-likelihood : ", physloglikelihood(ℓπ, samples[end])) + @info("Current Prior Log-likelihood : ", priorweights(ℓπ, samples[end])) + @info("Current MSE against dataset Log-likelihood : ", L2LossData(ℓπ, samples[end])) # return a chain(basic chain),samples and stats diff --git a/src/discretize.jl b/src/discretize.jl index 13a71735d8..2153a4f1c6 100644 --- a/src/discretize.jl +++ b/src/discretize.jl @@ -401,7 +401,7 @@ to the PDE. For more information, see `discretize` and `PINNRepresentation`. """ function SciMLBase.symbolic_discretize(pde_system::PDESystem, - discretization::PhysicsInformedNN; bayesian::Bool = false,dataset_given=[nothing]) + discretization::PhysicsInformedNN; bayesian::Bool = false,dataset_given=nothing) eqs = pde_system.eqs bcs = pde_system.bcs chain = discretization.chain @@ -587,7 +587,7 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem, if bayesian # required as Physics loss also needed on dataset domain points - pde_loss_functions1, bc_loss_functions1 = if !(dataset_given isa Vector{Nothing}) + pde_loss_functions1, bc_loss_functions1 = if !(dataset_given isa Nothing) if !(strategy isa GridTraining) throw("only GridTraining strategy allowed") else @@ -609,7 +609,7 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem, bc_loglikelihoods = [logpdf(Normal(0, stdbcs[j]), bc_loss_function(θ)) for (j, bc_loss_function) in enumerate(bc_loss_functions)] - if !(dataset_given isa Vector{Nothing}) + if !(dataset_given isa Nothing) 
pde_loglikelihoods += [logpdf(Normal(0, stdpdes[j]), pde_loss_function1(θ)) for (j, pde_loss_function1) in enumerate(pde_loss_functions1)] diff --git a/test/BPINN_PDEinvsol_tests.jl b/test/BPINN_PDEinvsol_tests.jl index 03b4d52f2d..ab2ecd874d 100644 --- a/test/BPINN_PDEinvsol_tests.jl +++ b/test/BPINN_PDEinvsol_tests.jl @@ -149,8 +149,7 @@ sol1 = ahmc_bayesian_pinn_pde(pde_system, priorsNNw = (0.0, 1.0), saveats = [0.01], param = [Normal(12.0, 2)], - dataset = dataset, - progress = true) + dataset = dataset) idealp = 10.0 p_ = sol1.estimated_de_params