From 3905b1433af1e193618f1e9d40ad473612e654ef Mon Sep 17 00:00:00 2001 From: KirillZubov Date: Tue, 19 Mar 2024 17:26:43 +0400 Subject: [PATCH] fix typo --- src/NeuralPDE.jl | 31 ++++++++++++++++---------------- test/PINO_ode_tests.jl | 2 +- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/src/NeuralPDE.jl b/src/NeuralPDE.jl index 30e59486e7..68e7073ab4 100644 --- a/src/NeuralPDE.jl +++ b/src/NeuralPDE.jl @@ -24,14 +24,15 @@ using Symbolics: wrap, unwrap, arguments, operation using SymbolicUtils using AdvancedHMC, LogDensityProblems, LinearAlgebra, Functors, MCMCChains using MonteCarloMeasurements: Particles -using ModelingToolkit: value, nameof, toexpr, build_expr, expand_derivatives, Interval, infimum, supremum +using ModelingToolkit: value, nameof, toexpr, build_expr, expand_derivatives, Interval, +    infimum, supremum import DomainSets -using DomainSets: Domain, ClosedInterval, AbstractInterval, leftendpoint, rightendpoint, ProductDomain +using DomainSets: Domain, ClosedInterval, AbstractInterval, leftendpoint, rightendpoint, +    ProductDomain using SciMLBase: @add_kwonly, parameterless_type using UnPack: @unpack import ChainRulesCore, Lux, ComponentArrays using ChainRulesCore: @non_differentiable -using NeuralOperators RuntimeGeneratedFunctions.init(@__MODULE__) @@ -56,16 +57,16 @@ include("PDE_BPINN.jl") include("dgm.jl") -export NNODE, NNDAE, PINOODE, TRAINSET +export NNODE, NNDAE, PINOODE, TRAINSET, -    PhysicsInformedNN, discretize, -    GridTraining, StochasticTraining, QuadratureTraining, QuasiRandomTraining, -    WeightedIntervalTraining, -    build_loss_function, get_loss_function, -    generate_training_sets, get_variables, get_argument, get_bounds, -    get_numeric_integral, symbolic_discretize, -    AbstractAdaptiveLoss, NonAdaptiveLoss, GradientScaleAdaptiveLoss, -    MiniMaxAdaptiveLoss, LogOptions, -    ahmc_bayesian_pinn_ode, BNNODE, ahmc_bayesian_pinn_pde, vector_to_parameters, -    BPINNsolution, BayesianPINN, -    DeepGalerkin +    PhysicsInformedNN, discretize, +    GridTraining, StochasticTraining, 
QuadratureTraining, QuasiRandomTraining, +    WeightedIntervalTraining, +    build_loss_function, get_loss_function, +    generate_training_sets, get_variables, get_argument, get_bounds, +    get_numeric_integral, symbolic_discretize, +    AbstractAdaptiveLoss, NonAdaptiveLoss, GradientScaleAdaptiveLoss, +    MiniMaxAdaptiveLoss, LogOptions, +    ahmc_bayesian_pinn_ode, BNNODE, ahmc_bayesian_pinn_pde, vector_to_parameters, +    BPINNsolution, BayesianPINN, +    DeepGalerkin end # module diff --git a/test/PINO_ode_tests.jl b/test/PINO_ode_tests.jl index 0d35b5c166..e733b16a0c 100644 --- a/test/PINO_ode_tests.jl +++ b/test/PINO_ode_tests.jl @@ -81,7 +81,7 @@ begin * output data: set of solutions u(t){u0} corresponding initial conditions 'u0'. """ train_set = TRAINSET(prob_set, u_output_; isu0 = true) -    #TODO we argument u0 but dont actualy use u0 because we use only set of u0 for generate train set from prob_set +    #TODO: we accept the argument u0 but don't actually use it; the training set is generated from the set of u0 values in prob_set prob = ODEProblem(linear, 0.0f0, tspan, p) fno = FourierNeuralOperator(ch = (2, 16, 16, 16, 16, 16, 32, 1), modes = (16,), σ = gelu) opt = OptimizationOptimisers.Adam(0.001)