diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 866d6547c9..152ce77e69 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -3,9 +3,13 @@ on:
pull_request:
branches:
- master
+ paths-ignore:
+ - 'docs/**'
push:
branches:
- master
+ paths-ignore:
+ - 'docs/**'
jobs:
test:
runs-on: ubuntu-latest
diff --git a/Project.toml b/Project.toml
index 1038233134..866d8bfdb0 100644
--- a/Project.toml
+++ b/Project.toml
@@ -63,7 +63,7 @@ Lux = "0.4, 0.5"
MCMCChains = "6"
ModelingToolkit = "8"
MonteCarloMeasurements = "1"
-Optim = "1.0"
+Optim = "=1.7.6"
Optimisers = "0.2, 0.3"
Optimization = "3"
QuasiMonteCarlo = "0.2.1"
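Note that the `=1.7.6` entry above is an exact-version pin: a bare `1.7.6` compat bound is read with caret semantics (any release at or above `1.7.6` but below `2.0.0`), while the leading `=` restricts the resolver to that single release. A minimal sketch of reproducing the pin in a local environment, assuming only that `Optim` is available from the General registry:

```julia
using Pkg

# Install exactly the version matched by the `=1.7.6` compat bound,
# then confirm which version the resolver picked.
Pkg.add(name = "Optim", version = "1.7.6")
Pkg.status("Optim")
```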
diff --git a/README.md b/README.md
index fdf955c4b4..8ae6f0f6e8 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@ the documentation, which contains the unreleased features.
- Integrated logging suite for handling connections to TensorBoard
- Handling of (partial) integro-differential equations and various stochastic equations
- Specialized forms for solving `ODEProblem`s with neural networks
- - Compatability with [Flux.jl](https://docs.sciml.ai/Flux.jl/stable/) and [Lux.jl](https://docs.sciml.ai/Lux/stable/)
+ - Compatibility with [Flux.jl](https://fluxml.ai/) and [Lux.jl](https://lux.csail.mit.edu/)
for all of the GPU-powered machine learning layers available from those libraries.
 - Compatibility with [NeuralOperators.jl](https://docs.sciml.ai/NeuralOperators/stable/) for
mixing DeepONets and other neural operators (Fourier Neural Operators, Graph Neural Operators,
diff --git a/docs/Project.toml b/docs/Project.toml
index a0da3777ee..5ca5b58742 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -20,7 +20,7 @@ SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"
[compat]
DiffEqBase = "6.106"
-Documenter = "0.27"
+Documenter = "1"
DomainSets = "0.6"
Flux = "0.13, 0.14"
Integrals = "3.3"
diff --git a/docs/make.jl b/docs/make.jl
index c50aadcee3..67ee53b3d4 100644
--- a/docs/make.jl
+++ b/docs/make.jl
@@ -10,19 +10,10 @@ include("pages.jl")
makedocs(sitename = "NeuralPDE.jl",
authors = "#",
- clean = true,
- doctest = false,
modules = [NeuralPDE],
- strict = [
- :doctest,
- :linkcheck,
- :parse_error,
- :example_block,
- # Other available options are
- # :autodocs_block, :cross_references, :docs_block, :eval_block, :example_block, :footnote, :meta_block, :missing_docs, :setup_block
- ],
- format = Documenter.HTML(analytics = "UA-90474609-3",
- assets = ["assets/favicon.ico"],
+ clean = true, doctest = false, linkcheck = true,
+ warnonly = [:missing_docs, :example_block],
+ format = Documenter.HTML(assets = ["assets/favicon.ico"],
canonical = "https://docs.sciml.ai/NeuralPDE/stable/"),
pages = pages)
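For context on the change above: Documenter 1 removed the `strict` keyword and instead fails the build on any documentation error unless that check is listed in `warnonly`, which is exactly what the replacement relies on. Since `doctest = false` is kept in `makedocs`, doctests can still be exercised on their own; a hedged sketch using standard Documenter API (not code from this PR), assuming NeuralPDE's docstrings contain `jldoctest` blocks:

```julia
using Documenter, NeuralPDE

# Run doctests separately rather than as part of `makedocs`.
doctest(NeuralPDE)
```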
diff --git a/docs/src/examples/Lotka_Volterra_BPINNs.md b/docs/src/examples/Lotka_Volterra_BPINNs.md
index a61b1a4882..9f222d2c12 100644
--- a/docs/src/examples/Lotka_Volterra_BPINNs.md
+++ b/docs/src/examples/Lotka_Volterra_BPINNs.md
@@ -46,7 +46,7 @@ tspan = (0.0, 6.0)
prob = ODEProblem(lotka_volterra, u0, tspan, p)
```
-With the [`saveat` argument](https://docs.sciml.ai/latest/basics/common_solver_opts/) we can specify that the solution is stored only at `saveat` time units(default saveat=1 / 50.0).
+With the [`saveat` argument](https://docs.sciml.ai/DiffEqDocs/stable/basics/common_solver_opts/), we can specify that the solution is stored only at intervals of `saveat` time units (default: `saveat = 1 / 50.0`).
```julia
# Plot solution got by Standard DifferentialEquations.jl ODE solver
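As a concrete illustration of `saveat` (a self-contained sketch; the `u0` and `p` values are standard Lotka-Volterra placeholders, not quoted from the tutorial):

```julia
using OrdinaryDiffEq

# Classic Lotka-Volterra predator-prey dynamics.
function lotka_volterra(u, p, t)
    α, β, γ, δ = p
    x, y = u
    dx = (α - β * y) * x
    dy = (δ * x - γ) * y
    return [dx, dy]
end

u0 = [1.0, 1.0]           # assumed initial populations
p = [1.5, 1.0, 3.0, 1.0]  # assumed (α, β, γ, δ)
prob = ODEProblem(lotka_volterra, u0, (0.0, 6.0), p)

# `saveat` only controls where the solution is stored; the integrator
# still chooses its internal steps adaptively.
sol = solve(prob, Tsit5(); saveat = 1 / 50.0)
```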
diff --git a/docs/src/examples/linear_parabolic.md b/docs/src/examples/linear_parabolic.md
index 4200ae27bd..0ae1432763 100644
--- a/docs/src/examples/linear_parabolic.md
+++ b/docs/src/examples/linear_parabolic.md
@@ -81,10 +81,14 @@ sym_prob = symbolic_discretize(pdesystem, discretization)
pde_inner_loss_functions = sym_prob.loss_functions.pde_loss_functions
bcs_inner_loss_functions = sym_prob.loss_functions.bc_loss_functions
+global iteration = 0
callback = function (p, l)
- println("loss: ", l)
- println("pde_losses: ", map(l_ -> l_(p), pde_inner_loss_functions))
- println("bcs_losses: ", map(l_ -> l_(p), bcs_inner_loss_functions))
+ if iteration % 10 == 0
+ println("loss: ", l)
+ println("pde_losses: ", map(l_ -> l_(p), pde_inner_loss_functions))
+ println("bcs_losses: ", map(l_ -> l_(p), bcs_inner_loss_functions))
+ end
+ global iteration += 1
return false
end
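A side note on the pattern above (a sketch of an equivalent formulation, not part of the documented change): the counter can be captured in a closure, which avoids the `global` declarations. The extra loss printouts are trimmed here for brevity:

```julia
# `iteration` lives in the closure's scope instead of module scope.
callback = let iteration = 0
    function (p, l)
        if iteration % 10 == 0
            println("loss: ", l)
        end
        iteration += 1
        return false
    end
end
```

The docs version with an explicit `global` is arguably clearer for a tutorial, so this is a stylistic alternative rather than a correction.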
diff --git a/docs/src/examples/nonlinear_elliptic.md b/docs/src/examples/nonlinear_elliptic.md
index d6c365466e..db8d6e228f 100644
--- a/docs/src/examples/nonlinear_elliptic.md
+++ b/docs/src/examples/nonlinear_elliptic.md
@@ -95,11 +95,15 @@ pde_inner_loss_functions = sym_prob.loss_functions.pde_loss_functions
bcs_inner_loss_functions = sym_prob.loss_functions.bc_loss_functions[1:6]
aprox_derivative_loss_functions = sym_prob.loss_functions.bc_loss_functions[7:end]
+global iteration = 0
callback = function (p, l)
- println("loss: ", l)
- println("pde_losses: ", map(l_ -> l_(p), pde_inner_loss_functions))
- println("bcs_losses: ", map(l_ -> l_(p), bcs_inner_loss_functions))
- println("der_losses: ", map(l_ -> l_(p), aprox_derivative_loss_functions))
+ if iteration % 10 == 0
+ println("loss: ", l)
+ println("pde_losses: ", map(l_ -> l_(p), pde_inner_loss_functions))
+ println("bcs_losses: ", map(l_ -> l_(p), bcs_inner_loss_functions))
+ println("der_losses: ", map(l_ -> l_(p), aprox_derivative_loss_functions))
+ end
+ global iteration += 1
return false
end
diff --git a/docs/src/index.md b/docs/src/index.md
index 2d18be492a..20f1b3b53d 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -15,7 +15,7 @@ networks which both approximate physical laws and real data simultaneously.
- Integrated logging suite for handling connections to TensorBoard.
- Handling of (partial) integro-differential equations and various stochastic equations.
- Specialized forms for solving `ODEProblem`s with neural networks.
- - Compatibility with [Flux.jl](https://docs.sciml.ai/Flux.jl/stable/) and [Lux.jl](https://docs.sciml.ai/Lux/stable/).
+ - Compatibility with [Flux.jl](https://fluxml.ai/) and [Lux.jl](https://lux.csail.mit.edu/)
for all the GPU-powered machine learning layers available from those libraries.
- Compatibility with [NeuralOperators.jl](https://docs.sciml.ai/NeuralOperators/stable/) for
mixing DeepONets and other neural operators (Fourier Neural Operators, Graph Neural Operators,
@@ -132,32 +132,19 @@ Pkg.status(; mode = PKGMODE_MANIFEST) # hide
```
-```@raw html
-You can also download the
-manifest file and the
-project file.
-```
+```@eval
+using TOML
+using Markdown
+version = TOML.parse(read("../../Project.toml", String))["version"]
+name = TOML.parse(read("../../Project.toml", String))["name"]
+link_manifest = "https://github.com/SciML/" * name * ".jl/tree/gh-pages/v" * version *
+                "/assets/Manifest.toml"
+link_project = "https://github.com/SciML/" * name * ".jl/tree/gh-pages/v" * version *
+               "/assets/Project.toml"
+Markdown.parse("""You can also download the
+[manifest]($link_manifest)
+file and the
+[project]($link_project)
+file.
+""")
+```
diff --git a/src/pinn_types.jl b/src/pinn_types.jl
index 3c74022b38..8a4766d122 100644
--- a/src/pinn_types.jl
+++ b/src/pinn_types.jl
@@ -66,7 +66,7 @@ methodology.
should only be used to more directly impose functional information in the training problem,
for example imposing the boundary condition by the test function formulation.
* `adaptive_loss`: the choice for the adaptive loss function. See the
- [adaptive loss page](@id adaptive_loss) for more details. Defaults to no adaptivity.
+ [adaptive loss page](@ref adaptive_loss) for more details. Defaults to no adaptivity.
* `additional_loss`: a function `additional_loss(phi, θ, p_)` where `phi` are the neural
network trial solutions, `θ` are the weights of the neural network(s), and `p_` are the
hyperparameters of the `OptimizationProblem`. If `param_estim = true`, then `θ` additionally
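To make the documented signature concrete, here is a hedged sketch of an `additional_loss` that fits observational data (the `data_t` / `data_u` arrays and the single-network `phi(t, θ)` calling convention are illustrative assumptions, not part of this docstring):

```julia
# Hypothetical observations on a time grid.
data_t = collect(0.0:0.1:1.0)'
data_u = rand(1, length(data_t))

# Mean squared error between the trial solution and the data;
# `p_` is unused here but kept to match the documented signature.
function additional_loss(phi, θ, p_)
    return sum(abs2, phi(data_t, θ) .- data_u) / length(data_u)
end
```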