diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c3ad581062e..c40c40bb18e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,9 +1,12 @@ # Contributing Trixi.jl is an open-source project and we are very happy to accept contributions -from the community. Please feel free to open issues or submit patches (preferably -as pull requests) any time. For planned larger contributions, it is often -beneficial to get in contact with one of the principal developers first (see +from the community. Please feel free to +[open issues](https://github.com/trixi-framework/Trixi.jl/issues/new/choose) +or submit patches (preferably as +[pull requests](https://github.com/trixi-framework/Trixi.jl/pulls)) +any time. For planned larger contributions, it is often beneficial to get +in contact with one of the principal developers first (see [AUTHORS.md](AUTHORS.md)). Trixi.jl and its contributions are licensed under the MIT license (see diff --git a/NEWS.md b/NEWS.md index d70504d8c85..022252e61a9 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,13 +4,20 @@ Trixi.jl follows the interpretation of [semantic versioning (semver)](https://ju used in the Julia ecosystem. Notable changes will be documented in this file for human readability. +## Changes in the v0.7 lifecycle + +#### Added +- Implementation of `TimeSeriesCallback` for curvilinear meshes on `UnstructuredMesh2D` and extension + to 1D and 3D on `TreeMesh`. + + ## Changes when updating to v0.7 from v0.6.x #### Added #### Changed -- The default wave speed estimate used within `flux_hll` is now `min_max_speed_davis` +- The default wave speed estimate used within `flux_hll` is now `min_max_speed_davis` instead of `min_max_speed_naive`. #### Deprecated @@ -26,7 +33,7 @@ for human readability. #### Added - AMR for hyperbolic-parabolic equations on 3D `P4estMesh` - `flux_hllc` on non-cartesian meshes for `CompressibleEulerEquations{2,3}D` -- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, +- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, can now be digested by Trixi in 2D and 3D. - Subcell (positivity) limiting support for nonlinear variables in 2D for `TreeMesh` - Added Lighthill-Whitham-Richards (LWR) traffic model @@ -40,7 +47,7 @@ for human readability. #### Changed - The wave speed estimates for `flux_hll`, `FluxHLL()` are now consistent across equations. - In particular, the functions `min_max_speed_naive`, `min_max_speed_einfeldt` are now + In particular, the functions `min_max_speed_naive`, `min_max_speed_einfeldt` are now conceptually identical across equations. Users, who have been using `flux_hll` for MHD have now to use `flux_hlle` in order to use the Einfeldt wave speed estimate. diff --git a/Project.toml b/Project.toml index 6b44af4a3fa..27df49ed4fa 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.7.3-pre" +version = "0.7.5-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" @@ -31,7 +31,6 @@ RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" Requires = "ae029012-a4dd-5104-9daa-d747884805df" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" -Setfield = "efcf1570-3423-57d1-acb7-fd33fddbac46" SimpleUnPack = "ce78b400-467f-4804-87d8-8f486da07d0a" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" StartUpDG = "472ebc20-7c99-4d4b-9470-8fde4e9faa0f" @@ -84,7 +83,6 @@ RecipesBase = "1.1" Reexport = "1.0" Requires = "1.1" SciMLBase = "1.90, 2" -Setfield = "1" SimpleUnPack = "1.1" SparseArrays = "1" StartUpDG = "0.17.7" diff --git a/README.md b/README.md index 71370d3478e..86a8514a5ba 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,8 @@ [![Aqua QA](https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg)](https://github.com/JuliaTesting/Aqua.jl) [![License: MIT](https://img.shields.io/badge/License-MIT-success.svg)](https://opensource.org/licenses/MIT) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3996439.svg)](https://doi.org/10.5281/zenodo.3996439) -[![Downloads](https://shields.io/endpoint?url=https://pkgs.genieframework.com/api/v1/badge/Trixi)](https://pkgs.genieframework.com?packages=Trixi) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8695/badge)](https://www.bestpractices.dev/projects/8695) + diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000000..faa84a770bc --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,30 @@ +# Security Policy + +The Trixi.jl development team takes security issues seriously. We appreciate +all efforts to responsibly disclose any security issues and will make every +effort to acknowledge contributions. + + +## Supported Versions + +The current stable release following the interpretation of +[semantic versioning (SemVer)](https://julialang.github.io/Pkg.jl/dev/compatibility/#Version-specifier-format-1) +used in the Julia ecosystem is supported with security updates. + + +## Reporting a Vulnerability + +To report a security issue, please use the GitHub Security Advisory +["Report a Vulnerability"](https://github.com/trixi-framework/Trixi.jl/security/advisories/new) +tab. + +We will send a response indicating the next steps in handling your report. +After the initial reply to your report, we will keep you informed of the +progress towards a fix and full announcement, and may ask for additional +information or guidance. + +Please report security bugs in third-party modules directly to the person +or team maintaining the module. + +Public notifications of vulnerabilities will be shared in community channels +such as Slack. diff --git a/docs/.gitignore b/docs/.gitignore index 01f3ac8d79a..c8a9e842246 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -1 +1,3 @@ src/code_of_conduct.md +src/contributing.md + diff --git a/docs/literate/src/files/scalar_linear_advection_1d.jl b/docs/literate/src/files/scalar_linear_advection_1d.jl index 77ba7b087cc..9b48f29d341 100644 --- a/docs/literate/src/files/scalar_linear_advection_1d.jl +++ b/docs/literate/src/files/scalar_linear_advection_1d.jl @@ -115,7 +115,7 @@ integral = sum(nodes.^3 .* weights) # To approximate the solution, we need to get the polynomial coefficients $\{u_j^{Q_l}\}_{j=0}^N$ # for every element $Q_l$. 
-# After defining all nodes, we can implement the spatial coordinate $x$ and its initial value $u0$ +# After defining all nodes, we can implement the spatial coordinate $x$ and its initial value $u0 = u(t_0)$ # for every node. x = Matrix{Float64}(undef, length(nodes), n_elements) for element in 1:n_elements diff --git a/docs/make.jl b/docs/make.jl index 8427c4049bf..f752a7b0ee6 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -28,19 +28,36 @@ DocMeta.setdocmeta!(Trixi2Vtk, :DocTestSetup, :(using Trixi2Vtk); recursive=true # as necessary # Based on: https://github.com/ranocha/SummationByPartsOperators.jl/blob/0206a74140d5c6eb9921ca5021cb7bf2da1a306d/docs/make.jl#L27-L41 open(joinpath(@__DIR__, "src", "code_of_conduct.md"), "w") do io - # Point to source license file - println(io, """ - ```@meta - EditURL = "https://github.com/trixi-framework/Trixi.jl/blob/main/CODE_OF_CONDUCT.md" - ``` - """) - # Write the modified contents - println(io, "# [Code of Conduct](@id code-of-conduct)") - println(io, "") - for line in eachline(joinpath(dirname(@__DIR__), "CODE_OF_CONDUCT.md")) - line = replace(line, "[AUTHORS.md](AUTHORS.md)" => "[Authors](@ref)") - println(io, "> ", line) - end + # Point to source license file + println(io, + """ + ```@meta + EditURL = "https://github.com/trixi-framework/Trixi.jl/blob/main/CODE_OF_CONDUCT.md" + ``` + """) + # Write the modified contents + println(io, "# [Code of Conduct](@id code-of-conduct)") + println(io, "") + for line in eachline(joinpath(dirname(@__DIR__), "CODE_OF_CONDUCT.md")) + line = replace(line, "[AUTHORS.md](AUTHORS.md)" => "[Authors](@ref)") + println(io, "> ", line) + end +end + +open(joinpath(@__DIR__, "src", "contributing.md"), "w") do io + # Point to source license file + println(io, + """ + ```@meta + EditURL = "https://github.com/trixi-framework/Trixi.jl/blob/main/CONTRIBUTING.md" + ``` + """) + # Write the modified contents + for line in eachline(joinpath(dirname(@__DIR__), "CONTRIBUTING.md")) + line = replace(line, "[LICENSE.md](LICENSE.md)" => "[License](@ref)") + line = replace(line, "[AUTHORS.md](AUTHORS.md)" => "[Authors](@ref)") + println(io, line) + end end # Create tutorials for the following files: diff --git a/docs/src/contributing.md b/docs/src/contributing.md deleted file mode 100644 index 5f996477215..00000000000 --- a/docs/src/contributing.md +++ /dev/null @@ -1,54 +0,0 @@ -# Contributing - -Trixi.jl is an open-source project and we are very happy to accept contributions -from the community. Please feel free to open issues or submit patches (preferably -as merge requests) any time. For planned larger contributions, it is often -beneficial to get in contact with one of the principal developers first (see -[Authors](@ref)). - -Trixi.jl and its contributions are licensed under the MIT license (see -[License](@ref)). As a contributor, you certify that all your -contributions are in conformance with the *Developer Certificate of Origin -(Version 1.1)*, which is reproduced below. - -## Developer Certificate of Origin (Version 1.1) -The following text was taken from -[https://developercertificate.org](https://developercertificate.org): - - Developer Certificate of Origin - Version 1.1 - - Copyright (C) 2004, 2006 The Linux Foundation and its contributors. - 1 Letterman Drive - Suite D4700 - San Francisco, CA, 94129 - - Everyone is permitted to copy and distribute verbatim copies of this - license document, but changing it is not allowed. 
- - - Developer's Certificate of Origin 1.1 - - By making a contribution to this project, I certify that: - - (a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - - (b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - - (c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - - (d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. diff --git a/examples/tree_1d_dgsem/elixir_euler_source_terms.jl b/examples/tree_1d_dgsem/elixir_euler_source_terms.jl index 555910f69f0..cb8a09057d9 100644 --- a/examples/tree_1d_dgsem/elixir_euler_source_terms.jl +++ b/examples/tree_1d_dgsem/elixir_euler_source_terms.jl @@ -44,9 +44,12 @@ save_solution = SaveSolutionCallback(interval = 100, stepsize_callback = StepsizeCallback(cfl = 0.8) +time_series = TimeSeriesCallback(semi, [0.0, 0.33, 1.0], interval = 10) + callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, + time_series, stepsize_callback) ############################################################################### diff --git a/examples/tree_3d_dgsem/elixir_euler_source_terms.jl b/examples/tree_3d_dgsem/elixir_euler_source_terms.jl index f0246c30490..021fd09f316 100644 --- a/examples/tree_3d_dgsem/elixir_euler_source_terms.jl +++ b/examples/tree_3d_dgsem/elixir_euler_source_terms.jl @@ -41,9 +41,14 @@ save_solution = SaveSolutionCallback(interval = 100, stepsize_callback = StepsizeCallback(cfl = 0.6) +time_series = TimeSeriesCallback(semi, + [(0.0, 0.0, 0.0), (0.33, 0.33, 0.33), (1.0, 1.0, 1.0)], + interval = 10) + callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, + time_series, stepsize_callback) ############################################################################### diff --git a/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl b/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl new file mode 100644 index 00000000000..13233cdadbc --- /dev/null +++ b/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl @@ -0,0 +1,115 @@ +# An elixir that has an alternative convergence test that uses +# the `TimeSeriesCallback` on several gauge points. 
Many of the +# gauge points are selected as "stress tests" for the element +# identification, e.g., a gauge point that lies on an +# element corner of a curvilinear mesh + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +# Modify the manufactured solution test to use `L = sqrt(2)` +# in the initial condition and source terms +function initial_condition_convergence_shifted(x, t, + equations::CompressibleEulerEquations2D) + c = 2 + A = 0.1 + L = sqrt(2) + f = 1 / L + ω = 2 * pi * f + ini = c + A * sin(ω * (x[1] + x[2] - t)) + + rho = ini + rho_v1 = ini + rho_v2 = ini + rho_e = ini^2 + + return SVector(rho, rho_v1, rho_v2, rho_e) +end + +@inline function source_terms_convergence_shifted(u, x, t, + equations::CompressibleEulerEquations2D) + # Same settings as in `initial_condition` + c = 2 + A = 0.1 + L = sqrt(2) + f = 1 / L + ω = 2 * pi * f + γ = equations.gamma + + x1, x2 = x + si, co = sincos(ω * (x1 + x2 - t)) + rho = c + A * si + rho_x = ω * A * co + # Note that d/dt rho = -d/dx rho = -d/dy rho. + + tmp = (2 * rho - 1) * (γ - 1) + + du1 = rho_x + du2 = rho_x * (1 + tmp) + du3 = du2 + du4 = 2 * rho_x * (rho + tmp) + + return SVector(du1, du2, du3, du4) +end + +initial_condition = initial_condition_convergence_shifted + +source_term = source_terms_convergence_shifted + +############################################################################### +# Get the DG approximation space + +solver = DGSEM(polydeg = 6, surface_flux = flux_lax_friedrichs) + +############################################################################### +# Get the curved quad mesh from a file (downloads the file if not available locally) + +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/b434e724e3972a9c4ee48d58c80cdcdb/raw/55c916cd8c0294a2d4a836e960dac7247b7c8ccf/mesh_multiple_flips.mesh", + joinpath(@__DIR__, "mesh_multiple_flips.mesh")) + +mesh = UnstructuredMesh2D(mesh_file, periodicity = true) + +############################################################################### +# create the semi discretization object + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_term) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +time_series = TimeSeriesCallback(semi, + [(0.75, 0.7), (1.23, 0.302), (0.8, 1.0), + (0.353553390593274, 0.353553390593274), + (0.505, 1.125), (1.37, 0.89), (0.349, 0.7153), + (0.883883476483184, 0.406586401289607), + (sqrt(2), sqrt(2))]; + interval = 10) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + time_series, + alive_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, RDPK3SpFSAL49(); abstol = 1.0e-6, reltol = 1.0e-6, + ode_default_options()..., callback = callbacks); + +summary_callback() # print the timer summary diff --git a/src/Trixi.jl b/src/Trixi.jl index 1baa5eae806..300f8f1022e 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -22,7 +22,7 @@ using LinearAlgebra: LinearAlgebra, Diagonal, diag, dot, mul!, norm, cross, norm UniformScaling, det using Printf: @printf, @sprintf, println using SparseArrays: AbstractSparseMatrix, AbstractSparseMatrixCSC, sparse, droptol!, - rowvals, nzrange, nonzeros, spzeros + rowvals, nzrange, nonzeros # import @reexport now to make it available for further imports/exports using Reexport: @reexport @@ -32,10 +32,10 @@ using Reexport: @reexport using MPI: MPI using SciMLBase: CallbackSet, DiscreteCallback, - ODEProblem, ODESolution, ODEFunction, + ODEProblem, ODESolution, SplitODEProblem import SciMLBase: get_du, get_tmp_cache, u_modified!, - AbstractODEIntegrator, init, step!, check_error, + init, step!, check_error, get_proposed_dt, set_proposed_dt!, terminate!, remake, add_tstop!, has_tstop, first_tstop @@ -57,7 +57,6 @@ using Polyester: Polyester, @batch # You know, the cheapest threads you can find using OffsetArrays: OffsetArray, OffsetVector using P4est using T8code -using Setfield: @set using RecipesBase: RecipesBase using Requires: @require using Static: Static, One, True, False @@ -66,7 +65,7 @@ using StaticArrays: StaticArrays, MVector, MArray, SMatrix, @SMatrix using StrideArrays: PtrArray, StrideArray, StaticInt @reexport using StructArrays: StructArrays, StructArray using TimerOutputs: TimerOutputs, @notimeit, TimerOutput, print_timer, reset_timer! -using Triangulate: Triangulate, TriangulateIO, triangulate +using Triangulate: Triangulate, TriangulateIO export TriangulateIO # for type parameter in DGMultiMesh using TriplotBase: TriplotBase using TriplotRecipes: DGTriPseudocolor @@ -84,9 +83,9 @@ const _PREFERENCE_LOG = @load_preference("log", "log_Trixi_NaN") # finite difference SBP operators using SummationByPartsOperators: AbstractDerivativeOperator, - AbstractNonperiodicDerivativeOperator, DerivativeOperator, + AbstractNonperiodicDerivativeOperator, AbstractPeriodicDerivativeOperator, - PeriodicDerivativeOperator, grid + grid import SummationByPartsOperators: integrate, semidiscretize, compute_coefficients, compute_coefficients!, left_boundary_weight, right_boundary_weight diff --git a/src/auxiliary/auxiliary.jl b/src/auxiliary/auxiliary.jl index 92da9a5ba8b..972a748c56b 100644 --- a/src/auxiliary/auxiliary.jl +++ b/src/auxiliary/auxiliary.jl @@ -242,6 +242,8 @@ macro threaded(expr) # !!! danger "Heisenbug" # Look at the comments for `wrap_array` when considering to change this macro. 
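+    # A typical use, as found throughout the solver kernels (here, `dg` and `cache`
+    # stand for the usual solver arguments):
+    #
+    #     @threaded for element in eachelement(dg, cache)
+    #         # ... perform work on this element ...
+    #     end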
+ # By using `Trixi.@batch` we allow users of Trixi.jl to use `@threaded` without having + # Polyester.jl in their namespace. return esc(quote Trixi.@batch $(expr) end) diff --git a/src/callbacks_step/time_series.jl b/src/callbacks_step/time_series.jl index 7baa6b9c5a1..ae18c85700d 100644 --- a/src/callbacks_step/time_series.jl +++ b/src/callbacks_step/time_series.jl @@ -23,8 +23,7 @@ After the last time step, the results are stored in an HDF5 file `filename` in d The real data type `RealT` and data type for solution variables `uEltype` default to the respective types used in the solver and the cache. -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. +Currently this callback is only implemented for [`TreeMesh`](@ref) and [`UnstructuredMesh2D`](@ref). """ mutable struct TimeSeriesCallback{RealT <: Real, uEltype <: Real, SolutionVariables, VariableNames, Cache} @@ -96,6 +95,11 @@ function TimeSeriesCallback(mesh, equations, solver, cache, point_coordinates; throw(ArgumentError("`point_coordinates` must be a matrix of size n_points × ndims")) end + # create the output folder if it does not exist already + if mpi_isroot() && !isdir(output_directory) + mkpath(output_directory) + end + # Transpose point_coordinates to our usual format [ndims, n_points] # Note: They are accepted in a different format to allow direct input from `readdlm` point_coordinates_ = permutedims(point_coordinates) @@ -213,5 +217,6 @@ function (time_series_callback::TimeSeriesCallback)(integrator) end include("time_series_dg.jl") -include("time_series_dg2d.jl") +include("time_series_dg_tree.jl") +include("time_series_dg_unstructured.jl") end # @muladd diff --git a/src/callbacks_step/time_series_dg.jl b/src/callbacks_step/time_series_dg.jl index 1b63979d579..3781a10662d 100644 --- a/src/callbacks_step/time_series_dg.jl +++ b/src/callbacks_step/time_series_dg.jl @@ -5,8 +5,10 @@ @muladd begin #! 
format: noindent -# Store time series file for a TreeMesh with a DG solver -function save_time_series_file(time_series_callback, mesh::TreeMesh, equations, dg::DG) +# Store time series file for a DG solver +function save_time_series_file(time_series_callback, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + equations, dg::DG) @unpack (interval, solution_variables, variable_names, output_directory, filename, point_coordinates, point_data, time, step, time_series_cache) = time_series_callback @@ -32,4 +34,41 @@ function save_time_series_file(time_series_callback, mesh::TreeMesh, equations, end end end + +# Creates cache for time series callback +function create_cache_time_series(point_coordinates, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + dg, cache) + # Determine element ids for point coordinates + element_ids = get_elements_by_coordinates(point_coordinates, mesh, dg, cache) + + # Calculate & store Lagrange interpolation polynomials + interpolating_polynomials = calc_interpolating_polynomials(point_coordinates, + element_ids, mesh, + dg, cache) + + time_series_cache = (; element_ids, interpolating_polynomials) + + return time_series_cache +end + +function get_elements_by_coordinates(coordinates, mesh, dg, cache) + element_ids = Vector{Int}(undef, size(coordinates, 2)) + get_elements_by_coordinates!(element_ids, coordinates, mesh, dg, cache) + + return element_ids +end + +function calc_interpolating_polynomials(coordinates, element_ids, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + dg, cache) + interpolating_polynomials = Array{real(dg), 3}(undef, + nnodes(dg), ndims(mesh), + length(element_ids)) + calc_interpolating_polynomials!(interpolating_polynomials, coordinates, element_ids, + mesh, dg, + cache) + + return interpolating_polynomials +end end # @muladd diff --git a/src/callbacks_step/time_series_dg2d.jl b/src/callbacks_step/time_series_dg_tree.jl similarity index 60% rename from src/callbacks_step/time_series_dg2d.jl rename to src/callbacks_step/time_series_dg_tree.jl index c15945d6e16..37d4e6ea705 100644 --- a/src/callbacks_step/time_series_dg2d.jl +++ b/src/callbacks_step/time_series_dg_tree.jl @@ -5,21 +5,6 @@ @muladd begin #! 
format: noindent -# Creates cache for time series callback -function create_cache_time_series(point_coordinates, mesh::TreeMesh{2}, dg, cache) - # Determine element ids for point coordinates - element_ids = get_elements_by_coordinates(point_coordinates, mesh, dg, cache) - - # Calculate & store Lagrange interpolation polynomials - interpolating_polynomials = calc_interpolating_polynomials(point_coordinates, - element_ids, mesh, - dg, cache) - - time_series_cache = (; element_ids, interpolating_polynomials) - - return time_series_cache -end - # Find element ids containing coordinates given as a matrix [ndims, npoints] function get_elements_by_coordinates!(element_ids, coordinates, mesh::TreeMesh, dg, cache) @@ -68,13 +53,6 @@ function get_elements_by_coordinates!(element_ids, coordinates, mesh::TreeMesh, return element_ids end -function get_elements_by_coordinates(coordinates, mesh, dg, cache) - element_ids = Vector{Int}(undef, size(coordinates, 2)) - get_elements_by_coordinates!(element_ids, coordinates, mesh, dg, cache) - - return element_ids -end - # Calculate the interpolating polynomials to extract data at the given coordinates # The coordinates are known to be located in the respective element in `element_ids` function calc_interpolating_polynomials!(interpolating_polynomials, coordinates, @@ -106,23 +84,43 @@ function calc_interpolating_polynomials!(interpolating_polynomials, coordinates, return interpolating_polynomials end -function calc_interpolating_polynomials(coordinates, element_ids, mesh::TreeMesh, dg, - cache) - interpolating_polynomials = Array{real(dg), 3}(undef, - nnodes(dg), ndims(mesh), - length(element_ids)) - calc_interpolating_polynomials!(interpolating_polynomials, coordinates, element_ids, - mesh, dg, - cache) +# Record the solution variables at each given point for the 1D case +function record_state_at_points!(point_data, u, solution_variables, + n_solution_variables, + mesh::TreeMesh{1}, equations, dg::DG, + time_series_cache) + @unpack element_ids, interpolating_polynomials = time_series_cache + old_length = length(first(point_data)) + new_length = old_length + n_solution_variables - return interpolating_polynomials + # Loop over all points/elements that should be recorded + for index in 1:length(element_ids) + # Extract data array and element id + data = point_data[index] + element_id = element_ids[index] + + # Make room for new data to be recorded + resize!(data, new_length) + data[(old_length + 1):new_length] .= zero(eltype(data)) + + # Loop over all nodes to compute their contribution to the interpolated values + for i in eachnode(dg) + u_node = solution_variables(get_node_vars(u, equations, dg, i, + element_id), equations) + + for v in 1:length(u_node) + data[old_length + v] += (u_node[v] * + interpolating_polynomials[i, 1, index]) + end + end + end end -# Record the solution variables at each given point +# Record the solution variables at each given point for the 2D case function record_state_at_points!(point_data, u, solution_variables, n_solution_variables, - mesh::TreeMesh{2}, equations, dg::DG, - time_series_cache) + mesh::TreeMesh{2}, + equations, dg::DG, time_series_cache) @unpack element_ids, interpolating_polynomials = time_series_cache old_length = length(first(point_data)) new_length = old_length + n_solution_variables @@ -150,4 +148,38 @@ function record_state_at_points!(point_data, u, solution_variables, end end end + +# Record the solution variables at each given point for the 3D case +function record_state_at_points!(point_data, u, 
solution_variables,
+                                 n_solution_variables,
+                                 mesh::TreeMesh{3}, equations, dg::DG,
+                                 time_series_cache)
+    @unpack element_ids, interpolating_polynomials = time_series_cache
+    old_length = length(first(point_data))
+    new_length = old_length + n_solution_variables
+
+    # Loop over all points/elements that should be recorded
+    for index in 1:length(element_ids)
+        # Extract data array and element id
+        data = point_data[index]
+        element_id = element_ids[index]
+
+        # Make room for new data to be recorded
+        resize!(data, new_length)
+        data[(old_length + 1):new_length] .= zero(eltype(data))
+
+        # Loop over all nodes to compute their contribution to the interpolated values
+        for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg)
+            u_node = solution_variables(get_node_vars(u, equations, dg, i, j, k,
+                                                      element_id), equations)
+
+            for v in 1:length(u_node)
+                data[old_length + v] += (u_node[v]
+                                         * interpolating_polynomials[i, 1, index]
+                                         * interpolating_polynomials[j, 2, index]
+                                         * interpolating_polynomials[k, 3, index])
+            end
+        end
+    end
+end
end # @muladd
diff --git a/src/callbacks_step/time_series_dg_unstructured.jl b/src/callbacks_step/time_series_dg_unstructured.jl
new file mode 100644
index 00000000000..f6d1bb48f24
--- /dev/null
+++ b/src/callbacks_step/time_series_dg_unstructured.jl
@@ -0,0 +1,305 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+# Elements on an `UnstructuredMesh2D` are possibly curved. Assume that each
+# element is convex, i.e., all interior angles are less than 180 degrees.
+# This routine computes the shortest distance from a given point to each element
+# surface in the mesh. These distances then indicate possible candidate elements.
+# From these candidates we (essentially) apply a ray casting strategy: we identify
+# the element in which the point lies by comparing the ray from the point to its
+# nearest boundary point with the rays from the candidate element barycenters to
+# that same boundary point. If these rays point in the same direction, then we
+# have identified the desired element location.
+function get_elements_by_coordinates!(element_ids, coordinates,
+                                      mesh::UnstructuredMesh2D,
+                                      dg, cache)
+    if length(element_ids) != size(coordinates, 2)
+        throw(DimensionMismatch("storage length for element ids does not match the number of coordinates"))
+    end
+
+    # Reset element ids - 0 indicates "not (yet) found"
+    element_ids .= 0
+
+    # Compute and save the barycenter of each element
+    bary_centers = zeros(eltype(mesh.corners), 2, mesh.n_elements)
+    calc_bary_centers!(bary_centers, dg, cache)
+
+    # Iterate over coordinates
+    distances = zeros(eltype(mesh.corners), mesh.n_elements)
+    indices = zeros(Int, mesh.n_elements, 2)
+    for index in 1:length(element_ids)
+        # Grab the current point for which the containing element needs to be found
+        point = SVector(coordinates[1, index],
+                        coordinates[2, index])
+
+        # Compute the minimum distance between the `point` and all the element surfaces
+        # saved into `distances`. The point in `node_coordinates` that gives said minimum
+        # distance on each element is saved in `indices`
+        distances, indices = calc_minimum_surface_distance(point,
+                                                           cache.elements.node_coordinates,
+                                                           dg, mesh)
+
+        # Get the candidate elements where the `point` might live
+        candidates = findall(abs.(minimum(distances) .- distances) .<
+                             500 * eps(eltype(point)))
+
+        # The minimal surface point is on a boundary, so it does not matter from which
+        # candidate element we grab it. So just use the first one
+        surface_point = SVector(cache.elements.node_coordinates[1,
+                                                                indices[candidates[1],
+                                                                        1],
+                                                                indices[candidates[1],
+                                                                        2],
+                                                                candidates[1]],
+                                cache.elements.node_coordinates[2,
+                                                                indices[candidates[1],
+                                                                        1],
+                                                                indices[candidates[1],
+                                                                        2],
+                                                                candidates[1]])
+
+        # Compute the vector pointing from the current `point` toward the surface
+        P = surface_point - point
+
+        # If the vector `P` is the zero vector then this `point` is at an element corner or
+        # on a surface. In this case the choice of a candidate element is ambiguous and
+        # we just use the first candidate. However, solutions might differ at discontinuous
+        # interfaces such that this choice may influence the result.
+        if sum(P .* P) < 500 * eps(eltype(point))
+            element_ids[index] = candidates[1]
+            continue
+        end
+
+        # Loop through all the element candidates until we find a vector from the barycenter
+        # to the surface that points in the same direction as the current `point` vector.
+        # This then gives us the correct element.
+        for element in 1:length(candidates)
+            bary_center = SVector(bary_centers[1, candidates[element]],
+                                  bary_centers[2, candidates[element]])
+            # Vector pointing from the barycenter toward the minimal `surface_point`
+            B = surface_point - bary_center
+            if sum(P .* B) > zero(eltype(bary_center))
+                element_ids[index] = candidates[element]
+                break
+            end
+        end
+    end
+
+    return element_ids
+end
+
+# Use the available `node_coordinates` on each element to compute and save the barycenter.
+# In essence, the barycenter is an average where all the x and y node coordinates are
+# summed and then divided by the total number of degrees of freedom on the element, i.e.,
+# the value of `n^2` in two spatial dimensions.
+@inline function calc_bary_centers!(bary_centers, dg, cache)
+    n = nnodes(dg)
+    @views for element in eachelement(dg, cache)
+        bary_centers[1, element] = sum(cache.elements.node_coordinates[1, :, :,
+                                                                       element]) / n^2
+        bary_centers[2, element] = sum(cache.elements.node_coordinates[2, :, :,
+                                                                       element]) / n^2
+    end
+    return nothing
+end
+
+# Compute the shortest distance from a `point` to the surface of each element
+# using the available `node_coordinates`. Also return the index pair of this
+# minimum surface point location. We compute and store the squared norm in
+# `min_distance2` to avoid the computationally more expensive square roots.
+# Note! Could be made more accurate if the `node_coordinates` were super-sampled
+# and reinterpolated onto a higher polynomial degree before this computation.
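+# As a formula, matching the implementation below, this computes for each element `k`
+#   min_distance2[k] = min_{(i,j) on the element surface} || x_{(i,j),k} - point ||^2,
+# where a node `(i,j)` lies on the surface if `i == 1`, `i == n`, `j == 1`, or `j == n`.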
+function calc_minimum_surface_distance(point, node_coordinates,
+                                       dg, mesh::UnstructuredMesh2D)
+    n = nnodes(dg)
+    min_distance2 = Inf * ones(eltype(mesh.corners), length(mesh))
+    indices = zeros(Int, length(mesh), 2)
+    for k in 1:length(mesh)
+        # used to ensure that only boundary points are used
+        on_surface = MVector(false, false)
+        for j in 1:n
+            on_surface[2] = (j == 1) || (j == n)
+            for i in 1:n
+                on_surface[1] = (i == 1) || (i == n)
+                if !any(on_surface)
+                    continue
+                end
+                node = SVector(node_coordinates[1, i, j, k],
+                               node_coordinates[2, i, j, k])
+                distance2 = sum(abs2, node - point)
+                if distance2 < min_distance2[k]
+                    min_distance2[k] = distance2
+                    indices[k, 1] = i
+                    indices[k, 2] = j
+                end
+            end
+        end
+    end
+
+    return min_distance2, indices
+end
+
+function calc_interpolating_polynomials!(interpolating_polynomials, coordinates,
+                                         element_ids,
+                                         mesh::UnstructuredMesh2D, dg::DGSEM, cache)
+    @unpack nodes = dg.basis
+
+    wbary = barycentric_weights(nodes)
+
+    # Helper array for a straight-sided quadrilateral element
+    corners = zeros(eltype(mesh.corners), 4, 2)
+
+    for index in 1:length(element_ids)
+        # Construct point
+        x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh)))
+
+        # Convert to unit coordinates; procedure differs for straight-sided
+        # versus curvilinear elements
+        element = element_ids[index]
+        if !mesh.element_is_curved[element]
+            for j in 1:2, i in 1:4
+                # Pull the (x,y) values of the element corners from the global corners array
+                corners[i, j] = mesh.corners[j, mesh.element_node_ids[i, element]]
+            end
+            # Compute coordinates in reference system
+            unit_coordinates = invert_bilinear_interpolation(mesh, x, corners)
+
+            # Sanity check that the computed `unit_coordinates` indeed recover the desired point `x`
+            x_check = straight_side_quad_map(unit_coordinates[1], unit_coordinates[2],
+                                             corners)
+            if !isapprox(x[1], x_check[1]) || !isapprox(x[2], x_check[2])
+                error("failed to compute computational coordinates for the time series point $(x), closest candidate was $(x_check)")
+            end
+        else # mesh.element_is_curved[element]
+            unit_coordinates = invert_transfinite_interpolation(mesh, x,
+                                                                view(mesh.surface_curves,
+                                                                     :, element))
+
+            # Sanity check that the computed `unit_coordinates` indeed recover the desired point `x`
+            x_check = transfinite_quad_map(unit_coordinates[1], unit_coordinates[2],
+                                           view(mesh.surface_curves, :, element))
+            if !isapprox(x[1], x_check[1]) || !isapprox(x[2], x_check[2])
+                error("failed to compute computational coordinates for the time series point $(x), closest candidate was $(x_check)")
+            end
+        end
+
+        # Calculate interpolating polynomial for each dimension, making use of tensor product structure
+        for d in 1:ndims(mesh)
+            interpolating_polynomials[:, d, index] .= lagrange_interpolating_polynomials(unit_coordinates[d],
+                                                                                         nodes,
+                                                                                         wbary)
+        end
+    end
+
+    return interpolating_polynomials
+end
+
+# Use a Newton iteration to determine the computational coordinates
+# (xi, eta) of an (x,y) `point` that is given in physical coordinates
+# by inverting the transformation. For straight-sided elements this
+# amounts to inverting a bi-linear interpolation. For curved
+# elements we invert the transfinite interpolation with linear blending.
+# The residual function for the Newton iteration is
+# r(xi, eta) = X(xi, eta) - point
+# and the Jacobian entries are computed accordingly from either
+# `straight_side_quad_map_metrics` or `transfinite_quad_map_metrics`.
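+# Concretely, one Newton step updates the current guess (xi, eta) via
+#   (xi, eta) <- (xi, eta) - J^{-1} r(xi, eta),
+# and the inverse of the 2x2 Jacobian J = (J11, J12; J21, J22) is applied
+# explicitly, i.e.,
+#   xi  <- xi  - (J22 * r1 - J12 * r2) / (J11 * J22 - J12 * J21),
+#   eta <- eta - (-J21 * r1 + J11 * r2) / (J11 * J22 - J12 * J21).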
+# We exploit the 2x2 nature of the problem and directly compute the matrix
+# inverse to make things faster. The implementations below are inspired by
+# an answer on Stack Overflow (https://stackoverflow.com/a/18332009) where
+# the author explicitly states that their code is released to the public domain.
+@inline function invert_bilinear_interpolation(mesh::UnstructuredMesh2D, point,
+                                               element_corners)
+    # Initial guess for the point (center of the reference element)
+    xi = zero(eltype(point))
+    eta = zero(eltype(point))
+    for k in 1:5 # Newton's method should converge quickly
+        # Compute current x and y coordinate and the Jacobian matrix
+        # J = (X_xi, X_eta; Y_xi, Y_eta)
+        x, y = straight_side_quad_map(xi, eta, element_corners)
+        J11, J12, J21, J22 = straight_side_quad_map_metrics(xi, eta, element_corners)
+
+        # Compute residuals for the Newton iteration for the current (x, y) coordinate
+        r1 = x - point[1]
+        r2 = y - point[2]
+
+        # Newton update that directly applies the inverse of the 2x2 Jacobian matrix
+        inv_detJ = inv(J11 * J22 - J12 * J21)
+
+        # Update with explicitly inverted Jacobian
+        xi = xi - inv_detJ * (J22 * r1 - J12 * r2)
+        eta = eta - inv_detJ * (-J21 * r1 + J11 * r2)
+
+        # Ensure updated point is in the reference element
+        xi = min(max(xi, -1), 1)
+        eta = min(max(eta, -1), 1)
+    end
+
+    return SVector(xi, eta)
+end
+
+@inline function invert_transfinite_interpolation(mesh::UnstructuredMesh2D, point,
+                                                  surface_curves::AbstractVector{<:CurvedSurface})
+    # Initial guess for the point (center of the reference element)
+    xi = zero(eltype(point))
+    eta = zero(eltype(point))
+    for k in 1:5 # Newton's method should converge quickly
+        # Compute current x and y coordinate and the Jacobian matrix
+        # J = (X_xi, X_eta; Y_xi, Y_eta)
+        x, y = transfinite_quad_map(xi, eta, surface_curves)
+        J11, J12, J21, J22 = transfinite_quad_map_metrics(xi, eta, surface_curves)
+
+        # Compute residuals for the Newton iteration for the current (x,y) coordinate
+        r1 = x - point[1]
+        r2 = y - point[2]
+
+        # Newton update that directly applies the inverse of the 2x2 Jacobian matrix
+        inv_detJ = inv(J11 * J22 - J12 * J21)
+
+        # Update with explicitly inverted Jacobian
+        xi = xi - inv_detJ * (J22 * r1 - J12 * r2)
+        eta = eta - inv_detJ * (-J21 * r1 + J11 * r2)
+
+        # Ensure updated point is in the reference element
+        xi = min(max(xi, -1), 1)
+        eta = min(max(eta, -1), 1)
+    end
+
+    return SVector(xi, eta)
+end
+
+function record_state_at_points!(point_data, u, solution_variables,
+                                 n_solution_variables,
+                                 mesh::UnstructuredMesh2D,
+                                 equations, dg::DG, time_series_cache)
+    @unpack element_ids, interpolating_polynomials = time_series_cache
+    old_length = length(first(point_data))
+    new_length = old_length + n_solution_variables
+
+    # Loop over all points/elements that should be recorded
+    for index in 1:length(element_ids)
+        # Extract data array and element id
+        data = point_data[index]
+        element_id = element_ids[index]
+
+        # Make room for new data to be recorded
+        resize!(data, new_length)
+        data[(old_length + 1):new_length] .= zero(eltype(data))
+
+        # Loop over all nodes to compute their contribution to the interpolated values
+        for j in eachnode(dg), i in eachnode(dg)
+            u_node = solution_variables(get_node_vars(u, equations, dg, i, j,
+                                                      element_id), equations)
+
+            for v in 1:length(u_node)
+                data[old_length + v] += (u_node[v]
+                                         * interpolating_polynomials[i, 1, index]
+                                         * interpolating_polynomials[j, 2, index])
+            end
+        end
+    end
+end
+end # @muladd
diff --git a/src/solvers/dgsem/basis_lobatto_legendre.jl
b/src/solvers/dgsem/basis_lobatto_legendre.jl index 9e21b88dfa1..cac1dba9c74 100644 --- a/src/solvers/dgsem/basis_lobatto_legendre.jl +++ b/src/solvers/dgsem/basis_lobatto_legendre.jl @@ -404,7 +404,8 @@ function calc_dsplit(nodes, weights) return dsplit end -# Calculate the polynomial derivative matrix D +# Calculate the polynomial derivative matrix D. +# This implements algorithm 37 "PolynomialDerivativeMatrix" from Kopriva's book. function polynomial_derivative_matrix(nodes) n_nodes = length(nodes) d = zeros(n_nodes, n_nodes) @@ -421,6 +422,7 @@ function polynomial_derivative_matrix(nodes) end # Calculate and interpolation matrix (Vandermonde matrix) between two given sets of nodes +# See algorithm 32 "PolynomialInterpolationMatrix" from Kopriva's book. function polynomial_interpolation_matrix(nodes_in, nodes_out, baryweights_in = barycentric_weights(nodes_in)) n_nodes_in = length(nodes_in) @@ -433,6 +435,7 @@ function polynomial_interpolation_matrix(nodes_in, nodes_out, return vandermonde end +# This implements algorithm 32 "PolynomialInterpolationMatrix" from Kopriva's book. function polynomial_interpolation_matrix!(vandermonde, nodes_in, nodes_out, baryweights_in) @@ -463,7 +466,19 @@ function polynomial_interpolation_matrix!(vandermonde, return vandermonde end -# Calculate the barycentric weights for a given node distribution. +""" + barycentric_weights(nodes) + +Calculate the barycentric weights for a given node distribution, i.e., +```math +w_j = \\frac{1}{ \\prod_{k \\neq j} \\left( x_j - x_k \\right ) } +``` + +For details, see (especially Section 3) +- Jean-Paul Berrut and Lloyd N. Trefethen (2004). + Barycentric Lagrange Interpolation. + [DOI:10.1137/S0036144502417715](https://doi.org/10.1137/S0036144502417715) +""" function barycentric_weights(nodes) n_nodes = length(nodes) weights = ones(n_nodes) @@ -494,12 +509,31 @@ function calc_lhat(x, nodes, weights) return lhat end -# Calculate Lagrange polynomials for a given node distribution. +""" + lagrange_interpolating_polynomials(x, nodes, wbary) + +Calculate Lagrange polynomials for a given node distribution with +associated barycentric weights `wbary` at a given point `x` on the +reference interval ``[-1, 1]``. + +This returns all ``l_j(x)``, i.e., the Lagrange polynomials for each node ``x_j``. +Thus, to obtain the interpolating polynomial ``p(x)`` at ``x``, one has to +multiply the Lagrange polynomials with the nodal values ``u_j`` and sum them up: +``p(x) = \\sum_{j=1}^{n} u_j l_j(x)``. + +For details, see e.g. Section 2 of +- Jean-Paul Berrut and Lloyd N. Trefethen (2004). + Barycentric Lagrange Interpolation. + [DOI:10.1137/S0036144502417715](https://doi.org/10.1137/S0036144502417715) +""" function lagrange_interpolating_polynomials(x, nodes, wbary) n_nodes = length(nodes) polynomials = zeros(n_nodes) for i in 1:n_nodes + # Avoid division by zero when `x` is close to node by using + # the Kronecker-delta property at nodes + # of the Lagrange interpolation polynomials. if isapprox(x, nodes[i], rtol = eps(x)) polynomials[i] = 1 return polynomials @@ -518,6 +552,17 @@ function lagrange_interpolating_polynomials(x, nodes, wbary) return polynomials end +""" + gauss_lobatto_nodes_weights(n_nodes::Integer) + +Computes nodes ``x_j`` and weights ``w_j`` for the (Legendre-)Gauss-Lobatto quadrature. +This implements algorithm 25 "GaussLobattoNodesAndWeights" from the book + +- David A. Kopriva, (2009). + Implementing spectral methods for partial differential equations: + Algorithms for scientists and engineers. 
+ [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) +""" # From FLUXO (but really from blue book by Kopriva) function gauss_lobatto_nodes_weights(n_nodes::Integer) # From Kopriva's book @@ -585,7 +630,7 @@ function gauss_lobatto_nodes_weights(n_nodes::Integer) return nodes, weights end -# From FLUXO (but really from blue book by Kopriva) +# From FLUXO (but really from blue book by Kopriva, algorithm 24) function calc_q_and_l(N::Integer, x::Float64) L_Nm2 = 1.0 L_Nm1 = x @@ -609,7 +654,17 @@ function calc_q_and_l(N::Integer, x::Float64) end calc_q_and_l(N::Integer, x::Real) = calc_q_and_l(N, convert(Float64, x)) -# From FLUXO (but really from blue book by Kopriva) +""" + gauss_nodes_weights(n_nodes::Integer) + +Computes nodes ``x_j`` and weights ``w_j`` for the Gauss-Legendre quadrature. +This implements algorithm 23 "LegendreGaussNodesAndWeights" from the book + +- David A. Kopriva, (2009). + Implementing spectral methods for partial differential equations: + Algorithms for scientists and engineers. + [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) +""" function gauss_nodes_weights(n_nodes::Integer) # From Kopriva's book n_iterations = 10 @@ -666,7 +721,17 @@ function gauss_nodes_weights(n_nodes::Integer) end end -# From FLUXO (but really from blue book by Kopriva) +""" + legendre_polynomial_and_derivative(N::Int, x::Real) + +Computes the Legendre polynomial of degree `N` and its derivative at `x`. +This implements algorithm 22 "LegendrePolynomialAndDerivative" from the book + +- David A. Kopriva, (2009). + Implementing spectral methods for partial differential equations: + Algorithms for scientists and engineers. + [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) +""" function legendre_polynomial_and_derivative(N::Int, x::Real) if N == 0 poly = 1.0 diff --git a/src/solvers/dgsem_tree/containers_2d.jl b/src/solvers/dgsem_tree/containers_2d.jl index 4bfbddead9a..7048739a226 100644 --- a/src/solvers/dgsem_tree/containers_2d.jl +++ b/src/solvers/dgsem_tree/containers_2d.jl @@ -421,6 +421,8 @@ end function init_boundaries!(boundaries, elements, mesh::TreeMesh2D) # Exit early if there are no boundaries to initialize if nboundaries(boundaries) == 0 + # In this case n_boundaries_per_direction still needs to be reset! 
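+        # Otherwise, values from a previous initialization of the container would be kept.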
+ boundaries.n_boundaries_per_direction = SVector(0, 0, 0, 0) return nothing end diff --git a/src/solvers/dgsem_unstructured/dg_2d.jl b/src/solvers/dgsem_unstructured/dg_2d.jl index 988e995d6b7..ce602e178d8 100644 --- a/src/solvers/dgsem_unstructured/dg_2d.jl +++ b/src/solvers/dgsem_unstructured/dg_2d.jl @@ -95,49 +95,51 @@ function prolong2interfaces!(cache, u, mesh::UnstructuredMesh2D, equations, surface_integral, dg::DG) @unpack interfaces = cache + @unpack element_ids, element_side_ids = interfaces + interfaces_u = interfaces.u @threaded for interface in eachinterface(dg, cache) - primary_element = interfaces.element_ids[1, interface] - secondary_element = interfaces.element_ids[2, interface] + primary_element = element_ids[1, interface] + secondary_element = element_ids[2, interface] - primary_side = interfaces.element_side_ids[1, interface] - secondary_side = interfaces.element_side_ids[2, interface] + primary_side = element_side_ids[1, interface] + secondary_side = element_side_ids[2, interface] if primary_side == 1 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, i, 1, primary_element] + interfaces_u[1, v, i, interface] = u[v, i, 1, primary_element] end elseif primary_side == 2 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, nnodes(dg), i, primary_element] + interfaces_u[1, v, i, interface] = u[v, nnodes(dg), i, primary_element] end elseif primary_side == 3 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, i, nnodes(dg), primary_element] + interfaces_u[1, v, i, interface] = u[v, i, nnodes(dg), primary_element] end else # primary_side == 4 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, 1, i, primary_element] + interfaces_u[1, v, i, interface] = u[v, 1, i, primary_element] end end if secondary_side == 1 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, i, 1, secondary_element] + interfaces_u[2, v, i, interface] = u[v, i, 1, secondary_element] end elseif secondary_side == 2 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, nnodes(dg), i, + interfaces_u[2, v, i, interface] = u[v, nnodes(dg), i, secondary_element] end elseif secondary_side == 3 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, i, nnodes(dg), + interfaces_u[2, v, i, interface] = u[v, i, nnodes(dg), secondary_element] end else # secondary_side == 4 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, 1, i, secondary_element] + interfaces_u[2, v, i, interface] = u[v, 1, i, secondary_element] end end end @@ -278,26 +280,28 @@ function prolong2boundaries!(cache, u, mesh::UnstructuredMesh2D, equations, surface_integral, dg::DG) @unpack boundaries = cache + @unpack element_id, element_side_id = boundaries + boundaries_u = boundaries.u @threaded for boundary in eachboundary(dg, cache) - element = boundaries.element_id[boundary] - side = boundaries.element_side_id[boundary] + element = element_id[boundary] + side = element_side_id[boundary] if side == 1 for l in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, l, boundary] = u[v, l, 1, element] + boundaries_u[v, l, boundary] = u[v, l, 1, element] end elseif side == 2 for l in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, l, boundary] = u[v, nnodes(dg), l, element] + boundaries_u[v, l, boundary] = u[v, 
nnodes(dg), l, element] end elseif side == 3 for l in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, l, boundary] = u[v, l, nnodes(dg), element] + boundaries_u[v, l, boundary] = u[v, l, nnodes(dg), element] end else # side == 4 for l in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, l, boundary] = u[v, 1, l, element] + boundaries_u[v, l, boundary] = u[v, 1, l, element] end end end diff --git a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl index c35772cdf18..cbe11ac6ac9 100644 --- a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl +++ b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl @@ -28,11 +28,11 @@ end # 2D volume integral contributions for `VolumeIntegralStrongForm` # OBS! This is the standard (not de-aliased) form of the volume integral. # So it is not provably stable for variable coefficients due to the the metric terms. -@inline function calc_volume_integral!(du, u, - mesh::UnstructuredMesh2D, - nonconservative_terms::False, equations, - volume_integral::VolumeIntegralStrongForm, - dg::FDSBP, cache) +function calc_volume_integral!(du, u, + mesh::UnstructuredMesh2D, + nonconservative_terms::False, equations, + volume_integral::VolumeIntegralStrongForm, + dg::FDSBP, cache) D = dg.basis # SBP derivative operator @unpack f_threaded = cache @unpack contravariant_vectors = cache.elements diff --git a/test/Project.toml b/test/Project.toml index 1a042dab44f..1491d7a5c5f 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -2,6 +2,7 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" +ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7" FFMPEG = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" @@ -16,6 +17,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Aqua = "0.8" CairoMakie = "0.10" Downloads = "1" +ExplicitImports = "1.0.1" FFMPEG = "0.4" ForwardDiff = "0.10.24" LinearAlgebra = "1" diff --git a/test/test_aqua.jl b/test/test_aqua.jl index 93457caba28..04c4a533d26 100644 --- a/test/test_aqua.jl +++ b/test/test_aqua.jl @@ -1,6 +1,7 @@ module TestAqua using Aqua +using ExplicitImports: check_no_implicit_imports, check_no_stale_explicit_imports using Test using Trixi @@ -13,6 +14,14 @@ include("test_trixi.jl") # in src/solvers/dgmulti/sbp.jl piracies = (treat_as_own = [Trixi.StartUpDG.RefElemData, Trixi.StartUpDG.MeshData],)) + @test isnothing(check_no_implicit_imports(Trixi, + skip = (Core, Base, Trixi.P4est, Trixi.T8code, + Trixi.EllipsisNotation))) + @test isnothing(check_no_stale_explicit_imports(Trixi, + ignore = (:derivative_operator, + :periodic_derivative_operator, + :upwind_operators, + Symbol("@batch")))) end end #module diff --git a/test/test_tree_1d_euler.jl b/test/test_tree_1d_euler.jl index f26500b411c..784d123128e 100644 --- a/test/test_tree_1d_euler.jl +++ b/test/test_tree_1d_euler.jl @@ -21,7 +21,10 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") 1.6205433861493646e-7, 1.465427772462391e-7, 5.372255111879554e-7, - ]) + ], + # With the default `maxiters = 1` in coverage tests, + # there would be no time series to check against. 
+ coverage_override=(maxiters = 20,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -30,6 +33,18 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") du_ode = similar(u_ode) @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end + # Extra test to make sure the "TimeSeriesCallback" made correct data. + # Extracts data at all points from the first step of the time series and compares it to the + # exact solution and an interpolated reference solution + point_data = [getindex(time_series.affect!.point_data[i], 1:3) for i in 1:3] + exact_data = [initial_condition_convergence_test(time_series.affect!.point_coordinates[i], + time_series.affect!.time[1], + equations) for i in 1:3] + ref_data = [[1.968279088772251, 1.9682791565395945, 3.874122958278797], + [2.0654816955822017, 2.0654817326611883, 4.26621471136323], + [2.0317209235018936, 2.0317209516429506, 4.127889808862571]] + @test point_data≈exact_data atol=1e-6 + @test point_data ≈ ref_data end @trixi_testset "elixir_euler_convergence_pure_fv.jl" begin diff --git a/test/test_tree_3d_euler.jl b/test/test_tree_3d_euler.jl index e9e2b82fec5..47669dce2fb 100644 --- a/test/test_tree_3d_euler.jl +++ b/test/test_tree_3d_euler.jl @@ -25,7 +25,10 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") 0.032179231640894645, 0.032179231640895534, 0.0655408023333299, - ]) + ], + # With the default `maxiters = 1` in coverage tests, + # there would be no time series to check against. + coverage_override=(maxiters = 20,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -34,6 +37,38 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") du_ode = similar(u_ode) @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end + # Extra test to make sure the "TimeSeriesCallback" made correct data. 
+ # Extracts data at all points from the first step of the time series and compares it to the + # exact solution and an interpolated reference solution + point_data = [getindex(time_series.affect!.point_data[i], 1:5) for i in 1:3] + exact_data = [initial_condition_convergence_test(time_series.affect!.point_coordinates[:, + i], + time_series.affect!.time[1], + equations) for i in 1:3] + ref_data = [ + [ + 1.951156832316166, + 1.952073047561595, + 1.9520730475615966, + 1.9520730475615953, + 3.814390510967551, + ], + [ + 2.0506452262144363, + 2.050727319703708, + 2.0507273197037073, + 2.0507273197037077, + 4.203653999433724, + ], + [ + 2.046982357537558, + 2.0463728824399654, + 2.0463728824399654, + 2.0463728824399645, + 4.190033459318115, + ]] + @test point_data≈exact_data atol=1e-1 + @test point_data ≈ ref_data end @trixi_testset "elixir_euler_convergence_pure_fv.jl" begin diff --git a/test/test_unit.jl b/test/test_unit.jl index 1907a281718..03a78f6918a 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -600,6 +600,7 @@ end end @timed_testset "TimeSeriesCallback" begin + # Test the 2D TreeMesh version of the callback and some warnings @test_nowarn_mod trixi_include(@__MODULE__, joinpath(examples_dir(), "tree_2d_dgsem", "elixir_acoustics_gaussian_source.jl"), diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 8a62dcaec3c..6814250dd47 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -198,6 +198,39 @@ end end end +@trixi_testset "elixir_euler_time_series.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_time_series.jl"), + l2=[ + 6.984024099236519e-5, + 6.289022520363763e-5, + 6.550951878107466e-5, + 0.00016222767700879948, + ], + linf=[ + 0.0005367823248620951, + 0.000671293180158461, + 0.0005656680962440319, + 0.0013910024779804075, + ], + tspan=(0.0, 0.2), + # With the default `maxiters = 1` in coverage tests, + # there would be no time series to check against. + coverage_override=(maxiters = 20,)) + # Extra test that the `TimeSeries` callback creates reasonable data + point_data_1 = time_series.affect!.point_data[1] + @test all(isapprox.(point_data_1[1:4], + [1.9546882708551676, 1.9547149531788077, + 1.9547142161310154, 3.821066781119142])) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_acoustics_gauss_wall.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_acoustics_gauss_wall.jl"), l2=[0.029330394861252995, 0.029345079728907965,