From 1ca37cf2271806d203a832d3d99bfa2c14d3226f Mon Sep 17 00:00:00 2001
From: Benedict <135045760+bgeihe@users.noreply.github.com>
Date: Fri, 8 Mar 2024 07:48:24 +0100
Subject: [PATCH 01/19] set capacity also when using MPI (#1862)

Co-authored-by: Hendrik Ranocha
---
 src/meshes/mesh_io.jl | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl
index 337e33e6969..28e6efa8c57 100644
--- a/src/meshes/mesh_io.jl
+++ b/src/meshes/mesh_io.jl
@@ -74,6 +74,7 @@ function save_mesh_file(mesh::TreeMesh, output_directory, timestep,
         attributes(file)["mesh_type"] = get_name(mesh)
         attributes(file)["ndims"] = ndims(mesh)
         attributes(file)["n_cells"] = n_cells
+        attributes(file)["capacity"] = mesh.tree.capacity
         attributes(file)["n_leaf_cells"] = count_leaf_cells(mesh.tree)
         attributes(file)["minimum_level"] = minimum_level(mesh.tree)
         attributes(file)["maximum_level"] = maximum_level(mesh.tree)

From f235619a49dbc8bd7d84a77269558a64b21a155f Mon Sep 17 00:00:00 2001
From: Daniel Doehring
Date: Fri, 8 Mar 2024 12:20:45 +0100
Subject: [PATCH 02/19] Mention hyphen/dash caveat for boundary symbols (#1866)

* Mention hyphen/dash caveat for boundary symbols

* typo

* elaborate
---
 docs/src/meshes/p4est_mesh.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/docs/src/meshes/p4est_mesh.md b/docs/src/meshes/p4est_mesh.md
index 3b35ffcad6f..a14551b3f46 100644
--- a/docs/src/meshes/p4est_mesh.md
+++ b/docs/src/meshes/p4est_mesh.md
@@ -256,6 +256,8 @@ By doing so, only nodesets with a label present in `boundary_symbols` are treate
 Other nodesets that could be used for diagnostics are not treated as external boundaries.
 Note that there is a leading colon `:` compared to the label in the `.inp` mesh file.
 This is required to turn the label into a [`Symbol`](https://docs.julialang.org/en/v1/manual/metaprogramming/#Symbols).
+**Important**: In Julia, a symbol _cannot_ contain a hyphen/dash `-`, i.e., `:BC-1` is _not_ a valid symbol.
+Keep this in mind when importing boundaries; you might have to convert hyphens/dashes `-` to underscores `_` in the `.inp` mesh file, i.e., use `BC_1` instead of `BC-1`.
 A 2D example for this mesh, which is read in from an unstructured mesh file created with `gmsh`, is presented in
 `examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl`.
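To illustrate the caveat documented in the patch above, here is a minimal sketch (not part of the patch; `sanitize_boundary_label` is a hypothetical helper, not a Trixi.jl function) showing why a hyphenated literal fails and how a label read from an `.inp` file could be converted before use in `boundary_symbols`:

```julia
# The literal `:BC-1` parses as the expression `:BC - 1` (a subtraction),
# not as a single symbol, so it cannot serve as a boundary label.
# A label string read from an `.inp` file can be sanitized programmatically:
sanitize_boundary_label(label::AbstractString) = Symbol(replace(label, "-" => "_"))

sanitize_boundary_label("BC-1")  # returns :BC_1, usable in `boundary_symbols`
```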
From bd060b6d140b2c4f8f717a89899e867593e141c1 Mon Sep 17 00:00:00 2001 From: Andrew Winters Date: Thu, 14 Mar 2024 15:49:16 +0100 Subject: [PATCH 03/19] Add functionality for `TimeSeries` callback on `UnstructuredMesh2D` (#1855) * add functionality for TimeSeries callback on UnstructuredMesh2D * Update src/callbacks_step/time_series_dg.jl Co-authored-by: Hendrik Ranocha * Apply suggestions from code review Co-authored-by: Daniel Doehring * add strategy to correctly locate a gauge point within a curvilinear element * add sanity check that the Newton solution is correct * run formatter * implement a more general approach that also works on curved element without issue * run formatter * forgot to format the examples * Apply suggestions from code review Co-authored-by: Hendrik Ranocha Co-authored-by: Daniel Doehring * working version of the element finding routine * run formatter * add new elixir for the time series callback * add additional test for the time series callback on an unstructured mesh * add appropriate test * update docstring * add comment about the barycenter computation * add simplifications and comments from code review * adjust variable name to avoid ugly formatting * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * fix variable name * remove Experimental status from the TimeSeriesCallback * move new TimeSeries test into the unit testing * add output_directory creation if not already done. Necessary if this callback is used without the SaveSolution callback * formatting * update test mesh to have one straight-sided element to trigger inverse bi-linear interpolation * update test values * add news item * forgot to update all new test values on the new mesh * update tests and use coverage override to avoid redundancy --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Daniel Doehring --- NEWS.md | 12 +- .../elixir_euler_time_series.jl | 115 ++++++++ src/callbacks_step/time_series.jl | 9 +- src/callbacks_step/time_series_dg.jl | 6 +- src/callbacks_step/time_series_dg2d.jl | 279 +++++++++++++++++- test/test_unit.jl | 1 + test/test_unstructured_2d.jl | 33 +++ 7 files changed, 443 insertions(+), 12 deletions(-) create mode 100644 examples/unstructured_2d_dgsem/elixir_euler_time_series.jl diff --git a/NEWS.md b/NEWS.md index d70504d8c85..5b08d51ab89 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,13 +4,19 @@ Trixi.jl follows the interpretation of [semantic versioning (semver)](https://ju used in the Julia ecosystem. Notable changes will be documented in this file for human readability. +## Changes in the v0.7 lifecycle + +#### Added +- Implementation of `TimeSeriesCallback` for curvilinear meshes on `UnstructuredMesh2D`. + + ## Changes when updating to v0.7 from v0.6.x #### Added #### Changed -- The default wave speed estimate used within `flux_hll` is now `min_max_speed_davis` +- The default wave speed estimate used within `flux_hll` is now `min_max_speed_davis` instead of `min_max_speed_naive`. #### Deprecated @@ -26,7 +32,7 @@ for human readability. #### Added - AMR for hyperbolic-parabolic equations on 3D `P4estMesh` - `flux_hllc` on non-cartesian meshes for `CompressibleEulerEquations{2,3}D` -- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, +- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, can now be digested by Trixi in 2D and 3D. 
- Subcell (positivity) limiting support for nonlinear variables in 2D for `TreeMesh` - Added Lighthill-Whitham-Richards (LWR) traffic model @@ -40,7 +46,7 @@ for human readability. #### Changed - The wave speed estimates for `flux_hll`, `FluxHLL()` are now consistent across equations. - In particular, the functions `min_max_speed_naive`, `min_max_speed_einfeldt` are now + In particular, the functions `min_max_speed_naive`, `min_max_speed_einfeldt` are now conceptually identical across equations. Users, who have been using `flux_hll` for MHD have now to use `flux_hlle` in order to use the Einfeldt wave speed estimate. diff --git a/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl b/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl new file mode 100644 index 00000000000..13233cdadbc --- /dev/null +++ b/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl @@ -0,0 +1,115 @@ +# An elixir that has an alternative convergence test that uses +# the `TimeSeriesCallback` on several gauge points. Many of the +# gauge points are selected as "stress tests" for the element +# identification, e.g., a gauge point that lies on an +# element corner of a curvilinear mesh + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +# Modify the manufactured solution test to use `L = sqrt(2)` +# in the initial condition and source terms +function initial_condition_convergence_shifted(x, t, + equations::CompressibleEulerEquations2D) + c = 2 + A = 0.1 + L = sqrt(2) + f = 1 / L + ω = 2 * pi * f + ini = c + A * sin(ω * (x[1] + x[2] - t)) + + rho = ini + rho_v1 = ini + rho_v2 = ini + rho_e = ini^2 + + return SVector(rho, rho_v1, rho_v2, rho_e) +end + +@inline function source_terms_convergence_shifted(u, x, t, + equations::CompressibleEulerEquations2D) + # Same settings as in `initial_condition` + c = 2 + A = 0.1 + L = sqrt(2) + f = 1 / L + ω = 2 * pi * f + γ = equations.gamma + + x1, x2 = x + si, co = sincos(ω * (x1 + x2 - t)) + rho = c + A * si + rho_x = ω * A * co + # Note that d/dt rho = -d/dx rho = -d/dy rho. + + tmp = (2 * rho - 1) * (γ - 1) + + du1 = rho_x + du2 = rho_x * (1 + tmp) + du3 = du2 + du4 = 2 * rho_x * (rho + tmp) + + return SVector(du1, du2, du3, du4) +end + +initial_condition = initial_condition_convergence_shifted + +source_term = source_terms_convergence_shifted + +############################################################################### +# Get the DG approximation space + +solver = DGSEM(polydeg = 6, surface_flux = flux_lax_friedrichs) + +############################################################################### +# Get the curved quad mesh from a file (downloads the file if not available locally) + +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/b434e724e3972a9c4ee48d58c80cdcdb/raw/55c916cd8c0294a2d4a836e960dac7247b7c8ccf/mesh_multiple_flips.mesh", + joinpath(@__DIR__, "mesh_multiple_flips.mesh")) + +mesh = UnstructuredMesh2D(mesh_file, periodicity = true) + +############################################################################### +# create the semi discretization object + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_term) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +time_series = TimeSeriesCallback(semi, + [(0.75, 0.7), (1.23, 0.302), (0.8, 1.0), + (0.353553390593274, 0.353553390593274), + (0.505, 1.125), (1.37, 0.89), (0.349, 0.7153), + (0.883883476483184, 0.406586401289607), + (sqrt(2), sqrt(2))]; + interval = 10) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + time_series, + alive_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, RDPK3SpFSAL49(); abstol = 1.0e-6, reltol = 1.0e-6, + ode_default_options()..., callback = callbacks); + +summary_callback() # print the timer summary diff --git a/src/callbacks_step/time_series.jl b/src/callbacks_step/time_series.jl index 7baa6b9c5a1..f6d76f0fb15 100644 --- a/src/callbacks_step/time_series.jl +++ b/src/callbacks_step/time_series.jl @@ -23,8 +23,8 @@ After the last time step, the results are stored in an HDF5 file `filename` in d The real data type `RealT` and data type for solution variables `uEltype` default to the respective types used in the solver and the cache. -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. +Currently this callback is only implemented for [`TreeMesh`](@ref) in 2D +and [`UnstructuredMesh2D`](@ref). """ mutable struct TimeSeriesCallback{RealT <: Real, uEltype <: Real, SolutionVariables, VariableNames, Cache} @@ -96,6 +96,11 @@ function TimeSeriesCallback(mesh, equations, solver, cache, point_coordinates; throw(ArgumentError("`point_coordinates` must be a matrix of size n_points × ndims")) end + # create the output folder if it does not exist already + if mpi_isroot() && !isdir(output_directory) + mkpath(output_directory) + end + # Transpose point_coordinates to our usual format [ndims, n_points] # Note: They are accepted in a different format to allow direct input from `readdlm` point_coordinates_ = permutedims(point_coordinates) diff --git a/src/callbacks_step/time_series_dg.jl b/src/callbacks_step/time_series_dg.jl index 1b63979d579..ae394afbbfd 100644 --- a/src/callbacks_step/time_series_dg.jl +++ b/src/callbacks_step/time_series_dg.jl @@ -5,8 +5,10 @@ @muladd begin #! format: noindent -# Store time series file for a TreeMesh with a DG solver -function save_time_series_file(time_series_callback, mesh::TreeMesh, equations, dg::DG) +# Store time series file for a DG solver +function save_time_series_file(time_series_callback, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + equations, dg::DG) @unpack (interval, solution_variables, variable_names, output_directory, filename, point_coordinates, point_data, time, step, time_series_cache) = time_series_callback diff --git a/src/callbacks_step/time_series_dg2d.jl b/src/callbacks_step/time_series_dg2d.jl index c15945d6e16..ad7c6851c80 100644 --- a/src/callbacks_step/time_series_dg2d.jl +++ b/src/callbacks_step/time_series_dg2d.jl @@ -6,7 +6,9 @@ #! 
format: noindent

 # Creates cache for time series callback
-function create_cache_time_series(point_coordinates, mesh::TreeMesh{2}, dg, cache)
+function create_cache_time_series(point_coordinates,
+                                  mesh::Union{TreeMesh{2}, UnstructuredMesh2D},
+                                  dg, cache)
     # Determine element ids for point coordinates
     element_ids = get_elements_by_coordinates(point_coordinates, mesh, dg, cache)

@@ -68,6 +70,144 @@ function get_elements_by_coordinates!(element_ids, coordinates, mesh::TreeMesh,
     return element_ids
 end

+# Elements on an `UnstructuredMesh2D` are possibly curved. Assume that each
+# element is convex, i.e., all interior angles are less than 180 degrees.
+# This routine computes the shortest distance from a given point to each element
+# surface in the mesh. These distances then indicate possible candidate elements.
+# From these candidates we (essentially) apply a ray casting strategy and identify
+# the element in which the point lies by comparing the ray formed by the point to
+# the nearest boundary to the rays cast by the candidate element barycenters to the
+# boundary. If these rays point in the same direction, then we have identified the
+# desired element location.
+function get_elements_by_coordinates!(element_ids, coordinates,
+                                      mesh::UnstructuredMesh2D,
+                                      dg, cache)
+    if length(element_ids) != size(coordinates, 2)
+        throw(DimensionMismatch("storage length for element ids does not match the number of coordinates"))
+    end
+
+    # Reset element ids - 0 indicates "not (yet) found"
+    element_ids .= 0
+
+    # Compute and save the barycenter of each element
+    bary_centers = zeros(eltype(mesh.corners), 2, mesh.n_elements)
+    calc_bary_centers!(bary_centers, dg, cache)
+
+    # Iterate over coordinates
+    distances = zeros(eltype(mesh.corners), mesh.n_elements)
+    indices = zeros(Int, mesh.n_elements, 2)
+    for index in 1:length(element_ids)
+        # Grab the current point for which the element needs to be found
+        point = SVector(coordinates[1, index],
+                        coordinates[2, index])
+
+        # Compute the minimum distance between the `point` and all the element surfaces
+        # saved into `distances`. The point in `node_coordinates` that gives said minimum
+        # distance on each element is saved in `indices`
+        distances, indices = calc_minimum_surface_distance(point,
+                                                           cache.elements.node_coordinates,
+                                                           dg, mesh)
+
+        # Get the candidate elements where the `point` might live
+        candidates = findall(abs.(minimum(distances) .- distances) .<
+                             500 * eps(eltype(point)))
+
+        # The minimal surface point is on a boundary, so it does not matter which candidate
+        # we use to grab it. So just use the first one
+        surface_point = SVector(cache.elements.node_coordinates[1,
+                                                                indices[candidates[1],
+                                                                        1],
+                                                                indices[candidates[1],
+                                                                        2],
+                                                                candidates[1]],
+                                cache.elements.node_coordinates[2,
+                                                                indices[candidates[1],
+                                                                        1],
+                                                                indices[candidates[1],
+                                                                        2],
+                                                                candidates[1]])
+
+        # Compute the vector pointing from the current `point` toward the surface
+        P = surface_point - point
+
+        # If the vector `P` is the zero vector then this `point` is at an element corner or
+        # on a surface. In this case the choice of a candidate element is ambiguous and
+        # we just use the first candidate. However, solutions might differ at discontinuous
+        # interfaces such that this choice may influence the result.
+        if sum(P .* P) < 500 * eps(eltype(point))
+            element_ids[index] = candidates[1]
+            continue
+        end
+
+        # Loop through all the element candidates until we find a vector from the barycenter
+        # to the surface that points in the same direction as the current `point` vector.
+        # This then gives us the correct element.
+        for element in 1:length(candidates)
+            bary_center = SVector(bary_centers[1, candidates[element]],
+                                  bary_centers[2, candidates[element]])
+            # Vector pointing from the barycenter toward the minimal `surface_point`
+            B = surface_point - bary_center
+            if sum(P .* B) > zero(eltype(bary_center))
+                element_ids[index] = candidates[element]
+                break
+            end
+        end
+    end
+
+    return element_ids
+end
+
+# Use the available `node_coordinates` on each element to compute and save the barycenter.
+# In essence, the barycenter is like an average where all the x and y node coordinates are
+# summed and then we divide by the total number of degrees of freedom on the element, i.e.,
+# the value of `n^2` in two spatial dimensions.
+@inline function calc_bary_centers!(bary_centers, dg, cache)
+    n = nnodes(dg)
+    @views for element in eachelement(dg, cache)
+        bary_centers[1, element] = sum(cache.elements.node_coordinates[1, :, :,
+                                                                       element]) / n^2
+        bary_centers[2, element] = sum(cache.elements.node_coordinates[2, :, :,
+                                                                       element]) / n^2
+    end
+    return nothing
+end
+
+# Compute the shortest distance from a `point` to the surface of each element
+# using the available `node_coordinates`. Also return the index pair of this
+# minimum surface point location. We compute and store the squared norm in
+# `min_distance2` to avoid the computationally more expensive square roots.
+# Note! Could be made more accurate if the `node_coordinates` were super-sampled
+# and reinterpolated onto a higher polynomial degree before this computation.
+function calc_minimum_surface_distance(point, node_coordinates,
+                                       dg, mesh::UnstructuredMesh2D)
+    n = nnodes(dg)
+    min_distance2 = Inf * ones(eltype(mesh.corners), length(mesh))
+    indices = zeros(Int, length(mesh), 2)
+    for k in 1:length(mesh)
+        # used to ensure that only boundary points are used
+        on_surface = MVector(false, false)
+        for j in 1:n
+            on_surface[2] = (j == 1) || (j == n)
+            for i in 1:n
+                on_surface[1] = (i == 1) || (i == n)
+                if !any(on_surface)
+                    continue
+                end
+                node = SVector(node_coordinates[1, i, j, k],
+                               node_coordinates[2, i, j, k])
+                distance2 = sum(abs2, node - point)
+                if distance2 < min_distance2[k]
+                    min_distance2[k] = distance2
+                    indices[k, 1] = i
+                    indices[k, 2] = j
+                end
+            end
+        end
+    end
+
+    return min_distance2, indices
+end
+
 function get_elements_by_coordinates(coordinates, mesh, dg, cache)
     element_ids = Vector{Int}(undef, size(coordinates, 2))
     get_elements_by_coordinates!(element_ids, coordinates, mesh, dg, cache)
@@ -106,8 +246,137 @@ function calc_interpolating_polynomials!(interpolating_polynomials, coordinates,
     return interpolating_polynomials
 end

-function calc_interpolating_polynomials(coordinates, element_ids, mesh::TreeMesh, dg,
-                                        cache)
+function calc_interpolating_polynomials!(interpolating_polynomials, coordinates,
+                                         element_ids,
+                                         mesh::UnstructuredMesh2D, dg::DGSEM, cache)
+    @unpack nodes = dg.basis
+
+    wbary = barycentric_weights(nodes)
+
+    # Helper array for a straight-sided quadrilateral element
+    corners = zeros(eltype(mesh.corners), 4, 2)
+
+    for index in 1:length(element_ids)
+        # Construct point
+        x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh)))
+
+        # Convert to unit coordinates; procedure differs for straight-sided
+        # versus curvilinear elements
+        element = element_ids[index]
+        if !mesh.element_is_curved[element]
+            for j in 1:2, i in 1:4
+                # Pull the (x,y) values of the element corners from the global corners array
+                corners[i, j] = mesh.corners[j, mesh.element_node_ids[i, element]]
+            end
+            # Compute coordinates in reference system
+            unit_coordinates = invert_bilinear_interpolation(mesh, x, corners)
+
+            # Sanity check that the computed `unit_coordinates` indeed recover the desired point `x`
+            x_check = straight_side_quad_map(unit_coordinates[1], unit_coordinates[2],
+                                             corners)
+            if !isapprox(x[1], x_check[1]) || !isapprox(x[2], x_check[2])
+                error("failed to compute computational coordinates for the time series point $(x), closest candidate was $(x_check)")
+            end
+        else # mesh.element_is_curved[element]
+            unit_coordinates = invert_transfinite_interpolation(mesh, x,
+                                                                view(mesh.surface_curves,
+                                                                     :, element))
+
+            # Sanity check that the computed `unit_coordinates` indeed recover the desired point `x`
+            x_check = transfinite_quad_map(unit_coordinates[1], unit_coordinates[2],
+                                           view(mesh.surface_curves, :, element))
+            if !isapprox(x[1], x_check[1]) || !isapprox(x[2], x_check[2])
+                error("failed to compute computational coordinates for the time series point $(x), closest candidate was $(x_check)")
+            end
+        end
+
+        # Calculate interpolating polynomial for each dimension, making use of tensor product structure
+        for d in 1:ndims(mesh)
+            interpolating_polynomials[:, d, index] .= lagrange_interpolating_polynomials(unit_coordinates[d],
+                                                                                         nodes,
+                                                                                         wbary)
+        end
+    end
+
+    return interpolating_polynomials
+end
+
+# Use a Newton iteration to determine the computational coordinates
+# (xi, eta) of an (x,y) `point` that is given in physical coordinates
+# by inverting the transformation. For straight-sided elements this
+# amounts to inverting a bi-linear interpolation. For curved
+# elements we invert the transfinite interpolation with linear blending.
+# The residual function for the Newton iteration is
+#     r(xi, eta) = X(xi, eta) - point
+# and the Jacobian entries are computed accordingly from either
+# `straight_side_quad_map_metrics` or `transfinite_quad_map_metrics`.
+# We exploit the 2x2 nature of the problem and directly compute the matrix
+# inverse to make things faster. The implementations below are inspired by
+# an answer on Stack Overflow (https://stackoverflow.com/a/18332009) where
+# the author explicitly states that their code is released to the public domain.
+@inline function invert_bilinear_interpolation(mesh::UnstructuredMesh2D, point,
+                                               element_corners)
+    # Initial guess for the point (center of the reference element)
+    xi = zero(eltype(point))
+    eta = zero(eltype(point))
+    for k in 1:5 # Newton's method should converge quickly
+        # Compute current x and y coordinate and the Jacobian matrix
+        # J = (X_xi, X_eta; Y_xi, Y_eta)
+        x, y = straight_side_quad_map(xi, eta, element_corners)
+        J11, J12, J21, J22 = straight_side_quad_map_metrics(xi, eta, element_corners)
+
+        # Compute residuals for the Newton iteration for the current (x, y) coordinate
+        r1 = x - point[1]
+        r2 = y - point[2]
+
+        # Newton update that directly applies the inverse of the 2x2 Jacobian matrix
+        inv_detJ = inv(J11 * J22 - J12 * J21)
+
+        # Update with explicitly inverted Jacobian
+        xi = xi - inv_detJ * (J22 * r1 - J12 * r2)
+        eta = eta - inv_detJ * (-J21 * r1 + J11 * r2)
+
+        # Ensure updated point is in the reference element
+        xi = min(max(xi, -1), 1)
+        eta = min(max(eta, -1), 1)
+    end
+
+    return SVector(xi, eta)
+end
+
+@inline function invert_transfinite_interpolation(mesh::UnstructuredMesh2D, point,
+                                                  surface_curves::AbstractVector{<:CurvedSurface})
+    # Initial guess for the point (center of the reference element)
+    xi = zero(eltype(point))
+    eta = zero(eltype(point))
+    for k in 1:5 # Newton's method should converge quickly
+        # Compute current x and y coordinate and the Jacobian matrix
+        # J = (X_xi, X_eta; Y_xi, Y_eta)
+        x, y = transfinite_quad_map(xi, eta, surface_curves)
+        J11, J12, J21, J22 = transfinite_quad_map_metrics(xi, eta, surface_curves)
+
+        # Compute residuals for the Newton iteration for the current (x,y) coordinate
+        r1 = x - point[1]
+        r2 = y - point[2]
+
+        # Newton update that directly applies the inverse of the 2x2 Jacobian matrix
+        inv_detJ = inv(J11 * J22 - J12 * J21)
+
+        # Update with explicitly inverted Jacobian
+        xi = xi - inv_detJ * (J22 * r1 - J12 * r2)
+        eta = eta - inv_detJ * (-J21 * r1 + J11 * r2)
+
+        # Ensure updated point is in the reference element
+        xi = min(max(xi, -1), 1)
+        eta = min(max(eta, -1), 1)
+    end
+
+    return SVector(xi, eta)
+end
+
+function calc_interpolating_polynomials(coordinates, element_ids,
+                                        mesh::Union{TreeMesh, UnstructuredMesh2D},
+                                        dg, cache)
     interpolating_polynomials = Array{real(dg), 3}(undef,
                                                    nnodes(dg), ndims(mesh),
                                                    length(element_ids))
@@ -121,8 +390,8 @@ end

 # Record the solution variables at each given point
 function record_state_at_points!(point_data, u, solution_variables,
                                  n_solution_variables,
-                                 mesh::TreeMesh{2}, equations, dg::DG,
-                                 time_series_cache)
+                                 mesh::Union{TreeMesh{2}, UnstructuredMesh2D},
+                                 equations, dg::DG, time_series_cache)
     @unpack element_ids, interpolating_polynomials = time_series_cache
     old_length = length(first(point_data))
     new_length = old_length + n_solution_variables
diff --git a/test/test_unit.jl
b/test/test_unit.jl index 1907a281718..03a78f6918a 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -600,6 +600,7 @@ end end @timed_testset "TimeSeriesCallback" begin + # Test the 2D TreeMesh version of the callback and some warnings @test_nowarn_mod trixi_include(@__MODULE__, joinpath(examples_dir(), "tree_2d_dgsem", "elixir_acoustics_gaussian_source.jl"), diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 8a62dcaec3c..6814250dd47 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -198,6 +198,39 @@ end end end +@trixi_testset "elixir_euler_time_series.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_time_series.jl"), + l2=[ + 6.984024099236519e-5, + 6.289022520363763e-5, + 6.550951878107466e-5, + 0.00016222767700879948, + ], + linf=[ + 0.0005367823248620951, + 0.000671293180158461, + 0.0005656680962440319, + 0.0013910024779804075, + ], + tspan=(0.0, 0.2), + # With the default `maxiters = 1` in coverage tests, + # there would be no time series to check against. + coverage_override=(maxiters = 20,)) + # Extra test that the `TimeSeries` callback creates reasonable data + point_data_1 = time_series.affect!.point_data[1] + @test all(isapprox.(point_data_1[1:4], + [1.9546882708551676, 1.9547149531788077, + 1.9547142161310154, 3.821066781119142])) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_acoustics_gauss_wall.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_acoustics_gauss_wall.jl"), l2=[0.029330394861252995, 0.029345079728907965, From 9323c2ae47300ce83976fcdaba8f5801e82e41a5 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 14 Mar 2024 15:54:43 +0100 Subject: [PATCH 04/19] set version to v0.7.3 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 6b44af4a3fa..f2f8a10626a 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.3-pre" +version = "0.7.3" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From a528083a1c41d9fd131fc972816881d3276f718e Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 14 Mar 2024 15:54:55 +0100 Subject: [PATCH 05/19] set development version to v0.7.4-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index f2f8a10626a..97da4aec51b 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.7.3" +version = "0.7.4-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From aa9ea20342e3d02445ec2dc53e380c405da3b683 Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Fri, 15 Mar 2024 10:20:43 +0100 Subject: [PATCH 06/19] reset n_boundaries_per_direction (#1870) --- src/solvers/dgsem_tree/containers_2d.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/solvers/dgsem_tree/containers_2d.jl b/src/solvers/dgsem_tree/containers_2d.jl index 4bfbddead9a..7048739a226 100644 --- a/src/solvers/dgsem_tree/containers_2d.jl +++ b/src/solvers/dgsem_tree/containers_2d.jl @@ -421,6 +421,8 @@ end function init_boundaries!(boundaries, elements, mesh::TreeMesh2D) # Exit early if there are no boundaries to initialize if nboundaries(boundaries) == 0 + # In this case n_boundaries_per_direction still needs to be reset! + boundaries.n_boundaries_per_direction = SVector(0, 0, 0, 0) return nothing end From 38a9a5234cb2e5588fced11c7bac5d9441142014 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 15 Mar 2024 10:21:54 +0100 Subject: [PATCH 07/19] remove some minor allocations for threaded FDSBP (#1868) * do not inline calc_volume_integral! for FDSBP * avoid allocations in unstructured prolong2interfaces! * avoid allocations in unstructured prolong2boundaries! --- src/solvers/dgsem_unstructured/dg_2d.jl | 40 ++++++++++++---------- src/solvers/fdsbp_unstructured/fdsbp_2d.jl | 10 +++--- 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/src/solvers/dgsem_unstructured/dg_2d.jl b/src/solvers/dgsem_unstructured/dg_2d.jl index 988e995d6b7..ce602e178d8 100644 --- a/src/solvers/dgsem_unstructured/dg_2d.jl +++ b/src/solvers/dgsem_unstructured/dg_2d.jl @@ -95,49 +95,51 @@ function prolong2interfaces!(cache, u, mesh::UnstructuredMesh2D, equations, surface_integral, dg::DG) @unpack interfaces = cache + @unpack element_ids, element_side_ids = interfaces + interfaces_u = interfaces.u @threaded for interface in eachinterface(dg, cache) - primary_element = interfaces.element_ids[1, interface] - secondary_element = interfaces.element_ids[2, interface] + primary_element = element_ids[1, interface] + secondary_element = element_ids[2, interface] - primary_side = interfaces.element_side_ids[1, interface] - secondary_side = interfaces.element_side_ids[2, interface] + primary_side = element_side_ids[1, interface] + secondary_side = element_side_ids[2, interface] if primary_side == 1 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, i, 1, primary_element] + interfaces_u[1, v, i, interface] = u[v, i, 1, primary_element] end elseif primary_side == 2 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, nnodes(dg), i, primary_element] + interfaces_u[1, v, i, interface] = u[v, nnodes(dg), i, primary_element] end elseif primary_side == 3 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, i, nnodes(dg), primary_element] + interfaces_u[1, v, i, interface] = u[v, i, nnodes(dg), primary_element] end else # primary_side == 4 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, 1, i, primary_element] + interfaces_u[1, v, i, interface] = u[v, 1, i, primary_element] end end if secondary_side == 1 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, i, 1, secondary_element] + interfaces_u[2, v, i, interface] = u[v, i, 1, 
secondary_element]
             end
         elseif secondary_side == 2
             for i in eachnode(dg), v in eachvariable(equations)
-                interfaces.u[2, v, i, interface] = u[v, nnodes(dg), i,
+                interfaces_u[2, v, i, interface] = u[v, nnodes(dg), i,
                                                      secondary_element]
             end
         elseif secondary_side == 3
             for i in eachnode(dg), v in eachvariable(equations)
-                interfaces.u[2, v, i, interface] = u[v, i, nnodes(dg),
+                interfaces_u[2, v, i, interface] = u[v, i, nnodes(dg),
                                                      secondary_element]
             end
         else # secondary_side == 4
             for i in eachnode(dg), v in eachvariable(equations)
-                interfaces.u[2, v, i, interface] = u[v, 1, i, secondary_element]
+                interfaces_u[2, v, i, interface] = u[v, 1, i, secondary_element]
             end
         end
     end
@@ -278,26 +280,28 @@ function prolong2boundaries!(cache, u, mesh::UnstructuredMesh2D, equations,
                              surface_integral, dg::DG)
     @unpack boundaries = cache
+    @unpack element_id, element_side_id = boundaries
+    boundaries_u = boundaries.u

     @threaded for boundary in eachboundary(dg, cache)
-        element = boundaries.element_id[boundary]
-        side = boundaries.element_side_id[boundary]
+        element = element_id[boundary]
+        side = element_side_id[boundary]

         if side == 1
             for l in eachnode(dg), v in eachvariable(equations)
-                boundaries.u[v, l, boundary] = u[v, l, 1, element]
+                boundaries_u[v, l, boundary] = u[v, l, 1, element]
             end
         elseif side == 2
             for l in eachnode(dg), v in eachvariable(equations)
-                boundaries.u[v, l, boundary] = u[v, nnodes(dg), l, element]
+                boundaries_u[v, l, boundary] = u[v, nnodes(dg), l, element]
             end
         elseif side == 3
             for l in eachnode(dg), v in eachvariable(equations)
-                boundaries.u[v, l, boundary] = u[v, l, nnodes(dg), element]
+                boundaries_u[v, l, boundary] = u[v, l, nnodes(dg), element]
             end
         else # side == 4
             for l in eachnode(dg), v in eachvariable(equations)
-                boundaries.u[v, l, boundary] = u[v, 1, l, element]
+                boundaries_u[v, l, boundary] = u[v, 1, l, element]
             end
         end
     end
diff --git a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl
index c35772cdf18..cbe11ac6ac9 100644
--- a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl
+++ b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl
@@ -28,11 +28,11 @@ end
 # 2D volume integral contributions for `VolumeIntegralStrongForm`
 # OBS! This is the standard (not de-aliased) form of the volume integral.
 # So it is not provably stable for variable coefficients due to the metric terms.
-@inline function calc_volume_integral!(du, u, - mesh::UnstructuredMesh2D, - nonconservative_terms::False, equations, - volume_integral::VolumeIntegralStrongForm, - dg::FDSBP, cache) +function calc_volume_integral!(du, u, + mesh::UnstructuredMesh2D, + nonconservative_terms::False, equations, + volume_integral::VolumeIntegralStrongForm, + dg::FDSBP, cache) D = dg.basis # SBP derivative operator @unpack f_threaded = cache @unpack contravariant_vectors = cache.elements From 2dfde7faf3cc74f066d86148ae6c99ed9e58fa79 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Fri, 15 Mar 2024 13:55:10 +0100 Subject: [PATCH 08/19] Docstrings for some methods in basis Lobatto-Legendre (#1874) * Docstrings for some methods in basis LL * double back slash --- .../src/files/scalar_linear_advection_1d.jl | 2 +- src/solvers/dgsem/basis_lobatto_legendre.jl | 77 +++++++++++++++++-- 2 files changed, 72 insertions(+), 7 deletions(-) diff --git a/docs/literate/src/files/scalar_linear_advection_1d.jl b/docs/literate/src/files/scalar_linear_advection_1d.jl index 77ba7b087cc..9b48f29d341 100644 --- a/docs/literate/src/files/scalar_linear_advection_1d.jl +++ b/docs/literate/src/files/scalar_linear_advection_1d.jl @@ -115,7 +115,7 @@ integral = sum(nodes.^3 .* weights) # To approximate the solution, we need to get the polynomial coefficients $\{u_j^{Q_l}\}_{j=0}^N$ # for every element $Q_l$. -# After defining all nodes, we can implement the spatial coordinate $x$ and its initial value $u0$ +# After defining all nodes, we can implement the spatial coordinate $x$ and its initial value $u0 = u(t_0)$ # for every node. x = Matrix{Float64}(undef, length(nodes), n_elements) for element in 1:n_elements diff --git a/src/solvers/dgsem/basis_lobatto_legendre.jl b/src/solvers/dgsem/basis_lobatto_legendre.jl index 9e21b88dfa1..cac1dba9c74 100644 --- a/src/solvers/dgsem/basis_lobatto_legendre.jl +++ b/src/solvers/dgsem/basis_lobatto_legendre.jl @@ -404,7 +404,8 @@ function calc_dsplit(nodes, weights) return dsplit end -# Calculate the polynomial derivative matrix D +# Calculate the polynomial derivative matrix D. +# This implements algorithm 37 "PolynomialDerivativeMatrix" from Kopriva's book. function polynomial_derivative_matrix(nodes) n_nodes = length(nodes) d = zeros(n_nodes, n_nodes) @@ -421,6 +422,7 @@ function polynomial_derivative_matrix(nodes) end # Calculate and interpolation matrix (Vandermonde matrix) between two given sets of nodes +# See algorithm 32 "PolynomialInterpolationMatrix" from Kopriva's book. function polynomial_interpolation_matrix(nodes_in, nodes_out, baryweights_in = barycentric_weights(nodes_in)) n_nodes_in = length(nodes_in) @@ -433,6 +435,7 @@ function polynomial_interpolation_matrix(nodes_in, nodes_out, return vandermonde end +# This implements algorithm 32 "PolynomialInterpolationMatrix" from Kopriva's book. function polynomial_interpolation_matrix!(vandermonde, nodes_in, nodes_out, baryweights_in) @@ -463,7 +466,19 @@ function polynomial_interpolation_matrix!(vandermonde, return vandermonde end -# Calculate the barycentric weights for a given node distribution. +""" + barycentric_weights(nodes) + +Calculate the barycentric weights for a given node distribution, i.e., +```math +w_j = \\frac{1}{ \\prod_{k \\neq j} \\left( x_j - x_k \\right ) } +``` + +For details, see (especially Section 3) +- Jean-Paul Berrut and Lloyd N. Trefethen (2004). + Barycentric Lagrange Interpolation. 
+ [DOI:10.1137/S0036144502417715](https://doi.org/10.1137/S0036144502417715) +""" function barycentric_weights(nodes) n_nodes = length(nodes) weights = ones(n_nodes) @@ -494,12 +509,31 @@ function calc_lhat(x, nodes, weights) return lhat end -# Calculate Lagrange polynomials for a given node distribution. +""" + lagrange_interpolating_polynomials(x, nodes, wbary) + +Calculate Lagrange polynomials for a given node distribution with +associated barycentric weights `wbary` at a given point `x` on the +reference interval ``[-1, 1]``. + +This returns all ``l_j(x)``, i.e., the Lagrange polynomials for each node ``x_j``. +Thus, to obtain the interpolating polynomial ``p(x)`` at ``x``, one has to +multiply the Lagrange polynomials with the nodal values ``u_j`` and sum them up: +``p(x) = \\sum_{j=1}^{n} u_j l_j(x)``. + +For details, see e.g. Section 2 of +- Jean-Paul Berrut and Lloyd N. Trefethen (2004). + Barycentric Lagrange Interpolation. + [DOI:10.1137/S0036144502417715](https://doi.org/10.1137/S0036144502417715) +""" function lagrange_interpolating_polynomials(x, nodes, wbary) n_nodes = length(nodes) polynomials = zeros(n_nodes) for i in 1:n_nodes + # Avoid division by zero when `x` is close to node by using + # the Kronecker-delta property at nodes + # of the Lagrange interpolation polynomials. if isapprox(x, nodes[i], rtol = eps(x)) polynomials[i] = 1 return polynomials @@ -518,6 +552,17 @@ function lagrange_interpolating_polynomials(x, nodes, wbary) return polynomials end +""" + gauss_lobatto_nodes_weights(n_nodes::Integer) + +Computes nodes ``x_j`` and weights ``w_j`` for the (Legendre-)Gauss-Lobatto quadrature. +This implements algorithm 25 "GaussLobattoNodesAndWeights" from the book + +- David A. Kopriva, (2009). + Implementing spectral methods for partial differential equations: + Algorithms for scientists and engineers. + [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) +""" # From FLUXO (but really from blue book by Kopriva) function gauss_lobatto_nodes_weights(n_nodes::Integer) # From Kopriva's book @@ -585,7 +630,7 @@ function gauss_lobatto_nodes_weights(n_nodes::Integer) return nodes, weights end -# From FLUXO (but really from blue book by Kopriva) +# From FLUXO (but really from blue book by Kopriva, algorithm 24) function calc_q_and_l(N::Integer, x::Float64) L_Nm2 = 1.0 L_Nm1 = x @@ -609,7 +654,17 @@ function calc_q_and_l(N::Integer, x::Float64) end calc_q_and_l(N::Integer, x::Real) = calc_q_and_l(N, convert(Float64, x)) -# From FLUXO (but really from blue book by Kopriva) +""" + gauss_nodes_weights(n_nodes::Integer) + +Computes nodes ``x_j`` and weights ``w_j`` for the Gauss-Legendre quadrature. +This implements algorithm 23 "LegendreGaussNodesAndWeights" from the book + +- David A. Kopriva, (2009). + Implementing spectral methods for partial differential equations: + Algorithms for scientists and engineers. + [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) +""" function gauss_nodes_weights(n_nodes::Integer) # From Kopriva's book n_iterations = 10 @@ -666,7 +721,17 @@ function gauss_nodes_weights(n_nodes::Integer) end end -# From FLUXO (but really from blue book by Kopriva) +""" + legendre_polynomial_and_derivative(N::Int, x::Real) + +Computes the Legendre polynomial of degree `N` and its derivative at `x`. +This implements algorithm 22 "LegendrePolynomialAndDerivative" from the book + +- David A. Kopriva, (2009). + Implementing spectral methods for partial differential equations: + Algorithms for scientists and engineers. 
+ [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) +""" function legendre_polynomial_and_derivative(N::Int, x::Real) if N == 0 poly = 1.0 From 17ab101d4dc62382bfd83f449d3ca602e20c23ca Mon Sep 17 00:00:00 2001 From: Patrick Ersing <114223904+patrickersing@users.noreply.github.com> Date: Thu, 21 Mar 2024 16:46:03 +0100 Subject: [PATCH 09/19] Extend TimeSeriesCallback for TreeMesh1D/3D (#1873) * add TimeSeriesCallback support for TreeMesh1D * add TimeSeriesCallback support for TreeMesh3D * add testing * update tests * reorganize files structure for TimeSeriesCallback * update test values for julia 1.9 * update news.md * rename time_series_dg2d.jl, add comments from code review * apply formatter --- NEWS.md | 3 +- .../elixir_euler_source_terms.jl | 3 + .../elixir_euler_source_terms.jl | 5 + src/callbacks_step/time_series.jl | 6 +- src/callbacks_step/time_series_dg.jl | 37 ++++ src/callbacks_step/time_series_dg_tree.jl | 185 ++++++++++++++++++ ...dg2d.jl => time_series_dg_unstructured.jl} | 119 +---------- test/test_tree_1d_euler.jl | 17 +- test/test_tree_3d_euler.jl | 37 +++- 9 files changed, 288 insertions(+), 124 deletions(-) create mode 100644 src/callbacks_step/time_series_dg_tree.jl rename src/callbacks_step/{time_series_dg2d.jl => time_series_dg_unstructured.jl} (76%) diff --git a/NEWS.md b/NEWS.md index 5b08d51ab89..022252e61a9 100644 --- a/NEWS.md +++ b/NEWS.md @@ -7,7 +7,8 @@ for human readability. ## Changes in the v0.7 lifecycle #### Added -- Implementation of `TimeSeriesCallback` for curvilinear meshes on `UnstructuredMesh2D`. +- Implementation of `TimeSeriesCallback` for curvilinear meshes on `UnstructuredMesh2D` and extension + to 1D and 3D on `TreeMesh`. ## Changes when updating to v0.7 from v0.6.x diff --git a/examples/tree_1d_dgsem/elixir_euler_source_terms.jl b/examples/tree_1d_dgsem/elixir_euler_source_terms.jl index 555910f69f0..cb8a09057d9 100644 --- a/examples/tree_1d_dgsem/elixir_euler_source_terms.jl +++ b/examples/tree_1d_dgsem/elixir_euler_source_terms.jl @@ -44,9 +44,12 @@ save_solution = SaveSolutionCallback(interval = 100, stepsize_callback = StepsizeCallback(cfl = 0.8) +time_series = TimeSeriesCallback(semi, [0.0, 0.33, 1.0], interval = 10) + callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, + time_series, stepsize_callback) ############################################################################### diff --git a/examples/tree_3d_dgsem/elixir_euler_source_terms.jl b/examples/tree_3d_dgsem/elixir_euler_source_terms.jl index f0246c30490..021fd09f316 100644 --- a/examples/tree_3d_dgsem/elixir_euler_source_terms.jl +++ b/examples/tree_3d_dgsem/elixir_euler_source_terms.jl @@ -41,9 +41,14 @@ save_solution = SaveSolutionCallback(interval = 100, stepsize_callback = StepsizeCallback(cfl = 0.6) +time_series = TimeSeriesCallback(semi, + [(0.0, 0.0, 0.0), (0.33, 0.33, 0.33), (1.0, 1.0, 1.0)], + interval = 10) + callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, + time_series, stepsize_callback) ############################################################################### diff --git a/src/callbacks_step/time_series.jl b/src/callbacks_step/time_series.jl index f6d76f0fb15..ae18c85700d 100644 --- a/src/callbacks_step/time_series.jl +++ b/src/callbacks_step/time_series.jl @@ -23,8 +23,7 @@ After the last time step, the results are stored in an HDF5 file `filename` in d The real data type `RealT` and data type for solution variables `uEltype` default to the respective 
types used in the solver and the cache. -Currently this callback is only implemented for [`TreeMesh`](@ref) in 2D -and [`UnstructuredMesh2D`](@ref). +Currently this callback is only implemented for [`TreeMesh`](@ref) and [`UnstructuredMesh2D`](@ref). """ mutable struct TimeSeriesCallback{RealT <: Real, uEltype <: Real, SolutionVariables, VariableNames, Cache} @@ -218,5 +217,6 @@ function (time_series_callback::TimeSeriesCallback)(integrator) end include("time_series_dg.jl") -include("time_series_dg2d.jl") +include("time_series_dg_tree.jl") +include("time_series_dg_unstructured.jl") end # @muladd diff --git a/src/callbacks_step/time_series_dg.jl b/src/callbacks_step/time_series_dg.jl index ae394afbbfd..3781a10662d 100644 --- a/src/callbacks_step/time_series_dg.jl +++ b/src/callbacks_step/time_series_dg.jl @@ -34,4 +34,41 @@ function save_time_series_file(time_series_callback, end end end + +# Creates cache for time series callback +function create_cache_time_series(point_coordinates, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + dg, cache) + # Determine element ids for point coordinates + element_ids = get_elements_by_coordinates(point_coordinates, mesh, dg, cache) + + # Calculate & store Lagrange interpolation polynomials + interpolating_polynomials = calc_interpolating_polynomials(point_coordinates, + element_ids, mesh, + dg, cache) + + time_series_cache = (; element_ids, interpolating_polynomials) + + return time_series_cache +end + +function get_elements_by_coordinates(coordinates, mesh, dg, cache) + element_ids = Vector{Int}(undef, size(coordinates, 2)) + get_elements_by_coordinates!(element_ids, coordinates, mesh, dg, cache) + + return element_ids +end + +function calc_interpolating_polynomials(coordinates, element_ids, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + dg, cache) + interpolating_polynomials = Array{real(dg), 3}(undef, + nnodes(dg), ndims(mesh), + length(element_ids)) + calc_interpolating_polynomials!(interpolating_polynomials, coordinates, element_ids, + mesh, dg, + cache) + + return interpolating_polynomials +end end # @muladd diff --git a/src/callbacks_step/time_series_dg_tree.jl b/src/callbacks_step/time_series_dg_tree.jl new file mode 100644 index 00000000000..37d4e6ea705 --- /dev/null +++ b/src/callbacks_step/time_series_dg_tree.jl @@ -0,0 +1,185 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! 
format: noindent + +# Find element ids containing coordinates given as a matrix [ndims, npoints] +function get_elements_by_coordinates!(element_ids, coordinates, mesh::TreeMesh, dg, + cache) + if length(element_ids) != size(coordinates, 2) + throw(DimensionMismatch("storage length for element ids does not match the number of coordinates")) + end + + @unpack cell_ids = cache.elements + @unpack tree = mesh + + # Reset element ids - 0 indicates "not (yet) found" + element_ids .= 0 + found_elements = 0 + + # Iterate over all elements + for element in eachelement(dg, cache) + # Get cell id + cell_id = cell_ids[element] + + # Iterate over coordinates + for index in 1:length(element_ids) + # Skip coordinates for which an element has already been found + if element_ids[index] > 0 + continue + end + + # Construct point + x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh))) + + # Skip if point is not in cell + if !is_point_in_cell(tree, x, cell_id) + continue + end + + # Otherwise point is in cell and thus in element + element_ids[index] = element + found_elements += 1 + end + + # Exit loop if all elements have already been found + if found_elements == length(element_ids) + break + end + end + + return element_ids +end + +# Calculate the interpolating polynomials to extract data at the given coordinates +# The coordinates are known to be located in the respective element in `element_ids` +function calc_interpolating_polynomials!(interpolating_polynomials, coordinates, + element_ids, + mesh::TreeMesh, dg::DGSEM, cache) + @unpack tree = mesh + @unpack nodes = dg.basis + + wbary = barycentric_weights(nodes) + + for index in 1:length(element_ids) + # Construct point + x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh))) + + # Convert to unit coordinates + cell_id = cache.elements.cell_ids[element_ids[index]] + cell_coordinates_ = cell_coordinates(tree, cell_id) + cell_length = length_at_cell(tree, cell_id) + unit_coordinates = (x .- cell_coordinates_) * 2 / cell_length + + # Calculate interpolating polynomial for each dimension, making use of tensor product structure + for d in 1:ndims(mesh) + interpolating_polynomials[:, d, index] .= lagrange_interpolating_polynomials(unit_coordinates[d], + nodes, + wbary) + end + end + + return interpolating_polynomials +end + +# Record the solution variables at each given point for the 1D case +function record_state_at_points!(point_data, u, solution_variables, + n_solution_variables, + mesh::TreeMesh{1}, equations, dg::DG, + time_series_cache) + @unpack element_ids, interpolating_polynomials = time_series_cache + old_length = length(first(point_data)) + new_length = old_length + n_solution_variables + + # Loop over all points/elements that should be recorded + for index in 1:length(element_ids) + # Extract data array and element id + data = point_data[index] + element_id = element_ids[index] + + # Make room for new data to be recorded + resize!(data, new_length) + data[(old_length + 1):new_length] .= zero(eltype(data)) + + # Loop over all nodes to compute their contribution to the interpolated values + for i in eachnode(dg) + u_node = solution_variables(get_node_vars(u, equations, dg, i, + element_id), equations) + + for v in 1:length(u_node) + data[old_length + v] += (u_node[v] * + interpolating_polynomials[i, 1, index]) + end + end + end +end + +# Record the solution variables at each given point for the 2D case +function record_state_at_points!(point_data, u, solution_variables, + n_solution_variables, + mesh::TreeMesh{2}, + equations, dg::DG, 
time_series_cache) + @unpack element_ids, interpolating_polynomials = time_series_cache + old_length = length(first(point_data)) + new_length = old_length + n_solution_variables + + # Loop over all points/elements that should be recorded + for index in 1:length(element_ids) + # Extract data array and element id + data = point_data[index] + element_id = element_ids[index] + + # Make room for new data to be recorded + resize!(data, new_length) + data[(old_length + 1):new_length] .= zero(eltype(data)) + + # Loop over all nodes to compute their contribution to the interpolated values + for j in eachnode(dg), i in eachnode(dg) + u_node = solution_variables(get_node_vars(u, equations, dg, i, j, + element_id), equations) + + for v in 1:length(u_node) + data[old_length + v] += (u_node[v] + * interpolating_polynomials[i, 1, index] + * interpolating_polynomials[j, 2, index]) + end + end + end +end + +# Record the solution variables at each given point for the 3D case +function record_state_at_points!(point_data, u, solution_variables, + n_solution_variables, + mesh::TreeMesh{3}, equations, dg::DG, + time_series_cache) + @unpack element_ids, interpolating_polynomials = time_series_cache + old_length = length(first(point_data)) + new_length = old_length + n_solution_variables + + # Loop over all points/elements that should be recorded + for index in 1:length(element_ids) + # Extract data array and element id + data = point_data[index] + element_id = element_ids[index] + + # Make room for new data to be recorded + resize!(data, new_length) + data[(old_length + 1):new_length] .= zero(eltype(data)) + + # Loop over all nodes to compute their contribution to the interpolated values + for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) + u_node = solution_variables(get_node_vars(u, equations, dg, i, j, k, + element_id), equations) + + for v in 1:length(u_node) + data[old_length + v] += (u_node[v] + * interpolating_polynomials[i, 1, index] + * interpolating_polynomials[j, 2, index] + * interpolating_polynomials[k, 3, index]) + end + end + end +end +end # @muladd diff --git a/src/callbacks_step/time_series_dg2d.jl b/src/callbacks_step/time_series_dg_unstructured.jl similarity index 76% rename from src/callbacks_step/time_series_dg2d.jl rename to src/callbacks_step/time_series_dg_unstructured.jl index ad7c6851c80..f6d1bb48f24 100644 --- a/src/callbacks_step/time_series_dg2d.jl +++ b/src/callbacks_step/time_series_dg_unstructured.jl @@ -5,71 +5,6 @@ @muladd begin #! 
format: noindent -# Creates cache for time series callback -function create_cache_time_series(point_coordinates, - mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, - dg, cache) - # Determine element ids for point coordinates - element_ids = get_elements_by_coordinates(point_coordinates, mesh, dg, cache) - - # Calculate & store Lagrange interpolation polynomials - interpolating_polynomials = calc_interpolating_polynomials(point_coordinates, - element_ids, mesh, - dg, cache) - - time_series_cache = (; element_ids, interpolating_polynomials) - - return time_series_cache -end - -# Find element ids containing coordinates given as a matrix [ndims, npoints] -function get_elements_by_coordinates!(element_ids, coordinates, mesh::TreeMesh, dg, - cache) - if length(element_ids) != size(coordinates, 2) - throw(DimensionMismatch("storage length for element ids does not match the number of coordinates")) - end - - @unpack cell_ids = cache.elements - @unpack tree = mesh - - # Reset element ids - 0 indicates "not (yet) found" - element_ids .= 0 - found_elements = 0 - - # Iterate over all elements - for element in eachelement(dg, cache) - # Get cell id - cell_id = cell_ids[element] - - # Iterate over coordinates - for index in 1:length(element_ids) - # Skip coordinates for which an element has already been found - if element_ids[index] > 0 - continue - end - - # Construct point - x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh))) - - # Skip if point is not in cell - if !is_point_in_cell(tree, x, cell_id) - continue - end - - # Otherwise point is in cell and thus in element - element_ids[index] = element - found_elements += 1 - end - - # Exit loop if all elements have already been found - if found_elements == length(element_ids) - break - end - end - - return element_ids -end - # Elements on an `UnstructuredMesh2D` are possibly curved. Assume that each # element is convex, i.e., all interior angles are less than 180 degrees. 
# This routine computes the shortest distance from a given point to each element @@ -208,44 +143,6 @@ function calc_minimum_surface_distance(point, node_coordinates, return min_distance2, indices end -function get_elements_by_coordinates(coordinates, mesh, dg, cache) - element_ids = Vector{Int}(undef, size(coordinates, 2)) - get_elements_by_coordinates!(element_ids, coordinates, mesh, dg, cache) - - return element_ids -end - -# Calculate the interpolating polynomials to extract data at the given coordinates -# The coordinates are known to be located in the respective element in `element_ids` -function calc_interpolating_polynomials!(interpolating_polynomials, coordinates, - element_ids, - mesh::TreeMesh, dg::DGSEM, cache) - @unpack tree = mesh - @unpack nodes = dg.basis - - wbary = barycentric_weights(nodes) - - for index in 1:length(element_ids) - # Construct point - x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh))) - - # Convert to unit coordinates - cell_id = cache.elements.cell_ids[element_ids[index]] - cell_coordinates_ = cell_coordinates(tree, cell_id) - cell_length = length_at_cell(tree, cell_id) - unit_coordinates = (x .- cell_coordinates_) * 2 / cell_length - - # Calculate interpolating polynomial for each dimension, making use of tensor product structure - for d in 1:ndims(mesh) - interpolating_polynomials[:, d, index] .= lagrange_interpolating_polynomials(unit_coordinates[d], - nodes, - wbary) - end - end - - return interpolating_polynomials -end - function calc_interpolating_polynomials!(interpolating_polynomials, coordinates, element_ids, mesh::UnstructuredMesh2D, dg::DGSEM, cache) @@ -374,23 +271,9 @@ end return SVector(xi, eta) end -function calc_interpolating_polynomials(coordinates, element_ids, - mesh::Union{TreeMesh, UnstructuredMesh2D}, - dg, cache) - interpolating_polynomials = Array{real(dg), 3}(undef, - nnodes(dg), ndims(mesh), - length(element_ids)) - calc_interpolating_polynomials!(interpolating_polynomials, coordinates, element_ids, - mesh, dg, - cache) - - return interpolating_polynomials -end - -# Record the solution variables at each given point function record_state_at_points!(point_data, u, solution_variables, n_solution_variables, - mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, + mesh::UnstructuredMesh2D, equations, dg::DG, time_series_cache) @unpack element_ids, interpolating_polynomials = time_series_cache old_length = length(first(point_data)) diff --git a/test/test_tree_1d_euler.jl b/test/test_tree_1d_euler.jl index f26500b411c..784d123128e 100644 --- a/test/test_tree_1d_euler.jl +++ b/test/test_tree_1d_euler.jl @@ -21,7 +21,10 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") 1.6205433861493646e-7, 1.465427772462391e-7, 5.372255111879554e-7, - ]) + ], + # With the default `maxiters = 1` in coverage tests, + # there would be no time series to check against. + coverage_override=(maxiters = 20,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -30,6 +33,18 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") du_ode = similar(u_ode) @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end + # Extra test to make sure the "TimeSeriesCallback" made correct data. 
+ # Extracts data at all points from the first step of the time series and compares it to the + # exact solution and an interpolated reference solution + point_data = [getindex(time_series.affect!.point_data[i], 1:3) for i in 1:3] + exact_data = [initial_condition_convergence_test(time_series.affect!.point_coordinates[i], + time_series.affect!.time[1], + equations) for i in 1:3] + ref_data = [[1.968279088772251, 1.9682791565395945, 3.874122958278797], + [2.0654816955822017, 2.0654817326611883, 4.26621471136323], + [2.0317209235018936, 2.0317209516429506, 4.127889808862571]] + @test point_data≈exact_data atol=1e-6 + @test point_data ≈ ref_data end @trixi_testset "elixir_euler_convergence_pure_fv.jl" begin diff --git a/test/test_tree_3d_euler.jl b/test/test_tree_3d_euler.jl index e9e2b82fec5..47669dce2fb 100644 --- a/test/test_tree_3d_euler.jl +++ b/test/test_tree_3d_euler.jl @@ -25,7 +25,10 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") 0.032179231640894645, 0.032179231640895534, 0.0655408023333299, - ]) + ], + # With the default `maxiters = 1` in coverage tests, + # there would be no time series to check against. + coverage_override=(maxiters = 20,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -34,6 +37,38 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") du_ode = similar(u_ode) @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end + # Extra test to make sure the `TimeSeriesCallback` recorded the correct data. + # Extracts data at all points from the first step of the time series and compares it to the + # exact solution and an interpolated reference solution + point_data = [getindex(time_series.affect!.point_data[i], 1:5) for i in 1:3] + exact_data = [initial_condition_convergence_test(time_series.affect!.point_coordinates[:, + i], + time_series.affect!.time[1], + equations) for i in 1:3] + ref_data = [ + [ + 1.951156832316166, + 1.952073047561595, + 1.9520730475615966, + 1.9520730475615953, + 3.814390510967551, + ], + [ + 2.0506452262144363, + 2.050727319703708, + 2.0507273197037073, + 2.0507273197037077, + 4.203653999433724, + ], + [ + 2.046982357537558, + 2.0463728824399654, + 2.0463728824399654, + 2.0463728824399645, + 4.190033459318115, + ]] + @test point_data≈exact_data atol=1e-1 + @test point_data ≈ ref_data end @trixi_testset "elixir_euler_convergence_pure_fv.jl" begin From 18aaae96035a995e840e4e262964e7b49fdd9325 Mon Sep 17 00:00:00 2001 From: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Date: Thu, 21 Mar 2024 19:56:02 +0100 Subject: [PATCH 10/19] Add ExplicitImports.jl test (#1875) * add ExplicitImports.jl tests * format * use Trixi.@batch in @threaded and skip @batch in stale explicit imports test --------- Co-authored-by: Michael Schlottke-Lakemper --- Project.toml | 2 -- src/Trixi.jl | 13 ++++++------- src/auxiliary/auxiliary.jl | 2 ++ test/Project.toml | 2 ++ test/test_aqua.jl | 9 +++++++++ 5 files changed, 19 insertions(+), 9 deletions(-) diff --git a/Project.toml b/Project.toml index 97da4aec51b..0e960a06a38 100644 --- a/Project.toml +++ b/Project.toml @@ -31,7 +31,6 @@ RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" Requires = "ae029012-a4dd-5104-9daa-d747884805df" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" -Setfield = "efcf1570-3423-57d1-acb7-fd33fddbac46" SimpleUnPack = "ce78b400-467f-4804-87d8-8f486da07d0a" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" StartUpDG =
"472ebc20-7c99-4d4b-9470-8fde4e9faa0f" @@ -84,7 +83,6 @@ RecipesBase = "1.1" Reexport = "1.0" Requires = "1.1" SciMLBase = "1.90, 2" -Setfield = "1" SimpleUnPack = "1.1" SparseArrays = "1" StartUpDG = "0.17.7" diff --git a/src/Trixi.jl b/src/Trixi.jl index da7359999c5..9375c80d77e 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -22,7 +22,7 @@ using LinearAlgebra: LinearAlgebra, Diagonal, diag, dot, mul!, norm, cross, norm UniformScaling, det using Printf: @printf, @sprintf, println using SparseArrays: AbstractSparseMatrix, AbstractSparseMatrixCSC, sparse, droptol!, - rowvals, nzrange, nonzeros, spzeros + rowvals, nzrange, nonzeros # import @reexport now to make it available for further imports/exports using Reexport: @reexport @@ -32,10 +32,10 @@ using Reexport: @reexport using MPI: MPI using SciMLBase: CallbackSet, DiscreteCallback, - ODEProblem, ODESolution, ODEFunction, + ODEProblem, ODESolution, SplitODEProblem import SciMLBase: get_du, get_tmp_cache, u_modified!, - AbstractODEIntegrator, init, step!, check_error, + init, step!, check_error, get_proposed_dt, set_proposed_dt!, terminate!, remake, add_tstop!, has_tstop, first_tstop @@ -57,7 +57,6 @@ using Polyester: Polyester, @batch # You know, the cheapest threads you can find using OffsetArrays: OffsetArray, OffsetVector using P4est using T8code -using Setfield: @set using RecipesBase: RecipesBase using Requires: @require using Static: Static, One, True, False @@ -66,7 +65,7 @@ using StaticArrays: StaticArrays, MVector, MArray, SMatrix, @SMatrix using StrideArrays: PtrArray, StrideArray, StaticInt @reexport using StructArrays: StructArrays, StructArray using TimerOutputs: TimerOutputs, @notimeit, TimerOutput, print_timer, reset_timer! -using Triangulate: Triangulate, TriangulateIO, triangulate +using Triangulate: Triangulate, TriangulateIO export TriangulateIO # for type parameter in DGMultiMesh using TriplotBase: TriplotBase using TriplotRecipes: DGTriPseudocolor @@ -84,9 +83,9 @@ const _PREFERENCE_LOG = @load_preference("log", "log_Trixi_NaN") # finite difference SBP operators using SummationByPartsOperators: AbstractDerivativeOperator, - AbstractNonperiodicDerivativeOperator, DerivativeOperator, + AbstractNonperiodicDerivativeOperator, AbstractPeriodicDerivativeOperator, - PeriodicDerivativeOperator, grid + grid import SummationByPartsOperators: integrate, semidiscretize, compute_coefficients, compute_coefficients!, left_boundary_weight, right_boundary_weight diff --git a/src/auxiliary/auxiliary.jl b/src/auxiliary/auxiliary.jl index 92da9a5ba8b..972a748c56b 100644 --- a/src/auxiliary/auxiliary.jl +++ b/src/auxiliary/auxiliary.jl @@ -242,6 +242,8 @@ macro threaded(expr) # !!! danger "Heisenbug" # Look at the comments for `wrap_array` when considering to change this macro. + # By using `Trixi.@batch` we allow users of Trixi.jl to use `@threaded` without having + # Polyester.jl in their namespace. 
return esc(quote Trixi.@batch $(expr) end) diff --git a/test/Project.toml b/test/Project.toml index 1a042dab44f..1491d7a5c5f 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -2,6 +2,7 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" +ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7" FFMPEG = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" @@ -16,6 +17,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Aqua = "0.8" CairoMakie = "0.10" Downloads = "1" +ExplicitImports = "1.0.1" FFMPEG = "0.4" ForwardDiff = "0.10.24" LinearAlgebra = "1" diff --git a/test/test_aqua.jl b/test/test_aqua.jl index 93457caba28..04c4a533d26 100644 --- a/test/test_aqua.jl +++ b/test/test_aqua.jl @@ -1,6 +1,7 @@ module TestAqua using Aqua +using ExplicitImports: check_no_implicit_imports, check_no_stale_explicit_imports using Test using Trixi @@ -13,6 +14,14 @@ include("test_trixi.jl") # in src/solvers/dgmulti/sbp.jl piracies = (treat_as_own = [Trixi.StartUpDG.RefElemData, Trixi.StartUpDG.MeshData],)) + @test isnothing(check_no_implicit_imports(Trixi, + skip = (Core, Base, Trixi.P4est, Trixi.T8code, + Trixi.EllipsisNotation))) + @test isnothing(check_no_stale_explicit_imports(Trixi, + ignore = (:derivative_operator, + :periodic_derivative_operator, + :upwind_operators, + Symbol("@batch")))) end end #module From f10969548615547e520623a9fb351a41bd952065 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 22 Mar 2024 12:40:51 +0100 Subject: [PATCH 11/19] Create SECURITY.md (#1884) * Create SECURITY.md * link to Julia SemVer --- SECURITY.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000000..faa84a770bc --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,30 @@ +# Security Policy + +The Trixi.jl development team takes security issues seriously. We appreciate +all efforts to responsibly disclose any security issues and will make every +effort to acknowledge contributions. + + +## Supported Versions + +The current stable release following the interpretation of +[semantic versioning (SemVer)](https://julialang.github.io/Pkg.jl/dev/compatibility/#Version-specifier-format-1) +used in the Julia ecosystem is supported with security updates. + + +## Reporting a Vulnerability + +To report a security issue, please use the GitHub Security Advisory +["Report a Vulnerability"](https://github.com/trixi-framework/Trixi.jl/security/advisories/new) +tab. + +We will send a response indicating the next steps in handling your report. +After the initial reply to your report, we will keep you informed of the +progress towards a fix and full announcement, and may ask for additional +information or guidance. + +Please report security bugs in third-party modules directly to the person +or team maintaining the module. + +Public notifications of vulnerabilities will be shared in community channels +such as Slack. 
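The ExplicitImports.jl checks added to `test/test_aqua.jl` in PATCH 10/19 above generalize to any Julia package: `check_no_implicit_imports` fails if a name from a dependency is used without an explicit `using Foo: bar`, and `check_no_stale_explicit_imports` fails if an explicitly imported name is never used. A minimal sketch of such a test set, with `MyPackage` as a hypothetical package under test (the `skip`/`ignore` keywords from the patch are only needed when, as in Trixi.jl, some of these findings are intentional):

```julia
module TestExplicitImports

using ExplicitImports: check_no_implicit_imports, check_no_stale_explicit_imports
using Test
using MyPackage  # hypothetical package under test

@testset "ExplicitImports" begin
    # Both checks return `nothing` when the package passes and throw otherwise,
    # so `isnothing` turns them into test assertions.
    @test isnothing(check_no_implicit_imports(MyPackage))
    @test isnothing(check_no_stale_explicit_imports(MyPackage))
end

end # module
```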
From f4cb1e0e88fe6fac456d55a619d79e4a4d8d265e Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 22 Mar 2024 12:56:20 +0100 Subject: [PATCH 12/19] add OpenSSF best practices badge (#1885) * add OpenSSF best practices badge * remove broken Genie badge --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 71370d3478e..86a8514a5ba 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,8 @@ [![Aqua QA](https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg)](https://github.com/JuliaTesting/Aqua.jl) [![License: MIT](https://img.shields.io/badge/License-MIT-success.svg)](https://opensource.org/licenses/MIT) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3996439.svg)](https://doi.org/10.5281/zenodo.3996439) -[![Downloads](https://shields.io/endpoint?url=https://pkgs.genieframework.com/api/v1/badge/Trixi)](https://pkgs.genieframework.com?packages=Trixi) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8695/badge)](https://www.bestpractices.dev/projects/8695) + From e31e25928d6e51f1e84d1437840346edf4e944a6 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 22 Mar 2024 17:56:06 +0100 Subject: [PATCH 13/19] Update contributing.md (#1883) * Update contributing.md * copy contributing from root directory to docs --- CONTRIBUTING.md | 9 ++++--- docs/.gitignore | 2 ++ docs/make.jl | 43 ++++++++++++++++++++++---------- docs/src/contributing.md | 54 ---------------------------------------- 4 files changed, 38 insertions(+), 70 deletions(-) delete mode 100644 docs/src/contributing.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c3ad581062e..c40c40bb18e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,9 +1,12 @@ # Contributing Trixi.jl is an open-source project and we are very happy to accept contributions -from the community. Please feel free to open issues or submit patches (preferably -as pull requests) any time. For planned larger contributions, it is often -beneficial to get in contact with one of the principal developers first (see +from the community. Please feel free to +[open issues](https://github.com/trixi-framework/Trixi.jl/issues/new/choose) +or submit patches (preferably as +[pull requests](https://github.com/trixi-framework/Trixi.jl/pulls)) +any time. For planned larger contributions, it is often beneficial to get +in contact with one of the principal developers first (see [AUTHORS.md](AUTHORS.md)). 
Trixi.jl and its contributions are licensed under the MIT license (see diff --git a/docs/.gitignore b/docs/.gitignore index 01f3ac8d79a..c8a9e842246 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -1 +1,3 @@ src/code_of_conduct.md +src/contributing.md + diff --git a/docs/make.jl b/docs/make.jl index 8427c4049bf..f752a7b0ee6 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -28,19 +28,36 @@ DocMeta.setdocmeta!(Trixi2Vtk, :DocTestSetup, :(using Trixi2Vtk); recursive=true # as necessary # Based on: https://github.com/ranocha/SummationByPartsOperators.jl/blob/0206a74140d5c6eb9921ca5021cb7bf2da1a306d/docs/make.jl#L27-L41 open(joinpath(@__DIR__, "src", "code_of_conduct.md"), "w") do io - # Point to source license file - println(io, """ - ```@meta - EditURL = "https://github.com/trixi-framework/Trixi.jl/blob/main/CODE_OF_CONDUCT.md" - ``` - """) - # Write the modified contents - println(io, "# [Code of Conduct](@id code-of-conduct)") - println(io, "") - for line in eachline(joinpath(dirname(@__DIR__), "CODE_OF_CONDUCT.md")) - line = replace(line, "[AUTHORS.md](AUTHORS.md)" => "[Authors](@ref)") - println(io, "> ", line) - end + # Point to source license file + println(io, + """ + ```@meta + EditURL = "https://github.com/trixi-framework/Trixi.jl/blob/main/CODE_OF_CONDUCT.md" + ``` + """) + # Write the modified contents + println(io, "# [Code of Conduct](@id code-of-conduct)") + println(io, "") + for line in eachline(joinpath(dirname(@__DIR__), "CODE_OF_CONDUCT.md")) + line = replace(line, "[AUTHORS.md](AUTHORS.md)" => "[Authors](@ref)") + println(io, "> ", line) + end +end + +open(joinpath(@__DIR__, "src", "contributing.md"), "w") do io + # Point to source license file + println(io, + """ + ```@meta + EditURL = "https://github.com/trixi-framework/Trixi.jl/blob/main/CONTRIBUTING.md" + ``` + """) + # Write the modified contents + for line in eachline(joinpath(dirname(@__DIR__), "CONTRIBUTING.md")) + line = replace(line, "[LICENSE.md](LICENSE.md)" => "[License](@ref)") + line = replace(line, "[AUTHORS.md](AUTHORS.md)" => "[Authors](@ref)") + println(io, line) + end end # Create tutorials for the following files: diff --git a/docs/src/contributing.md b/docs/src/contributing.md deleted file mode 100644 index 5f996477215..00000000000 --- a/docs/src/contributing.md +++ /dev/null @@ -1,54 +0,0 @@ -# Contributing - -Trixi.jl is an open-source project and we are very happy to accept contributions -from the community. Please feel free to open issues or submit patches (preferably -as merge requests) any time. For planned larger contributions, it is often -beneficial to get in contact with one of the principal developers first (see -[Authors](@ref)). - -Trixi.jl and its contributions are licensed under the MIT license (see -[License](@ref)). As a contributor, you certify that all your -contributions are in conformance with the *Developer Certificate of Origin -(Version 1.1)*, which is reproduced below. - -## Developer Certificate of Origin (Version 1.1) -The following text was taken from -[https://developercertificate.org](https://developercertificate.org): - - Developer Certificate of Origin - Version 1.1 - - Copyright (C) 2004, 2006 The Linux Foundation and its contributors. - 1 Letterman Drive - Suite D4700 - San Francisco, CA, 94129 - - Everyone is permitted to copy and distribute verbatim copies of this - license document, but changing it is not allowed. 
- - - Developer's Certificate of Origin 1.1 - - By making a contribution to this project, I certify that: - - (a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - - (b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - - (c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - - (d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. From 9f330400a9b62473a5c695f1faedabb487c6c8b1 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 22 Mar 2024 17:57:31 +0100 Subject: [PATCH 14/19] set version to v0.7.4 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 0e960a06a38..ada2b40bf0a 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.4-pre" +version = "0.7.4" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 01032aaa78f6140d7200f50d1011b3c7cd0c9c9f Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 22 Mar 2024 17:57:46 +0100 Subject: [PATCH 15/19] set development version to v0.7.5-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index ada2b40bf0a..27df49ed4fa 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.7.4" +version = "0.7.5-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From d86a0f47bebe0945b3a70143015adb0fe95e6174 Mon Sep 17 00:00:00 2001 From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com> Date: Tue, 26 Mar 2024 08:22:48 +0100 Subject: [PATCH 16/19] Use @batch reduction functionality for subcell bounds check (#1888) * Use @batch reduction functionality for bounds check * fmt * Adapt compat bound for Polyester.jl * Add comments --- Project.toml | 2 +- docs/src/performance.md | 11 ---- .../subcell_bounds_check_2d.jl | 66 ++++++++----------- src/solvers/dgsem_tree/subcell_limiters_2d.jl | 11 +--- 4 files changed, 32 insertions(+), 58 deletions(-) diff --git a/Project.toml b/Project.toml index 27df49ed4fa..3db90c9928e 100644 --- a/Project.toml +++ b/Project.toml @@ -75,7 +75,7 @@ MuladdMacro = "0.2.2" Octavian = "0.3.21" OffsetArrays = "1.12" P4est = "0.4.9" -Polyester = "0.7.5" +Polyester = "0.7.10" PrecompileTools = "1.1" Preferences = "1.3" Printf = "1" diff --git a/docs/src/performance.md b/docs/src/performance.md index 40970e58c5c..9f81d3c3d8e 100644 --- a/docs/src/performance.md +++ b/docs/src/performance.md @@ -282,14 +282,3 @@ requires. It can thus be seen as a proxy for "energy used" and, as an extension, timing result, you need to set the analysis interval such that the `AnalysisCallback` is invoked at least once during the course of the simulation and discard the first PID value. - -## Performance issues with multi-threaded reductions -[False sharing](https://en.wikipedia.org/wiki/False_sharing) is a known performance issue -for systems with distributed caches. It also occurred for the implementation of a thread -parallel bounds checking routine for the subcell IDP limiting -in [PR #1736](https://github.com/trixi-framework/Trixi.jl/pull/1736). -After some [testing and discussion](https://github.com/trixi-framework/Trixi.jl/pull/1736#discussion_r1423881895), -it turned out that initializing a vector of length `n * Threads.nthreads()` and only using every -n-th entry instead of a vector of length `Threads.nthreads()` fixes the problem. -Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)`. -Now, the bounds checking routine of the IDP limiting scales as hoped. diff --git a/src/callbacks_stage/subcell_bounds_check_2d.jl b/src/callbacks_stage/subcell_bounds_check_2d.jl index 19d73968c9a..3a56ea71f62 100644 --- a/src/callbacks_stage/subcell_bounds_check_2d.jl +++ b/src/callbacks_stage/subcell_bounds_check_2d.jl @@ -12,35 +12,37 @@ (; variable_bounds) = limiter.cache.subcell_limiter_coefficients (; idp_bounds_delta_local, idp_bounds_delta_global) = limiter.cache - # Note: Accessing the threaded memory vector `idp_bounds_delta_local` with - # `deviation = idp_bounds_delta_local[key][Threads.threadid()]` causes critical performance - # issues due to False Sharing. - # Initializing a vector with n times the length and using every n-th entry fixes this - # problem and allows proper scaling: - # `deviation = idp_bounds_delta_local[key][n * Threads.threadid()]` - # Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)` - stride_size = div(128, sizeof(eltype(u))) # = n + # Note: In order to get the maximum deviation from the target bounds, this bounds check + # requires a reduction in every RK stage and for every enabled limiting option. To make + # this thread-parallel, we use Polyester.jl's (at least v0.7.10) `@batch reduction` + # functionality.
+ # Although `@threaded` and `@batch` are currently used equivalently in Trixi.jl, we use + # `@batch` here so that a possible redefinition of `@threaded` does not cause errors. + # See also https://github.com/trixi-framework/Trixi.jl/pull/1888#discussion_r1537785293. if local_minmax for v in limiter.local_minmax_variables_cons v_string = string(v) key_min = Symbol(v_string, "_min") key_max = Symbol(v_string, "_max") - deviation_min_threaded = idp_bounds_delta_local[key_min] - deviation_max_threaded = idp_bounds_delta_local[key_max] - @threaded for element in eachelement(solver, cache) - deviation_min = deviation_min_threaded[stride_size * Threads.threadid()] - deviation_max = deviation_max_threaded[stride_size * Threads.threadid()] + deviation_min = idp_bounds_delta_local[key_min] + deviation_max = idp_bounds_delta_local[key_max] + @batch reduction=((max, deviation_min), (max, deviation_max)) for element in eachelement(solver, + cache) for j in eachnode(solver), i in eachnode(solver) var = u[v, i, j, element] + # Note: We always store the absolute deviations >= 0 and therefore use the + # `max` operator for both the lower and the upper bound. The opposite + # directions of the two bounds are accounted for by the + # different signs in their computations. deviation_min = max(deviation_min, variable_bounds[key_min][i, j, element] - var) deviation_max = max(deviation_max, var - variable_bounds[key_max][i, j, element]) end - deviation_min_threaded[stride_size * Threads.threadid()] = deviation_min - deviation_max_threaded[stride_size * Threads.threadid()] = deviation_max end + idp_bounds_delta_local[key_min] = deviation_min + idp_bounds_delta_local[key_max] = deviation_max end end if positivity @@ -49,40 +51,35 @@ continue end key = Symbol(string(v), "_min") - deviation_threaded = idp_bounds_delta_local[key] - @threaded for element in eachelement(solver, cache) - deviation = deviation_threaded[stride_size * Threads.threadid()] + deviation = idp_bounds_delta_local[key] + @batch reduction=(max, deviation) for element in eachelement(solver, cache) for j in eachnode(solver), i in eachnode(solver) var = u[v, i, j, element] deviation = max(deviation, variable_bounds[key][i, j, element] - var) end - deviation_threaded[stride_size * Threads.threadid()] = deviation end + idp_bounds_delta_local[key] = deviation end for variable in limiter.positivity_variables_nonlinear key = Symbol(string(variable), "_min") - deviation_threaded = idp_bounds_delta_local[key] - @threaded for element in eachelement(solver, cache) - deviation = deviation_threaded[stride_size * Threads.threadid()] + deviation = idp_bounds_delta_local[key] + @batch reduction=(max, deviation) for element in eachelement(solver, cache) for j in eachnode(solver), i in eachnode(solver) var = variable(get_node_vars(u, equations, solver, i, j, element), equations) deviation = max(deviation, variable_bounds[key][i, j, element] - var) end - deviation_threaded[stride_size * Threads.threadid()] = deviation end + idp_bounds_delta_local[key] = deviation end end for (key, _) in idp_bounds_delta_local - # Calculate maximum deviations of all threads - idp_bounds_delta_local[key][stride_size] = maximum(idp_bounds_delta_local[key][stride_size * i] - for i in 1:Threads.nthreads()) # Update global maximum deviations idp_bounds_delta_global[key] = max(idp_bounds_delta_global[key], - idp_bounds_delta_local[key][stride_size]) + idp_bounds_delta_local[key]) end if save_errors @@ -92,10 +89,8 @@ if local_minmax for v in limiter.local_minmax_variables_cons v_string
= string(v) - print(f, ", ", - idp_bounds_delta_local[Symbol(v_string, "_min")][stride_size], - ", ", - idp_bounds_delta_local[Symbol(v_string, "_max")][stride_size]) + print(f, ", ", idp_bounds_delta_local[Symbol(v_string, "_min")], + ", ", idp_bounds_delta_local[Symbol(v_string, "_max")]) end end if positivity @@ -103,21 +98,18 @@ if v in limiter.local_minmax_variables_cons continue end - print(f, ", ", - idp_bounds_delta_local[Symbol(string(v), "_min")][stride_size]) + print(f, ", ", idp_bounds_delta_local[Symbol(string(v), "_min")]) end for variable in limiter.positivity_variables_nonlinear print(f, ", ", - idp_bounds_delta_local[Symbol(string(variable), "_min")][stride_size]) + idp_bounds_delta_local[Symbol(string(variable), "_min")]) end end println(f) end # Reset local maximum deviations for (key, _) in idp_bounds_delta_local - for i in 1:Threads.nthreads() - idp_bounds_delta_local[key][stride_size * i] = zero(eltype(idp_bounds_delta_local[key][stride_size])) - end + idp_bounds_delta_local[key] = zero(eltype(idp_bounds_delta_local[key])) end end diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl index 3f7954c8958..9343cee4397 100644 --- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl +++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl @@ -18,18 +18,11 @@ function create_cache(limiter::Type{SubcellLimiterIDP}, equations::AbstractEquat # Memory for bounds checking routine with `BoundsCheckCallback`. # Local variable contains the maximum deviation since the last export. - # Using a threaded vector to parallelize bounds check. - idp_bounds_delta_local = Dict{Symbol, Vector{real(basis)}}() + idp_bounds_delta_local = Dict{Symbol, real(basis)}() # Global variable contains the total maximum deviation. idp_bounds_delta_global = Dict{Symbol, real(basis)}() - # Note: False sharing causes critical performance issues on multiple threads when using a vector - # of length `Threads.nthreads()`. Initializing a vector of length `n * Threads.nthreads()` - # and then only using every n-th entry, fixes the problem and allows proper scaling. 
- # Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)` - stride_size = div(128, sizeof(eltype(basis.nodes))) # = n for key in bound_keys - idp_bounds_delta_local[key] = [zero(real(basis)) - for _ in 1:(stride_size * Threads.nthreads())] + idp_bounds_delta_local[key] = zero(real(basis)) idp_bounds_delta_global[key] = zero(real(basis)) end From cd6fdaaa1443376ad97bc408cafc27d82a647175 Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Tue, 26 Mar 2024 08:23:15 +0100 Subject: [PATCH 17/19] abort initial AMR loop after 10 iterations (#1890) Co-authored-by: Hendrik Ranocha --- src/callbacks_step/amr.jl | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 6f57d6647fc..1ab65a3553e 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -138,11 +138,18 @@ function initialize!(cb::DiscreteCallback{Condition, Affect!}, u, t, # iterate until mesh does not change anymore has_changed = amr_callback(integrator, only_refine = amr_callback.adapt_initial_condition_only_refine) + iterations = 1 while has_changed compute_coefficients!(integrator.u, t, semi) u_modified!(integrator, true) has_changed = amr_callback(integrator, only_refine = amr_callback.adapt_initial_condition_only_refine) + iterations = iterations + 1 + if iterations > 10 + @warn "AMR for initial condition did not settle within 10 iterations!\n" * + "Consider adjusting thresholds or setting `adapt_initial_condition_only_refine`." + break + end end end From 27026de6e2be4f0f6655efb68c082515f1383e84 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Tue, 26 Mar 2024 10:07:11 +0100 Subject: [PATCH 18/19] set version to v0.7.5 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 3db90c9928e..9ca3086306f 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.5-pre" +version = "0.7.5" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 909abb4473c95042a87e93e46d443dc381c2b523 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Tue, 26 Mar 2024 10:07:23 +0100 Subject: [PATCH 19/19] set development version to v0.7.6-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 9ca3086306f..6ff7f29686d 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.5" +version = "0.7.6-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2"
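As a closing illustration of the pattern introduced in PATCH 16/19: Polyester.jl's `@batch reduction` (available from v0.7.10) declares loop-carried variables that each thread updates privately and that are combined with the given operator after the loop, replacing the manual per-thread storage (and its false-sharing workaround) removed above. A minimal self-contained sketch, assuming only Polyester.jl; the function `max_abs_deviation` and its arguments are made up for illustration and are not part of Trixi.jl:

```julia
using Polyester: @batch  # `reduction` requires Polyester.jl v0.7.10 or newer

# Thread-parallel maximum of |u[i] - bound[i]|, mirroring how the bounds
# check above accumulates its maximum deviation without threaded storage.
function max_abs_deviation(u, bound)
    deviation = zero(eltype(u))
    # Each thread works on a private copy of `deviation`; the per-thread
    # results are combined with `max` when the loop finishes.
    @batch reduction=(max, deviation) for i in eachindex(u, bound)
        deviation = max(deviation, abs(u[i] - bound[i]))
    end
    return deviation
end

max_abs_deviation([1.0, -3.5, 2.0], [0.5, 0.5, 0.5])  # returns 4.0
```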