From 954a5021ae9c61ceae247397b46bf1a22513ca51 Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Thu, 7 Mar 2024 14:40:26 +0100
Subject: [PATCH 01/20] switch to global count of cells

---
 src/meshes/p4est_mesh.jl  | 2 +-
 src/meshes/t8code_mesh.jl | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/meshes/p4est_mesh.jl b/src/meshes/p4est_mesh.jl
index abe9d9345b5..d62853d31be 100644
--- a/src/meshes/p4est_mesh.jl
+++ b/src/meshes/p4est_mesh.jl
@@ -93,7 +93,7 @@ end
     return mesh.p4est.trees.elem_count[]
 end
 # returns Int32 by default which causes a weird method error when creating the cache
-@inline ncells(mesh::P4estMesh) = Int(mesh.p4est.local_num_quadrants[])
+@inline ncells(mesh::P4estMesh) = Int(mesh.p4est.global_num_quadrants[])
 
 function Base.show(io::IO, mesh::P4estMesh)
     print(io, "P4estMesh{", ndims(mesh), ", ", real(mesh), "}")
diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl
index cb2ac787e14..8df50cc5487 100644
--- a/src/meshes/t8code_mesh.jl
+++ b/src/meshes/t8code_mesh.jl
@@ -79,7 +79,7 @@ const ParallelT8codeMesh{NDIMS} = T8codeMesh{NDIMS, <:Real, <:True}
 @inline Base.real(::T8codeMesh{NDIMS, RealT}) where {NDIMS, RealT} = RealT
 
 @inline ntrees(mesh::T8codeMesh) = size(mesh.tree_node_coordinates)[end]
-@inline ncells(mesh::T8codeMesh) = Int(t8_forest_get_local_num_elements(mesh.forest))
+@inline ncells(mesh::T8codeMesh) = Int(t8_forest_get_global_num_elements(mesh.forest))
 
 function Base.show(io::IO, mesh::T8codeMesh)
     print(io, "T8codeMesh{", ndims(mesh), ", ", real(mesh), "}")

From ed6d5a981246a4eec84373c95030691bd3db37cb Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Thu, 7 Mar 2024 14:43:11 +0100
Subject: [PATCH 02/20] introduce ndofsglobal for generic types as fallback

---
 src/semidiscretization/semidiscretization.jl | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/src/semidiscretization/semidiscretization.jl b/src/semidiscretization/semidiscretization.jl
index 8518cf27fd3..1fc8d75eab7 100644
--- a/src/semidiscretization/semidiscretization.jl
+++ b/src/semidiscretization/semidiscretization.jl
@@ -15,6 +15,26 @@ Return the number of degrees of freedom associated with each scalar variable.
     ndofs(mesh, solver, cache)
 end
 
+"""
+    ndofsglobal(semi::AbstractSemidiscretization)
+
+Return the global number of degrees of freedom associated with each scalar variable.
+"""
+@inline function ndofsglobal(semi::AbstractSemidiscretization)
+    mesh, _, solver, cache = mesh_equations_solver_cache(semi)
+    ndofsglobal(mesh, solver, cache)
+end
+
+"""
+    ndofsglobal(mesh, solver, cache)
+
+Return the global number of degrees of freedom associated with each scalar variable.
+Defaults to ndofs when there is no special implementation for parallel computations.
+"""
+@inline function ndofsglobal(mesh, solver, cache)
+    ndofs(mesh, solver, cache)
+end
+
 """
     integrate_via_indices(func, u_ode, semi::AbstractSemidiscretization, args...;
                           normalize=true)

From 0fd2376c2a69d54ed48ef411996c26b3320cb1cf Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Thu, 7 Mar 2024 14:44:11 +0100
Subject: [PATCH 03/20] switch to ndofsglobal for console output

---
 src/semidiscretization/semidiscretization_coupled.jl    | 6 +++++-
 src/semidiscretization/semidiscretization_hyperbolic.jl | 2 +-
 .../semidiscretization_hyperbolic_parabolic.jl          | 2 +-
 3 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl
index dc21dbe9a1e..a56f17c8832 100644
--- a/src/semidiscretization/semidiscretization_coupled.jl
+++ b/src/semidiscretization/semidiscretization_coupled.jl
@@ -81,7 +81,7 @@ function Base.show(io::IO, ::MIME"text/plain", semi::SemidiscretizationCoupled)
                          semi.semis[i].source_terms)
             summary_line(increment_indent(io), "solver", solver |> typeof |> nameof)
         end
-        summary_line(io, "total #DOFs per field", ndofs(semi))
+        summary_line(io, "total #DOFs per field", ndofsglobal(semi))
         summary_footer(io)
     end
 end
@@ -123,6 +123,10 @@ end
     sum(ndofs, semi.semis)
 end
 
+@inline function ndofsglobal(semi::SemidiscretizationCoupled)
+    sum(ndofsglobal, semi.semis)
+end
+
 function compute_coefficients(t, semi::SemidiscretizationCoupled)
     @unpack u_indices = semi
 
diff --git a/src/semidiscretization/semidiscretization_hyperbolic.jl b/src/semidiscretization/semidiscretization_hyperbolic.jl
index 7ebd758de37..7dbfdfb4af4 100644
--- a/src/semidiscretization/semidiscretization_hyperbolic.jl
+++ b/src/semidiscretization/semidiscretization_hyperbolic.jl
@@ -244,7 +244,7 @@ function Base.show(io::IO, ::MIME"text/plain", semi::SemidiscretizationHyperboli
         summary_line(io, "source terms", semi.source_terms)
         summary_line(io, "solver", semi.solver |> typeof |> nameof)
 
-        summary_line(io, "total #DOFs per field", ndofs(semi))
+        summary_line(io, "total #DOFs per field", ndofsglobal(semi))
         summary_footer(io)
     end
 end
diff --git a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl
index 0f44941390a..997c9f543f2 100644
--- a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl
+++ b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl
@@ -227,7 +227,7 @@ function Base.show(io::IO, ::MIME"text/plain",
         summary_line(io, "source terms", semi.source_terms)
         summary_line(io, "solver", semi.solver |> typeof |> nameof)
         summary_line(io, "parabolic solver", semi.solver_parabolic |> typeof |> nameof)
-        summary_line(io, "total #DOFs per field", ndofs(semi))
+        summary_line(io, "total #DOFs per field", ndofsglobal(semi))
         summary_footer(io)
     end
 end
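Usage sketch — illustrative only, not part of the patch series. The 1D advection
setup below is an assumed example in the style of Trixi.jl's elixirs;
`ndofsglobal` is the function introduced in PATCH 02/20 and is module-qualified
here since these patches show no export. In serial it falls back to `ndofs`;
under MPI it reports the total across all ranks:

    using Trixi

    mesh = TreeMesh(-1.0, 1.0, initial_refinement_level = 4, n_cells_max = 30_000)
    equations = LinearScalarAdvectionEquation1D(1.0)
    solver = DGSEM(polydeg = 3)
    semi = SemidiscretizationHyperbolic(mesh, equations,
                                        initial_condition_convergence_test, solver)

    Trixi.ndofs(semi)        # 2^4 cells * (3 + 1) nodes = 64 DOFs on this rank
    Trixi.ndofsglobal(semi)  # the same 64 in serial; the sum over all ranks under MPI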
From 51127eb8c3854e341a5b5b72af3e79a24d61fc03 Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Thu, 7 Mar 2024 17:56:06 +0100
Subject: [PATCH 04/20] add ncellsglobal

ncells was used elsewhere and has to be the local number
---
 src/meshes/p4est_mesh.jl  | 5 +++--
 src/meshes/t8code_mesh.jl | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/src/meshes/p4est_mesh.jl b/src/meshes/p4est_mesh.jl
index d62853d31be..d191cd3efb5 100644
--- a/src/meshes/p4est_mesh.jl
+++ b/src/meshes/p4est_mesh.jl
@@ -93,7 +93,8 @@ end
     return mesh.p4est.trees.elem_count[]
 end
 # returns Int32 by default which causes a weird method error when creating the cache
-@inline ncells(mesh::P4estMesh) = Int(mesh.p4est.global_num_quadrants[])
+@inline ncells(mesh::P4estMesh) = Int(mesh.p4est.local_num_quadrants[])
+@inline ncellsglobal(mesh::P4estMesh) = Int(mesh.p4est.global_num_quadrants[])
 
 function Base.show(io::IO, mesh::P4estMesh)
     print(io, "P4estMesh{", ndims(mesh), ", ", real(mesh), "}")
@@ -105,7 +106,7 @@ function Base.show(io::IO, ::MIME"text/plain", mesh::P4estMesh)
     else
         setup = [
             "#trees" => ntrees(mesh),
-            "current #cells" => ncells(mesh),
+            "current #cells" => ncellsglobal(mesh),
             "polydeg" => length(mesh.nodes) - 1,
         ]
         summary_box(io,
diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl
index 8df50cc5487..8260532b30c 100644
--- a/src/meshes/t8code_mesh.jl
+++ b/src/meshes/t8code_mesh.jl
@@ -79,7 +79,8 @@ const ParallelT8codeMesh{NDIMS} = T8codeMesh{NDIMS, <:Real, <:True}
 @inline Base.real(::T8codeMesh{NDIMS, RealT}) where {NDIMS, RealT} = RealT
 
 @inline ntrees(mesh::T8codeMesh) = size(mesh.tree_node_coordinates)[end]
-@inline ncells(mesh::T8codeMesh) = Int(t8_forest_get_global_num_elements(mesh.forest))
+@inline ncells(mesh::T8codeMesh) = Int(t8_forest_get_local_num_elements(mesh.forest))
+@inline ncellsglobal(mesh::T8codeMesh) = Int(t8_forest_get_global_num_elements(mesh.forest))
 
 function Base.show(io::IO, mesh::T8codeMesh)
     print(io, "T8codeMesh{", ndims(mesh), ", ", real(mesh), "}")
@@ -91,7 +92,7 @@ function Base.show(io::IO, ::MIME"text/plain", mesh::T8codeMesh)
     else
         setup = [
             "#trees" => ntrees(mesh),
-            "current #cells" => ncells(mesh),
+            "current #cells" => ncellsglobal(mesh),
             "polydeg" => length(mesh.nodes) - 1,
         ]
         summary_box(io,

From 2efdfd8bd79f9301d2f84f55c466629938292890 Mon Sep 17 00:00:00 2001
From: Benedict <135045760+bgeihe@users.noreply.github.com>
Date: Fri, 8 Mar 2024 12:24:03 +0100
Subject: [PATCH 05/20] Update src/semidiscretization/semidiscretization.jl

Co-authored-by: Hendrik Ranocha
---
 src/semidiscretization/semidiscretization.jl | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/semidiscretization/semidiscretization.jl b/src/semidiscretization/semidiscretization.jl
index 1fc8d75eab7..35acd8cf44a 100644
--- a/src/semidiscretization/semidiscretization.jl
+++ b/src/semidiscretization/semidiscretization.jl
@@ -19,6 +19,9 @@ end
     ndofsglobal(semi::AbstractSemidiscretization)
 
 Return the global number of degrees of freedom associated with each scalar variable.
+This is the same as [`ndofs`](@ref) for simulations running in serial or
+parallelized via threads. It will in general be different for simulations
+running in parallel with MPI.
 """
 @inline function ndofsglobal(semi::AbstractSemidiscretization)
     mesh, _, solver, cache = mesh_equations_solver_cache(semi)

From b883b3a0490858a28c36a2d4d3415f08bf690316 Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Fri, 8 Mar 2024 16:59:07 +0100
Subject: [PATCH 06/20] remove docstring

---
 src/semidiscretization/semidiscretization.jl | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/src/semidiscretization/semidiscretization.jl b/src/semidiscretization/semidiscretization.jl
index 35acd8cf44a..ebb3a897bca 100644
--- a/src/semidiscretization/semidiscretization.jl
+++ b/src/semidiscretization/semidiscretization.jl
@@ -28,12 +28,6 @@ running in parallel with MPI.
     ndofsglobal(mesh, solver, cache)
 end
 
-"""
-    ndofsglobal(mesh, solver, cache)
-
-Return the global number of degrees of freedom associated with each scalar variable.
-Defaults to ndofs when there is no special implementation for parallel computations.
-"""
 @inline function ndofsglobal(mesh, solver, cache)
     ndofs(mesh, solver, cache)
 end

From 6a48c77ad300ff13c8512712d95c046a0881133c Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Fri, 8 Mar 2024 16:59:17 +0100
Subject: [PATCH 07/20] ndofsglobal in analysis callback

---
 src/callbacks_step/analysis.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index ba232032951..9e7a0c35e75 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -310,7 +310,7 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi)
         mpi_println(" " * " " *
                     " " *
                     " PID: " * @sprintf("%10.8e s", performance_index))
-        mpi_println(" #DOFs per field:" * @sprintf("% 14d", ndofs(semi)) *
+        mpi_println(" #DOFs per field:" * @sprintf("% 14d", ndofsglobal(semi)) *
                     " " *
                     " alloc'd memory: " * @sprintf("%14.3f MiB", memory_use))
         mpi_println(" #elements: " *
                     @sprintf("% 14d", nelements(mesh, solver, cache)))

From d82aff50c23218393a8242600b790dbe7ea501ae Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Tue, 26 Mar 2024 11:34:48 +0100
Subject: [PATCH 08/20] remove unnecessary fallback

---
 src/semidiscretization/semidiscretization.jl | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/src/semidiscretization/semidiscretization.jl b/src/semidiscretization/semidiscretization.jl
index ebb3a897bca..4550f8fce83 100644
--- a/src/semidiscretization/semidiscretization.jl
+++ b/src/semidiscretization/semidiscretization.jl
@@ -28,10 +28,6 @@ running in parallel with MPI.
     ndofsglobal(mesh, solver, cache)
 end
 
-@inline function ndofsglobal(mesh, solver, cache)
-    ndofs(mesh, solver, cache)
-end
-
 """
     integrate_via_indices(func, u_ode, semi::AbstractSemidiscretization, args...;
                           normalize=true)

From b8dbb4a360d9518c687c0fc12598e15759f1322c Mon Sep 17 00:00:00 2001
From: Benedict <135045760+benegee@users.noreply.github.com>
Date: Wed, 29 May 2024 09:32:58 +0200
Subject: [PATCH 09/20] Update src/semidiscretization/semidiscretization.jl

Co-authored-by: Michael Schlottke-Lakemper
---
 src/semidiscretization/semidiscretization.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/semidiscretization/semidiscretization.jl b/src/semidiscretization/semidiscretization.jl
index 4550f8fce83..1a56e27f7d9 100644
--- a/src/semidiscretization/semidiscretization.jl
+++ b/src/semidiscretization/semidiscretization.jl
@@ -18,7 +18,7 @@ end
 """
     ndofsglobal(semi::AbstractSemidiscretization)
 
-Return the global number of degrees of freedom associated with each scalar variable.
+Return the global number of degrees of freedom associated with each scalar variable across all MPI ranks.
 This is the same as [`ndofs`](@ref) for simulations running in serial or
 parallelized via threads. It will in general be different for simulations
 running in parallel with MPI.
 """
 @inline function ndofsglobal(semi::AbstractSemidiscretization)
     mesh, _, solver, cache = mesh_equations_solver_cache(semi)

From 0d6c935b56d255cea96b41ea886962852fce536e Mon Sep 17 00:00:00 2001
From: Benedict <135045760+benegee@users.noreply.github.com>
Date: Wed, 29 May 2024 09:36:45 +0200
Subject: [PATCH 10/20] Update src/semidiscretization/semidiscretization_coupled.jl

Co-authored-by: Michael Schlottke-Lakemper
---
 src/semidiscretization/semidiscretization_coupled.jl | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl
index a56f17c8832..09bedb9ec22 100644
--- a/src/semidiscretization/semidiscretization_coupled.jl
+++ b/src/semidiscretization/semidiscretization_coupled.jl
@@ -123,6 +123,14 @@ end
     sum(ndofs, semi.semis)
 end
 
+"""
+    ndofsglobal(semi::SemidiscretizationCoupled)
+
+Return the global number of degrees of freedom associated with each scalar variable across all MPI ranks, and summed up over all coupled systems.
+This is the same as [`ndofs`](@ref) for simulations running in serial or
+parallelized via threads. It will in general be different for simulations
+running in parallel with MPI.
+"""
 @inline function ndofsglobal(semi::SemidiscretizationCoupled)
     sum(ndofsglobal, semi.semis)
 end
From 60ed55fd7cabb94d2383f7376216a3b10105d239 Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Wed, 29 May 2024 10:35:44 +0200
Subject: [PATCH 11/20] missing calls to *global functions

---
 src/callbacks_step/analysis.jl               | 4 ++--
 src/semidiscretization/semidiscretization.jl | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index df195c2f857..b90af10215b 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -267,7 +267,7 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi)
     # and the number of local degrees of freedom
     # OBS! This computation must happen *after* the PID computation above, since `take!(...)`
     # will reset the number of calls to `rhs!`
-    runtime_relative = 1.0e-9 * take!(semi.performance_counter) / ndofs(semi)
+    runtime_relative = 1.0e-9 * take!(semi.performance_counter) / ndofsglobal(semi)
 
     # Compute the total time spent in garbage collection since the analysis callback has been
     # initialized, in seconds
@@ -312,7 +312,7 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi)
                     " " *
                     " alloc'd memory: " * @sprintf("%14.3f MiB", memory_use))
         mpi_println(" #elements: " *
-                    @sprintf("% 14d", nelements(mesh, solver, cache)))
+                    @sprintf("% 14d", nelementsglobal(solver, cache)))
 
         # Level information (only show for AMR)
         print_amr_information(integrator.opts.callback, mesh, solver, cache)
diff --git a/src/semidiscretization/semidiscretization.jl b/src/semidiscretization/semidiscretization.jl
index 1a56e27f7d9..c6b82d5f37b 100644
--- a/src/semidiscretization/semidiscretization.jl
+++ b/src/semidiscretization/semidiscretization.jl
@@ -410,6 +410,7 @@ end
 # TODO: Taal, document interface?
 # New mesh/solver combinations have to implement
 # - ndofs(mesh, solver, cache)
+# - ndofsglobal(mesh, solver, cache)
 # - ndims(mesh)
 # - nnodes(solver)
 # - real(solver)

From e0df3b4ce17ccd6e975097c7c00c23ec66bbfba6 Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Wed, 29 May 2024 14:07:44 +0200
Subject: [PATCH 12/20] sum up and print global element count per level

---
 src/callbacks_step/analysis.jl | 86 ++++++++++------------------------
 1 file changed, 25 insertions(+), 61 deletions(-)

diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index b90af10215b..d46c2b55df9 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -494,88 +494,52 @@ function print_amr_information(callbacks, mesh, solver, cache)
     # Return early if there is nothing to print
     uses_amr(callbacks) || return nothing
 
-    levels = Vector{Int}(undef, nelements(solver, cache))
-    min_level = typemax(Int)
-    max_level = typemin(Int)
-    for element in eachelement(solver, cache)
-        current_level = mesh.tree.levels[cache.elements.cell_ids[element]]
-        levels[element] = current_level
-        min_level = min(min_level, current_level)
-        max_level = max(max_level, current_level)
+    # Get global minimum and maximum level from the AMRController
+    min_level = max_level = 0
+    for cb in callbacks.discrete_callbacks
+        if cb.affect! isa AMRCallback
+            min_level = cb.affect!.controller.base_level
+            max_level = cb.affect!.controller.max_level
+        end
     end
 
+    # Get local element count per level
+    elements_per_level = get_elements_per_level(min_level, max_level, mesh, solver, cache)
+
+    # Sum up across all ranks
+    MPI.Reduce!(elements_per_level, +, mpi_root(), mpi_comm())
+
+    # Print
     for level in max_level:-1:(min_level + 1)
         mpi_println(" ├── level $level: " *
-                    @sprintf("% 14d", count(==(level), levels)))
+                    @sprintf("% 14d", elements_per_level[level + 1 - min_level]))
     end
     mpi_println(" └── level $min_level: " *
-                @sprintf("% 14d", count(==(min_level), levels)))
+                @sprintf("% 14d", elements_per_level[min_level + 1 - min_level]))
 
     return nothing
 end
 
-# Print level information only if AMR is enabled
-function print_amr_information(callbacks, mesh::P4estMesh, solver, cache)
-
-    # Return early if there is nothing to print
-    uses_amr(callbacks) || return nothing
-
+function get_elements_per_level(min_level, max_level, mesh::P4estMesh, solver, cache)
     elements_per_level = zeros(P4EST_MAXLEVEL + 1)
 
     for tree in unsafe_wrap_sc(p4est_tree_t, mesh.p4est.trees)
         elements_per_level .+= tree.quadrants_per_level
     end
 
-    # levels start at zero but Julia's standard indexing starts at 1
-    min_level_1 = findfirst(i -> i > 0, elements_per_level)
-    max_level_1 = findlast(i -> i > 0, elements_per_level)
-
-    # Check if there is at least one level with an element
-    if isnothing(min_level_1) || isnothing(max_level_1)
-        return nothing
-    end
-
-    min_level = min_level_1 - 1
-    max_level = max_level_1 - 1
-
-    for level in max_level:-1:(min_level + 1)
-        mpi_println(" ├── level $level: " *
-                    @sprintf("% 14d", elements_per_level[level + 1]))
-    end
-    mpi_println(" └── level $min_level: " *
-                @sprintf("% 14d", elements_per_level[min_level + 1]))
-
-    return nothing
+    return @view(elements_per_level[(min_level + 1):(max_level + 1)])
 end
 
-# Print level information only if AMR is enabled
-function print_amr_information(callbacks, mesh::T8codeMesh, solver, cache)
-
-    # Return early if there is nothing to print
-    uses_amr(callbacks) || return nothing
-
-    # TODO: Switch to global element levels array when MPI supported or find
-    # another solution.
+function get_elements_per_level(min_level, max_level, mesh::T8codeMesh, solver, cache)
     levels = trixi_t8_get_local_element_levels(mesh.forest)
-    min_level = minimum(levels)
-    max_level = maximum(levels)
-
-    mpi_println(" minlevel = $min_level")
-    mpi_println(" maxlevel = $max_level")
-
-    if min_level > 0
-        elements_per_level = [count(==(l), levels) for l in 1:max_level]
-
-        for level in max_level:-1:(min_level + 1)
-            mpi_println(" ├── level $level: " *
-                        @sprintf("% 14d", elements_per_level[level]))
-        end
-        mpi_println(" └── level $min_level: " *
-                    @sprintf("% 14d", elements_per_level[min_level]))
-    end
+    return [count(==(l), levels) for l in min_level:max_level]
+end
 
-    return nothing
+function get_elements_per_level(min_level, max_level, mesh, solver, cache)
+    levels = [mesh.tree.levels[cache.elements.cell_ids[element]]
+              for element in eachelement(solver, cache)]
+    return [count(==(l), levels) for l in min_level:max_level]
 end
 
 # Iterate over tuples of analysis integrals in a type-stable way using "lispy tuple programming".

From a773257e8a1fe8bbc283cc7a341da7481ea3747d Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Wed, 29 May 2024 14:11:08 +0200
Subject: [PATCH 13/20] formatter

---
 src/callbacks_step/analysis.jl | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index d46c2b55df9..13f3fbfc101 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -504,7 +504,8 @@ function print_amr_information(callbacks, mesh, solver, cache)
     end
 
     # Get local element count per level
-    elements_per_level = get_elements_per_level(min_level, max_level, mesh, solver, cache)
+    elements_per_level = get_elements_per_level(min_level, max_level, mesh, solver,
+                                                cache)
 
     # Sum up across all ranks
     MPI.Reduce!(elements_per_level, +, mpi_root(), mpi_comm())

From e217ab36c8d0b664a08c6db858fe3b5a1e1a15e8 Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Wed, 29 May 2024 14:14:59 +0200
Subject: [PATCH 14/20] simplify

---
 src/callbacks_step/analysis.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index 13f3fbfc101..a5a4cb47745 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -516,7 +516,7 @@ function print_amr_information(callbacks, mesh, solver, cache)
                     @sprintf("% 14d", elements_per_level[level + 1 - min_level]))
     end
     mpi_println(" └── level $min_level: " *
-                @sprintf("% 14d", elements_per_level[min_level + 1 - min_level]))
+                @sprintf("% 14d", elements_per_level[1]))
 
     return nothing
 end
From 97c1c2d080851a97e9c88edd505bf2d6cb8c80ee Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Wed, 29 May 2024 14:18:02 +0200
Subject: [PATCH 15/20] revert change in relative runtime

---
 src/callbacks_step/analysis.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index a5a4cb47745..8dc331df428 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -267,7 +267,7 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi)
     # and the number of local degrees of freedom
     # OBS! This computation must happen *after* the PID computation above, since `take!(...)`
     # will reset the number of calls to `rhs!`
-    runtime_relative = 1.0e-9 * take!(semi.performance_counter) / ndofsglobal(semi)
+    runtime_relative = 1.0e-9 * take!(semi.performance_counter) / ndofs(semi)
 
     # Compute the total time spent in garbage collection since the analysis callback has been
     # initialized, in seconds

From 9301e6524b80035ec142d3fd9681a426974dd8c3 Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Wed, 29 May 2024 14:42:10 +0200
Subject: [PATCH 16/20] add nelementsglobal in analogy to ndofsglobal

---
 src/callbacks_step/analysis_dgmulti.jl | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/src/callbacks_step/analysis_dgmulti.jl b/src/callbacks_step/analysis_dgmulti.jl
index dc294de9e7b..63ed7cca62e 100644
--- a/src/callbacks_step/analysis_dgmulti.jl
+++ b/src/callbacks_step/analysis_dgmulti.jl
@@ -185,6 +185,13 @@ end
 SolutionAnalyzer(rd::RefElemData) = rd
 
 nelements(mesh::DGMultiMesh, ::DGMulti, other_args...) = mesh.md.num_elements
+function nelementsglobal(mesh::DGMultiMesh, solver::DGMulti, cache)
+    if mpi_isparallel()
+        error("`nelementsglobal` is not implemented for `DGMultiMesh` when used in parallel with MPI")
+    else
+        return nelements(mesh, solver, cache)
+    end
+end
 function ndofsglobal(mesh::DGMultiMesh, solver::DGMulti, cache)
     if mpi_isparallel()
         error("`ndofsglobal` is not implemented for `DGMultiMesh` when used in parallel with MPI")

From 9282cb6d7d4a9eeb176638bb7d891476acc478f1 Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Wed, 29 May 2024 15:03:53 +0200
Subject: [PATCH 17/20] fix signature

---
 src/callbacks_step/analysis_dgmulti.jl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/callbacks_step/analysis_dgmulti.jl b/src/callbacks_step/analysis_dgmulti.jl
index 63ed7cca62e..5ba46a05c3d 100644
--- a/src/callbacks_step/analysis_dgmulti.jl
+++ b/src/callbacks_step/analysis_dgmulti.jl
@@ -185,11 +185,11 @@ end
 SolutionAnalyzer(rd::RefElemData) = rd
 
 nelements(mesh::DGMultiMesh, ::DGMulti, other_args...) = mesh.md.num_elements
-function nelementsglobal(mesh::DGMultiMesh, solver::DGMulti, cache)
+function nelementsglobal(::DGMulti, other_args...)
     if mpi_isparallel()
         error("`nelementsglobal` is not implemented for `DGMultiMesh` when used in parallel with MPI")
     else
-        return nelements(mesh, solver, cache)
+        return mesh.md.num_elements
     end
 end
 function ndofsglobal(mesh::DGMultiMesh, solver::DGMulti, cache)
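Sketch of the serial-only guard used for DGMulti in PATCHES 16-17, applied to a
hypothetical `MyMesh`/`MySolver` combination — the pattern a new mesh/solver
type can copy until it gains MPI support. `mpi_isparallel` and `nelements` are
the existing Trixi.jl functions used by the patches:

    # `MyMesh`/`MySolver` are made-up names for illustration only.
    struct MyMesh end
    struct MySolver end

    function nelementsglobal(mesh::MyMesh, solver::MySolver, cache)
        if mpi_isparallel()
            error("`nelementsglobal` is not implemented for `MyMesh` when used in parallel with MPI")
        else
            return nelements(mesh, solver, cache)
        end
    end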
From 710c9c364253a3123b7d19d4923f6f5d382f0f6b Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Wed, 29 May 2024 15:30:31 +0200
Subject: [PATCH 18/20] add mesh parameter to nelementsglobal

---
 src/callbacks_step/amr.jl              | 4 ++--
 src/callbacks_step/analysis.jl         | 2 +-
 src/callbacks_step/analysis_dgmulti.jl | 4 ++--
 src/callbacks_step/save_restart_dg.jl  | 4 ++--
 src/callbacks_step/save_solution_dg.jl | 6 +++---
 src/solvers/dg.jl                      | 4 ++--
 6 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl
index 45f03fba8fe..b0afd02aff8 100644
--- a/src/callbacks_step/amr.jl
+++ b/src/callbacks_step/amr.jl
@@ -228,7 +228,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,
 
     if mpi_isparallel()
         # Collect lambda for all elements
-        lambda_global = Vector{eltype(lambda)}(undef, nelementsglobal(dg, cache))
+        lambda_global = Vector{eltype(lambda)}(undef, nelementsglobal(mesh, dg, cache))
         # Use parent because n_elements_by_rank is an OffsetArray
         recvbuf = MPI.VBuffer(lambda_global, parent(cache.mpi_cache.n_elements_by_rank))
         MPI.Allgatherv!(lambda, recvbuf, mpi_comm())
@@ -380,7 +380,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::TreeMesh,
         error("MPI has not been verified yet for parabolic AMR")
 
         # Collect lambda for all elements
-        lambda_global = Vector{eltype(lambda)}(undef, nelementsglobal(dg, cache))
+        lambda_global = Vector{eltype(lambda)}(undef, nelementsglobal(mesh, dg, cache))
         # Use parent because n_elements_by_rank is an OffsetArray
         recvbuf = MPI.VBuffer(lambda_global, parent(cache.mpi_cache.n_elements_by_rank))
         MPI.Allgatherv!(lambda, recvbuf, mpi_comm())
diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index 8dc331df428..e5560126420 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -312,7 +312,7 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi)
                     " " *
                     " alloc'd memory: " * @sprintf("%14.3f MiB", memory_use))
         mpi_println(" #elements: " *
-                    @sprintf("% 14d", nelementsglobal(solver, cache)))
+                    @sprintf("% 14d", nelementsglobal(mesh, solver, cache)))
 
         # Level information (only show for AMR)
         print_amr_information(integrator.opts.callback, mesh, solver, cache)
diff --git a/src/callbacks_step/analysis_dgmulti.jl b/src/callbacks_step/analysis_dgmulti.jl
index 5ba46a05c3d..1f0eec2de34 100644
--- a/src/callbacks_step/analysis_dgmulti.jl
+++ b/src/callbacks_step/analysis_dgmulti.jl
@@ -185,11 +185,11 @@ end
 SolutionAnalyzer(rd::RefElemData) = rd
 
 nelements(mesh::DGMultiMesh, ::DGMulti, other_args...) = mesh.md.num_elements
-function nelementsglobal(::DGMulti, other_args...)
+function nelementsglobal(mesh::DGMultiMesh, solver::DGMulti, cache)
     if mpi_isparallel()
         error("`nelementsglobal` is not implemented for `DGMultiMesh` when used in parallel with MPI")
     else
-        return mesh.md.num_elements
+        return ndofs(mesh, solver, cache)
     end
 end
 function ndofsglobal(mesh::DGMultiMesh, solver::DGMulti, cache)
diff --git a/src/callbacks_step/save_restart_dg.jl b/src/callbacks_step/save_restart_dg.jl
index cddeef77bb2..b83402c5f86 100644
--- a/src/callbacks_step/save_restart_dg.jl
+++ b/src/callbacks_step/save_restart_dg.jl
@@ -126,7 +126,7 @@ function save_restart_file_parallel(u, time, dt, timestep,
         attributes(file)["equations"] = get_name(equations)
         attributes(file)["polydeg"] = polydeg(dg)
         attributes(file)["n_vars"] = nvariables(equations)
-        attributes(file)["n_elements"] = nelementsglobal(dg, cache)
+        attributes(file)["n_elements"] = nelementsglobal(mesh, dg, cache)
         attributes(file)["mesh_type"] = get_name(mesh)
         attributes(file)["mesh_file"] = splitdir(mesh.current_filename)[2]
         attributes(file)["time"] = convert(Float64, time) # Ensure that `time` is written as a double precision scalar
@@ -239,7 +239,7 @@ function load_restart_file_parallel(mesh::Union{ParallelTreeMesh, ParallelP4estM
         if read(attributes(file)["polydeg"]) != polydeg(dg)
             error("restart mismatch: polynomial degree in solver differs from value in restart file")
         end
-        if read(attributes(file)["n_elements"]) != nelementsglobal(dg, cache)
+        if read(attributes(file)["n_elements"]) != nelementsglobal(mesh, dg, cache)
             error("restart mismatch: number of elements in solver differs from value in restart file")
         end
 
diff --git a/src/callbacks_step/save_solution_dg.jl b/src/callbacks_step/save_solution_dg.jl
index 7367886ca94..deae8f7c930 100644
--- a/src/callbacks_step/save_solution_dg.jl
+++ b/src/callbacks_step/save_solution_dg.jl
@@ -158,7 +158,7 @@ function save_solution_file_parallel(data, time, dt, timestep, n_vars,
         attributes(file)["equations"] = get_name(equations)
         attributes(file)["polydeg"] = polydeg(dg)
         attributes(file)["n_vars"] = n_vars
-        attributes(file)["n_elements"] = nelementsglobal(dg, cache)
+        attributes(file)["n_elements"] = nelementsglobal(mesh, dg, cache)
         attributes(file)["mesh_type"] = get_name(mesh)
         attributes(file)["mesh_file"] = splitdir(mesh.current_filename)[2]
         attributes(file)["time"] = convert(Float64, time) # Ensure that `time` is written as a double precision scalar
@@ -183,7 +183,7 @@ function save_solution_file_parallel(data, time, dt, timestep, n_vars,
             # Need to create dataset explicitly in parallel case
             var = create_dataset(file, "/element_variables_$v",
                                  datatype(eltype(element_variable)),
-                                 dataspace((nelementsglobal(dg, cache),)))
+                                 dataspace((nelementsglobal(mesh, dg, cache),)))
 
             # Write data of each process in slices (ranks start with 0)
             slice = (cum_element_counts[mpi_rank() + 1] + 1):cum_element_counts[mpi_rank() + 2]
@@ -230,7 +230,7 @@ function save_solution_file_on_root(data, time, dt, timestep, n_vars,
         attributes(file)["equations"] = get_name(equations)
         attributes(file)["polydeg"] = polydeg(dg)
         attributes(file)["n_vars"] = n_vars
-        attributes(file)["n_elements"] = nelementsglobal(dg, cache)
+        attributes(file)["n_elements"] = nelementsglobal(mesh, dg, cache)
         attributes(file)["mesh_type"] = get_name(mesh)
         attributes(file)["mesh_file"] = splitdir(mesh.current_filename)[2]
         attributes(file)["time"] = convert(Float64, time) # Ensure that `time` is written as a double precision scalar
diff --git a/src/solvers/dg.jl b/src/solvers/dg.jl
index 0ab947e697a..3dc499f5715 100644
--- a/src/solvers/dg.jl
+++ b/src/solvers/dg.jl
@@ -459,7 +459,7 @@ In particular, not the nodes themselves are returned.
 # `mesh` for some combinations of mesh/solver.
 @inline nelements(mesh, dg::DG, cache) = nelements(dg, cache)
 @inline function ndofsglobal(mesh, dg::DG, cache)
-    nelementsglobal(dg, cache) * nnodes(dg)^ndims(mesh)
+    nelementsglobal(mehs, dg, cache) * nnodes(dg)^ndims(mesh)
 end
 
 """
@@ -517,7 +517,7 @@ In particular, not the mortars themselves are returned.
 @inline eachmpimortar(dg::DG, cache) = Base.OneTo(nmpimortars(dg, cache))
 
 @inline nelements(dg::DG, cache) = nelements(cache.elements)
-@inline function nelementsglobal(dg::DG, cache)
+@inline function nelementsglobal(mesh, dg::DG, cache)
     mpi_isparallel() ? cache.mpi_cache.n_elements_global : nelements(dg, cache)
 end
 @inline ninterfaces(dg::DG, cache) = ninterfaces(cache.interfaces)

From db3bf435955b75c5ac5d3180f26e57beb14e8873 Mon Sep 17 00:00:00 2001
From: Benedict Geihe
Date: Wed, 29 May 2024 15:42:31 +0200
Subject: [PATCH 19/20] :/

---
 src/solvers/dg.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/solvers/dg.jl b/src/solvers/dg.jl
index 3dc499f5715..fb4c8f182e0 100644
--- a/src/solvers/dg.jl
+++ b/src/solvers/dg.jl
@@ -459,7 +459,7 @@ In particular, not the nodes themselves are returned.
 # `mesh` for some combinations of mesh/solver.
 @inline nelements(mesh, dg::DG, cache) = nelements(dg, cache)
 @inline function ndofsglobal(mesh, dg::DG, cache)
-    nelementsglobal(mehs, dg, cache) * nnodes(dg)^ndims(mesh)
+    nelementsglobal(mesh, dg, cache) * nnodes(dg)^ndims(mesh)
 end
 
 """

From bded959cba81a87280ae4e6eb530ff252a7e53af Mon Sep 17 00:00:00 2001
From: Benedict <135045760+benegee@users.noreply.github.com>
Date: Fri, 31 May 2024 10:16:10 +0200
Subject: [PATCH 20/20] Update src/callbacks_step/analysis.jl

Co-authored-by: Hendrik Ranocha
---
 src/callbacks_step/analysis.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl
index e5560126420..2c8497dc28d 100644
--- a/src/callbacks_step/analysis.jl
+++ b/src/callbacks_step/analysis.jl
@@ -537,7 +537,7 @@ function get_elements_per_level(min_level, max_level, mesh::T8codeMesh, solver,
     return [count(==(l), levels) for l in min_level:max_level]
 end
 
-function get_elements_per_level(min_level, max_level, mesh, solver, cache)
+function get_elements_per_level(min_level, max_level, mesh::TreeMesh, solver, cache)
     levels = [mesh.tree.levels[cache.elements.cell_ids[element]]
               for element in eachelement(solver, cache)]
     return [count(==(l), levels) for l in min_level:max_level]
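Condensed recap — a sketch assembled from the patches above, not a verbatim copy
of any one file, showing the dispatch chain the series converges on.
`mesh_equations_solver_cache`, `nnodes`, and `mpi_isparallel` are existing
Trixi.jl internals used by the patched code:

    # Generic entry point: delegate to the mesh/solver/cache triple.
    @inline function ndofsglobal(semi::AbstractSemidiscretization)
        mesh, _, solver, cache = mesh_equations_solver_cache(semi)
        ndofsglobal(mesh, solver, cache)
    end

    # DG solvers: derive the global DOF count from the global element count.
    @inline function ndofsglobal(mesh, dg::DG, cache)
        nelementsglobal(mesh, dg, cache) * nnodes(dg)^ndims(mesh)
    end

    # The element count falls back to the local one outside of MPI runs.
    @inline function nelementsglobal(mesh, dg::DG, cache)
        mpi_isparallel() ? cache.mpi_cache.n_elements_global : nelements(dg, cache)
    end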