From 6606bf9a769a2262667909d60b771a5ae4e552b1 Mon Sep 17 00:00:00 2001
From: Weiqun Zhang
Date: Mon, 31 Jul 2023 13:09:46 -0700
Subject: [PATCH] Clang-Tidy: Add more checks

Most of the changes were done by clang-tidy --fix for
readability-braces-around-statements.
---
 .clang-tidy | 8 +-
 Src/Base/AMReX.H | 6 +-
 Src/Base/AMReX.cpp | 36 +-
 Src/Base/AMReX_Algorithm.H | 8 +-
 Src/Base/AMReX_Arena.cpp | 12 +-
 Src/Base/AMReX_Array.H | 9 +-
 Src/Base/AMReX_AsyncOut.cpp | 2 +-
 Src/Base/AMReX_BCUtil.cpp | 4 +-
 Src/Base/AMReX_BaseFab.H | 15 +-
 Src/Base/AMReX_BlockMutex.cpp | 2 +-
 Src/Base/AMReX_Box.H | 11 +-
 Src/Base/AMReX_Box.cpp | 12 +-
 Src/Base/AMReX_BoxArray.cpp | 51 +-
 Src/Base/AMReX_BoxDomain.cpp | 3 +-
 Src/Base/AMReX_BoxIterator.H | 2 +-
 Src/Base/AMReX_BoxList.cpp | 4 +-
 Src/Base/AMReX_DistributionMapping.cpp | 17 +-
 Src/Base/AMReX_FACopyDescriptor.H | 13 +-
 Src/Base/AMReX_FArrayBox.cpp | 27 +-
 Src/Base/AMReX_FBI.H | 20 +-
 Src/Base/AMReX_FabArray.H | 8 +-
 Src/Base/AMReX_FabArrayBase.H | 2 +-
 Src/Base/AMReX_FabArrayBase.cpp | 39 +-
 Src/Base/AMReX_FabArrayCommI.H | 8 +-
 Src/Base/AMReX_FabArrayUtility.H | 14 +-
 Src/Base/AMReX_FabConv.cpp | 79 +-
 Src/Base/AMReX_Geometry.cpp | 19 +-
 Src/Base/AMReX_GpuAsyncArray.H | 6 +-
 Src/Base/AMReX_GpuBuffer.H | 8 +-
 Src/Base/AMReX_GpuComplex.H | 9 +-
 Src/Base/AMReX_GpuContainers.H | 18 +-
 Src/Base/AMReX_GpuDevice.H | 12 +-
 Src/Base/AMReX_GpuDevice.cpp | 2 +-
 Src/Base/AMReX_GpuLaunchFunctsG.H | 44 +-
 Src/Base/AMReX_GpuMemory.H | 4 +-
 Src/Base/AMReX_IArrayBox.cpp | 2 +-
 Src/Base/AMReX_IndexType.cpp | 6 +-
 Src/Base/AMReX_IntConv.H | 4 +-
 Src/Base/AMReX_IntVect.cpp | 3 +-
 Src/Base/AMReX_LayoutData.H | 10 +-
 Src/Base/AMReX_MFIter.cpp | 8 +-
 Src/Base/AMReX_MPMD.H | 4 +-
 Src/Base/AMReX_Machine.cpp | 7 +-
 Src/Base/AMReX_Math.H | 13 +-
 Src/Base/AMReX_MemPool.cpp | 2 +-
 Src/Base/AMReX_MemProfiler.cpp | 26 +-
 Src/Base/AMReX_MultiFab.cpp | 21 +-
 Src/Base/AMReX_MultiFabUtil.H | 2 +-
 Src/Base/AMReX_MultiFabUtil.cpp | 4 +-
 Src/Base/AMReX_MultiFabUtilI.H | 8 +-
 Src/Base/AMReX_NonLocalBC.cpp | 2 +-
 Src/Base/AMReX_NonLocalBCImpl.H | 24 +-
 Src/Base/AMReX_Orientation.cpp | 6 +-
 Src/Base/AMReX_PArena.cpp | 2 +-
 Src/Base/AMReX_PCI.H | 4 +-
 Src/Base/AMReX_ParallelContext.cpp | 8 +-
 Src/Base/AMReX_ParallelDescriptor.H | 12 +-
 Src/Base/AMReX_ParallelDescriptor.cpp | 30 +-
 Src/Base/AMReX_ParmParse.cpp | 47 +-
 Src/Base/AMReX_PhysBCFunct.H | 12 +-
 Src/Base/AMReX_PlotFileUtil.cpp | 2 +-
 Src/Base/AMReX_Random.cpp | 3 +-
 Src/Base/AMReX_RealBox.H | 2 +-
 Src/Base/AMReX_RealBox.cpp | 6 +-
 Src/Base/AMReX_RealVect.cpp | 3 +-
 Src/Base/AMReX_Reduce.H | 8 +-
 Src/Base/AMReX_Scan.H | 68 +-
 Src/Base/AMReX_TagParallelFor.H | 4 +-
 Src/Base/AMReX_TinyProfiler.cpp | 22 +-
 Src/Base/AMReX_TypeList.H | 2 +-
 Src/Base/AMReX_Utility.cpp | 15 +-
 Src/Base/AMReX_Vector.H | 22 +-
 Src/Base/AMReX_VisMF.cpp | 6 +-
 Src/Base/AMReX_iMultiFab.cpp | 2 +-
 Src/Base/AMReX_parstream.cpp | 2 +-
 Src/Base/Parser/AMReX_Parser_Y.H | 6 +-
 Src/Base/Parser/AMReX_Parser_Y.cpp | 2 +-
 Src/Boundary/AMReX_InterpBndryData.H | 2 +-
 Src/Boundary/AMReX_Mask.cpp | 6 +-
 Src/Boundary/AMReX_YAFluxRegister.H | 2 +-
 Src/EB/AMReX_EB2.cpp | 4 +-
 Src/EB/AMReX_EB2_2D_C.H | 8 +-
 Src/EB/AMReX_EB2_2D_C.cpp | 16 +-
 Src/EB/AMReX_EB2_3D_C.H | 60 +-
 Src/EB/AMReX_EB2_GeometryShop.H | 8 +-
 Src/EB/AMReX_EB2_Level.cpp | 12 +-
 Src/EB/AMReX_EBFluxRegister.cpp | 4 +-
 Src/EB/AMReX_EBMultiFabUtil.cpp | 28 +-
 Src/EB/AMReX_EBMultiFabUtil_3D_C.H | 93 +-
 Src/EB/AMReX_EBToPVD.cpp | 11 +-
 Src/EB/AMReX_EB_FluxRedistribute.cpp | 155 +-
 Src/EB/AMReX_EB_LeastSquares_2D_K.H | 313
++-- Src/EB/AMReX_EB_LeastSquares_3D_K.H | 1299 +++++++++-------- Src/EB/AMReX_EB_Redistribution.cpp | 6 +- Src/EB/AMReX_EB_RedistributionApply.cpp | 75 +- Src/EB/AMReX_EB_Slopes_2D_K.H | 436 +++--- Src/EB/AMReX_EB_Slopes_3D_K.H | 323 ++-- Src/EB/AMReX_EB_StateRedistItracker.cpp | 150 +- Src/EB/AMReX_EB_StateRedistSlopeLimiter_K.H | 6 +- Src/EB/AMReX_EB_StateRedistUtils.cpp | 9 +- Src/EB/AMReX_EB_StateRedistribute.cpp | 36 +- Src/EB/AMReX_EB_chkpt_file.cpp | 36 +- Src/EB/AMReX_EB_utils.cpp | 6 +- Src/EB/AMReX_WriteEBSurface.cpp | 4 +- Src/EB/AMReX_algoim.cpp | 28 +- Src/EB/AMReX_algoim_K.H | 7 +- Src/Extern/HDF5/AMReX_ParticleHDF5.H | 65 +- Src/Extern/HDF5/AMReX_PlotFileUtilHDF5.cpp | 20 +- .../HDF5/AMReX_WriteBinaryParticleDataHDF5.H | 30 +- Src/Extern/HYPRE/AMReX_HypreIJIface.cpp | 29 +- Src/Extern/PETSc/AMReX_PETSc.cpp | 6 +- .../ProfParser/AMReX_ProfParserBatch.cpp | 44 +- .../SUNDIALS/AMReX_NVector_MultiFab.cpp | 12 +- Src/Extern/SUNDIALS/AMReX_SUNMemory.cpp | 18 +- .../SUNDIALS/AMReX_SundialsIntegrator.H | 20 +- Src/Extern/SUNDIALS/AMReX_Sundials_Core.cpp | 2 +- .../AmrCore/AMReX_FlashFluxRegister.cpp | 4 +- .../AmrCore/AMReX_fluxregister_fi.cpp | 3 +- Src/F_Interfaces/Base/AMReX_distromap_fi.cpp | 3 +- Src/F_Interfaces/Base/AMReX_geometry_fi.cpp | 3 +- .../LinearSolvers/AMReX_abeclaplacian_fi.cpp | 6 +- .../LinearSolvers/AMReX_poisson_fi.cpp | 6 +- Src/LinearSolvers/MLMG/AMReX_MLABecLap_2D_K.H | 16 +- Src/LinearSolvers/MLMG/AMReX_MLABecLap_3D_K.H | 52 +- .../MLMG/AMReX_MLABecLaplacian.H | 12 +- Src/LinearSolvers/MLMG/AMReX_MLALaplacian.H | 4 +- Src/LinearSolvers/MLMG/AMReX_MLCGSolver.H | 12 +- Src/LinearSolvers/MLMG/AMReX_MLCellABecLap.H | 14 +- Src/LinearSolvers/MLMG/AMReX_MLCellLinOp.H | 36 +- Src/LinearSolvers/MLMG/AMReX_MLEBABecLap.cpp | 54 +- .../MLMG/AMReX_MLEBABecLap_2D_K.H | 32 +- .../MLMG/AMReX_MLEBABecLap_F.cpp | 8 +- Src/LinearSolvers/MLMG/AMReX_MLEBABecLap_K.H | 13 +- Src/LinearSolvers/MLMG/AMReX_MLEBTensorOp.cpp | 19 +- .../MLMG/AMReX_MLEBTensorOp_bc.cpp | 2 +- .../MLMG/AMReX_MLEBTensor_2D_K.H | 9 +- .../MLMG/AMReX_MLEBTensor_3D_K.H | 12 +- Src/LinearSolvers/MLMG/AMReX_MLLinOp.H | 16 +- Src/LinearSolvers/MLMG/AMReX_MLLinOp_K.H | 48 +- Src/LinearSolvers/MLMG/AMReX_MLMG.H | 22 +- Src/LinearSolvers/MLMG/AMReX_MLNodeLap_2D_K.H | 128 +- Src/LinearSolvers/MLMG/AMReX_MLNodeLap_3D_K.H | 296 ++-- Src/LinearSolvers/MLMG/AMReX_MLNodeLap_K.H | 2 +- .../MLMG/AMReX_MLNodeLaplacian.H | 2 +- .../MLMG/AMReX_MLNodeLaplacian.cpp | 24 +- .../MLMG/AMReX_MLNodeLaplacian_eb.cpp | 8 +- .../MLMG/AMReX_MLNodeLaplacian_misc.cpp | 22 +- .../MLMG/AMReX_MLNodeLaplacian_sten.cpp | 4 +- .../MLMG/AMReX_MLNodeLaplacian_sync.cpp | 6 +- Src/LinearSolvers/MLMG/AMReX_MLNodeLinOp.cpp | 4 +- .../MLMG/AMReX_MLNodeTensorLaplacian.cpp | 2 +- Src/LinearSolvers/MLMG/AMReX_MLPoisson.H | 10 +- Src/LinearSolvers/MLMG/AMReX_MLTensorOp.cpp | 4 +- Src/LinearSolvers/OpenBC/AMReX_OpenBC.cpp | 4 +- Src/Particle/AMReX_NeighborList.H | 4 +- Src/Particle/AMReX_NeighborParticles.H | 28 +- Src/Particle/AMReX_NeighborParticlesCPUImpl.H | 39 +- Src/Particle/AMReX_NeighborParticlesGPUImpl.H | 12 +- Src/Particle/AMReX_NeighborParticlesI.H | 57 +- Src/Particle/AMReX_ParGDB.H | 3 +- Src/Particle/AMReX_Particle.H | 46 +- Src/Particle/AMReX_ParticleArray.H | 2 +- Src/Particle/AMReX_ParticleBufferMap.cpp | 13 +- Src/Particle/AMReX_ParticleCommunication.H | 29 +- Src/Particle/AMReX_ParticleCommunication.cpp | 20 +- Src/Particle/AMReX_ParticleContainerBase.cpp | 5 +- Src/Particle/AMReX_ParticleContainerI.H | 862 +++++------ 
Src/Particle/AMReX_ParticleIO.H | 99 +- Src/Particle/AMReX_ParticleInit.H | 92 +- Src/Particle/AMReX_ParticleLocator.H | 16 +- Src/Particle/AMReX_ParticleMPIUtil.cpp | 2 +- Src/Particle/AMReX_ParticleMesh.H | 4 +- Src/Particle/AMReX_ParticleTile.H | 114 +- Src/Particle/AMReX_ParticleTransformation.H | 52 +- Src/Particle/AMReX_ParticleUtil.H | 8 +- Src/Particle/AMReX_ParticleUtil.cpp | 4 +- Src/Particle/AMReX_Particle_mod_K.H | 24 +- Src/Particle/AMReX_SparseBins.H | 8 +- Src/Particle/AMReX_StructOfArrays.H | 79 +- Src/Particle/AMReX_TracerParticles.cpp | 18 +- Src/Particle/AMReX_WriteBinaryParticleData.H | 136 +- .../Advection_AmrCore/Source/AmrCoreAdv.cpp | 14 +- .../Source/DefineVelocity.cpp | 3 +- Tests/Amr/Advection_AmrCore/Source/Tagging.H | 3 +- .../Advection_AmrLevel/Source/AmrLevelAdv.cpp | 33 +- Tests/DivFreePatch/main.cpp | 2 +- Tests/EB/CNS/Source/CNS.cpp | 9 +- Tests/EB/CNS/Source/CNS_advance.cpp | 8 +- Tests/EB_CNS/Source/CNS.cpp | 4 +- Tests/GPU/CNS/Source/CNS.cpp | 4 +- Tests/LinearSolvers/CellEB/MyTest.cpp | 7 +- Tests/LinearSolvers/NodalPoisson/main.cpp | 3 +- .../Nodal_Projection_EB/main.cpp | 10 +- Tests/MultiBlock/IndexType/main.cpp | 4 +- Tests/Parser2/fn.cpp | 2 +- Tests/Particles/AssignDensity/main.cpp | 6 +- .../AssignMultiLevelDensity/main.cpp | 6 +- Tests/Particles/AsyncIO/main.cpp | 39 +- Tests/Particles/CheckpointRestart/main.cpp | 9 +- Tests/Particles/CheckpointRestartSOA/main.cpp | 9 +- Tests/Particles/DenseBins/main.cpp | 2 +- Tests/Particles/GhostsAndVirtuals/main.cpp | 30 +- Tests/Particles/InitRandom/main.cpp | 3 +- Tests/Particles/Intersection/main.cpp | 5 +- .../NeighborParticles/MDParticleContainer.cpp | 12 +- Tests/Particles/NeighborParticles/main.cpp | 24 +- Tests/Particles/ParallelContext/main.cpp | 32 +- Tests/Particles/ParticleIterator/main.cpp | 9 +- Tests/Particles/ParticleMesh/main.cpp | 6 +- .../Particles/ParticleMeshMultiLevel/main.cpp | 6 +- Tests/Particles/ParticleReduce/main.cpp | 13 +- .../ParticleTransformations/main.cpp | 62 +- Tests/Particles/Redistribute/main.cpp | 35 +- Tests/Particles/RedistributeSOA/main.cpp | 26 +- Tests/Particles/SOAParticle/GNUmakefile | 22 + Tests/Particles/SOAParticle/Make.package | 1 + Tests/Particles/SOAParticle/main.cpp | 10 +- Tests/Particles/SparseBins/main.cpp | 3 +- Tools/Plotfile/fcompare.cpp | 2 +- Tools/Plotfile/fextract.cpp | 4 +- Tools/Plotfile/fnan.cpp | 2 +- Tools/Plotfile/fsnapshot.cpp | 6 +- .../Postprocessing/C_Src/particle_compare.cpp | 6 +- 223 files changed, 4183 insertions(+), 3376 deletions(-) create mode 100644 Tests/Particles/SOAParticle/GNUmakefile create mode 100644 Tests/Particles/SOAParticle/Make.package diff --git a/.clang-tidy b/.clang-tidy index 81892813369..565062895ec 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -20,16 +20,18 @@ Checks: > -cppcoreguidelines-non-private-member-variables-in-classes, -cppcoreguidelines-owning-memory, -cppcoreguidelines-pro-*, - misc-misplaced-const, + misc-*, + -misc-const-correctness, + -misc-non-private-member-variables-in-classes, + -misc-no-recursion, modernize-*, -modernize-avoid-c-arrays, -modernize-macro-to-enum, -modernize-return-braced-init-list, -modernize-use-trailing-return-type, performance-*, + portability-*, readability-*, - -readability-braces-around-statements, - -readability-container-data-pointer, -readability-else-after-return, -readability-function-cognitive-complexity, -readability-function-size, diff --git a/Src/Base/AMReX.H b/Src/Base/AMReX.H index a0eba4a5c0d..c539a1d8e75 100644 --- a/Src/Base/AMReX.H +++ 
b/Src/Base/AMReX.H @@ -117,7 +117,7 @@ namespace amrex #if defined(NDEBUG) amrex::ignore_unused(msg); #else - if (msg) AMREX_DEVICE_PRINTF("Error %s\n", msg); + if (msg) { AMREX_DEVICE_PRINTF("Error %s\n", msg); } AMREX_DEVICE_ASSERT(0); #endif #else @@ -136,7 +136,7 @@ namespace amrex #if defined(NDEBUG) amrex::ignore_unused(msg); #else - if (msg) AMREX_DEVICE_PRINTF("Warning %s\n", msg); + if (msg) { AMREX_DEVICE_PRINTF("Warning %s\n", msg); } #endif #else Warning_host(msg); @@ -152,7 +152,7 @@ namespace amrex #if defined(NDEBUG) amrex::ignore_unused(msg); #else - if (msg) AMREX_DEVICE_PRINTF("Abort %s\n", msg); + if (msg) { AMREX_DEVICE_PRINTF("Abort %s\n", msg); } AMREX_DEVICE_ASSERT(0); #endif #else diff --git a/Src/Base/AMReX.cpp b/Src/Base/AMReX.cpp index 6bb199f21b6..b43add7c9d1 100644 --- a/Src/Base/AMReX.cpp +++ b/Src/Base/AMReX.cpp @@ -364,7 +364,7 @@ amrex::Initialize (int& argc, char**& argv, bool build_parm_parse, system::exename += argv[0]; for (int i = 0; i < argc; ++i) { - if (i != 0) command_line.append(" "); + if (i != 0) { command_line.append(" "); } command_line.append(argv[i]); command_arguments.emplace_back(argv[i]); } @@ -412,7 +412,7 @@ amrex::Initialize (int& argc, char**& argv, bool build_parm_parse, // the rest get ingored. int ppargc = 1; for (; ppargc < argc; ++ppargc) { - if (std::strcmp(argv[ppargc], "--") == 0) break; + if (std::strcmp(argv[ppargc], "--") == 0) { break; } } if (ppargc > 1) { @@ -549,9 +549,9 @@ amrex::Initialize (int& argc, char**& argv, bool build_parm_parse, #if defined(__linux__) curr_fpe_excepts = 0; - if (invalid) curr_fpe_excepts |= FE_INVALID; - if (divbyzero) curr_fpe_excepts |= FE_DIVBYZERO; - if (overflow) curr_fpe_excepts |= FE_OVERFLOW; + if (invalid) { curr_fpe_excepts |= FE_INVALID; } + if (divbyzero) { curr_fpe_excepts |= FE_DIVBYZERO; } + if (overflow) { curr_fpe_excepts |= FE_OVERFLOW; } prev_fpe_excepts = fegetexcept(); if (curr_fpe_excepts != 0) { feenableexcept(curr_fpe_excepts); // trap floating point exceptions @@ -561,9 +561,9 @@ amrex::Initialize (int& argc, char**& argv, bool build_parm_parse, #elif defined(__APPLE__) && defined(__x86_64__) prev_fpe_mask = _MM_GET_EXCEPTION_MASK(); curr_fpe_excepts = 0u; - if (invalid) curr_fpe_excepts |= _MM_MASK_INVALID; - if (divbyzero) curr_fpe_excepts |= _MM_MASK_DIV_ZERO; - if (overflow) curr_fpe_excepts |= _MM_MASK_OVERFLOW; + if (invalid) { curr_fpe_excepts |= _MM_MASK_INVALID; } + if (divbyzero) { curr_fpe_excepts |= _MM_MASK_DIV_ZERO; } + if (overflow) { curr_fpe_excepts |= _MM_MASK_OVERFLOW; } if (curr_fpe_excepts != 0u) { _MM_SET_EXCEPTION_MASK(prev_fpe_mask & ~curr_fpe_excepts); prev_handler_sigfpe = std::signal(SIGFPE, BLBackTrace::handler); @@ -582,9 +582,9 @@ amrex::Initialize (int& argc, char**& argv, bool build_parm_parse, fenv_t env; fegetenv(&env); - if (invalid) env.__fpcr |= __fpcr_trap_invalid; - if (divbyzero) env.__fpcr |= __fpcr_trap_divbyzero; - if (overflow) env.__fpcr |= __fpcr_trap_overflow; + if (invalid) { env.__fpcr |= __fpcr_trap_invalid; } + if (divbyzero) { env.__fpcr |= __fpcr_trap_divbyzero; } + if (overflow) { env.__fpcr |= __fpcr_trap_overflow; } fesetenv(&env); // SIGILL ref: https://developer.apple.com/forums/thread/689159 #endif @@ -708,7 +708,7 @@ amrex::Finalize (amrex::AMReX* pamrex) AMReX::erase(pamrex); #ifdef AMREX_USE_HYPRE - if (init_hypre) HYPRE_Finalize(); + if (init_hypre) { HYPRE_Finalize(); } #endif BL_TINY_PROFILE_FINALIZE(); @@ -781,12 +781,12 @@ amrex::Finalize (amrex::AMReX* pamrex) #ifndef BL_AMRPROF if 
(system::signal_handling) { - if (prev_handler_sigsegv != SIG_ERR) std::signal(SIGSEGV, prev_handler_sigsegv); // NOLINT(performance-no-int-to-ptr) - if (prev_handler_sigterm != SIG_ERR) std::signal(SIGTERM, prev_handler_sigterm); // NOLINT(performance-no-int-to-ptr) - if (prev_handler_sigint != SIG_ERR) std::signal(SIGINT , prev_handler_sigint); // NOLINT(performance-no-int-to-ptr) - if (prev_handler_sigabrt != SIG_ERR) std::signal(SIGABRT, prev_handler_sigabrt); // NOLINT(performance-no-int-to-ptr) - if (prev_handler_sigfpe != SIG_ERR) std::signal(SIGFPE , prev_handler_sigfpe); // NOLINT(performance-no-int-to-ptr) - if (prev_handler_sigill != SIG_ERR) std::signal(SIGILL , prev_handler_sigill); // NOLINT(performance-no-int-to-ptr) + if (prev_handler_sigsegv != SIG_ERR) { std::signal(SIGSEGV, prev_handler_sigsegv); } // NOLINT(performance-no-int-to-ptr) + if (prev_handler_sigterm != SIG_ERR) { std::signal(SIGTERM, prev_handler_sigterm); } // NOLINT(performance-no-int-to-ptr) + if (prev_handler_sigint != SIG_ERR) { std::signal(SIGINT , prev_handler_sigint); } // NOLINT(performance-no-int-to-ptr) + if (prev_handler_sigabrt != SIG_ERR) { std::signal(SIGABRT, prev_handler_sigabrt); } // NOLINT(performance-no-int-to-ptr) + if (prev_handler_sigfpe != SIG_ERR) { std::signal(SIGFPE , prev_handler_sigfpe); } // NOLINT(performance-no-int-to-ptr) + if (prev_handler_sigill != SIG_ERR) { std::signal(SIGILL , prev_handler_sigill); } // NOLINT(performance-no-int-to-ptr) #if defined(__linux__) #if !defined(__PGI) || (__PGIC__ >= 16) if (curr_fpe_excepts != 0) { diff --git a/Src/Base/AMReX_Algorithm.H b/Src/Base/AMReX_Algorithm.H index 510796a861b..b418f3cc1c0 100644 --- a/Src/Base/AMReX_Algorithm.H +++ b/Src/Base/AMReX_Algorithm.H @@ -110,8 +110,8 @@ namespace amrex T flo = f(lo); T fhi = f(hi); - if (flo == T(0)) return flo; - if (fhi == T(0)) return fhi; + if (flo == T(0)) { return flo; } + if (fhi == T(0)) { return fhi; } AMREX_ASSERT_WITH_MESSAGE(flo * fhi <= T(0), "Error - calling bisect but lo and hi don't bracket a root."); @@ -121,10 +121,10 @@ namespace amrex int n = 1; while (n <= max_iter) { - if (hi - lo < tol || almostEqual(lo,hi)) break; + if (hi - lo < tol || almostEqual(lo,hi)) { break; } mi = (lo + hi) / T(2); fmi = f(mi); - if (fmi == T(0)) break; + if (fmi == T(0)) { break; } fmi*flo < T(0) ? 
hi = mi : lo = mi; flo = f(lo); fhi = f(hi); diff --git a/Src/Base/AMReX_Arena.cpp b/Src/Base/AMReX_Arena.cpp index d18fa0dd5d5..a30a5fc7344 100644 --- a/Src/Base/AMReX_Arena.cpp +++ b/Src/Base/AMReX_Arena.cpp @@ -141,7 +141,7 @@ Arena::allocate_system (std::size_t nbytes) // NOLINT(readability-make-member-fu #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #endif - if (p && (nbytes > 0) && arena_info.device_use_hostalloc) mlock(p, nbytes); + if (p && (nbytes > 0) && arena_info.device_use_hostalloc) { mlock(p, nbytes); } #if defined(__GNUC__) && !defined(__clang__) #pragma GCC diagnostic pop #endif @@ -201,13 +201,13 @@ Arena::allocate_system (std::size_t nbytes) // NOLINT(readability-make-member-fu #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #endif - if (p && (nbytes > 0) && arena_info.device_use_hostalloc) mlock(p, nbytes); + if (p && (nbytes > 0) && arena_info.device_use_hostalloc) { mlock(p, nbytes); } #if defined(__GNUC__) && !defined(__clang__) #pragma GCC diagnostic pop #endif #endif #endif - if (p == nullptr) amrex::Abort("Sorry, malloc failed"); + if (p == nullptr) { amrex::Abort("Sorry, malloc failed"); } return p; } @@ -217,7 +217,7 @@ Arena::deallocate_system (void* p, std::size_t nbytes) // NOLINT(readability-mak #ifdef AMREX_USE_GPU if (arena_info.use_cpu_memory) { - if (p && arena_info.device_use_hostalloc) AMREX_MUNLOCK(p, nbytes); + if (p && arena_info.device_use_hostalloc) { AMREX_MUNLOCK(p, nbytes); } std::free(p); } else if (arena_info.device_use_hostalloc) @@ -235,7 +235,7 @@ Arena::deallocate_system (void* p, std::size_t nbytes) // NOLINT(readability-mak sycl::free(p,Gpu::Device::syclContext())); } #else - if (p && arena_info.device_use_hostalloc) AMREX_MUNLOCK(p, nbytes); + if (p && arena_info.device_use_hostalloc) { AMREX_MUNLOCK(p, nbytes); } std::free(p); #endif } @@ -265,7 +265,7 @@ namespace { void Arena::Initialize () { - if (initialized) return; + if (initialized) { return; } initialized = true; // see reason on allowed reuse of the default CPU BArena in Arena::Finalize diff --git a/Src/Base/AMReX_Array.H b/Src/Base/AMReX_Array.H index c9a3eb63a56..3d853329ebc 100644 --- a/Src/Base/AMReX_Array.H +++ b/Src/Base/AMReX_Array.H @@ -103,7 +103,7 @@ namespace amrex { */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void fill ( const T& value ) noexcept - { for (unsigned int i = 0; i < N; ++i) arr[i] = value; } + { for (unsigned int i = 0; i < N; ++i) { arr[i] = value; } } /** * Returns the sum of all elements in the GpuArray object. 
@@ -853,7 +853,7 @@ namespace amrex template std::array GetArrOfPtrs (std::array& a) noexcept { - return {{AMREX_D_DECL(&a[0], &a[1], &a[2])}}; + return {{AMREX_D_DECL(a.data(), a.data()+1, a.data()+2)}}; } template @@ -865,13 +865,13 @@ namespace amrex template std::array GetArrOfConstPtrs (const std::array& a) noexcept { - return {{AMREX_D_DECL(&a[0], &a[1], &a[2])}}; + return {{AMREX_D_DECL(a.data(), a.data()+1, a.data()+2)}}; } template std::array GetArrOfConstPtrs (const std::array& a) noexcept { - return {{AMREX_D_DECL(a[0], a[1], a[2])}}; + return {{AMREX_D_DECL(a.data(), a.data()+1, a.data()+2)}}; } template @@ -897,4 +897,3 @@ namespace amrex } #endif - diff --git a/Src/Base/AMReX_AsyncOut.cpp b/Src/Base/AMReX_AsyncOut.cpp index 2ee6fec4637..b235baa6580 100644 --- a/Src/Base/AMReX_AsyncOut.cpp +++ b/Src/Base/AMReX_AsyncOut.cpp @@ -62,7 +62,7 @@ void Finalize () } #ifdef AMREX_USE_MPI - if (s_comm != MPI_COMM_NULL) MPI_Comm_free(&s_comm); + if (s_comm != MPI_COMM_NULL) { MPI_Comm_free(&s_comm); } s_comm = MPI_COMM_NULL; #endif } diff --git a/Src/Base/AMReX_BCUtil.cpp b/Src/Base/AMReX_BCUtil.cpp index ff17bd4e244..c0a645ba05c 100644 --- a/Src/Base/AMReX_BCUtil.cpp +++ b/Src/Base/AMReX_BCUtil.cpp @@ -35,8 +35,8 @@ struct dummy_gpu_fill_extdir void FillDomainBoundary (MultiFab& phi, const Geometry& geom, const Vector& bc) { - if (geom.isAllPeriodic()) return; - if (phi.nGrow() == 0) return; + if (geom.isAllPeriodic()) { return; } + if (phi.nGrow() == 0) { return; } AMREX_ALWAYS_ASSERT(phi.ixType().cellCentered()); diff --git a/Src/Base/AMReX_BaseFab.H b/Src/Base/AMReX_BaseFab.H index 960f1078e18..99132038397 100644 --- a/Src/Base/AMReX_BaseFab.H +++ b/Src/Base/AMReX_BaseFab.H @@ -1769,8 +1769,9 @@ BaseFab::getVal (T* data, AMREX_ASSERT(!(this->dptr == nullptr)); AMREX_ASSERT(n >= 0 && n + numcomp <= this->nvar); - for (int k = 0; k < numcomp; k++) + for (int k = 0; k < numcomp; k++) { data[k] = this->dptr[loc+(n+k)*sz]; + } } template @@ -1895,7 +1896,7 @@ BaseFab::define () AMREX_ASSERT(this->dptr == nullptr); AMREX_ASSERT(this->domain.numPts() > 0); AMREX_ASSERT(this->nvar >= 0); - if (this->nvar == 0) return; + if (this->nvar == 0) { return; } AMREX_ASSERT(std::numeric_limits::max()/this->nvar > this->domain.numPts()); this->truesize = this->nvar*this->domain.numPts(); @@ -1923,7 +1924,7 @@ template BaseFab::BaseFab (const Box& bx, int n, bool alloc, bool shared, Arena* ar) : DataAllocator{ar}, domain(bx), nvar(n), shared_memory(shared) { - if (!this->shared_memory && alloc) define(); + if (!this->shared_memory && alloc) { define(); } } template @@ -2054,16 +2055,18 @@ BaseFab::resize (const Box& b, int n, Arena* ar) } else if (this->dptr == nullptr || !this->ptr_owner) { - if (this->shared_memory) + if (this->shared_memory) { amrex::Abort("BaseFab::resize: BaseFab in shared memory cannot increase size"); + } this->dptr = nullptr; define(); } else if (this->nvar*this->domain.numPts() > this->truesize) { - if (this->shared_memory) + if (this->shared_memory) { amrex::Abort("BaseFab::resize: BaseFab in shared memory cannot increase size"); + } clear(); @@ -2590,7 +2593,7 @@ BaseFab::indexFromValue (Box const& subbox, int comp, T const& value) const n { AMREX_LOOP_3D(subbox, i, j, k, { - if (a(i,j,k) == value) return IntVect(AMREX_D_DECL(i,j,k)); + if (a(i,j,k) == value) { return IntVect(AMREX_D_DECL(i,j,k)); } }); return IntVect::TheMinVector(); } diff --git a/Src/Base/AMReX_BlockMutex.cpp b/Src/Base/AMReX_BlockMutex.cpp index d07d89fd76a..8ce63a15133 100644 --- 
a/Src/Base/AMReX_BlockMutex.cpp +++ b/Src/Base/AMReX_BlockMutex.cpp @@ -13,7 +13,7 @@ void BlockMutex::init_states (state_t* state, int N) noexcept { [=] AMREX_GPU_DEVICE () noexcept { int i = threadIdx.x + blockIdx.x*blockDim.x; - if (i < N) state[i] = FreeState(); + if (i < N) { state[i] = FreeState(); } }); #endif } diff --git a/Src/Base/AMReX_Box.H b/Src/Base/AMReX_Box.H index 025c4b90c45..0a32d637d4f 100644 --- a/Src/Base/AMReX_Box.H +++ b/Src/Base/AMReX_Box.H @@ -636,10 +636,11 @@ public: AMREX_GPU_HOST_DEVICE Box& grow (Orientation face, int n_cell = 1) noexcept { int idir = face.coordDir(); - if (face.isLow()) + if (face.isLow()) { smallend.shift(idir, -n_cell); - else + } else { bigend.shift(idir,n_cell); + } return *this; } @@ -800,9 +801,11 @@ Box::coarsen (const IntVect& ref_ratio) noexcept IntVect off(0); for (int dir = 0; dir < AMREX_SPACEDIM; dir++) { - if (btype[dir]) - if (bigend[dir]%ref_ratio[dir]) + if (btype[dir]) { + if (bigend[dir]%ref_ratio[dir]) { off.setVal(dir,1); + } + } } bigend.coarsen(ref_ratio); bigend += off; diff --git a/Src/Base/AMReX_Box.cpp b/Src/Base/AMReX_Box.cpp index 0b2560749b5..b0db9caa994 100644 --- a/Src/Base/AMReX_Box.cpp +++ b/Src/Base/AMReX_Box.cpp @@ -24,8 +24,9 @@ operator<< (std::ostream& os, << b.type() << ')'; - if (os.fail()) + if (os.fail()) { amrex::Error("operator<<(ostream&,Box&) failed"); + } return os; } @@ -77,8 +78,9 @@ operator>> (std::istream& is, b = Box(lo,hi,typ); - if (is.fail()) + if (is.fail()) { amrex::Error("operator>>(istream&,Box&) failed"); + } return is; } @@ -88,7 +90,7 @@ BoxCommHelper::BoxCommHelper (const Box& bx, int* p_) { if (p == nullptr) { v.resize(3*AMREX_SPACEDIM); - p = &v[0]; + p = v.data(); } AMREX_D_EXPR(p[0] = bx.smallend[0], @@ -123,7 +125,7 @@ AllGatherBoxes (Vector& bxs, int n_extra_reserve) count_tot += countvec[i]; } - if (count_tot == 0) return; + if (count_tot == 0) { return; } if (count_tot > static_cast(std::numeric_limits::max())) { amrex::Abort("AllGatherBoxes: too many boxes"); @@ -158,7 +160,7 @@ AllGatherBoxes (Vector& bxs, int n_extra_reserve) MPI_Bcast(&count_tot, 1, MPI_INT, root, comm); - if (count_tot == 0) return; + if (count_tot == 0) { return; } if (count_tot > static_cast(std::numeric_limits::max())) { amrex::Abort("AllGatherBoxes: too many boxes"); diff --git a/Src/Base/AMReX_BoxArray.cpp b/Src/Base/AMReX_BoxArray.cpp index 8dfbd2b6379..9413f5ae5c1 100644 --- a/Src/Base/AMReX_BoxArray.cpp +++ b/Src/Base/AMReX_BoxArray.cpp @@ -496,8 +496,9 @@ BoxArray::writeOn (std::ostream& os) const os << ')'; - if (os.fail()) + if (os.fail()) { amrex::Error("BoxArray::writeOn(ostream&) failed"); + } return os; } @@ -518,9 +519,9 @@ BoxArray::operator!= (const BoxArray& rhs) const noexcept bool BoxArray::operator== (const Vector& bv) const noexcept { - if (size() != bv.size()) return false; + if (size() != bv.size()) { return false; } for (int i = 0; i < static_cast(size()); ++i) { - if (this->operator[](i) != bv[i]) return false; + if (this->operator[](i) != bv[i]) { return false; } } return true; } @@ -601,11 +602,11 @@ bool BoxArray::coarsenable (const IntVect& refinement_ratio, const IntVect& min_width) const { const Long sz = size(); - if(size() == 0) return false; + if(size() == 0) { return false; } const Box& first = (*this)[0]; bool res = first.coarsenable(refinement_ratio,min_width); - if (res == false) return false; + if (res == false) { return false; } auto const& bxs = this->m_ref->m_abox; if (m_bat.is_null()) { @@ -873,17 +874,17 @@ BoxArray::ok () const auto const& bxs = 
this->m_ref->m_abox; if (m_bat.is_null()) { for (int i = 0; i < N; ++i) { - if (! bxs[i].ok()) return false; + if (! bxs[i].ok()) { return false; } } } else if (m_bat.is_simple()) { IndexType t = ixType(); IntVect cr = crseRatio(); for (int i = 0; i < N; ++i) { - if (! amrex::convert(amrex::coarsen(bxs[i],cr),t).ok()) return false; + if (! amrex::convert(amrex::coarsen(bxs[i],cr),t).ok()) { return false; } } } else { for (int i = 0; i < N; ++i) { - if (! m_bat.m_op.m_bndryReg(bxs[i]).ok()) return false; + if (! m_bat.m_op.m_bndryReg(bxs[i]).ok()) { return false; } } } } @@ -900,19 +901,19 @@ BoxArray::isDisjoint () const if (m_bat.is_null()) { for (int i = 0; i < N; ++i) { intersections(bxs[i],isects); - if ( isects.size() > 1 ) return false; + if ( isects.size() > 1 ) { return false; } } } else if (m_bat.is_simple()) { IndexType t = ixType(); IntVect cr = crseRatio(); for (int i = 0; i < N; ++i) { intersections(amrex::convert(amrex::coarsen(bxs[i],cr),t), isects); - if ( isects.size() > 1 ) return false; + if ( isects.size() > 1 ) { return false; } } } else { for (int i = 0; i < N; ++i) { intersections(m_bat.m_op.m_bndryReg(bxs[i]), isects); - if ( isects.size() > 1 ) return false; + if ( isects.size() > 1 ) { return false; } } } @@ -996,9 +997,9 @@ BoxArray::contains (const Box& b, bool assume_disjoint_ba, const IntVect& ng) co bool BoxArray::contains (const BoxArray& ba, bool assume_disjoint_ba, const IntVect& ng) const { - if (size() == 0) return false; + if (size() == 0) { return false; } - if (!minimalBox().grow(ng).contains(ba.minimalBox())) return false; + if (!minimalBox().grow(ng).contains(ba.minimalBox())) { return false; } for (int i = 0, N = static_cast(ba.size()); i < N; ++i) { if (!contains(ba[i],assume_disjoint_ba, ng)) { @@ -1200,7 +1201,7 @@ BoxArray::intersections (const Box& bx, Box cbx(sm,bg); cbx.normalize(); - if (!cbx.intersects(m_ref->bbox)) return; + if (!cbx.intersects(m_ref->bbox)) { return; } auto TheEnd = BoxHashMap.cend(); @@ -1221,7 +1222,7 @@ BoxArray::intersections (const Box& bx, if (isect.ok()) { isects.emplace_back(index,isect); - if (first_only) return; + if (first_only) { return; } } } } else if (m_bat.is_simple()) { @@ -1235,7 +1236,7 @@ BoxArray::intersections (const Box& bx, if (isect.ok()) { isects.emplace_back(index,isect); - if (first_only) return; + if (first_only) { return; } } } } else { @@ -1247,7 +1248,7 @@ BoxArray::intersections (const Box& bx, if (isect.ok()) { isects.emplace_back(index,isect); - if (first_only) return; + if (first_only) { return; } } } } @@ -1271,7 +1272,7 @@ BoxArray::complementIn (BoxList& bl, const Box& bx) const bl.set(bx.ixType()); bl.push_back(bx); - if (empty()) return; + if (empty()) { return; } BARef::HashType& BoxHashMap = getHashMap(); @@ -1293,7 +1294,7 @@ BoxArray::complementIn (BoxList& bl, const Box& bx) const Box cbx(sm,bg); cbx.normalize(); - if (!cbx.intersects(m_ref->bbox)) return; + if (!cbx.intersects(m_ref->bbox)) { return; } auto TheEnd = BoxHashMap.cend(); @@ -1408,7 +1409,7 @@ BoxArray::removeOverlap (bool simplify) for (auto const& is: isects) { - if (is.first == i) continue; + if (is.first == i) { continue; } Box& bx = m_ref->m_abox[is.first]; @@ -1481,7 +1482,7 @@ BoxArray::getHashMap () const { BARef::HashType& BoxHashMap = m_ref->hash; - if (m_ref->HasHashMap()) return BoxHashMap; + if (m_ref->HasHashMap()) { return BoxHashMap; } #ifdef AMREX_USE_OMP #pragma omp critical(intersections_lock) @@ -1589,13 +1590,15 @@ operator<< (std::ostream& os, << 0 << ")\n "; - for (int i = 0, N = 
static_cast(ba.size()); i < N; ++i) + for (int i = 0, N = static_cast(ba.size()); i < N; ++i) { os << ba[i] << ' '; + } os << ")\n"; - if (os.fail()) + if (os.fail()) { amrex::Error("operator<<(ostream& os,const BoxArray&) failed"); + } return os; } @@ -1669,7 +1672,7 @@ intersect (const BoxArray& ba, BoxArray intersect (const BoxArray& lhs, const BoxArray& rhs) { - if (lhs.empty() || rhs.empty()) return BoxArray(); + if (lhs.empty() || rhs.empty()) { return BoxArray(); } BoxList bl(lhs[0].ixType()); for (int i = 0, Nl = static_cast(lhs.size()); i < Nl; ++i) { diff --git a/Src/Base/AMReX_BoxDomain.cpp b/Src/Base/AMReX_BoxDomain.cpp index 892c847ca27..4213f629544 100644 --- a/Src/Base/AMReX_BoxDomain.cpp +++ b/Src/Base/AMReX_BoxDomain.cpp @@ -231,8 +231,9 @@ operator<< (std::ostream& os, const BoxDomain& bd) { os << "(BoxDomain " << bd.boxList() << ")" << std::flush; - if (os.fail()) + if (os.fail()) { amrex::Error("operator<<(ostream&,BoxDomain&) failed"); + } return os; } diff --git a/Src/Base/AMReX_BoxIterator.H b/Src/Base/AMReX_BoxIterator.H index c11d55c0ce4..bf05a9e817f 100644 --- a/Src/Base/AMReX_BoxIterator.H +++ b/Src/Base/AMReX_BoxIterator.H @@ -111,7 +111,7 @@ namespace amrex inline void BoxIterator::begin () noexcept { - if (m_boxLo <= m_boxHi) m_current = m_boxLo; + if (m_boxLo <= m_boxHi) { m_current = m_boxLo; } } inline diff --git a/Src/Base/AMReX_BoxList.cpp b/Src/Base/AMReX_BoxList.cpp index 584b707b4d8..4459b24dba9 100644 --- a/Src/Base/AMReX_BoxList.cpp +++ b/Src/Base/AMReX_BoxList.cpp @@ -251,9 +251,9 @@ BoxList::isDisjoint () const } bool -BoxList::contains (const BoxList& bl) const +BoxList::contains (const BoxList& bl) const { - if (isEmpty() || bl.isEmpty()) return false; + if (isEmpty() || bl.isEmpty()) { return false; } BL_ASSERT(ixType() == bl.ixType()); diff --git a/Src/Base/AMReX_DistributionMapping.cpp b/Src/Base/AMReX_DistributionMapping.cpp index 7ee39e310d7..ee0c32c26fa 100644 --- a/Src/Base/AMReX_DistributionMapping.cpp +++ b/Src/Base/AMReX_DistributionMapping.cpp @@ -106,7 +106,7 @@ DistributionMapping::operator!= (const DistributionMapping& rhs) const noexcept void DistributionMapping::Initialize () { - if (initialized) return; + if (initialized) { return; } // // Set defaults here!!! 
// @@ -856,13 +856,13 @@ DistributionMapping::KnapSackProcessorMap (const std::vector& wgts, { RoundRobinProcessorMap(static_cast(wgts.size()),nprocs, sort); - if (efficiency) *efficiency = 1; + if (efficiency) { *efficiency = 1; } } else { Real eff = 0; KnapSackDoIt(wgts, nprocs, eff, do_full_knapsack, nmax, sort); - if (efficiency) *efficiency = eff; + if (efficiency) { *efficiency = eff; } } } @@ -1335,7 +1335,7 @@ DistributionMapping::SFCProcessorMapDoIt (const BoxArray& boxes, LIpairV.emplace_back(wgt,i); } - if (sort) Sort(LIpairV, true); + if (sort) { Sort(LIpairV, true); } if (flag_verbose_mapper) { for (const auto &p : LIpairV) { @@ -1441,11 +1441,11 @@ DistributionMapping::SFCProcessorMapDoIt (const BoxArray& boxes, for (int i = 0; i < nteams; ++i) { const Long W = LIpairV[i].first; - if (W > max_wgt) max_wgt = W; + if (W > max_wgt) { max_wgt = W; } sum_wgt += W; } Real efficiency = static_cast(sum_wgt)/static_cast(nteams*max_wgt); - if (eff) *eff = efficiency; + if (eff) { *eff = efficiency; } if (verbose) { @@ -1871,7 +1871,7 @@ DistributionMapping::makeSFC (const LayoutData& rcost_local, } // Broadcast vector from which to construct new distribution mapping - ParallelDescriptor::Bcast(&pmap[0], pmap.size(), root); + ParallelDescriptor::Bcast(pmap.data(), pmap.size(), root); if (ParallelDescriptor::MyProc() != root) { r = DistributionMapping(pmap); @@ -1968,8 +1968,9 @@ operator<< (std::ostream& os, os << ')' << '\n'; - if (os.fail()) + if (os.fail()) { amrex::Error("operator<<(ostream &, DistributionMapping &) failed"); + } return os; } diff --git a/Src/Base/AMReX_FACopyDescriptor.H b/Src/Base/AMReX_FACopyDescriptor.H index 263fb9ece48..5194ca7f876 100644 --- a/Src/Base/AMReX_FACopyDescriptor.H +++ b/Src/Base/AMReX_FACopyDescriptor.H @@ -94,8 +94,9 @@ FabCopyDescriptor::FabCopyDescriptor () template FabCopyDescriptor::~FabCopyDescriptor () { - if (cacheDataAllocated) + if (cacheDataAllocated) { delete localFabSource; + } } /** @@ -435,7 +436,7 @@ FabArrayCopyDescriptor::CollectData () { dataAvailable = true; - if (ParallelDescriptor::NProcs() == 1) return; + if (ParallelDescriptor::NProcs() == 1) { return; } #ifdef BL_USE_MPI using value_type = typename FAB::value_type; @@ -524,9 +525,11 @@ FabArrayCopyDescriptor::CollectData () } BL_ASSERT(SndsArray[MyProc] == 0); - for (int i = 0; i < NProcs; i++) - if (SndsArray[i] > 0) + for (int i = 0; i < NProcs; i++) { + if (SndsArray[i] > 0) { Snds[i] = SndsArray[i]; + } + } } // There are two rounds of send and recv. 
@@ -538,7 +541,7 @@ FabArrayCopyDescriptor::CollectData () const auto N_snds = static_cast(Snds.size()); const auto N_rcvs = static_cast(Rcvs.size()); - if ( N_snds == 0 && N_rcvs == 0 ) return; + if ( N_snds == 0 && N_rcvs == 0 ) { return; } const int Nints = 4 + 3*AMREX_SPACEDIM; // # of ints in a meta-data diff --git a/Src/Base/AMReX_FArrayBox.cpp b/Src/Base/AMReX_FArrayBox.cpp index 2074ef8d663..6efd90f97e9 100644 --- a/Src/Base/AMReX_FArrayBox.cpp +++ b/Src/Base/AMReX_FArrayBox.cpp @@ -123,7 +123,7 @@ FArrayBox::FArrayBox (const Box& b, int ncomp, Arena* ar) FArrayBox::FArrayBox (const Box& b, int n, bool alloc, bool shared, Arena* ar) : BaseFab(b,n,alloc,shared,ar) { - if (alloc) initVal(); + if (alloc) { initVal(); } } FArrayBox::FArrayBox (const FArrayBox& rhs, MakeType make_type, int scomp, int ncomp) @@ -180,7 +180,7 @@ FArrayBox::initVal () noexcept p[i] = x; }); #ifdef AMREX_USE_GPU - if (runon == RunOn::Gpu) Gpu::streamSynchronize(); + if (runon == RunOn::Gpu) { Gpu::streamSynchronize(); } #else amrex::ignore_unused(runon); #endif @@ -346,7 +346,7 @@ FArrayBox::get_initval () void FArrayBox::Initialize () { - if (initialized) return; + if (initialized) { return; } initialized = true; BL_ASSERT(fabio == nullptr); @@ -419,14 +419,13 @@ FArrayBox::Initialize () if (pp.query("ordering", ord)) { - if (ord == "NORMAL_ORDER") + if (ord == "NORMAL_ORDER") { FArrayBox::setOrdering(FABio::FAB_NORMAL_ORDER); - else if (ord == "REVERSE_ORDER") + } else if (ord == "REVERSE_ORDER") { FArrayBox::setOrdering(FABio::FAB_REVERSE_ORDER); - else if (ord == "REVERSE_ORDER_2") + } else if (ord == "REVERSE_ORDER_2") { FArrayBox::setOrdering(FABio::FAB_REVERSE_ORDER_2); - else - { + } else { amrex::ErrorStream() << "FArrayBox::init(): Bad FABio::Ordering = " << ord; amrex::Abort(); } @@ -472,11 +471,11 @@ FABio::read_header (std::istream& is, char c; is >> c; - if(c != 'F') amrex::Error("FABio::read_header(): expected \'F\'"); + if(c != 'F') { amrex::Error("FABio::read_header(): expected \'F\'"); } is >> c; - if(c != 'A') amrex::Error("FABio::read_header(): expected \'A\'"); + if(c != 'A') { amrex::Error("FABio::read_header(): expected \'A\'"); } is >> c; - if(c != 'B') amrex::Error("FABio::read_header(): expected \'B\'"); + if(c != 'B') { amrex::Error("FABio::read_header(): expected \'B\'"); } is >> c; if(c == ':') { // ---- The "old" FAB format. @@ -551,11 +550,11 @@ FABio::read_header (std::istream& is, char c; is >> c; - if(c != 'F') amrex::Error("FABio::read_header(): expected \'F\'"); + if(c != 'F') { amrex::Error("FABio::read_header(): expected \'F\'"); } is >> c; - if(c != 'A') amrex::Error("FABio::read_header(): expected \'A\'"); + if(c != 'A') { amrex::Error("FABio::read_header(): expected \'A\'"); } is >> c; - if(c != 'B') amrex::Error("FABio::read_header(): expected \'B\'"); + if(c != 'B') { amrex::Error("FABio::read_header(): expected \'B\'"); } is >> c; if(c == ':') { // ---- The "old" FAB format. 
diff --git a/Src/Base/AMReX_FBI.H b/Src/Base/AMReX_FBI.H index 4c7ba5ec4f0..e20f8bb4b79 100644 --- a/Src/Base/AMReX_FBI.H +++ b/Src/Base/AMReX_FBI.H @@ -212,7 +212,7 @@ FabArray::FB_local_copy_cpu (const FB& TheFB, int scomp, int ncomp) { auto const& LocTags = *(TheFB.m_LocTags); auto N_locs = static_cast(LocTags.size()); - if (N_locs == 0) return; + if (N_locs == 0) { return; } bool is_thread_safe = TheFB.m_threadsafe_loc; if (is_thread_safe) { @@ -273,7 +273,7 @@ FabArray::FB_local_copy_gpu (const FB& TheFB, int scomp, int ncomp) { auto const& LocTags = *(TheFB.m_LocTags); int N_locs = LocTags.size(); - if (N_locs == 0) return; + if (N_locs == 0) { return; } bool is_thread_safe = TheFB.m_threadsafe_loc; using TagType = Array4CopyTag; @@ -334,7 +334,7 @@ FabArray::CMD_local_setVal_gpu (typename FabArray::value_type x, { auto const& LocTags = *(thecmd.m_LocTags); int N_locs = LocTags.size(); - if (N_locs == 0) return; + if (N_locs == 0) { return; } bool is_thread_safe = thecmd.m_threadsafe_loc; using TagType = Array4BoxTag; @@ -374,7 +374,7 @@ FabArray::CMD_remote_setVal_gpu (typename FabArray::value_type x, } } - if (rcv_setval_tags.empty()) return; + if (rcv_setval_tags.empty()) { return; } AMREX_ALWAYS_ASSERT(amrex::IsStoreAtomic::value || is_thread_safe); @@ -561,7 +561,7 @@ FabArray::FB_pack_send_buffer_cuda_graph (const FB& TheFB, int scomp, int n Vector::CopyComTagsContainer const*> const& send_cctc) { const int N_snds = send_data.size(); - if (N_snds == 0) return; + if (N_snds == 0) { return; } if ( !(TheFB.m_copyToBuffer.ready()) ) { @@ -648,7 +648,7 @@ FabArray::FB_unpack_recv_buffer_cuda_graph (const FB& TheFB, int dcomp, int bool /*is_thread_safe*/) { const int N_rcvs = recv_cctc.size(); - if (N_rcvs == 0) return; + if (N_rcvs == 0) { return; } int launches = 0; LayoutData > recv_copy_tags(boxArray(),DistributionMap()); @@ -735,7 +735,7 @@ FabArray::pack_send_buffer_gpu (FabArray const& src, int scomp, int nc amrex::ignore_unused(send_size); const int N_snds = send_data.size(); - if (N_snds == 0) return; + if (N_snds == 0) { return; } char* pbuffer = send_data[0]; std::size_t szbuffer = 0; @@ -796,7 +796,7 @@ FabArray::unpack_recv_buffer_gpu (FabArray& dst, int dcomp, int ncomp, amrex::ignore_unused(recv_size); const int N_rcvs = recv_cctc.size(); - if (N_rcvs == 0) return; + if (N_rcvs == 0) { return; } char* pbuffer = recv_data[0]; #if 0 @@ -905,7 +905,7 @@ FabArray::pack_send_buffer_cpu (FabArray const& src, int scomp, int nc amrex::ignore_unused(send_size); auto const N_snds = static_cast(send_data.size()); - if (N_snds == 0) return; + if (N_snds == 0) { return; } #ifdef AMREX_USE_OMP #pragma omp parallel for @@ -945,7 +945,7 @@ FabArray::unpack_recv_buffer_cpu (FabArray& dst, int dcomp, int ncomp, amrex::ignore_unused(recv_size); auto const N_rcvs = static_cast(recv_cctc.size()); - if (N_rcvs == 0) return; + if (N_rcvs == 0) { return; } if (is_thread_safe) { diff --git a/Src/Base/AMReX_FabArray.H b/Src/Base/AMReX_FabArray.H index 2df4cf53ba2..59bb6b73807 100644 --- a/Src/Base/AMReX_FabArray.H +++ b/Src/Base/AMReX_FabArray.H @@ -1255,7 +1255,7 @@ protected: ~ShMem () { // NOLINT #if defined(BL_USE_MPI3) - if (win != MPI_WIN_NULL) MPI_Win_free(&win); + if (win != MPI_WIN_NULL) { MPI_Win_free(&win); } #endif #ifdef BL_USE_TEAM if (alloc) { @@ -1937,7 +1937,7 @@ template bool FabArray::ok () const { - if (!define_function_called) return false; + if (!define_function_called) { return false; } int isok = 1; @@ -3242,7 +3242,7 @@ FabArray::SumBoundary_nowait (int scomp, int 
ncomp, IntVect const& src_ngho { BL_PROFILE("FabArray::SumBoundary_nowait()"); - if ( n_grow == IntVect::TheZeroVector() && boxArray().ixType().cellCentered()) return; + if ( n_grow == IntVect::TheZeroVector() && boxArray().ixType().cellCentered()) { return; } AMREX_ASSERT(src_nghost <= n_grow); @@ -3262,7 +3262,7 @@ FabArray::SumBoundary_finish () BL_PROFILE("FabArray::SumBoundary_finish()"); // If pcd doesn't exist, ParallelCopy was all local and operation was fully completed in "SumBoundary_nowait". - if ( (n_grow == IntVect::TheZeroVector() && boxArray().ixType().cellCentered()) || !(this->pcd) ) return; + if ( (n_grow == IntVect::TheZeroVector() && boxArray().ixType().cellCentered()) || !(this->pcd) ) { return; } auto* tmp = const_cast*> (this->pcd->src); this->ParallelCopy_finish(); diff --git a/Src/Base/AMReX_FabArrayBase.H b/Src/Base/AMReX_FabArrayBase.H index a4c1407d36c..29d3d63b29e 100644 --- a/Src/Base/AMReX_FabArrayBase.H +++ b/Src/Base/AMReX_FabArrayBase.H @@ -409,7 +409,7 @@ public: RegionTag (RegionTag && rhs) noexcept : tagged(rhs.tagged) { rhs.tagged = false; } RegionTag& operator= (RegionTag const&) = delete; RegionTag& operator= (RegionTag &&) = delete; - ~RegionTag () { if (tagged) popRegionTag(); } + ~RegionTag () { if (tagged) { popRegionTag(); } } private: bool tagged = false; }; diff --git a/Src/Base/AMReX_FabArrayBase.cpp b/Src/Base/AMReX_FabArrayBase.cpp index e7b8e1977ae..8dd8275f66a 100644 --- a/Src/Base/AMReX_FabArrayBase.cpp +++ b/Src/Base/AMReX_FabArrayBase.cpp @@ -94,7 +94,7 @@ namespace void FabArrayBase::Initialize () { - if (initialized) return; + if (initialized) { return; } initialized = true; // @@ -108,12 +108,12 @@ FabArrayBase::Initialize () if (pp.queryarr("mfiter_tile_size", tilesize, 0, AMREX_SPACEDIM)) { - for (int i=0; i(sizeof(FabArrayBase::FB)); - if (m_LocTags) + if (m_LocTags) { cnt += amrex::bytesOf(*m_LocTags); + } - if (m_SndTags) + if (m_SndTags) { cnt += FabArrayBase::bytesOfMapOfCopyComTagContainers(*m_SndTags); + } - if (m_RcvTags) + if (m_RcvTags) { cnt += FabArrayBase::bytesOfMapOfCopyComTagContainers(*m_RcvTags); + } return cnt; } @@ -515,8 +521,9 @@ FabArrayBase::flushCPC (bool no_assertion) const std::pair o_er_it = m_TheCPCache.equal_range(otherkey); for (auto oit = o_er_it.first; oit != o_er_it.second; ++oit) { - if (it->second == oit->second) + if (it->second == oit->second) { others.push_back(oit); + } } } @@ -898,7 +905,7 @@ FabArrayBase::FB::define_epo (const FabArrayBase& fa) Box bxsnd = amrex::grow(ba[ksnd],ng); bxsnd &= pdomain; // source must be inside the periodic domain. 
- if (!bxsnd.ok()) continue; + if (!bxsnd.ok()) { continue; } for (auto const& pit : pshifts) { @@ -953,7 +960,7 @@ FabArrayBase::FB::define_epo (const FabArrayBase& fa) const Box& vbx = ba[krcv]; const Box& bxrcv = amrex::grow(vbx, ng); - if (pdomain.contains(bxrcv)) continue; + if (pdomain.contains(bxrcv)) { continue; } for (auto const& pit : pshifts) { @@ -1837,7 +1844,7 @@ FabArrayBase::FPinfo::FPinfo (const FabArrayBase& srcfa, amrex::AllGatherBoxes(bl.data()); } - if (bl.isEmpty()) return; + if (bl.isEmpty()) { return; } Long ncells_total = 0L; Long ncells_max = 0L; @@ -2010,8 +2017,9 @@ FabArrayBase::TheFPinfo (const FabArrayBase& srcfa, m_FPinfo_stats.recordUse(); m_TheFillPatchCache.insert(er_it.second, FPinfoCache::value_type(dstkey,new_fpc)); - if (srckey != dstkey) + if (srckey != dstkey) { m_TheFillPatchCache.insert( FPinfoCache::value_type(srckey,new_fpc)); + } return *new_fpc; } @@ -2040,8 +2048,9 @@ FabArrayBase::flushFPinfo (bool no_assertion) const for (auto oit = o_er_it.first; oit != o_er_it.second; ++oit) { - if (it->second == oit->second) + if (it->second == oit->second) { others.push_back(oit); + } } } diff --git a/Src/Base/AMReX_FabArrayCommI.H b/Src/Base/AMReX_FabArrayCommI.H index d79df73c7b6..ea877266f9a 100644 --- a/Src/Base/AMReX_FabArrayCommI.H +++ b/Src/Base/AMReX_FabArrayCommI.H @@ -34,7 +34,7 @@ FabArray::FBEP_nowait (int scomp, int ncomp, const IntVect& nghost, // There can only be local work to do. // int N_locs = (*TheFB.m_LocTags).size(); - if (N_locs == 0) return; + if (N_locs == 0) { return; } #ifdef AMREX_USE_GPU if (Gpu::inLaunchRegion()) { @@ -647,7 +647,7 @@ FabArray::PrepareSendBuffers (const MapOfCopyComTagContainers& SndTags, send_reqs.clear(); send_cctc.clear(); const auto N_snds = SndTags.size(); - if (N_snds == 0) return; + if (N_snds == 0) { return; } send_data.reserve(N_snds); send_size.reserve(N_snds); send_rank.reserve(N_snds); @@ -843,7 +843,7 @@ template void fbv_copy (Vector const& tags) { const int N = tags.size(); - if (N == 0) return; + if (N == 0) { return; } #ifdef AMREX_USE_GPU if (Gpu::inLaunchRegion()) { ParallelFor(tags, 1, @@ -941,7 +941,7 @@ FillBoundary (Vector const& mf, Vector const& scomp, int SeqNum = ParallelDescriptor::SeqNum(); MPI_Comm comm = ParallelContext::CommunicatorSub(); - if (N_locs == 0 && N_rcvs == 0 && N_snds == 0) return; // No work to do + if (N_locs == 0 && N_rcvs == 0 && N_snds == 0) { return; } // No work to do char* the_recv_data = nullptr; Vector recv_from; diff --git a/Src/Base/AMReX_FabArrayUtility.H b/Src/Base/AMReX_FabArrayUtility.H index 04c4fc47b28..f4aa73f2791 100644 --- a/Src/Base/AMReX_FabArrayUtility.H +++ b/Src/Base/AMReX_FabArrayUtility.H @@ -1313,7 +1313,7 @@ OverrideSync_nowait (FabArray & fa, FabArray const& msk, const Period BL_PROFILE("OverrideSync_nowait()"); AMREX_ASSERT_WITH_MESSAGE(!fa.os_temp, "OverrideSync_nowait() called when already in progress."); - if (fa.ixType().cellCentered()) return; + if (fa.ixType().cellCentered()) { return; } const int ncomp = fa.nComp(); @@ -1324,7 +1324,7 @@ OverrideSync_nowait (FabArray & fa, FabArray const& msk, const Period ParallelFor(fa, IntVect(0), ncomp, [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept { - if (!ifabarr[box_no](i,j,k)) fabarr[box_no](i,j,k,n) = 0; + if (!ifabarr[box_no](i,j,k)) { fabarr[box_no](i,j,k,n) = 0; } }); if (!Gpu::inNoSyncRegion()) { Gpu::streamSynchronize(); @@ -1342,7 +1342,7 @@ OverrideSync_nowait (FabArray & fa, FabArray const& msk, const Period auto const ifab = msk.array(mfi); 
AMREX_HOST_DEVICE_PARALLEL_FOR_4D( bx, ncomp, i, j, k, n, { - if (!ifab(i,j,k)) fab(i,j,k,n) = 0; + if (!ifab(i,j,k)) { fab(i,j,k,n) = 0; } }); } } @@ -1359,7 +1359,7 @@ OverrideSync_finish (FabArray & fa) { BL_PROFILE("OverrideSync_finish()"); - if (fa.ixType().cellCentered()) return; + if (fa.ixType().cellCentered()) { return; } fa.os_temp->ParallelCopy_finish(); amrex::Copy(fa, *(fa.os_temp), 0, 0, fa.nComp(), 0); @@ -1485,7 +1485,9 @@ indexFromValue (FabArray const& mf, int comp, IntVect const& nghost, auto const& fab = mf.const_array(mfi); AMREX_LOOP_3D(bx, i, j, k, { - if (fab(i,j,k,comp) == value) priv_loc = IntVect(AMREX_D_DECL(i,j,k)); + if (fab(i,j,k,comp) == value) { + priv_loc = IntVect(AMREX_D_DECL(i,j,k)); + } }); } @@ -1504,7 +1506,7 @@ indexFromValue (FabArray const& mf, int comp, IntVect const& nghost, f = true; } - if (old == false) loc = priv_loc; + if (old == false) { loc = priv_loc; } } } } diff --git a/Src/Base/AMReX_FabConv.cpp b/Src/Base/AMReX_FabConv.cpp index 6e015c083af..522216aadd1 100644 --- a/Src/Base/AMReX_FabConv.cpp +++ b/Src/Base/AMReX_FabConv.cpp @@ -68,20 +68,23 @@ operator>> (std::istream& is, { char c; is >> c; - if (c != '(') + if (c != '(') { amrex::Error("operator>>(istream&,RealDescriptor&): expected a \'(\'"); + } int numbytes; is >> numbytes; id.numbytes = numbytes; is >> c; - if (c != ',') + if (c != ',') { amrex::Error("operator>>(istream&,RealDescriptor&): expected a \',\'"); + } int ord; is >> ord; id.ord = (IntDescriptor::Ordering) ord; is >> c; - if (c != ')') + if (c != ')') { amrex::Error("operator>>(istream&,RealDescriptor&): expected a \')\'"); + } return is; } @@ -247,10 +250,9 @@ ONES_COMP_NEG (Long& n, int nb, Long incr) { - if (nb == 8*sizeof(Long)) + if (nb == 8*sizeof(Long)) { n = ~n + incr; - else - { + } else { const Long MSK = (1LL << nb) - 1LL; n = (~n + incr) & MSK; } @@ -272,10 +274,11 @@ _pd_get_bit (char const* base, n -= nbytes; offs = offs % 8; - if (ord == nullptr) + if (ord == nullptr) { base += (n + nbytes); - else + } else { base += (n + (ord[nbytes] - 1)); + } int mask = (1 << (7 - offs)); @@ -315,10 +318,9 @@ _pd_extract_field (char const* in, in += n; unsigned char bpb = 8 - offs; - if (ord == nullptr) + if (ord == nullptr) { ind = offy++; - else - { + } else { if (offy >= nby) { offy -= nby; @@ -331,19 +333,17 @@ _pd_extract_field (char const* in, unsigned char mask = (1 << bpb) - 1; bit_field = ((bit_field << bpb) | (tgt & mask)); nbi -= bpb; - if (nbi < 0) + if (nbi < 0) { bit_field = bit_field >> (-nbi); - else - { + } else { for (; nbi > 0; nbi -= bpb) { // // ind = (ord == nullptr) ? 
offy++ : (ord[offy++] - 1); // - if (ord == nullptr) + if (ord == nullptr) { ind = offy++; - else - { + } else { if (offy >= nby) { offy -= nby; @@ -427,10 +427,11 @@ _pd_insert_field (Long in_long, if (mi < offs) { dm = BitsMax - (8 - offs); - if (nb == BitsMax) + if (nb == BitsMax) { longmask = ~((1LL << dm) - 1LL); - else + } else { longmask = ((1LL << nb) - 1LL) ^ ((1LL << dm) - 1LL); + } unsigned char fb = ((in_long&longmask)>>dm)&((1LL<<(nb-dm))-1LL); *(out++) |= fb; // NOLINT @@ -499,9 +500,9 @@ _pd_reorder (char* arr, for (int j; nitems > 0; nitems--) { arr--; - for (j = 0; j < nbytes; local[j] = arr[ord[j]], j++); + for (j = 0; j < nbytes; local[j] = arr[ord[j]], j++) {;} arr++; - for (j = 0; j < nbytes; *(arr++) = local[j++]); + for (j = 0; j < nbytes; *(arr++) = local[j++]) {;} } } @@ -529,8 +530,9 @@ permute_real_word_order (void* out, for (; nitems > 0; nitems--, pin += REALSIZE, pout += REALSIZE) { - for (int i = 0; i < REALSIZE; i++) + for (int i = 0; i < REALSIZE; i++) { pout[outord[i]] = pin[inord[i]]; + } } } @@ -673,17 +675,20 @@ PD_fconvert (void* out, { ONES_COMP_NEG(expn, nbi_exp, 1L); } - else + else { expn += (expn < hexpn); + } } - if (expn != 0) + if (expn != 0) { expn += DeltaBias; + } if ((0 <= expn) && (expn < expn_max)) { _pd_insert_field(expn, nbo_exp, lout, bo_exp, l_order, l_bytes); - if (sign) + if (sign) { _pd_set_bit(lout, bo_sign); + } indxin = bi_mant; inrem = int(infor[2]); @@ -722,8 +727,9 @@ PD_fconvert (void* out, // // Do complement for negative ones complement data. // - if (onescmp && sign) + if (onescmp && sign) { ONES_COMP_NEG(mant, nbits, 0L); + } _pd_insert_field(mant, nbits, lout, indxout, l_order, l_bytes); @@ -740,8 +746,9 @@ PD_fconvert (void* out, { _pd_insert_field(expn_max, nbo_exp, lout, bo_exp, l_order, l_bytes); - if (_pd_get_bit(lin, bi_sign, inbytes, inord)) + if (_pd_get_bit(lin, bi_sign, inbytes, inord)) { _pd_set_bit(lout, bo_sign); + } } bi_sign += nbi; bi_exp += nbi; @@ -763,11 +770,14 @@ PD_fconvert (void* out, rout = (unsigned char *) out; for (i = 0L; i < nitems; i++, rout += outbytes) { - for (j = 0; j < outbytes; j++) - if ((j == indxout) ? (rout[j] != mask) : rout[j]) + for (j = 0; j < outbytes; j++) { + if ((j == indxout) ? 
(rout[j] != mask) : rout[j]) { break; - if (j == outbytes) + } + } + if (j == outbytes) { rout[indxout] = 0; + } } } // @@ -888,18 +898,21 @@ operator>> (std::istream& is, { char c; is >> c; - if (c != '(') + if (c != '(') { amrex::Error("operator>>(istream&,RealDescriptor&): expected a \'(\'"); + } Vector fmt; getarray(is, fmt); is >> c; - if (c != ',') + if (c != ',') { amrex::Error("operator>>(istream&,RealDescriptor&): expected a \',\'"); + } Vector ord; getarray(is, ord); is >> c; - if (c != ')') + if (c != ')') { amrex::Error("operator>>(istream&,RealDescriptor&): expected a \')\'"); + } rd = RealDescriptor(fmt.dataPtr(),ord.dataPtr(),static_cast(ord.size())); return is; } diff --git a/Src/Base/AMReX_Geometry.cpp b/Src/Base/AMReX_Geometry.cpp index ac30aba62c7..ace1eefa663 100644 --- a/Src/Base/AMReX_Geometry.cpp +++ b/Src/Base/AMReX_Geometry.cpp @@ -48,7 +48,7 @@ operator>> (std::istream& is, Geometry::Geometry () noexcept { - if (!AMReX::empty()) *this = DefaultGeometry(); + if (!AMReX::empty()) { *this = DefaultGeometry(); } } Geometry::Geometry (const Box& dom, const RealBox* rb, int coord, @@ -111,7 +111,7 @@ Geometry::Setup (const RealBox* rb, int coord, int const* isper) noexcept { Geometry* gg = AMReX::top()->getDefaultGeometry(); - if (gg->ok) return; + if (gg->ok) { return; } AMREX_ASSERT(!OpenMP::in_parallel()); @@ -146,8 +146,9 @@ Geometry::Setup (const RealBox* rb, int coord, int const* isper) noexcept if (read_prob_extent) { - for (int i = 0; i < AMREX_SPACEDIM; i++) + for (int i = 0; i < AMREX_SPACEDIM; i++) { prob_hi[i] = prob_lo[i] + prob_extent[i]; + } } gg->prob_domain.setLo(prob_lo); @@ -395,10 +396,12 @@ Geometry::periodicShift (const Box& target, int ri,rj,rk; for (ri = nist; ri <= niend; ri++) { - if (ri != 0 && !is_periodic[0]) + if (ri != 0 && !is_periodic[0]) { continue; - if (ri != 0 && is_periodic[0]) + } + if (ri != 0 && is_periodic[0]) { locsrc.shift(0,ri*domain.length(0)); + } for (rj = njst; rj <= njend; rj++) { @@ -438,8 +441,9 @@ Geometry::periodicShift (const Box& target, locsrc.shift(2,rk*domain.length(2)); } - if (ri == 0 && rj == 0 && rk == 0) + if (ri == 0 && rj == 0 && rk == 0) { continue; + } // // If losrc intersects target, then add to "out". 
// @@ -467,8 +471,9 @@ Geometry::periodicShift (const Box& target, locsrc.shift(1,-rj*domain.length(1)); } } - if (ri != 0 && is_periodic[0]) + if (ri != 0 && is_periodic[0]) { locsrc.shift(0,-ri*domain.length(0)); + } } } diff --git a/Src/Base/AMReX_GpuAsyncArray.H b/Src/Base/AMReX_GpuAsyncArray.H index 2258057e079..eda18a81dab 100644 --- a/Src/Base/AMReX_GpuAsyncArray.H +++ b/Src/Base/AMReX_GpuAsyncArray.H @@ -31,7 +31,7 @@ public: AsyncArray (T const* h_p, const std::size_t n) { - if (n == 0) return; + if (n == 0) { return; } h_data = static_cast(The_Pinned_Arena()->alloc(n*sizeof(T))); std::memcpy(h_data, h_p, n*sizeof(T)); #ifdef AMREX_USE_GPU @@ -46,7 +46,7 @@ public: template ::value && std::is_trivial::value,int>::type = 0> explicit AsyncArray (const std::size_t n) { - if (n == 0) return; + if (n == 0) { return; } #ifdef AMREX_USE_GPU if (Gpu::inLaunchRegion()) { @@ -120,7 +120,7 @@ public: void copyToHost (T* h_p, std::size_t n) const { - if (n == 0) return; + if (n == 0) { return; } #ifdef AMREX_USE_GPU if (d_data) { diff --git a/Src/Base/AMReX_GpuBuffer.H b/Src/Base/AMReX_GpuBuffer.H index 245a55910be..a52dc04785b 100644 --- a/Src/Base/AMReX_GpuBuffer.H +++ b/Src/Base/AMReX_GpuBuffer.H @@ -20,7 +20,7 @@ public: Buffer (std::initializer_list init) : m_size(init.size()) { - if (m_size == 0) return; + if (m_size == 0) { return; } #ifdef AMREX_USE_GPU h_data = static_cast(The_Pinned_Arena()->alloc(m_size*sizeof(T))); #else @@ -39,7 +39,7 @@ public: Buffer (T const* h_p, const std::size_t n) : m_size(n) { - if (m_size == 0) return; + if (m_size == 0) { return; } #ifdef AMREX_USE_GPU h_data = static_cast(The_Pinned_Arena()->alloc(m_size*sizeof(T))); #else @@ -73,8 +73,8 @@ public: void clear () { #ifdef AMREX_USE_GPU - if (d_data) The_Arena()->free(d_data); - if (h_data) The_Pinned_Arena()->free(h_data); + if (d_data) { The_Arena()->free(d_data); } + if (h_data) { The_Pinned_Arena()->free(h_data); } #else std::free(h_data); #endif diff --git a/Src/Base/AMReX_GpuComplex.H b/Src/Base/AMReX_GpuComplex.H index 5c9deab3995..4cf0d6e69c8 100644 --- a/Src/Base/AMReX_GpuComplex.H +++ b/Src/Base/AMReX_GpuComplex.H @@ -355,8 +355,7 @@ T abs (const GpuComplex& a_z) noexcept T y = a_z.imag(); const T s = amrex::max(std::abs(x), std::abs(y)); - if (s == T()) - return s; + if (s == T()) { return s; } x /= s; y /= s; return s * std::sqrt(x * x + y * y); @@ -414,8 +413,9 @@ template AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex pow (const GpuComplex& a_z, const T& a_y) noexcept { - if (a_z.imag() == T() && a_z.real() > T()) + if (a_z.imag() == T() && a_z.real() > T()) { return std::pow(a_z.real(), a_y); + } GpuComplex t = amrex::log(a_z); return amrex::polar(std::exp(a_y * t.real()), a_y * t.imag()); @@ -432,8 +432,9 @@ namespace detail while (a_n >>= 1) { a_z *= a_z; - if (a_n % 2) + if (a_n % 2) { y *= a_z; + } } return y; diff --git a/Src/Base/AMReX_GpuContainers.H b/Src/Base/AMReX_GpuContainers.H index cb7b97acaf3..05399b2e047 100644 --- a/Src/Base/AMReX_GpuContainers.H +++ b/Src/Base/AMReX_GpuContainers.H @@ -129,7 +129,7 @@ namespace amrex::Gpu { "Can only copy trivially copyable types"); auto size = std::distance(begin, end); - if (size == 0) return; + if (size == 0) { return; } #ifdef AMREX_USE_GPU htod_memcpy(&(*result), &(*begin), size*sizeof(value_type)); #else @@ -166,7 +166,7 @@ namespace amrex::Gpu { "Can only copy trivially copyable types"); auto size = std::distance(begin, end); - if (size == 0) return; + if (size == 0) { return; } #ifdef AMREX_USE_GPU dtoh_memcpy(&(*result), 
&(*begin), size*sizeof(value_type)); #else @@ -203,7 +203,7 @@ namespace amrex::Gpu { "Can only copy trivially copyable types"); auto size = std::distance(begin, end); - if (size == 0) return; + if (size == 0) { return; } #ifdef AMREX_USE_GPU dtod_memcpy(&(*result), &(*begin), size*sizeof(value_type)); #else @@ -241,7 +241,7 @@ namespace amrex::Gpu { "Can only copy trivially copyable types"); auto size = std::distance(begin, end); - if (size == 0) return; + if (size == 0) { return; } #ifdef AMREX_USE_GPU htod_memcpy_async(&(*result), &(*begin), size*sizeof(value_type)); #else @@ -279,7 +279,7 @@ namespace amrex::Gpu { "Can only copy trivially copyable types"); auto size = std::distance(begin, end); - if (size == 0) return; + if (size == 0) { return; } #ifdef AMREX_USE_GPU dtoh_memcpy_async(&(*result), &(*begin), size*sizeof(value_type)); #else @@ -317,7 +317,7 @@ namespace amrex::Gpu { "Can only copy trivially copyable types"); auto size = std::distance(begin, end); - if (size == 0) return; + if (size == 0) { return; } #ifdef AMREX_USE_GPU dtod_memcpy_async(&(*result), &(*begin), size*sizeof(value_type)); #else @@ -339,7 +339,7 @@ namespace amrex::Gpu { "Can only copy trivially copyable types"); auto size = std::distance(begin, end); - if (size == 0) return; + if (size == 0) { return; } #ifdef AMREX_USE_GPU // Currently only implemented for CUDA. @@ -370,7 +370,7 @@ namespace amrex::Gpu { "Can only copy trivially copyable types"); auto size = std::distance(begin, end); - if (size == 0) return; + if (size == 0) { return; } #ifdef AMREX_USE_GPU // Currently only implemented for CUDA. @@ -411,7 +411,7 @@ namespace amrex::Gpu { void fillAsync (IT first, IT last, F&& f) noexcept { auto N = static_cast(std::distance(first, last)); - if (N <= 0) return; + if (N <= 0) { return; } auto p = &(*first); #ifndef AMREX_USE_GPU for (Long i = 0; i < N; ++i) { diff --git a/Src/Base/AMReX_GpuDevice.H b/Src/Base/AMReX_GpuDevice.H index 371bff773ca..20514aa367e 100644 --- a/Src/Base/AMReX_GpuDevice.H +++ b/Src/Base/AMReX_GpuDevice.H @@ -252,7 +252,7 @@ streamSynchronizeAll () noexcept inline void htod_memcpy_async (void* p_d, const void* p_h, const std::size_t sz) noexcept { - if (sz == 0) return; + if (sz == 0) { return; } #ifdef AMREX_USE_SYCL auto& q = Device::streamQueue(); q.submit([&] (sycl::handler& h) { h.memcpy(p_d, p_h, sz); }); @@ -266,7 +266,7 @@ htod_memcpy_async (void* p_d, const void* p_h, const std::size_t sz) noexcept inline void dtoh_memcpy_async (void* p_h, const void* p_d, const std::size_t sz) noexcept { - if (sz == 0) return; + if (sz == 0) { return; } #ifdef AMREX_USE_SYCL auto& q = Device::streamQueue(); q.submit([&] (sycl::handler& h) { h.memcpy(p_h, p_d, sz); }); @@ -280,7 +280,7 @@ dtoh_memcpy_async (void* p_h, const void* p_d, const std::size_t sz) noexcept inline void dtod_memcpy_async (void* p_d_dst, const void* p_d_src, const std::size_t sz) noexcept { - if (sz == 0) return; + if (sz == 0) { return; } #ifdef AMREX_USE_SYCL auto& q = Device::streamQueue(); q.submit([&] (sycl::handler& h) { h.memcpy(p_d_dst, p_d_src, sz); }); @@ -294,7 +294,7 @@ dtod_memcpy_async (void* p_d_dst, const void* p_d_src, const std::size_t sz) noe inline void htod_memcpy (void* p_d, const void* p_h, const std::size_t sz) noexcept { - if (sz == 0) return; + if (sz == 0) { return; } htod_memcpy_async(p_d, p_h, sz); Gpu::streamSynchronize(); } @@ -302,7 +302,7 @@ htod_memcpy (void* p_d, const void* p_h, const std::size_t sz) noexcept inline void dtoh_memcpy (void* p_h, const void* p_d, const std::size_t 
sz) noexcept { - if (sz == 0) return; + if (sz == 0) { return; } dtoh_memcpy_async(p_h, p_d, sz); Gpu::streamSynchronize(); } @@ -310,7 +310,7 @@ dtoh_memcpy (void* p_h, const void* p_d, const std::size_t sz) noexcept inline void dtod_memcpy (void* p_d_dst, const void* p_d_src, const std::size_t sz) noexcept { - if (sz == 0) return; + if (sz == 0) { return; } dtod_memcpy_async(p_d_dst, p_d_src, sz); Gpu::streamSynchronize(); } diff --git a/Src/Base/AMReX_GpuDevice.cpp b/Src/Base/AMReX_GpuDevice.cpp index a07acc591a2..df3625d13ce 100644 --- a/Src/Base/AMReX_GpuDevice.cpp +++ b/Src/Base/AMReX_GpuDevice.cpp @@ -473,7 +473,7 @@ Device::initialize_gpu () #endif } auto found = std::find(sgss.begin(), sgss.end(), static_cast(warp_size)); - if (found == sgss.end()) amrex::Abort("Incorrect subgroup size"); + if (found == sgss.end()) { amrex::Abort("Incorrect subgroup size"); } } #endif diff --git a/Src/Base/AMReX_GpuLaunchFunctsG.H b/Src/Base/AMReX_GpuLaunchFunctsG.H index 6c958f2070d..aea0c030152 100644 --- a/Src/Base/AMReX_GpuLaunchFunctsG.H +++ b/Src/Base/AMReX_GpuLaunchFunctsG.H @@ -114,7 +114,7 @@ void launch (int nblocks, gpuStream_t stream, L&& f) noexcept template void launch (T const& n, L&& f) noexcept { - if (amrex::isEmpty(n)) return; + if (amrex::isEmpty(n)) { return; } const auto ec = Gpu::makeExecutionConfig(n); int nthreads_per_block = ec.numThreads.x; int nthreads_total = nthreads_per_block * ec.numBlocks.x; @@ -190,7 +190,7 @@ namespace detail { template ::value> > void ParallelFor (Gpu::KernelInfo const& info, T n, L&& f) noexcept { - if (amrex::isEmpty(n)) return; + if (amrex::isEmpty(n)) { return; } const auto ec = Gpu::makeExecutionConfig(n); int nthreads_per_block = ec.numThreads.x; int nthreads_total = nthreads_per_block * ec.numBlocks.x; @@ -238,7 +238,7 @@ void ParallelFor (Gpu::KernelInfo const& info, T n, L&& f) noexcept template void ParallelFor (Gpu::KernelInfo const& info, Box const& box, L&& f) noexcept { - if (amrex::isEmpty(box)) return; + if (amrex::isEmpty(box)) { return; } int ncells = box.numPts(); const auto lo = amrex::lbound(box); const auto len = amrex::length(box); @@ -303,7 +303,7 @@ void ParallelFor (Gpu::KernelInfo const& info, Box const& box, L&& f) noexcept template ::value> > void ParallelFor (Gpu::KernelInfo const& info, Box const& box, T ncomp, L&& f) noexcept { - if (amrex::isEmpty(box)) return; + if (amrex::isEmpty(box)) { return; } int ncells = box.numPts(); const auto lo = amrex::lbound(box); const auto len = amrex::length(box); @@ -369,7 +369,7 @@ void ParallelFor (Gpu::KernelInfo const& info, Box const& box, T ncomp, L&& f) n template ::value> > void ParallelForRNG (T n, L&& f) noexcept { - if (amrex::isEmpty(n)) return; + if (amrex::isEmpty(n)) { return; } const auto ec = Gpu::ExecutionConfig(n); int nthreads_per_block = ec.numThreads.x; int nthreads_total = nthreads_per_block * amrex::min(ec.numBlocks.x,Gpu::Device::maxBlocksPerLaunch()); @@ -402,7 +402,7 @@ void ParallelForRNG (T n, L&& f) noexcept template void ParallelForRNG (Box const& box, L&& f) noexcept { - if (amrex::isEmpty(box)) return; + if (amrex::isEmpty(box)) { return; } int ncells = box.numPts(); const auto lo = amrex::lbound(box); const auto len = amrex::length(box); @@ -447,7 +447,7 @@ void ParallelForRNG (Box const& box, L&& f) noexcept template ::value> > void ParallelForRNG (Box const& box, T ncomp, L&& f) noexcept { - if (amrex::isEmpty(box)) return; + if (amrex::isEmpty(box)) { return; } int ncells = box.numPts(); const auto lo = amrex::lbound(box); const auto len = 
amrex::length(box); @@ -494,7 +494,7 @@ void ParallelForRNG (Box const& box, T ncomp, L&& f) noexcept template void ParallelFor (Gpu::KernelInfo const& /*info*/, Box const& box1, Box const& box2, L1&& f1, L2&& f2) noexcept { - if (amrex::isEmpty(box1) && amrex::isEmpty(box2)) return; + if (amrex::isEmpty(box1) && amrex::isEmpty(box2)) { return; } int ncells1 = box1.numPts(); int ncells2 = box2.numPts(); int ncells = amrex::max(ncells1, ncells2); @@ -551,7 +551,7 @@ void ParallelFor (Gpu::KernelInfo const& /*info*/, Box const& box1, Box const& box2, Box const& box3, L1&& f1, L2&& f2, L3&& f3) noexcept { - if (amrex::isEmpty(box1) && amrex::isEmpty(box2) && amrex::isEmpty(box3)) return; + if (amrex::isEmpty(box1) && amrex::isEmpty(box2) && amrex::isEmpty(box3)) { return; } int ncells1 = box1.numPts(); int ncells2 = box2.numPts(); int ncells3 = box3.numPts(); @@ -624,7 +624,7 @@ void ParallelFor (Gpu::KernelInfo const& /*info*/, Box const& box1, T1 ncomp1, L1&& f1, Box const& box2, T2 ncomp2, L2&& f2) noexcept { - if (amrex::isEmpty(box1) && amrex::isEmpty(box2)) return; + if (amrex::isEmpty(box1) && amrex::isEmpty(box2)) { return; } int ncells1 = box1.numPts(); int ncells2 = box2.numPts(); int ncells = amrex::max(ncells1, ncells2); @@ -689,7 +689,7 @@ void ParallelFor (Gpu::KernelInfo const& /*info*/, Box const& box2, T2 ncomp2, L2&& f2, Box const& box3, T3 ncomp3, L3&& f3) noexcept { - if (amrex::isEmpty(box1) && amrex::isEmpty(box2) && amrex::isEmpty(box3)) return; + if (amrex::isEmpty(box1) && amrex::isEmpty(box2) && amrex::isEmpty(box3)) { return; } int ncells1 = box1.numPts(); int ncells2 = box2.numPts(); int ncells3 = box3.numPts(); @@ -808,7 +808,7 @@ void launch (int nblocks, int nthreads_per_block, gpuStream_t stream, L&& f) noe template void launch (T const& n, L&& f) noexcept { - if (amrex::isEmpty(n)) return; + if (amrex::isEmpty(n)) { return; } const auto ec = Gpu::makeExecutionConfig(n); AMREX_LAUNCH_KERNEL(MT, ec.numBlocks, ec.numThreads, 0, Gpu::gpuStream(), [=] AMREX_GPU_DEVICE () noexcept { @@ -873,7 +873,7 @@ template ::value> ParallelFor (Gpu::KernelInfo const&, T n, L&& f) noexcept { - if (amrex::isEmpty(n)) return; + if (amrex::isEmpty(n)) { return; } const auto ec = Gpu::makeExecutionConfig(n); AMREX_LAUNCH_KERNEL(MT, ec.numBlocks, ec.numThreads, 0, Gpu::gpuStream(), [=] AMREX_GPU_DEVICE () noexcept { @@ -889,7 +889,7 @@ template std::enable_if_t::value> ParallelFor (Gpu::KernelInfo const&, Box const& box, L&& f) noexcept { - if (amrex::isEmpty(box)) return; + if (amrex::isEmpty(box)) { return; } int ncells = box.numPts(); const auto lo = amrex::lbound(box); const auto len = amrex::length(box); @@ -917,7 +917,7 @@ template ::value> ParallelFor (Gpu::KernelInfo const&, Box const& box, T ncomp, L&& f) noexcept { - if (amrex::isEmpty(box)) return; + if (amrex::isEmpty(box)) { return; } int ncells = box.numPts(); const auto lo = amrex::lbound(box); const auto len = amrex::length(box); @@ -944,7 +944,7 @@ template ::value> ParallelForRNG (T n, L&& f) noexcept { - if (amrex::isEmpty(n)) return; + if (amrex::isEmpty(n)) { return; } randState_t* rand_state = getRandState(); const auto ec = Gpu::ExecutionConfig(n); AMREX_LAUNCH_KERNEL(AMREX_GPU_MAX_THREADS, @@ -965,7 +965,7 @@ template std::enable_if_t::value> ParallelForRNG (Box const& box, L&& f) noexcept { - if (amrex::isEmpty(box)) return; + if (amrex::isEmpty(box)) { return; } randState_t* rand_state = getRandState(); int ncells = box.numPts(); const auto lo = amrex::lbound(box); @@ -997,7 +997,7 @@ template ::value> 
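The ParallelFor and ParallelForRNG overloads touched here all keep the same early return when the index space is empty. As context, a minimal sketch of calling the Box overload of amrex::ParallelFor; it assumes an AMReX build is available, and fill_with_ones and its arguments are made-up names:

    #include <AMReX_FArrayBox.H>
    #include <AMReX_GpuLaunch.H>

    // Set every cell of 'fab' inside 'bx' to one, on GPU or CPU depending on the build.
    void fill_with_ones (amrex::FArrayBox& fab, amrex::Box const& bx)
    {
        auto const& a = fab.array();   // Array4 view of the fab's data
        amrex::ParallelFor(bx,
            [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
            {
                a(i,j,k) = amrex::Real(1.0);   // one thread (or loop iteration) per cell
            });
        // An empty bx returns immediately through the isEmpty guard shown above.
    }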
ParallelForRNG (Box const& box, T ncomp, L&& f) noexcept { - if (amrex::isEmpty(box)) return; + if (amrex::isEmpty(box)) { return; } randState_t* rand_state = getRandState(); int ncells = box.numPts(); const auto lo = amrex::lbound(box); @@ -1032,7 +1032,7 @@ std::enable_if_t::value && MaybeDeviceRunnable::valu ParallelFor (Gpu::KernelInfo const&, Box const& box1, Box const& box2, L1&& f1, L2&& f2) noexcept { - if (amrex::isEmpty(box1) && amrex::isEmpty(box2)) return; + if (amrex::isEmpty(box1) && amrex::isEmpty(box2)) { return; } int ncells1 = box1.numPts(); int ncells2 = box2.numPts(); int ncells = amrex::max(ncells1, ncells2); @@ -1078,7 +1078,7 @@ ParallelFor (Gpu::KernelInfo const&, Box const& box1, Box const& box2, Box const& box3, L1&& f1, L2&& f2, L3&& f3) noexcept { - if (amrex::isEmpty(box1) && amrex::isEmpty(box2) && amrex::isEmpty(box3)) return; + if (amrex::isEmpty(box1) && amrex::isEmpty(box2) && amrex::isEmpty(box3)) { return; } int ncells1 = box1.numPts(); int ncells2 = box2.numPts(); int ncells3 = box3.numPts(); @@ -1140,7 +1140,7 @@ ParallelFor (Gpu::KernelInfo const&, Box const& box1, T1 ncomp1, L1&& f1, Box const& box2, T2 ncomp2, L2&& f2) noexcept { - if (amrex::isEmpty(box1) && amrex::isEmpty(box2)) return; + if (amrex::isEmpty(box1) && amrex::isEmpty(box2)) { return; } int ncells1 = box1.numPts(); int ncells2 = box2.numPts(); int ncells = amrex::max(ncells1, ncells2); @@ -1194,7 +1194,7 @@ ParallelFor (Gpu::KernelInfo const&, Box const& box2, T2 ncomp2, L2&& f2, Box const& box3, T3 ncomp3, L3&& f3) noexcept { - if (amrex::isEmpty(box1) && amrex::isEmpty(box2) && amrex::isEmpty(box3)) return; + if (amrex::isEmpty(box1) && amrex::isEmpty(box2) && amrex::isEmpty(box3)) { return; } int ncells1 = box1.numPts(); int ncells2 = box2.numPts(); int ncells3 = box3.numPts(); diff --git a/Src/Base/AMReX_GpuMemory.H b/Src/Base/AMReX_GpuMemory.H index 88005b82596..1ffee387015 100644 --- a/Src/Base/AMReX_GpuMemory.H +++ b/Src/Base/AMReX_GpuMemory.H @@ -22,7 +22,7 @@ struct Managed { void operator delete (void *ptr) { - if (ptr) The_Managed_Arena()->free(ptr); + if (ptr) { The_Managed_Arena()->free(ptr); } } #endif @@ -39,7 +39,7 @@ struct Pinned { void operator delete (void *ptr) { - if (ptr) The_Pinned_Arena()->free(ptr); + if (ptr) { The_Pinned_Arena()->free(ptr); } } #endif diff --git a/Src/Base/AMReX_IArrayBox.cpp b/Src/Base/AMReX_IArrayBox.cpp index 6f751cf610c..40b2762729d 100644 --- a/Src/Base/AMReX_IArrayBox.cpp +++ b/Src/Base/AMReX_IArrayBox.cpp @@ -31,7 +31,7 @@ namespace void IArrayBox::Initialize () { - if (initialized) return; + if (initialized) { return; } // ParmParse pp("iab"); ifabio = std::make_unique(); diff --git a/Src/Base/AMReX_IndexType.cpp b/Src/Base/AMReX_IndexType.cpp index 00ba0a0b10f..e96052a4d14 100644 --- a/Src/Base/AMReX_IndexType.cpp +++ b/Src/Base/AMReX_IndexType.cpp @@ -15,8 +15,9 @@ operator<< (std::ostream& os, << ',' << (it.test(1)?'N':'C'), << ',' << (it.test(2)?'N':'C')) << ')' << std::flush; - if (os.fail()) + if (os.fail()) { amrex::Error("operator<<(ostream&,IndexType&) failed"); + } return os; } @@ -41,8 +42,9 @@ operator>> (std::istream& is, BL_ASSERT(t1 == 'C' || t1 == 'N'); t1=='N'?it.set(1):it.unset(1); , BL_ASSERT(t2 == 'C' || t2 == 'N'); t2=='N'?it.set(2):it.unset(2)); - if (is.fail()) + if (is.fail()) { amrex::Error("operator>>(ostream&,IndexType&) failed"); + } return is; } diff --git a/Src/Base/AMReX_IntConv.H b/Src/Base/AMReX_IntConv.H index 0a108fffe13..f792f4b8eab 100644 --- a/Src/Base/AMReX_IntConv.H +++ 
b/Src/Base/AMReX_IntConv.H @@ -27,7 +27,7 @@ namespace amrex { bool swapEndian = (id.order() != amrex::FPC::NativeIntDescriptor().order()); for (std::size_t j = 0; j < size; ++j) { value = static_cast(data[j]); - if (swapEndian) value = swapBytes(value); + if (swapEndian) { value = swapBytes(value); } os.write((char*) &value, sizeof(To)); } } @@ -40,7 +40,7 @@ namespace amrex { bool swapEndian = (id.order() != amrex::FPC::NativeIntDescriptor().order()); for (std::size_t j = 0; j < size; ++j) { is.read((char*) &value, sizeof(From)); - if (swapEndian) value = swapBytes(value); + if (swapEndian) { value = swapBytes(value); } data[j] = static_cast(value); } } diff --git a/Src/Base/AMReX_IntVect.cpp b/Src/Base/AMReX_IntVect.cpp index 3d0bf0b6eb5..5e68f633d19 100644 --- a/Src/Base/AMReX_IntVect.cpp +++ b/Src/Base/AMReX_IntVect.cpp @@ -61,8 +61,9 @@ operator>> (std::istream& is, IntVect& iv) amrex::Error("operator>>(istream&,IntVect&): expected \'(\'"); } - if (is.fail()) + if (is.fail()) { amrex::Error("operator>>(istream&,IntVect&) failed"); + } return is; } diff --git a/Src/Base/AMReX_LayoutData.H b/Src/Base/AMReX_LayoutData.H index 57b8194d0b3..aaa1db98135 100644 --- a/Src/Base/AMReX_LayoutData.H +++ b/Src/Base/AMReX_LayoutData.H @@ -31,14 +31,14 @@ namespace amrex m_data.resize(local_size()); } - ~LayoutData () { if (m_need_to_clear_bd) clearThisBD(); } + ~LayoutData () { if (m_need_to_clear_bd) { clearThisBD(); } } LayoutData (const LayoutData& rhs) noexcept : FabArrayBase(rhs), m_data(rhs.m_data), m_need_to_clear_bd(rhs.m_need_to_clear_bd) { - if (m_need_to_clear_bd) addThisBD(); + if (m_need_to_clear_bd) { addThisBD(); } } LayoutData (LayoutData&& rhs) noexcept @@ -54,13 +54,13 @@ namespace amrex if (&rhs != this) { m_data.clear(); - if (m_need_to_clear_bd) clearThisBD(); + if (m_need_to_clear_bd) { clearThisBD(); } FabArrayBase::operator=(rhs); m_data = rhs.m_data; m_need_to_clear_bd = rhs.m_need_to_clear_bd; - if (m_need_to_clear_bd) addThisBD(); + if (m_need_to_clear_bd) { addThisBD(); } } return *this; } @@ -70,7 +70,7 @@ namespace amrex if (&rhs != this) { m_data.clear(); - if (m_need_to_clear_bd) clearThisBD(); + if (m_need_to_clear_bd) { clearThisBD(); } FabArrayBase::operator=(static_cast(rhs)); m_data = std::move(rhs.m_data); diff --git a/Src/Base/AMReX_MFIter.cpp b/Src/Base/AMReX_MFIter.cpp index 5ae34d30a81..b05d0bbb2af 100644 --- a/Src/Base/AMReX_MFIter.cpp +++ b/Src/Base/AMReX_MFIter.cpp @@ -216,7 +216,7 @@ void MFIter::Finalize () { // avoid double finalize - if (finalized) return; + if (finalized) { return; } finalized = true; // mark as invalid @@ -462,7 +462,7 @@ MFIter::growntilebox (int a_ng) const noexcept { Box bx = tilebox(); IntVect ngv{a_ng}; - if (a_ng < -100) ngv = fabArray.nGrowVect(); + if (a_ng < -100) { ngv = fabArray.nGrowVect(); } const Box& vbx = validbox(); for (int d=0; d const& mf, int icomp, int ncomp) const { const auto N_snds = static_cast(m_SndTags.size()); - if (N_snds == 0) return; + if (N_snds == 0) { return; } // Prepare buffer @@ -108,7 +108,7 @@ void Copier::recv (FabArray& mf, int icomp, int ncomp) const { const auto N_rcvs = static_cast(m_RcvTags.size()); - if (N_rcvs == 0) return; + if (N_rcvs == 0) { return; } // Prepare buffer diff --git a/Src/Base/AMReX_Machine.cpp b/Src/Base/AMReX_Machine.cpp index e8e3c955962..bcb420b3f67 100644 --- a/Src/Base/AMReX_Machine.cpp +++ b/Src/Base/AMReX_Machine.cpp @@ -98,7 +98,7 @@ std::string to_str (const Array & a) oss << "("; bool first = true; for (auto const& item : a) { - if (!first) oss << ","; + 
if (!first) { oss << ","; } oss << item; first = false; } @@ -113,7 +113,7 @@ std::string to_str (const Vector & v) oss << "("; bool first = true; for (auto const& item : v) { - if (!first) oss << ","; + if (!first) { oss << ","; } oss << item; first = false; } @@ -382,8 +382,9 @@ class Machine Print() << "Got node ID from SLURM_TOPOLOGY_ADDR: " << result << std::endl; } } else { - if (cluster_name == "escori") + if (cluster_name == "escori") { tag = "cgpu"; + } auto mpi_proc_name = get_mpi_processor_name(); Print() << "MPI_Get_processor_name: " << mpi_proc_name << std::endl; pos = mpi_proc_name.find(tag); diff --git a/Src/Base/AMReX_Math.H b/Src/Base/AMReX_Math.H index 7713bab56f8..769b9bf50f4 100644 --- a/Src/Base/AMReX_Math.H +++ b/Src/Base/AMReX_Math.H @@ -188,19 +188,20 @@ template (x); - else if constexpr (Power == 0) + } else if constexpr (Power == 0) { //note: 0^0 is implementation-defined, but most compilers return 1 return T(1); - else if constexpr (Power == 1) + } else if constexpr (Power == 1) { return x; - else if constexpr (Power == 2) + } else if constexpr (Power == 2) { return x*x; - else if constexpr (Power%2 == 0) + } else if constexpr (Power%2 == 0) { return powi<2>(powi(x)); - else + } else { return x*powi(x); + } } } diff --git a/Src/Base/AMReX_MemPool.cpp b/Src/Base/AMReX_MemPool.cpp index 7c31e0412a7..e1e26ce7b6d 100644 --- a/Src/Base/AMReX_MemPool.cpp +++ b/Src/Base/AMReX_MemPool.cpp @@ -109,7 +109,7 @@ void amrex_mempool_get_stats (int& mp_min, int& mp_max, int& mp_tot) // min, max void amrex_real_array_init (Real* p, size_t nelems) { - if (init_snan) amrex_array_init_snan(p, nelems); + if (init_snan) { amrex_array_init_snan(p, nelems); } } void amrex_array_init_snan (Real* p, size_t nelems) diff --git a/Src/Base/AMReX_MemProfiler.cpp b/Src/Base/AMReX_MemProfiler.cpp index 223a5dad1ba..a6326ec9412 100644 --- a/Src/Base/AMReX_MemProfiler.cpp +++ b/Src/Base/AMReX_MemProfiler.cpp @@ -159,7 +159,7 @@ MemProfiler::report_ (const std::string& prefix, const std::string& memory_log_n } } ifs.ignore(std::numeric_limits::max(), '\n'); - if (nfound == npstat) break; + if (nfound == npstat) { break; } } } @@ -201,23 +201,23 @@ MemProfiler::report_ (const std::string& prefix, const std::string& memory_log_n #endif const int IOProc = ParallelDescriptor::IOProcessorNumber(); - ParallelDescriptor::ReduceLongMin(&cur_min[0], cur_min.size(), IOProc); - ParallelDescriptor::ReduceLongMax(&cur_max[0], cur_max.size(), IOProc); - ParallelDescriptor::ReduceLongMin(&hwm_min[0], hwm_min.size(), IOProc); - ParallelDescriptor::ReduceLongMax(&hwm_max[0], hwm_max.size(), IOProc); - ParallelDescriptor::ReduceLongMin(&mymin[0], N, IOProc); - ParallelDescriptor::ReduceLongMax(&mymax[0], N, IOProc); - - ParallelDescriptor::ReduceIntMin (&num_builds_min[0], num_builds_min.size(), IOProc); - ParallelDescriptor::ReduceIntMax (&num_builds_max[0], num_builds_max.size(), IOProc); - ParallelDescriptor::ReduceIntMin (&hwm_builds_min[0], hwm_builds_min.size(), IOProc); - ParallelDescriptor::ReduceIntMax (&hwm_builds_max[0], hwm_builds_max.size(), IOProc); + ParallelDescriptor::ReduceLongMin(cur_min.data(), cur_min.size(), IOProc); + ParallelDescriptor::ReduceLongMax(cur_max.data(), cur_max.size(), IOProc); + ParallelDescriptor::ReduceLongMin(hwm_min.data(), hwm_min.size(), IOProc); + ParallelDescriptor::ReduceLongMax(hwm_max.data(), hwm_max.size(), IOProc); + ParallelDescriptor::ReduceLongMin(mymin.data(), N, IOProc); + ParallelDescriptor::ReduceLongMax(mymax.data(), N, IOProc); + + 
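Besides the brace changes, the reduction and gather calls here replace &v[0] with v.data(). A small self-contained sketch of why data() is the safer spelling; the vector contents are illustrative only:

    #include <cstdio>
    #include <vector>

    void consume (const long* p, std::size_t n)
    {
        for (std::size_t i = 0; i < n; ++i) { std::printf("%ld ", p[i]); }
    }

    int main ()
    {
        std::vector<long> v;            // deliberately empty
        consume(v.data(), v.size());    // fine: data() is valid (possibly null) for an empty vector
        // consume(&v[0], v.size());    // undefined behavior: operator[] on an empty vector
        v = {1, 2, 3};
        consume(v.data(), v.size());    // prints "1 2 3"
        std::printf("\n");
    }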
ParallelDescriptor::ReduceIntMin (num_builds_min.data(), num_builds_min.size(), IOProc); + ParallelDescriptor::ReduceIntMax (num_builds_max.data(), num_builds_max.size(), IOProc); + ParallelDescriptor::ReduceIntMin (hwm_builds_min.data(), hwm_builds_min.size(), IOProc); + ParallelDescriptor::ReduceIntMax (hwm_builds_max.data(), hwm_builds_max.size(), IOProc); if (ParallelDescriptor::IOProcessor()) { std::ofstream memlog(memory_log_name.c_str(), std::ofstream::out|std::ofstream::app); - if (!memlog.good()) return; + if (!memlog.good()) { return; } static int width_name = 0; if (width_name == 0) { diff --git a/Src/Base/AMReX_MultiFab.cpp b/Src/Base/AMReX_MultiFab.cpp index a77d01d6375..15f9490b1c2 100644 --- a/Src/Base/AMReX_MultiFab.cpp +++ b/Src/Base/AMReX_MultiFab.cpp @@ -452,7 +452,7 @@ MultiFab::negate (const Box& region, int nghost) void MultiFab::Initialize () { - if (initialized) return; + if (initialized) { return; } initialized = true; amrex::ExecOnFinalize(MultiFab::Finalize); @@ -506,7 +506,7 @@ MultiFab::MultiFab (const BoxArray& bxs, : FabArray(bxs,dm,ncomp,ngrow,info,factory) { - if (SharedMemory() && info.alloc) initVal(); // else already done in FArrayBox + if (SharedMemory() && info.alloc) { initVal(); } // else already done in FArrayBox #ifdef AMREX_MEM_PROFILING ++num_multifabs; num_multifabs_hwm = std::max(num_multifabs_hwm, num_multifabs); @@ -555,7 +555,7 @@ MultiFab::define (const BoxArray& bxs, const FabFactory& factory) { define(bxs, dm, nvar, IntVect(ngrow), info, factory); - if (SharedMemory() && info.alloc) initVal(); // else already done in FArrayBox + if (SharedMemory() && info.alloc) { initVal(); } // else already done in FArrayBox } void @@ -567,7 +567,7 @@ MultiFab::define (const BoxArray& bxs, const FabFactory& factory) { this->FabArray::define(bxs,dm,nvar,ngrow,info,factory); - if (SharedMemory() && info.alloc) initVal(); // else already done in FArrayBox + if (SharedMemory() && info.alloc) { initVal(); } // else already done in FArrayBox } void @@ -1112,8 +1112,9 @@ MultiFab::norm1 (int comp, const Periodicity& period, bool ignore_covered ) cons MultiFab::Copy(tmpmf, *this, comp, 0, 1, 0); #ifdef AMREX_USE_EB - if ( this -> hasEBFabFactory() && ignore_covered ) + if ( this -> hasEBFabFactory() && ignore_covered ) { EB_set_covered( tmpmf, Real(0.0) ); + } #endif auto mask = OverlapMask(period); @@ -1173,8 +1174,9 @@ MultiFab::norm1 (const Vector& comps, int ngrow, bool local) const nm1.push_back(this->norm1(comp, ngrow, true)); } - if (!local) + if (!local) { ParallelAllReduce::Sum(nm1.dataPtr(), n, ParallelContext::CommunicatorSub()); + } return nm1; } @@ -1193,8 +1195,9 @@ MultiFab::sum_unique (int comp, BL_PROFILE("MultiFab::sum_unique()"); // no duplicatly distributed points if cell centered - if (ixType().cellCentered()) + if (ixType().cellCentered()) { return this->sum(comp, local); + } // Owner is the grid with the lowest grid number containing the data std::unique_ptr owner_mask = OwnerMask(period); @@ -1411,7 +1414,7 @@ MultiFab::AverageSync (const Periodicity& period) { BL_PROFILE("MultiFab::AverageSync()"); - if (ixType().cellCentered()) return; + if (ixType().cellCentered()) { return; } auto wgt = this->OverlapMask(period); wgt->invert(1.0, 0, 1); this->WeightedSync(*wgt, period); @@ -1422,7 +1425,7 @@ MultiFab::WeightedSync (const MultiFab& wgt, const Periodicity& period) { BL_PROFILE("MultiFab::WeightedSync()"); - if (ixType().cellCentered()) return; + if (ixType().cellCentered()) { return; } const int ncomp = nComp(); for (int comp = 0; comp 
< ncomp; ++comp) diff --git a/Src/Base/AMReX_MultiFabUtil.H b/Src/Base/AMReX_MultiFabUtil.H index 8c499874a83..3e0d6bf7d9f 100644 --- a/Src/Base/AMReX_MultiFabUtil.H +++ b/Src/Base/AMReX_MultiFabUtil.H @@ -844,7 +844,7 @@ void average_down_faces (const FabArray& fine, FabArray& crse, const auto type = fine.ixType(); int dir; for (dir = 0; dir < AMREX_SPACEDIM; ++dir) { - if (type.nodeCentered(dir)) break; + if (type.nodeCentered(dir)) { break; } } auto tmptype = type; tmptype.unset(dir); diff --git a/Src/Base/AMReX_MultiFabUtil.cpp b/Src/Base/AMReX_MultiFabUtil.cpp index e3cbae4bb05..201561150e8 100644 --- a/Src/Base/AMReX_MultiFabUtil.cpp +++ b/Src/Base/AMReX_MultiFabUtil.cpp @@ -45,7 +45,7 @@ namespace { boxes.push_back(is.second); slice_to_full_ba_map.push_back(is.first); } - BoxArray slice_ba(&boxes[0], static_cast(boxes.size())); + BoxArray slice_ba(boxes.data(), static_cast(boxes.size())); DistributionMapping slice_dmap(std::move(procs)); return std::make_unique(slice_ba, slice_dmap, ncomp, 0, @@ -470,7 +470,7 @@ namespace amrex const auto type = fine.ixType(); int dir; for (dir = 0; dir < AMREX_SPACEDIM; ++dir) { - if (type.cellCentered(dir)) break; + if (type.cellCentered(dir)) { break; } } auto tmptype = type; tmptype.set(dir); diff --git a/Src/Base/AMReX_MultiFabUtilI.H b/Src/Base/AMReX_MultiFabUtilI.H index 9696a2d888a..cb7e8892ee1 100644 --- a/Src/Base/AMReX_MultiFabUtilI.H +++ b/Src/Base/AMReX_MultiFabUtilI.H @@ -94,8 +94,8 @@ namespace amrex::MFUtil { int nc = mf_in.nComp(); mf_out.define(ba, dm, nc, ng); - if (regrid_ghost) SymmetricGhost::copy(mf_out, mf_in, nc, ng); - else AsymmetricGhost::copy(mf_out, mf_in, nc, ng); + if (regrid_ghost) { SymmetricGhost::copy(mf_out, mf_in, nc, ng); } + else { AsymmetricGhost::copy(mf_out, mf_in, nc, ng); } } @@ -112,8 +112,8 @@ namespace amrex::MFUtil { int nc = mf_in.nComp(); mf_out.define(ba, dm, nc, ng, MFInfo(), eb_factory); - if (regrid_ghost) SymmetricGhost::copy(mf_out, mf_in, nc, ng); - else AsymmetricGhost::copy(mf_out, mf_in, nc, ng); + if (regrid_ghost) { SymmetricGhost::copy(mf_out, mf_in, nc, ng); } + else { AsymmetricGhost::copy(mf_out, mf_in, nc, ng); } } } diff --git a/Src/Base/AMReX_NonLocalBC.cpp b/Src/Base/AMReX_NonLocalBC.cpp index 109a77d1e54..ebd78f7b3d1 100644 --- a/Src/Base/AMReX_NonLocalBC.cpp +++ b/Src/Base/AMReX_NonLocalBC.cpp @@ -51,7 +51,7 @@ void PrepareCommBuffers(CommData& comm, comm.stats.clear(); const auto N_comms = static_cast(cctc.size()); - if (N_comms == 0) return; + if (N_comms == 0) { return; } // reserve for upcominf push_backs comm.data.reserve(N_comms); comm.size.reserve(N_comms); diff --git a/Src/Base/AMReX_NonLocalBCImpl.H b/Src/Base/AMReX_NonLocalBCImpl.H index ab2a0dfb978..60798f85a34 100644 --- a/Src/Base/AMReX_NonLocalBCImpl.H +++ b/Src/Base/AMReX_NonLocalBCImpl.H @@ -188,7 +188,7 @@ std::enable_if_t() && IsCallableR() && IsFabPro local_copy_cpu (FabArray& dest, const FabArray& src, int dcomp, int scomp, int ncomp, FabArrayBase::CopyComTagsContainer const& local_tags, DTOS const& dtos, Proj const& proj) noexcept { const auto N_locs = static_cast(local_tags.size()); - if (N_locs == 0) return; + if (N_locs == 0) { return; } #ifdef AMREX_USE_OMP #pragma omp parallel for #endif @@ -218,7 +218,7 @@ unpack_recv_buffer_cpu (FabArray& mf, int dcomp, int ncomp, Vector c amrex::ignore_unused(recv_size); const auto N_rcvs = static_cast(recv_cctc.size()); - if (N_rcvs == 0) return; + if (N_rcvs == 0) { return; } using T = typename FAB::value_type; #ifdef AMREX_USE_OMP @@ -256,7 +256,7 @@ 
std::enable_if_t() && IsCallableR() && IsFabPro local_copy_gpu (FabArray& dest, const FabArray& src, int dcomp, int scomp, int ncomp, FabArrayBase::CopyComTagsContainer const& local_tags, DTOS const& dtos, Proj const& proj) noexcept { int N_locs = local_tags.size(); - if (N_locs == 0) return; + if (N_locs == 0) { return; } using T = typename FAB::value_type; Vector > loc_copy_tags; @@ -284,7 +284,7 @@ unpack_recv_buffer_gpu (FabArray& mf, int scomp, int ncomp, amrex::ignore_unused(recv_size); const int N_rcvs = recv_cctc.size(); - if (N_rcvs == 0) return; + if (N_rcvs == 0) { return; } char* pbuffer = recv_data[0]; #if 0 @@ -484,7 +484,7 @@ void Comm_finish (FabArray& mf, int scomp, int ncomp, FabArrayBase::CommMetaData const& cmd, CommHandler handler, DTOS const& dtos, Proj const& proj) { - if (ParallelContext::NProcsSub() == 1) return; + if (ParallelContext::NProcsSub() == 1) { return; } const auto N_rcvs = static_cast(cmd.m_RcvTags->size()); if (N_rcvs > 0) @@ -535,7 +535,7 @@ Rotate90 (FabArray& mf, int scomp, int ncomp, IntVect const& nghost, Box co AMREX_ASSERT(scomp < mf.nComp() && scomp+ncomp <= mf.nComp()); AMREX_ASSERT(nghost.allLE(mf.nGrowVect()) && nghost[0] == nghost[1]); - if (nghost[0] <= 0) return; + if (nghost[0] <= 0) { return; } const FabArrayBase::RB90& TheRB90 = mf.getRB90(nghost, domain); @@ -585,7 +585,7 @@ Rotate180 (FabArray& mf, int scomp, int ncomp, IntVect const& nghost, Box c AMREX_ASSERT(scomp < mf.nComp() && scomp+ncomp <= mf.nComp()); AMREX_ASSERT(nghost.allLE(mf.nGrowVect())); - if (nghost[0] <= 0) return; + if (nghost[0] <= 0) { return; } const FabArrayBase::RB180& TheRB180 = mf.getRB180(nghost, domain); @@ -620,7 +620,7 @@ FillPolar (FabArray& mf, int scomp, int ncomp, IntVect const& nghost, Box c AMREX_ASSERT(scomp < mf.nComp() && scomp+ncomp <= mf.nComp()); AMREX_ASSERT(nghost.allLE(mf.nGrowVect())); - if (nghost[0] <= 0) return; + if (nghost[0] <= 0) { return; } const FabArrayBase::PolarB& ThePolarB = mf.getPolarB(nghost, domain); @@ -727,10 +727,10 @@ get_src_dst_boxes (DTOS const& dtos, Box const& dstbox, Box const& domain) r.reserve(AMREX_D_TERM(nboxes[0],*nboxes[1],*nboxes[2])); #if (AMREX_SPACEDIM == 3) - for (int kbox = 0; kbox < nboxes[2]; ++kbox) + for (int kbox = 0; kbox < nboxes[2]; ++kbox) { #endif #if (AMREX_SPACEDIM >=2 ) - for (int jbox = 0; jbox < nboxes[1]; ++jbox) + for (int jbox = 0; jbox < nboxes[1]; ++jbox) { #endif for (int ibox = 0; ibox < nboxes[0]; ++ibox) { @@ -750,9 +750,9 @@ get_src_dst_boxes (DTOS const& dtos, Box const& dstbox, Box const& domain) dst_ends[1][div[1]].second, dst_ends[2][div[2]].second)), dtype)); - } + AMREX_D_TERM(},},}) - return r; // NOLINT(readability-misleading-indentation,-warnings-as-errors) + return r; } template diff --git a/Src/Base/AMReX_Orientation.cpp b/Src/Base/AMReX_Orientation.cpp index f51c36ddaff..ed4226da5a4 100644 --- a/Src/Base/AMReX_Orientation.cpp +++ b/Src/Base/AMReX_Orientation.cpp @@ -11,8 +11,9 @@ operator<< (std::ostream& os, const Orientation& o) { os << '('<< int(o) << ')' ; - if (os.fail()) + if (os.fail()) { amrex::Error("operator<<(ostream&,Orientation&) failed"); + } return os; } @@ -38,8 +39,9 @@ operator>> (std::istream& is, amrex::Error("operator>>(istream&,Orientation&): expected \'(\'"); } - if (is.fail()) + if (is.fail()) { amrex::Error("operator>>(ostream&,Orientation&) failed"); + } return is; } diff --git a/Src/Base/AMReX_PArena.cpp b/Src/Base/AMReX_PArena.cpp index a7c7ee1f4bb..82781a7be65 100644 --- a/Src/Base/AMReX_PArena.cpp +++ b/Src/Base/AMReX_PArena.cpp @@ 
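The get_src_dst_boxes hunk above closes its dimension-dependent loop nest with AMREX_D_TERM(},},}) so that every conditionally compiled for statement gets a braced body. A tiny self-contained imitation of that trick; MY_D_TERM is a made-up stand-in, whereas the real AMREX_D_TERM keeps one, two, or three of its arguments depending on AMREX_SPACEDIM:

    #include <cstdio>

    // Stand-in for a 3-D build: paste all three arguments.
    #define MY_D_TERM(a,b,c) a b c

    int main ()
    {
        int count = 0;
        MY_D_TERM( for (int k = 0; k < 2; ++k) {,
                   for (int j = 0; j < 3; ++j) {,
                   for (int i = 0; i < 4; ++i) { )
            ++count;
        MY_D_TERM( },},} )
        std::printf("%d\n", count);   // prints 24, i.e. 2*3*4
    }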
-67,7 +67,7 @@ PArena::alloc (std::size_t nbytes) void PArena::free (void* p) { - if (p == nullptr) return; + if (p == nullptr) { return; } #if defined(AMREX_USE_GPU) diff --git a/Src/Base/AMReX_PCI.H b/Src/Base/AMReX_PCI.H index 7897adcbed8..6ccbe4078fc 100644 --- a/Src/Base/AMReX_PCI.H +++ b/Src/Base/AMReX_PCI.H @@ -7,7 +7,7 @@ FabArray::PC_local_cpu (const CPC& thecpc, FabArray const& src, int scomp, int dcomp, int ncomp, CpOp op) { auto const N_locs = static_cast(thecpc.m_LocTags->size()); - if (N_locs == 0) return; + if (N_locs == 0) { return; } bool is_thread_safe = thecpc.m_threadsafe_loc; if (is_thread_safe) @@ -89,7 +89,7 @@ FabArray::PC_local_gpu (const CPC& thecpc, FabArray const& src, int scomp, int dcomp, int ncomp, CpOp op) { int N_locs = thecpc.m_LocTags->size(); - if (N_locs == 0) return; + if (N_locs == 0) { return; } bool is_thread_safe = thecpc.m_threadsafe_loc; using TagType = Array4CopyTag; diff --git a/Src/Base/AMReX_ParallelContext.cpp b/Src/Base/AMReX_ParallelContext.cpp index 3654527f139..8faf56ca311 100644 --- a/Src/Base/AMReX_ParallelContext.cpp +++ b/Src/Base/AMReX_ParallelContext.cpp @@ -65,11 +65,11 @@ Frame::local_to_global_rank (int* global, const int* local, int n) } else { - for (int i = 0; i < n; ++i) global[i] = local[i]; + for (int i = 0; i < n; ++i) { global[i] = local[i]; } } #else amrex::ignore_unused(local); - for (int i = 0; i < n; ++i) global[i] = 0; + for (int i = 0; i < n; ++i) { global[i] = 0; } #endif } @@ -91,11 +91,11 @@ Frame::global_to_local_rank (int* local, const int* global, int n) } else { - for (int i = 0; i < n; ++i) local[i] = global[i]; + for (int i = 0; i < n; ++i) { local[i] = global[i]; } } #else amrex::ignore_unused(global); - for (int i = 0; i < n; ++i) local[i] = 0; + for (int i = 0; i < n; ++i) { local[i] = 0; } #endif } diff --git a/Src/Base/AMReX_ParallelDescriptor.H b/Src/Base/AMReX_ParallelDescriptor.H index 2e5a35a0ef8..3bf72ca3852 100644 --- a/Src/Base/AMReX_ParallelDescriptor.H +++ b/Src/Base/AMReX_ParallelDescriptor.H @@ -177,7 +177,7 @@ while ( false ) #if defined(BL_USE_MPI3) if (m_size > 1) { MPI_Comm_free(&m_team_comm); - if (m_rankInTeam==0) MPI_Comm_free(&m_lead_comm); + if (m_rankInTeam==0) { MPI_Comm_free(&m_lead_comm); } } #endif } @@ -355,8 +355,8 @@ while ( false ) { int rb, re; { - if (rit < 0) rit = ParallelDescriptor::MyRankInTeam(); - if (nworkers == 0) nworkers = ParallelDescriptor::TeamSize(); + if (rit < 0) { rit = ParallelDescriptor::MyRankInTeam(); } + if (nworkers == 0) { nworkers = ParallelDescriptor::TeamSize(); } BL_ASSERT(rit resl; - if ( root == MyProc() ) resl.resize(NProcs()); + if ( root == MyProc() ) { resl.resize(NProcs()); } BL_MPI_REQUIRE( MPI_Gather(const_cast(&t), 1, Mpi_typemap::type(), @@ -971,7 +971,7 @@ ParallelDescriptor::Gatherv (const T* send, int sc, BL_COMM_PROFILE(BLProfiler::Gatherv, BLProfiler::BeforeCall(), root, BLProfiler::NoTag()); MPI_Gatherv(send, sc, ParallelDescriptor::Mpi_typemap::type(), - recv, &rc[0], &disp[0], ParallelDescriptor::Mpi_typemap::type(), + recv, rc.data(), disp.data(), ParallelDescriptor::Mpi_typemap::type(), root, Communicator()); BL_COMM_PROFILE(BLProfiler::Gatherv, std::accumulate(rc.begin(),rc.end(),0)*sizeof(T), root, BLProfiler::NoTag()); @@ -1028,7 +1028,7 @@ ParallelDescriptor::GatherLayoutDataToVector (const LayoutData& sendbuf, } Vector disp(nprocs); - if (!disp.empty()) disp[0] = 0; + if (!disp.empty()) { disp[0] = 0; } std::partial_sum(recvcount.begin(), recvcount.end()-1, disp.begin()+1); Vector new_index_to_T(sendbuf.size()); diff 
--git a/Src/Base/AMReX_ParallelDescriptor.cpp b/Src/Base/AMReX_ParallelDescriptor.cpp index 5074876974f..764bdfd243d 100644 --- a/Src/Base/AMReX_ParallelDescriptor.cpp +++ b/Src/Base/AMReX_ParallelDescriptor.cpp @@ -263,22 +263,22 @@ Message::test () int Message::tag () const { - if ( !m_finished ) amrex::Error("Message::tag: Not Finished!"); + if ( !m_finished ) { amrex::Error("Message::tag: Not Finished!"); } return m_stat.MPI_TAG; } int Message::pid () const { - if ( !m_finished ) amrex::Error("Message::pid: Not Finished!"); + if ( !m_finished ) { amrex::Error("Message::pid: Not Finished!"); } return m_stat.MPI_SOURCE; } size_t Message::count () const { - if ( m_type == MPI_DATATYPE_NULL ) amrex::Error("Message::count: Bad Type!"); - if ( !m_finished ) amrex::Error("Message::count: Not Finished!"); + if ( m_type == MPI_DATATYPE_NULL ) { amrex::Error("Message::count: Bad Type!"); } + if ( !m_finished ) { amrex::Error("Message::count: Not Finished!"); } int cnt; BL_MPI_REQUIRE( MPI_Get_count(&m_stat, m_type, &cnt) ); return cnt; @@ -394,7 +394,7 @@ StartParallel (int* argc, char*** argv, MPI_Comm a_mpi_comm) #ifdef BL_USE_MPI3 int mpi_version, mpi_subversion; BL_MPI_REQUIRE( MPI_Get_version(&mpi_version, &mpi_subversion) ); - if (mpi_version < 3) amrex::Abort("MPI 3 is needed because USE_MPI3=TRUE"); + if (mpi_version < 3) { amrex::Abort("MPI 3 is needed because USE_MPI3=TRUE"); } #endif // Wait until all other processes are properly started. @@ -604,8 +604,9 @@ ReduceBoolAnd (bool& r, int cpu) detail::DoReduce(&src,MPI_SUM,1,cpu); - if (ParallelDescriptor::MyProc() == cpu) + if (ParallelDescriptor::MyProc() == cpu) { r = (src == ParallelDescriptor::NProcs()) ? true : false; + } } void @@ -625,8 +626,9 @@ ReduceBoolOr (bool& r, int cpu) detail::DoReduce(&src,MPI_SUM,1,cpu); - if (ParallelDescriptor::MyProc() == cpu) + if (ParallelDescriptor::MyProc() == cpu) { r = (src == 0) ? 
false : true; + } } void @@ -1165,8 +1167,9 @@ Gather (Real const* sendbuf, int nsend, Real* recvbuf, int root) BL_ASSERT(!(sendbuf == nullptr)); BL_ASSERT(!(recvbuf == nullptr)); - for (int i = 0; i < nsend; ++i) + for (int i = 0; i < nsend; ++i) { recvbuf[i] = sendbuf[i]; + } } void @@ -1568,7 +1571,7 @@ StartTeams () for (int i = 0; i < lead_ranks.size(); ++i) { lead_ranks[i] = i * team_size; } - BL_MPI_REQUIRE( MPI_Group_incl(grp, lead_ranks.size(), &lead_ranks[0], &lead_grp) ); + BL_MPI_REQUIRE( MPI_Group_incl(grp, lead_ranks.size(), lead_ranks.data(), &lead_grp) ); BL_MPI_REQUIRE( MPI_Comm_create(ParallelDescriptor::Communicator(), lead_grp, &m_Team.m_lead_comm) ); @@ -1591,14 +1594,15 @@ mpi_level_to_string (int mtlev) { amrex::ignore_unused(mtlev); #ifdef AMREX_USE_MPI - if (mtlev == MPI_THREAD_SINGLE) + if (mtlev == MPI_THREAD_SINGLE) { return std::string("MPI_THREAD_SINGLE"); - if (mtlev == MPI_THREAD_FUNNELED) + } else if (mtlev == MPI_THREAD_FUNNELED) { return std::string("MPI_THREAD_FUNNELED"); - if (mtlev == MPI_THREAD_SERIALIZED) + } else if (mtlev == MPI_THREAD_SERIALIZED) { return std::string("MPI_THREAD_SERIALIZED"); - if (mtlev == MPI_THREAD_MULTIPLE) + } else if (mtlev == MPI_THREAD_MULTIPLE) { return std::string("MPI_THREAD_MULTIPLE"); + } #endif return std::string("UNKNOWN"); } diff --git a/Src/Base/AMReX_ParmParse.cpp b/Src/Base/AMReX_ParmParse.cpp index 8fb37dab408..49a6db4afa9 100644 --- a/Src/Base/AMReX_ParmParse.cpp +++ b/Src/Base/AMReX_ParmParse.cpp @@ -85,7 +85,7 @@ ParmParse::PP_entry::~PP_entry () ParmParse::PP_entry& ParmParse::PP_entry::operator= (const PP_entry& pe) { - if ( &pe == this ) return *this; + if ( &pe == this ) { return *this; } m_name = pe.m_name; m_vals = pe.m_vals; m_table = nullptr; @@ -105,7 +105,7 @@ ParmParse::PP_entry::print () const { for ( int i = 0; i < n; i++) { t << m_vals[i]; - if ( i < n-1 ) t << " "; + if ( i < n-1 ) { t << " "; } } return t.str(); } @@ -118,7 +118,7 @@ operator<< (std::ostream& os, const ParmParse::PP_entry& pp) for ( int i = 0; i < n; i++ ) { os << pp.m_vals[i]; - if ( i < n-1 ) os << ", "; + if ( i < n-1 ) { os << ", "; } } os << "]"; @@ -147,10 +147,10 @@ isT (const std::string& str, T& val) { std::istringstream s(str); s >> val; - if ( s.fail() ) return false; + if ( s.fail() ) { return false; } std::string left; std::getline(s, left); - if ( !left.empty() ) return false; + if ( !left.empty() ) { return false; } return true; } @@ -264,7 +264,7 @@ eat_garbage (const char*& str) int num_linefeeds = 0; for (;;) { - if ( *str == 0 ) break; // NOLINT + if ( *str == 0 ) { break; } // NOLINT else if ( *str == '#' ) { while ( *str && *str != '\n' ) @@ -656,8 +656,9 @@ addDefn (std::string& def, tab.emplace_back(def,val); } val.clear(); - if ( def != ParmParse::FileKeyword ) + if ( def != ParmParse::FileKeyword ) { def = std::string(); + } } void @@ -915,7 +916,7 @@ squeryarr (const ParmParse::Table& table, num_val = static_cast(def->m_vals.size()); } - if ( num_val == 0 ) return true; + if ( num_val == 0 ) { return true; } int stop_ix = start_ix + num_val - 1; if ( static_cast(ref.size()) <= stop_ix ) @@ -1183,7 +1184,7 @@ unused_table_entries_q (const ParmParse::Table& table, const std::string& prefix } else { - if (unused_table_entries_q(*li.m_table, prefix)) return true; + if (unused_table_entries_q(*li.m_table, prefix)) { return true; } } } else if ( !li.m_queried ) @@ -1248,11 +1249,11 @@ ParmParse::QueryUnusedInputs () { if ( ParallelDescriptor::IOProcessor() && unused_table_entries_q(g_table)) { - 
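In the mpi_level_to_string hunk above, a run of independent if (...) return ...; statements becomes a braced if/else-if chain. A standalone sketch of the same restructuring; the enum and function are made up, not from AMReX:

    #include <iostream>
    #include <string>

    enum class ThreadLevel { Single, Funneled, Serialized, Multiple };

    // Braces everywhere plus an explicit else-if chain make the mutually
    // exclusive cases obvious to both readers and static analysis.
    std::string to_string (ThreadLevel lv)
    {
        if (lv == ThreadLevel::Single) {
            return "single";
        } else if (lv == ThreadLevel::Funneled) {
            return "funneled";
        } else if (lv == ThreadLevel::Serialized) {
            return "serialized";
        } else if (lv == ThreadLevel::Multiple) {
            return "multiple";
        }
        return "unknown";
    }

    int main ()
    {
        std::cout << to_string(ThreadLevel::Serialized) << '\n';   // prints "serialized"
    }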
finalize_verbose = amrex::system::verbose; - if (finalize_verbose) amrex::OutStream() << "Unused ParmParse Variables:\n"; - finalize_table(" [TOP]", g_table); - if (finalize_verbose) amrex::OutStream() << std::endl; - return true; + finalize_verbose = amrex::system::verbose; + if (finalize_verbose) { amrex::OutStream() << "Unused ParmParse Variables:\n"; } + finalize_table(" [TOP]", g_table); + if (finalize_verbose) { amrex::OutStream() << std::endl; } + return true; } return false; } @@ -1314,14 +1315,16 @@ ParmParse::Finalize () { if ( ParallelDescriptor::IOProcessor() && unused_table_entries_q(g_table)) { - finalize_verbose = amrex::system::verbose; - if (finalize_verbose) amrex::OutStream() << "Unused ParmParse Variables:\n"; - finalize_table(" [TOP]", g_table); - if (finalize_verbose) amrex::OutStream() << std::endl; - // - // First loop through and delete all queried entries. - // - if (amrex::system::abort_on_unused_inputs) amrex::Abort("ERROR: unused ParmParse variables."); + finalize_verbose = amrex::system::verbose; + if (finalize_verbose) { amrex::OutStream() << "Unused ParmParse Variables:\n"; } + finalize_table(" [TOP]", g_table); + if (finalize_verbose) { amrex::OutStream() << std::endl; } + // + // First loop through and delete all queried entries. + // + if (amrex::system::abort_on_unused_inputs) { + amrex::Abort("ERROR: unused ParmParse variables."); + } } g_table.clear(); diff --git a/Src/Base/AMReX_PhysBCFunct.H b/Src/Base/AMReX_PhysBCFunct.H index 211b6a5b777..0589d30a217 100644 --- a/Src/Base/AMReX_PhysBCFunct.H +++ b/Src/Base/AMReX_PhysBCFunct.H @@ -140,7 +140,7 @@ public: void operator() (MultiFab& mf, int icomp, int ncomp, IntVect const& nghost, Real time, int bccomp) { - if (m_geom.isAllPeriodic()) return; + if (m_geom.isAllPeriodic()) { return; } BL_PROFILE("PhysBCFunct::()"); @@ -234,7 +234,7 @@ GpuBndryFuncFab::nddoit (Box const& bx, FArrayBox& dest, } } - if (gdomain.contains(bx)) return; + if (gdomain.contains(bx)) { return; } Array4 const& fab = dest.array(); const auto geomdata = geom.data(); @@ -359,7 +359,7 @@ GpuBndryFuncFab::ccfcdoit (Box const& bx, FArrayBox& dest, } } - if (gdomain.contains(bx)) return; + if (gdomain.contains(bx)) { return; } Array4 const& fab = dest.array(); const auto geomdata = geom.data(); @@ -383,7 +383,7 @@ GpuBndryFuncFab::ccfcdoit (Box const& bx, FArrayBox& dest, Vector face_boxes; for (const Box& b : dom_face_boxes) { Box tmp = b & bx; - if (tmp.ok()) face_boxes.push_back(tmp); + if (tmp.ok()) { face_boxes.push_back(tmp); } } const int n_face_boxes = face_boxes.size(); if (n_face_boxes == 1) { @@ -443,7 +443,7 @@ GpuBndryFuncFab::ccfcdoit (Box const& bx, FArrayBox& dest, Vector edge_boxes; for (const Box& b : dom_edge_boxes) { Box tmp = b & bx; - if (tmp.ok()) edge_boxes.push_back(tmp); + if (tmp.ok()) { edge_boxes.push_back(tmp); } } const int n_edge_boxes = edge_boxes.size(); if (n_edge_boxes == 1) { @@ -491,7 +491,7 @@ GpuBndryFuncFab::ccfcdoit (Box const& bx, FArrayBox& dest, Vector corner_boxes; for (const Box& b : dom_corner_boxes) { Box tmp = b & bx; - if (tmp.ok()) corner_boxes.push_back(tmp); + if (tmp.ok()) { corner_boxes.push_back(tmp); } } const int n_corner_boxes = corner_boxes.size(); if (n_corner_boxes == 1) { diff --git a/Src/Base/AMReX_PlotFileUtil.cpp b/Src/Base/AMReX_PlotFileUtil.cpp index cee9ee9b735..df8ff405a10 100644 --- a/Src/Base/AMReX_PlotFileUtil.cpp +++ b/Src/Base/AMReX_PlotFileUtil.cpp @@ -202,7 +202,7 @@ WriteMultiLevelPlotfile (const std::string& plotfilename, int nlevels, 
HeaderFile.open(HeaderFileName.c_str(), std::ofstream::out | std::ofstream::trunc | std::ofstream::binary); - if( ! HeaderFile.good()) FileOpenFailed(HeaderFileName); + if( ! HeaderFile.good()) { FileOpenFailed(HeaderFileName); } WriteGenericPlotfileHeader(HeaderFile, nlevels, boxArrays, varnames, geom, time, level_steps, ref_ratio, versionName, levelPrefix, mfPrefix); diff --git a/Src/Base/AMReX_Random.cpp b/Src/Base/AMReX_Random.cpp index 135c5391ebf..c32c3164f0e 100644 --- a/Src/Base/AMReX_Random.cpp +++ b/Src/Base/AMReX_Random.cpp @@ -136,8 +136,9 @@ void RestoreRandomState (std::istream& is, int nthreads_old, int nstep_old) { int N = std::min(nthreads, nthreads_old); - for (int i = 0; i < N; i++) + for (int i = 0; i < N; i++) { is >> generators[i]; + } if (nthreads > nthreads_old) { const int NProcs = ParallelDescriptor::NProcs(); const int MyProc = ParallelDescriptor::MyProc(); diff --git a/Src/Base/AMReX_RealBox.H b/Src/Base/AMReX_RealBox.H index ddbc96b1b90..6f429c8262b 100644 --- a/Src/Base/AMReX_RealBox.H +++ b/Src/Base/AMReX_RealBox.H @@ -92,7 +92,7 @@ public: //! it's volume is considered to be zero. [[nodiscard]] AMREX_GPU_HOST_DEVICE Real volume () const noexcept { - if (ok()) return AMREX_D_TERM(length(0), *length(1), *length(2)); + if (ok()) { return AMREX_D_TERM(length(0), *length(1), *length(2)); } return 0.0; } diff --git a/Src/Base/AMReX_RealBox.cpp b/Src/Base/AMReX_RealBox.cpp index c411f558ec0..bf4713be289 100644 --- a/Src/Base/AMReX_RealBox.cpp +++ b/Src/Base/AMReX_RealBox.cpp @@ -32,8 +32,9 @@ std::ostream& operator << (std::ostream &os, const RealBox& b) { os << "(RealBox "; - for (int i = 0; i < AMREX_SPACEDIM; i++) + for (int i = 0; i < AMREX_SPACEDIM; i++) { os << b.lo(i) << ' ' << b.hi(i) << ' '; + } os << ')'; return os; } @@ -68,8 +69,9 @@ operator >> (std::istream &is, RealBox& b) hi[i] = static_cast(dhitemp); } #else - for (int i = 0; i < AMREX_SPACEDIM; i++) + for (int i = 0; i < AMREX_SPACEDIM; i++) { is >> lo[i] >> hi[i]; + } #endif is.ignore(BL_IGNORE_MAX, ')'); diff --git a/Src/Base/AMReX_RealVect.cpp b/Src/Base/AMReX_RealVect.cpp index 68adc5ba95d..94d8655f9ac 100644 --- a/Src/Base/AMReX_RealVect.cpp +++ b/Src/Base/AMReX_RealVect.cpp @@ -34,8 +34,9 @@ namespace amrex amrex::Error("operator>>(istream&,IntVect&): expected \'(\'"); } - if (is.fail()) + if (is.fail()) { amrex::Error("operator>>(istream&,IntVect&) failed"); + } return is; } diff --git a/Src/Base/AMReX_Reduce.H b/Src/Base/AMReX_Reduce.H index 34f1c6e74bb..201d1064840 100644 --- a/Src/Base/AMReX_Reduce.H +++ b/Src/Base/AMReX_Reduce.H @@ -618,7 +618,7 @@ public: typename M=std::enable_if_t::value> > void eval (N n, D & reduce_data, F&& f) { - if (n <= 0) return; + if (n <= 0) { return; } using ReduceTuple = typename D::Type; auto const& stream = Gpu::gpuStream(); auto dp = reduce_data.devicePtr(stream); @@ -887,7 +887,7 @@ bool AnyOf (N n, T const* v, P&& pred) amrex::launch(ec.numBlocks.x, 0, Gpu::gpuStream(), [=] AMREX_GPU_DEVICE () noexcept { __shared__ int has_any; - if (threadIdx.x == 0) has_any = *dp; + if (threadIdx.x == 0) { has_any = *dp; } __syncthreads(); if (!has_any) @@ -953,7 +953,7 @@ bool AnyOf (Box const& box, P&& pred) Gpu::gpuStream(), [=] AMREX_GPU_DEVICE () noexcept { __shared__ int has_any; - if (threadIdx.x == 0) has_any = *dp; + if (threadIdx.x == 0) { has_any = *dp; } __syncthreads(); if (!has_any) @@ -1259,7 +1259,7 @@ bool AnyOf (Box const& box, P&&pred) for (int k = lo.z; k <= hi.z; ++k) { for (int j = lo.y; j <= hi.y; ++j) { for (int i = lo.x; i <= hi.x; ++i) { - 
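For context on the AnyOf overloads being braced here, a minimal sketch of how the Box form is typically used. It assumes amrex::Reduce::AnyOf is callable with an (i,j,k) predicate, as the surrounding code suggests; has_negative and its arguments are made-up names:

    #include <AMReX_Box.H>
    #include <AMReX_Array4.H>
    #include <AMReX_Reduce.H>

    // Returns true if any cell of 'a' inside 'bx' holds a negative value.
    bool has_negative (amrex::Box const& bx, amrex::Array4<amrex::Real const> const& a)
    {
        return amrex::Reduce::AnyOf(bx,
            [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept -> bool
            {
                return a(i,j,k) < amrex::Real(0.0);
            });
    }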
if (pred(i,j,k)) return true; + if (pred(i,j,k)) { return true; } }}} return false; } diff --git a/Src/Base/AMReX_Scan.H b/Src/Base/AMReX_Scan.H index df255b16e09..11fdfd8bd70 100644 --- a/Src/Base/AMReX_Scan.H +++ b/Src/Base/AMReX_Scan.H @@ -189,7 +189,7 @@ struct BlockStatus template T PrefixSum_mp (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum) { - if (n <= 0) return 0; + if (n <= 0) { return 0; } constexpr int nwarps_per_block = 8; constexpr int nthreads = nwarps_per_block*Gpu::Device::warp_size; constexpr int nchunks = 12; @@ -234,7 +234,7 @@ T PrefixSum_mp (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum) T sum_prev_chunk = 0; // inclusive sum from previous chunks. for (int ichunk = 0; ichunk < nchunks; ++ichunk) { N offset = ibegin + ichunk*blockDimx; - if (offset >= iend) break; + if (offset >= iend) { break; } offset += threadIdxx; T x0 = (offset < iend) ? fin(offset) : 0; @@ -242,7 +242,7 @@ T PrefixSum_mp (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum) // Scan within a warp for (int i = 1; i <= Gpu::Device::warp_size; i *= 2) { T s = sycl::shift_group_right(sg, x, i); - if (lane >= i) x += s; + if (lane >= i) { x += s; } } // x now holds the inclusive sum within the warp. The @@ -260,10 +260,10 @@ T PrefixSum_mp (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum) T y = (lane < nwarps) ? shared[lane] : 0; for (int i = 1; i <= Gpu::Device::warp_size; i *= 2) { T s = sycl::shift_group_right(sg, y, i); - if (lane >= i) y += s; + if (lane >= i) { y += s; } } - if (lane < nwarps) shared2[lane] = y; + if (lane < nwarps) { shared2[lane] = y; } } gh.item->barrier(sycl::access::fence_space::local_space); @@ -309,7 +309,7 @@ T PrefixSum_mp (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum) // Scan within a warp for (int i = 1; i <= Gpu::Device::warp_size; i *= 2) { T s = sycl::shift_group_right(sg, x, i); - if (lane >= i) x += s; + if (lane >= i) { x += s; } } // x now holds the inclusive sum within the warp. The @@ -327,10 +327,10 @@ T PrefixSum_mp (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum) T y = (lane < nwarps) ? shared[lane] : 0; for (int i = 1; i <= Gpu::Device::warp_size; i *= 2) { T s = sycl::shift_group_right(sg, y, i); - if (lane >= i) y += s; + if (lane >= i) { y += s; } } - if (lane < nwarps) shared2[lane] = y; + if (lane < nwarps) { shared2[lane] = y; } } gh.item->barrier(sycl::access::fence_space::local_space); @@ -390,7 +390,7 @@ template ,Type::Exclusive>::value)> > T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE type, RetSum a_ret_sum = retSum) { - if (n <= 0) return 0; + if (n <= 0) { return 0; } constexpr int nwarps_per_block = 8; constexpr int nthreads = nwarps_per_block*Gpu::Device::warp_size; constexpr int nchunks = 12; @@ -476,7 +476,7 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE type, RetSum a_ret_sum = retSum T tmp_out[nchunks]; // block-wide inclusive sum for chunks for (int ichunk = 0; ichunk < nchunks; ++ichunk) { N offset = ibegin + ichunk*blockDimx; - if (offset >= iend) break; + if (offset >= iend) { break; } offset += threadIdxx; T x0 = (offset < iend) ? fin(offset) : 0; @@ -487,7 +487,7 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE type, RetSum a_ret_sum = retSum // Scan within a warp for (int i = 1; i <= Gpu::Device::warp_size; i *= 2) { T s = sycl::shift_group_right(sg, x, i); - if (lane >= i) x += s; + if (lane >= i) { x += s; } } // x now holds the inclusive sum within the warp. 
The @@ -505,10 +505,10 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE type, RetSum a_ret_sum = retSum T y = (lane < nwarps) ? shared[lane] : 0; for (int i = 1; i <= Gpu::Device::warp_size; i *= 2) { T s = sycl::shift_group_right(sg, y, i); - if (lane >= i) y += s; + if (lane >= i) { y += s; } } - if (lane < nwarps) shared2[lane] = y; + if (lane < nwarps) { shared2[lane] = y; } } gh.item->barrier(sycl::access::fence_space::local_space); @@ -533,7 +533,7 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE type, RetSum a_ret_sum = retSum if (virtual_block_id == 0) { for (int ichunk = 0; ichunk < nchunks; ++ichunk) { N offset = ibegin + ichunk*blockDimx + threadIdxx; - if (offset >= iend) break; + if (offset >= iend) { break; } fout(offset, tmp_out[ichunk]); if (offset == n-1) { *totalsum_p += tmp_out[ichunk]; @@ -564,11 +564,11 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE type, RetSum a_ret_sum = retSum if (stop_lookback == false) { if (status_bf != 0) { T y = x; - if (lane > 0) x = 0; + if (lane > 0) { x = 0; } unsigned int bit_mask = 0x1u; for (int i = 1; i < Gpu::Device::warp_size; ++i) { bit_mask <<= 1; - if (i == lane) x = y; + if (i == lane) { x = y; } if (status_bf & bit_mask) { stop_lookback = true; break; @@ -582,7 +582,7 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE type, RetSum a_ret_sum = retSum } if (lane == 0) { exclusive_prefix += x; } - if (stop_lookback) break; + if (stop_lookback) { break; } } if (lane == 0) { @@ -597,7 +597,7 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE type, RetSum a_ret_sum = retSum for (int ichunk = 0; ichunk < nchunks; ++ichunk) { N offset = ibegin + ichunk*blockDimx + threadIdxx; - if (offset >= iend) break; + if (offset >= iend) { break; } T t = tmp_out[ichunk] + exclusive_prefix; fout(offset, t); if (offset == n-1) { @@ -629,7 +629,7 @@ template ,Type::Exclusive>::value)> > T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum = retSum) { - if (n <= 0) return 0; + if (n <= 0) { return 0; } constexpr int nwarps_per_block = 4; constexpr int nthreads = nwarps_per_block*Gpu::Device::warp_size; // # of threads per block constexpr int nelms_per_thread = sizeof(T) >= 8 ? 8 : 16; @@ -772,7 +772,7 @@ template ,Type::Exclusive>::value)> > T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum = retSum) { - if (n <= 0) return 0; + if (n <= 0) { return 0; } constexpr int nwarps_per_block = 8; constexpr int nthreads = nwarps_per_block*Gpu::Device::warp_size; // # of threads per block constexpr int nelms_per_thread = sizeof(T) >= 8 ? 4 : 8; @@ -905,7 +905,7 @@ template ,Type::Exclusive>::value)> > T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum = retSum) { - if (n <= 0) return 0; + if (n <= 0) { return 0; } constexpr int nwarps_per_block = 4; constexpr int nthreads = nwarps_per_block*Gpu::Device::warp_size; constexpr int nchunks = 12; @@ -980,7 +980,7 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum = retSum) T tmp_out[nchunks]; // block-wide inclusive sum for chunks for (int ichunk = 0; ichunk < nchunks; ++ichunk) { N offset = ibegin + ichunk*blockDim.x; - if (offset >= iend) break; + if (offset >= iend) { break; } offset += threadIdx.x; T x0 = (offset < iend) ? 
fin(offset) : 0; @@ -992,7 +992,7 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum = retSum) for (int i = 1; i <= Gpu::Device::warp_size; i *= 2) { AMREX_HIP_OR_CUDA( T s = __shfl_up(x,i);, T s = __shfl_up_sync(0xffffffff, x, i); ) - if (lane >= i) x += s; + if (lane >= i) { x += s; } } // x now holds the inclusive sum within the warp. The @@ -1013,7 +1013,7 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum = retSum) int mask = (1 << nwarps) - 1; for (int i = 1; i <= nwarps; i *= 2) { T s = __shfl_up_sync(mask, y, i, nwarps); - if (lane >= i) y += s; + if (lane >= i) { y += s; } } shared2[lane] = y; } @@ -1025,7 +1025,7 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum = retSum) } for (int i = 1; i <= nwarps; i *= 2) { T s = __shfl_up(y, i, nwarps); - if (lane >= i) y += s; + if (lane >= i) { y += s; } } if (lane < nwarps) { shared2[lane] = y; @@ -1055,7 +1055,7 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum = retSum) if (virtual_block_id == 0) { for (int ichunk = 0; ichunk < nchunks; ++ichunk) { N offset = ibegin + ichunk*blockDim.x + threadIdx.x; - if (offset >= iend) break; + if (offset >= iend) { break; } fout(offset, tmp_out[ichunk]); if (offset == n-1) { *totalsum_p += tmp_out[ichunk]; @@ -1082,12 +1082,12 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum = retSum) if (stop_lookback == false) { if (status_bf != 0) { T y = x; - if (lane > 0) x = 0; + if (lane > 0) { x = 0; } AMREX_HIP_OR_CUDA(uint64_t bit_mask = 0x1ull;, unsigned bit_mask = 0x1u); for (int i = 1; i < Gpu::Device::warp_size; ++i) { bit_mask <<= 1; - if (i == lane) x = y; + if (i == lane) { x = y; } if (status_bf & bit_mask) { stop_lookback = true; break; @@ -1102,7 +1102,7 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum = retSum) } if (lane == 0) { exclusive_prefix += x; } - if (stop_lookback) break; + if (stop_lookback) { break; } } if (lane == 0) { @@ -1117,7 +1117,7 @@ T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE, RetSum a_ret_sum = retSum) for (int ichunk = 0; ichunk < nchunks; ++ichunk) { N offset = ibegin + ichunk*blockDim.x + threadIdx.x; - if (offset >= iend) break; + if (offset >= iend) { break; } T t = tmp_out[ichunk] + exclusive_prefix; fout(offset, t); if (offset == n-1) { @@ -1208,7 +1208,7 @@ T InclusiveSum (N n, T const* in, T * out, RetSum a_ret_sum = retSum) template ::value> > T ExclusiveSum (N n, T const* in, T * out, RetSum a_ret_sum = retSum) { - if (n <= 0) return 0; + if (n <= 0) { return 0; } #if defined(AMREX_USE_CUDA) && defined(__CUDACC__) && (__CUDACC_VER_MAJOR__ >= 11) T in_last = 0; if (a_ret_sum) { @@ -1286,7 +1286,7 @@ template ,Type::Exclusive>::value)> > T PrefixSum (N n, FIN && fin, FOUT && fout, TYPE, RetSum = retSum) { - if (n <= 0) return 0; + if (n <= 0) { return 0; } T totalsum = 0; for (N i = 0; i < n; ++i) { T x = fin(i); @@ -1317,7 +1317,7 @@ T InclusiveSum (N n, T const* in, T * out, RetSum /*a_ret_sum*/ = retSum) template ::value> > T ExclusiveSum (N n, T const* in, T * out, RetSum /*a_ret_sum*/ = retSum) { - if (n <= 0) return 0; + if (n <= 0) { return 0; } auto in_last = in[n-1]; #if (__cplusplus >= 201703L) && (!defined(_GLIBCXX_RELEASE) || _GLIBCXX_RELEASE >= 10) @@ -1366,7 +1366,7 @@ namespace Gpu // GCC's __cplusplus is not a reliable indication for C++17 support return std::exclusive_scan(begin, end, result, 0); #else - if (begin == end) return result; + if (begin == end) { return result; } typename std::iterator_traits::value_type sum = 
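The InclusiveSum/ExclusiveSum entry points here keep the same early return for n <= 0. A minimal usage sketch of the pointer-based ExclusiveSum overload shown above, assuming a CPU build of AMReX so plain host arrays are valid; the input values are made up:

    #include <AMReX.H>
    #include <AMReX_Scan.H>
    #include <cstdio>

    int main (int argc, char* argv[])
    {
        amrex::Initialize(argc, argv);
        {
            int in[5]  = {3, 1, 4, 1, 5};
            int out[5] = {};
            // Exclusive prefix sum: out becomes {0, 3, 4, 8, 9}; the return value is the total.
            int total = amrex::Scan::ExclusiveSum(5, in, out);
            std::printf("total = %d, out[4] = %d\n", total, out[4]);   // total = 14, out[4] = 9
        }
        amrex::Finalize();
    }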
*begin; *result++ = sum - *begin; diff --git a/Src/Base/AMReX_TagParallelFor.H b/Src/Base/AMReX_TagParallelFor.H index ae832f3b2af..5aa748a3d61 100644 --- a/Src/Base/AMReX_TagParallelFor.H +++ b/Src/Base/AMReX_TagParallelFor.H @@ -149,7 +149,7 @@ void ParallelFor_doit (Vector const& tags, F && f) { const int ntags = tags.size(); - if (ntags == 0) return; + if (ntags == 0) { return; } int ntotwarps = 0; Vector nwarps; @@ -197,7 +197,7 @@ ParallelFor_doit (Vector const& tags, F && f) int g_tid = blockDim.x*blockIdx.x + threadIdx.x; #endif int g_wid = g_tid / Gpu::Device::warp_size; - if (g_wid >= ntotwarps) return; + if (g_wid >= ntotwarps) { return; } int tag_id = amrex::bisect(d_nwarps, 0, ntags, g_wid); diff --git a/Src/Base/AMReX_TinyProfiler.cpp b/Src/Base/AMReX_TinyProfiler.cpp index 8c59a1eae7b..3d935589409 100644 --- a/Src/Base/AMReX_TinyProfiler.cpp +++ b/Src/Base/AMReX_TinyProfiler.cpp @@ -56,7 +56,7 @@ TinyProfiler::TinyProfiler (std::string funcname) noexcept TinyProfiler::TinyProfiler (std::string funcname, bool start_, bool useCUPTI) noexcept : fname(std::move(funcname)), uCUPTI(useCUPTI) { - if (start_) start(); + if (start_) { start(); } } TinyProfiler::TinyProfiler (const char* funcname) noexcept @@ -68,7 +68,7 @@ TinyProfiler::TinyProfiler (const char* funcname) noexcept TinyProfiler::TinyProfiler (const char* funcname, bool start_, bool useCUPTI) noexcept : fname(funcname), uCUPTI(useCUPTI) { - if (start_) start(); + if (start_) { start(); } } TinyProfiler::~TinyProfiler () @@ -543,7 +543,7 @@ TinyProfiler::PrintStats (std::map& regstats, double dt_max) } } - if (regstats.empty()) return; + if (regstats.empty()) { return; } int nprocs = ParallelDescriptor::NProcs(); int ioproc = ParallelDescriptor::IOProcessorNumber(); @@ -568,8 +568,8 @@ TinyProfiler::PrintStats (std::map& regstats, double dt_max) dtdt[1] = dts[1]; } else { - ParallelDescriptor::Gather(&n, 1, &ncalls[0], 1, ioproc); - ParallelDescriptor::Gather(dts, 2, &dtdt[0], 2, ioproc); + ParallelDescriptor::Gather(&n, 1, ncalls.data(), 1, ioproc); + ParallelDescriptor::Gather(dts, 2, dtdt.data(), 2, ioproc); } if (ParallelDescriptor::IOProcessor()) { @@ -742,7 +742,7 @@ TinyProfiler::PrintMemStats(std::map& memstats, } } - if (memstats.empty()) return; + if (memstats.empty()) { return; } const int nprocs = ParallelDescriptor::NProcs(); const int ioproc = ParallelDescriptor::IOProcessorNumber(); @@ -772,10 +772,10 @@ TinyProfiler::PrintMemStats(std::map& memstats, maxmem_vec[0] = maxmem; } else { - ParallelDescriptor::Gather(&nalloc, 1, &nalloc_vec[0], 1, ioproc); - ParallelDescriptor::Gather(&nfree, 1, &nfree_vec[0], 1, ioproc); - ParallelDescriptor::Gather(&maxmem, 1, &maxmem_vec[0], 1, ioproc); - ParallelDescriptor::Gather(&avgmem, 1, &avgmem_vec[0], 1, ioproc); + ParallelDescriptor::Gather(&nalloc, 1, nalloc_vec.data(), 1, ioproc); + ParallelDescriptor::Gather(&nfree , 1, nfree_vec.data(), 1, ioproc); + ParallelDescriptor::Gather(&maxmem, 1, maxmem_vec.data(), 1, ioproc); + ParallelDescriptor::Gather(&avgmem, 1, avgmem_vec.data(), 1, ioproc); } if (ParallelDescriptor::IOProcessor()) { @@ -864,7 +864,7 @@ TinyProfiler::PrintMemStats(std::map& memstats, maxlen[i] += 2; } - if (allstatsstr.size() == 1) return; + if (allstatsstr.size() == 1) { return; } int lenhline = 0; for (auto i : maxlen) { diff --git a/Src/Base/AMReX_TypeList.H b/Src/Base/AMReX_TypeList.H index 24262fbb7a3..3bd0a837069 100644 --- a/Src/Base/AMReX_TypeList.H +++ b/Src/Base/AMReX_TypeList.H @@ -106,7 +106,7 @@ ForEach (TypeList, F&& f) return 
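// Illustrative sketch: the Gather calls above now pass vec.data() instead of
// &vec[0]. For a non-empty std::vector the two are equivalent, but data() is also
// well defined on an empty vector (it may return nullptr), while &vec[0] indexes a
// nonexistent element. gather_stub below is a placeholder for the MPI-style gather,
// not the ParallelDescriptor API.
#include <vector>

inline void gather_stub (const long* /*send*/, int /*nsend*/, long* /*recv*/, int /*nrecv*/) {}

inline void gather_counts (long my_count, int nprocs, bool is_io_proc)
{
    // Non-IO ranks keep a zero-length receive buffer; data() is still valid to pass.
    std::vector<long> counts(is_io_proc ? nprocs : 0);
    gather_stub(&my_count, 1, counts.data(), 1);
}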
false; } }); - if (!r) amrex::Abort("Unsupported types"); + if (!r) { amrex::Abort("Unsupported types"); } } \endverbatim */ diff --git a/Src/Base/AMReX_Utility.cpp b/Src/Base/AMReX_Utility.cpp index 423f3046ed9..b9cc90df5ae 100644 --- a/Src/Base/AMReX_Utility.cpp +++ b/Src/Base/AMReX_Utility.cpp @@ -89,8 +89,9 @@ amrex::Tokenize (const std::string& instr, if (!((token = std::strtok(line, separators.c_str())) == nullptr)) // NOLINT(bugprone-assignment-in-if-condition) { ptr.push_back(token); - while (!((token = std::strtok(nullptr, separators.c_str())) == nullptr)) // NOLINT(bugprone-assignment-in-if-condition) + while (!((token = std::strtok(nullptr, separators.c_str())) == nullptr)) { // NOLINT(bugprone-assignment-in-if-condition) ptr.push_back(token); + } } tokens.resize(ptr.size()); @@ -132,7 +133,7 @@ std::string amrex::trim(std::string s, std::string const& space) { const auto sbegin = s.find_first_not_of(space); - if (sbegin == std::string::npos) return std::string{}; + if (sbegin == std::string::npos) { return std::string{}; } const auto send = s.find_last_not_of(space); s = s.substr(sbegin, send-sbegin+1); return s; @@ -302,8 +303,9 @@ int amrex::CRRBetweenLevels(int fromlevel, int tolevel, double amrex::InvNormDist (double p) { - if (p <= 0 || p >= 1) + if (p <= 0 || p >= 1) { amrex::Error("amrex::InvNormDist(): p MUST be in (0,1)"); + } // // Coefficients in rational approximations. // @@ -474,8 +476,9 @@ amrex::InvNormDistBest (double p) double r, value; - if (p <= 0 || p >= 1) + if (p <= 0 || p >= 1) { amrex::Error("InvNormDistBest(): p MUST be in (0,1)"); + } double q = p - 0.5; @@ -528,7 +531,7 @@ amrex::InvNormDistBest (double p) value = num / den; } - if ( q < 0.0 ) value = -value; + if ( q < 0.0 ) { value = -value; } } return value; @@ -547,7 +550,7 @@ amrex::operator>>(std::istream& is, const expect& exp) { char c; is >> c; - if ( !is ) break; + if ( !is ) { break; } if ( c != exp.istr[n++] ) { is.putback(c); diff --git a/Src/Base/AMReX_Vector.H b/Src/Base/AMReX_Vector.H index 248aced1de4..c377076fe1b 100644 --- a/Src/Base/AMReX_Vector.H +++ b/Src/Base/AMReX_Vector.H @@ -63,7 +63,7 @@ namespace amrex { Vector r; r.reserve(a.size()); - for (auto& x : a) r.push_back(&x); + for (auto& x : a) { r.push_back(&x); } return r; } @@ -72,7 +72,7 @@ namespace amrex { Vector r; r.reserve(a.size()); - for (const auto& x : a) r.push_back(x.get()); + for (const auto& x : a) { r.push_back(x.get()); } return r; } @@ -83,7 +83,7 @@ namespace amrex { Vector r; r.reserve(a.size()); - for (const auto& x : a) r.push_back(&x); + for (const auto& x : a) { r.push_back(&x); } return r; } @@ -92,7 +92,7 @@ namespace amrex { Vector r; r.reserve(a.size()); - for (const auto& x : a) r.push_back(x.get()); + for (const auto& x : a) { r.push_back(x.get()); } return r; } @@ -109,7 +109,7 @@ namespace amrex { Vector > r; r.reserve(a.size()); - for (const auto& x : a) r.push_back(GetVecOfPtrs(x)); + for (const auto& x : a) { r.push_back(GetVecOfPtrs(x)); } return r; } @@ -122,7 +122,7 @@ namespace amrex { Vector > r; r.reserve(a.size()); - for (const auto& x : a) r.push_back(GetArrOfPtrs(x)); + for (const auto& x : a) { r.push_back(GetArrOfPtrs(x)); } return r; } @@ -132,7 +132,7 @@ namespace amrex { Vector > r; r.reserve(a.size()); - for (const auto& x : a) r.push_back(GetArrOfConstPtrs(x)); + for (const auto& x : a) { r.push_back(GetArrOfConstPtrs(x)); } return r; } @@ -142,7 +142,7 @@ namespace amrex { Vector > r; r.reserve(a.size()); - for (const auto& x : a) r.push_back(GetArrOfConstPtrs(x)); + for 
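// Illustrative sketch: the GetVecOf*Ptrs helpers braced above all follow the same
// pattern -- build a non-owning vector of raw pointers over owning storage. A
// generic stand-alone version (the function name is a placeholder):
#include <memory>
#include <vector>

template <typename T>
std::vector<T*> raw_pointer_view (std::vector<std::unique_ptr<T>> const& owners)
{
    std::vector<T*> r;
    r.reserve(owners.size());
    for (auto const& p : owners) { r.push_back(p.get()); }  // non-owning aliases
    return r;
}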
(const auto& x : a) { r.push_back(GetArrOfConstPtrs(x)); } return r; } @@ -154,7 +154,7 @@ namespace amrex { Vector > r; r.reserve(a.size()); - for (const auto& x : a) r.push_back(GetArrOfConstPtrs(x)); + for (const auto& x : a) { r.push_back(GetArrOfConstPtrs(x)); } return r; } @@ -166,7 +166,7 @@ namespace amrex { Vector > r; r.reserve(a.size()); - for (auto &x: a) r.push_back(GetArrOfPtrs(x)); + for (auto &x: a) { r.push_back(GetArrOfPtrs(x)); } return r; } #endif @@ -199,7 +199,7 @@ namespace amrex std::size_t removeDupDoit (Vector& vec, std::size_t start, std::size_t stop) { std::size_t N = stop-start; - if (N < 2) return stop; + if (N < 2) { return stop; } T* const data = vec.data() + start; T const sentinel = data[0]; // duplicates will be set to sentinel and removed later diff --git a/Src/Base/AMReX_VisMF.cpp b/Src/Base/AMReX_VisMF.cpp index a109a09b04e..52efe827460 100644 --- a/Src/Base/AMReX_VisMF.cpp +++ b/Src/Base/AMReX_VisMF.cpp @@ -1535,7 +1535,7 @@ VisMF::Read (FabArray &mf, // This allows us to read in an empty MultiFab without an error -- but only if explicitly told to if (allow_empty_mf > 0) { - if (hdr.m_ba.empty()) return; + if (hdr.m_ba.empty()) { return; } } else { if (hdr.m_ba.empty()) { @@ -2273,7 +2273,7 @@ VisMF::AsyncWriteDoit (const FabArray& mf, const std::string& mf_name RealDescriptor const& whichRD = FPC::NativeRealDescriptor(); auto hdr = std::make_shared(mf, VisMF::NFiles, VisMF::Header::Version_v1, false); - if (valid_cells_only) hdr->m_ngrow = IntVect(0); + if (valid_cells_only) { hdr->m_ngrow = IntVect(0); } constexpr int sizeof_int64_over_real = sizeof(int64_t) / sizeof(Real); const int n_local_fabs = mf.local_size(); @@ -2475,7 +2475,7 @@ VisMF::AsyncWriteDoit (const FabArray& mf, const std::string& mf_name ofs.rdbuf()->pubsetbuf(io_buffer.dataPtr(), io_buffer.size()); ofs.open(file_name.c_str(), (info.ispot == 0) ? 
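// Illustrative sketch: the VisMF hunk above selects truncate-vs-append when a rank
// opens its output segment (info.ispot == 0 appears to mean "first writer of this
// file") and aborts if the open fails. A stand-alone version of that open-mode
// choice; the name and the plain throw are placeholders for amrex::FileOpenFailed.
#include <fstream>
#include <stdexcept>
#include <string>

inline std::ofstream open_binary_segment (std::string const& file_name, bool first_writer)
{
    auto mode = first_writer ? (std::ios::binary | std::ios::trunc)   // start a fresh file
                             : (std::ios::binary | std::ios::app);    // append to an existing one
    std::ofstream ofs(file_name, mode);
    if (!ofs.good()) { throw std::runtime_error("failed to open " + file_name); }
    return ofs;
}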
(std::ios::binary | std::ios::trunc) : (std::ios::binary | std::ios::app)); - if (!ofs.good()) amrex::FileOpenFailed(file_name); + if (!ofs.good()) { amrex::FileOpenFailed(file_name); } for (auto const& fab : *myfabs) { fabio->write_header(ofs, fab, fab.nComp()); fabio->write(ofs, fab, 0, fab.nComp()); diff --git a/Src/Base/AMReX_iMultiFab.cpp b/Src/Base/AMReX_iMultiFab.cpp index 63465f0a036..3941c4267b2 100644 --- a/Src/Base/AMReX_iMultiFab.cpp +++ b/Src/Base/AMReX_iMultiFab.cpp @@ -148,7 +148,7 @@ iMultiFab::negate (const Box& region, int nghost) void iMultiFab::Initialize () { - if (initialized) return; + if (initialized) { return; } amrex::ExecOnFinalize(iMultiFab::Finalize); diff --git a/Src/Base/AMReX_parstream.cpp b/Src/Base/AMReX_parstream.cpp index cd223ac8180..824b9041ecc 100644 --- a/Src/Base/AMReX_parstream.cpp +++ b/Src/Base/AMReX_parstream.cpp @@ -59,7 +59,7 @@ namespace amrex int outInterv = 1; ParmParse pp("amrex"); pp.queryAdd("pout_int", outInterv); - if (outInterv == 0) outInterv=ParallelDescriptor::NProcs(); + if (outInterv == 0) { outInterv=ParallelDescriptor::NProcs(); } int thisProc = ParallelDescriptor::MyProc(); if ((thisProc % outInterv) != 0) diff --git a/Src/Base/Parser/AMReX_Parser_Y.H b/Src/Base/Parser/AMReX_Parser_Y.H index a6b39d54fb9..53b3dcc55a4 100644 --- a/Src/Base/Parser/AMReX_Parser_Y.H +++ b/Src/Base/Parser/AMReX_Parser_Y.H @@ -154,7 +154,7 @@ static constexpr std::string_view parser_node_s[] = struct parser_node { enum parser_node_t type; enum parser_node_t padding; - struct parser_node* l; + struct parser_node* l; // NOLINT(misc-confusable-identifiers) struct parser_node* r; struct parser_node* padding2; }; @@ -173,7 +173,7 @@ struct alignas(parser_node) parser_symbol { struct alignas(parser_node) parser_f1 { /* Builtin functions with one argument */ enum parser_node_t type; enum parser_f1_t ftype; - struct parser_node* l; + struct parser_node* l; // NOLINT(misc-confusable-identifiers) struct parser_node* padding1; struct parser_node* padding2; }; @@ -181,7 +181,7 @@ struct alignas(parser_node) parser_f1 { /* Builtin functions with one argument struct alignas(parser_node) parser_f2 { /* Builtin functions with two arguments */ enum parser_node_t type; enum parser_f2_t ftype; - struct parser_node* l; + struct parser_node* l; // NOLINT(misc-confusable-identifiers) struct parser_node* r; struct parser_node* padding; }; diff --git a/Src/Base/Parser/AMReX_Parser_Y.cpp b/Src/Base/Parser/AMReX_Parser_Y.cpp index 74dde9679ec..75a58498c15 100644 --- a/Src/Base/Parser/AMReX_Parser_Y.cpp +++ b/Src/Base/Parser/AMReX_Parser_Y.cpp @@ -631,7 +631,7 @@ namespace { bool parser_node_equal (struct parser_node* a, struct parser_node* b) { - if (a->type != b->type) return false; + if (a->type != b->type) { return false; } switch (a->type) { case PARSER_NUMBER: diff --git a/Src/Boundary/AMReX_InterpBndryData.H b/Src/Boundary/AMReX_InterpBndryData.H index a50cd62eb78..c2b57ba6114 100644 --- a/Src/Boundary/AMReX_InterpBndryData.H +++ b/Src/Boundary/AMReX_InterpBndryData.H @@ -169,7 +169,7 @@ InterpBndryDataT::setBndryValues (BndryRegisterT const& crse, int c_star if (max_order==3 || max_order==1) { MFItInfo info; - if (Gpu::notInLaunchRegion()) info.SetDynamic(true); + if (Gpu::notInLaunchRegion()) { info.SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif diff --git a/Src/Boundary/AMReX_Mask.cpp b/Src/Boundary/AMReX_Mask.cpp index d8b4853de83..3a69ad0343b 100644 --- a/Src/Boundary/AMReX_Mask.cpp +++ 
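// Illustrative sketch: the parstream hunk keeps the rule that amrex.pout_int == 0
// means "use the number of ranks as the interval", so only rank 0 passes the filter
// below. The rank filter in isolation (names are placeholders):
inline bool rank_writes_pout (int my_proc, int nprocs, int out_interval)
{
    if (out_interval == 0) { out_interval = nprocs; }  // 0 => a single writer in total
    return (my_proc % out_interval) == 0;              // every out_interval-th rank writes
}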
b/Src/Boundary/AMReX_Mask.cpp @@ -36,8 +36,9 @@ operator<< (std::ostream& os, for (IntVect p = sm; p <= bg; m.box().next(p)) { os << p; - for (int k = 0; k < ncomp; k++) + for (int k = 0; k < ncomp; k++) { os << " " << m(p,k); + } os << "\n"; } os << ")\n"; @@ -65,7 +66,8 @@ operator>> (std::istream& is, { is >> q; BL_ASSERT( p == q); - for( int k=0; k> m(p,k); + for( int k=0; k> m(p,k); + } is.ignore(BL_IGNORE_MAX, '\n'); } is.ignore(BL_IGNORE_MAX,'\n'); diff --git a/Src/Boundary/AMReX_YAFluxRegister.H b/Src/Boundary/AMReX_YAFluxRegister.H index 619b7100983..6f17c4de556 100644 --- a/Src/Boundary/AMReX_YAFluxRegister.H +++ b/Src/Boundary/AMReX_YAFluxRegister.H @@ -426,7 +426,7 @@ YAFluxRegisterT::FineAdd (const MFIter& mfi, // const int li = mfi.LocalIndex(); Vector& cfp_fabs = m_cfp_fab[li]; - if (cfp_fabs.empty()) return; + if (cfp_fabs.empty()) { return; } const Box& tbx = mfi.tilebox(); const Box& bx = amrex::coarsen(tbx, m_ratio); diff --git a/Src/EB/AMReX_EB2.cpp b/Src/EB/AMReX_EB2.cpp index 8786da3c124..ba4c1c3ec07 100644 --- a/Src/EB/AMReX_EB2.cpp +++ b/Src/EB/AMReX_EB2.cpp @@ -267,10 +267,10 @@ int comp_max_crse_level (Box cdomain, const Box& domain) { int ilev; for (ilev = 0; ilev < 30; ++ilev) { - if (cdomain.contains(domain)) break; + if (cdomain.contains(domain)) { break; } cdomain.refine(2); } - if (cdomain != domain) ilev = -1; + if (cdomain != domain) { ilev = -1; } return ilev; } } diff --git a/Src/EB/AMReX_EB2_2D_C.H b/Src/EB/AMReX_EB2_2D_C.H index 873467a9143..529d513163b 100644 --- a/Src/EB/AMReX_EB2_2D_C.H +++ b/Src/EB/AMReX_EB2_2D_C.H @@ -271,10 +271,10 @@ void build_cellflag_from_ap (int i, int j, Array4 const& cflag, // By default, all neighbors are already set. auto flg = cflag(i,j,k); - if (apx(i ,j ,k) == 0.0_rt) flg.setDisconnected(-1, 0, 0); - if (apx(i+1,j ,k) == 0.0_rt) flg.setDisconnected( 1, 0, 0); - if (apy(i ,j ,k) == 0.0_rt) flg.setDisconnected( 0,-1, 0); - if (apy(i ,j+1,k) == 0.0_rt) flg.setDisconnected( 0, 1, 0); + if (apx(i ,j ,k) == 0.0_rt) { flg.setDisconnected(-1, 0, 0); } + if (apx(i+1,j ,k) == 0.0_rt) { flg.setDisconnected( 1, 0, 0); } + if (apy(i ,j ,k) == 0.0_rt) { flg.setDisconnected( 0,-1, 0); } + if (apy(i ,j+1,k) == 0.0_rt) { flg.setDisconnected( 0, 1, 0); } if ((apx(i,j ,k) == 0.0_rt || apy(i-1,j,k) == 0.0_rt) && (apx(i,j-1,k) == 0.0_rt || apy(i ,j,k) == 0.0_rt)) diff --git a/Src/EB/AMReX_EB2_2D_C.cpp b/Src/EB/AMReX_EB2_2D_C.cpp index 5144ac09a42..b77b2ebd00e 100644 --- a/Src/EB/AMReX_EB2_2D_C.cpp +++ b/Src/EB/AMReX_EB2_2D_C.cpp @@ -278,10 +278,10 @@ int build_faces (Box const& bx, Array4 const& cell, else { int ncuts = 0; - if (fx(i ,j ,0) == Type::irregular) ++ncuts; - if (fx(i+1,j ,0) == Type::irregular) ++ncuts; - if (fy(i ,j ,0) == Type::irregular) ++ncuts; - if (fy(i ,j+1,0) == Type::irregular) ++ncuts; + if (fx(i ,j ,0) == Type::irregular) { ++ncuts; } + if (fx(i+1,j ,0) == Type::irregular) { ++ncuts; } + if (fy(i ,j ,0) == Type::irregular) { ++ncuts; } + if (fy(i ,j+1,0) == Type::irregular) { ++ncuts; } if (ncuts > 2) { Gpu::Atomic::Add(dp,1); } @@ -437,10 +437,10 @@ void set_connection_flags (Box const& bxg1, auto flg = cell(i,j,0); - if (fx(i ,j ,0) == Type::covered) flg.setDisconnected(IntVect(-1, 0)); - if (fx(i+1,j ,0) == Type::covered) flg.setDisconnected(IntVect( 1, 0)); - if (fy(i ,j ,0) == Type::covered) flg.setDisconnected(IntVect( 0,-1)); - if (fy(i ,j+1,0) == Type::covered) flg.setDisconnected(IntVect( 0, 1)); + if (fx(i ,j ,0) == Type::covered) { flg.setDisconnected(IntVect(-1, 0)); } + if (fx(i+1,j ,0) == 
Type::covered) { flg.setDisconnected(IntVect( 1, 0)); } + if (fy(i ,j ,0) == Type::covered) { flg.setDisconnected(IntVect( 0,-1)); } + if (fy(i ,j+1,0) == Type::covered) { flg.setDisconnected(IntVect( 0, 1)); } if (((fx(i,j,0) == Type::covered) || fy(i-1,j,0) == Type::covered) && ((fx(i,j-1,0) == Type::covered) || fy(i,j,0) == Type::covered)) diff --git a/Src/EB/AMReX_EB2_3D_C.H b/Src/EB/AMReX_EB2_3D_C.H index 394b114cae4..10dbdb342d2 100644 --- a/Src/EB/AMReX_EB2_3D_C.H +++ b/Src/EB/AMReX_EB2_3D_C.H @@ -636,107 +636,107 @@ void build_cellflag_from_ap (int i, int j, int k, Array4 const& cfla { flg.setConnected(0,0,0); - if (apx(i ,j,k) != 0.0_rt) flg.setConnected(-1, 0, 0); - if (apx(i+1,j,k) != 0.0_rt) flg.setConnected( 1, 0, 0); - if (apy(i,j ,k) != 0.0_rt) flg.setConnected( 0, -1, 0); - if (apy(i,j+1,k) != 0.0_rt) flg.setConnected( 0, 1, 0); - if (apz(i,j,k ) != 0.0_rt) flg.setConnected( 0, 0, -1); - if (apz(i,j,k+1) != 0.0_rt) flg.setConnected( 0, 0, 1); + if (apx(i ,j,k) != 0.0_rt) { flg.setConnected(-1, 0, 0); } + if (apx(i+1,j,k) != 0.0_rt) { flg.setConnected( 1, 0, 0); } + if (apy(i,j ,k) != 0.0_rt) { flg.setConnected( 0, -1, 0); } + if (apy(i,j+1,k) != 0.0_rt) { flg.setConnected( 0, 1, 0); } + if (apz(i,j,k ) != 0.0_rt) { flg.setConnected( 0, 0, -1); } + if (apz(i,j,k+1) != 0.0_rt) { flg.setConnected( 0, 0, 1); } if ( (apx(i,j,k) != 0.0_rt && apy(i-1,j,k) != 0.0_rt) || (apy(i,j,k) != 0.0_rt && apx(i,j-1,k) != 0.0_rt) ) { flg.setConnected(-1, -1, 0); - if (apz(i-1,j-1,k ) != 0.0_rt) flg.setConnected(-1,-1,-1); - if (apz(i-1,j-1,k+1) != 0.0_rt) flg.setConnected(-1,-1, 1); + if (apz(i-1,j-1,k ) != 0.0_rt) { flg.setConnected(-1,-1,-1); } + if (apz(i-1,j-1,k+1) != 0.0_rt) { flg.setConnected(-1,-1, 1); } } if ( (apx(i+1,j,k) != 0.0_rt && apy(i+1,j ,k) != 0.0_rt) || (apy(i ,j,k) != 0.0_rt && apx(i+1,j-1,k) != 0.0_rt) ) { flg.setConnected(1, -1, 0); - if (apz(i+1,j-1,k ) != 0.0_rt) flg.setConnected(1,-1,-1); - if (apz(i+1,j-1,k+1) != 0.0_rt) flg.setConnected(1,-1, 1); + if (apz(i+1,j-1,k ) != 0.0_rt) { flg.setConnected(1,-1,-1); } + if (apz(i+1,j-1,k+1) != 0.0_rt) { flg.setConnected(1,-1, 1); } } if ( (apx(i,j ,k) != 0.0_rt && apy(i-1,j+1,k) != 0.0_rt) || (apy(i,j+1,k) != 0.0_rt && apx(i ,j+1,k) != 0.0_rt) ) { flg.setConnected(-1, 1, 0); - if (apz(i-1,j+1,k ) != 0.0_rt) flg.setConnected(-1, 1,-1); - if (apz(i-1,j+1,k+1) != 0.0_rt) flg.setConnected(-1, 1, 1); + if (apz(i-1,j+1,k ) != 0.0_rt) { flg.setConnected(-1, 1,-1); } + if (apz(i-1,j+1,k+1) != 0.0_rt) { flg.setConnected(-1, 1, 1); } } if ( (apx(i+1,j ,k) != 0.0_rt && apy(i+1,j+1,k) != 0.0_rt) || (apy(i ,j+1,k) != 0.0_rt && apx(i+1,j+1,k) != 0.0_rt) ) { flg.setConnected(1, 1, 0); - if (apz(i+1,j+1,k ) != 0.0_rt) flg.setConnected(1, 1,-1); - if (apz(i+1,j+1,k+1) != 0.0_rt) flg.setConnected(1, 1, 1); + if (apz(i+1,j+1,k ) != 0.0_rt) { flg.setConnected(1, 1,-1); } + if (apz(i+1,j+1,k+1) != 0.0_rt) { flg.setConnected(1, 1, 1); } } if ( (apx(i,j,k) != 0.0_rt && apz(i-1,j,k ) != 0.0_rt) || (apz(i,j,k) != 0.0_rt && apx(i ,j,k-1) != 0.0_rt) ) { flg.setConnected(-1, 0, -1); - if (apy(i-1,j ,k-1) != 0.0_rt) flg.setConnected(-1,-1,-1); - if (apy(i-1,j+1,k-1) != 0.0_rt) flg.setConnected(-1, 1,-1); + if (apy(i-1,j ,k-1) != 0.0_rt) { flg.setConnected(-1,-1,-1); } + if (apy(i-1,j+1,k-1) != 0.0_rt) { flg.setConnected(-1, 1,-1); } } if ( (apx(i+1,j,k) != 0.0_rt && apz(i+1,j,k ) != 0.0_rt) || (apz(i ,j,k) != 0.0_rt && apx(i+1,j,k-1) != 0.0_rt) ) { flg.setConnected(1, 0, -1); - if (apy(i+1,j ,k-1) != 0.0_rt) flg.setConnected(1,-1,-1); - if 
(apy(i+1,j+1,k-1) != 0.0_rt) flg.setConnected(1, 1,-1); + if (apy(i+1,j ,k-1) != 0.0_rt) { flg.setConnected(1,-1,-1); } + if (apy(i+1,j+1,k-1) != 0.0_rt) { flg.setConnected(1, 1,-1); } } if ( (apx(i,j,k ) != 0.0_rt && apz(i-1,j,k+1) != 0.0_rt) || (apz(i,j,k+1) != 0.0_rt && apx(i ,j,k+1) != 0.0_rt) ) { flg.setConnected(-1, 0, 1); - if (apy(i-1,j ,k+1) != 0.0_rt) flg.setConnected(-1,-1, 1); - if (apy(i-1,j+1,k+1) != 0.0_rt) flg.setConnected(-1, 1, 1); + if (apy(i-1,j ,k+1) != 0.0_rt) { flg.setConnected(-1,-1, 1); } + if (apy(i-1,j+1,k+1) != 0.0_rt) { flg.setConnected(-1, 1, 1); } } if ( (apx(i+1,j,k ) != 0.0_rt && apz(i+1,j,k+1) != 0.0_rt) || (apz(i ,j,k+1) != 0.0_rt && apx(i+1,j,k+1) != 0.0_rt) ) { flg.setConnected(1, 0, 1); - if (apy(i+1,j ,k+1) != 0.0_rt) flg.setConnected(1,-1, 1); - if (apy(i+1,j+1,k+1) != 0.0_rt) flg.setConnected(1, 1, 1); + if (apy(i+1,j ,k+1) != 0.0_rt) { flg.setConnected(1,-1, 1); } + if (apy(i+1,j+1,k+1) != 0.0_rt) { flg.setConnected(1, 1, 1); } } if ( (apy(i,j,k) != 0.0_rt && apz(i,j-1,k ) != 0.0_rt) || (apz(i,j,k) != 0.0_rt && apy(i,j ,k-1) != 0.0_rt) ) { flg.setConnected(0, -1, -1); - if (apx(i ,j-1,k-1) != 0.0_rt) flg.setConnected(-1,-1,-1); - if (apx(i+1,j-1,k-1) != 0.0_rt) flg.setConnected( 1,-1,-1); + if (apx(i ,j-1,k-1) != 0.0_rt) { flg.setConnected(-1,-1,-1); } + if (apx(i+1,j-1,k-1) != 0.0_rt) { flg.setConnected( 1,-1,-1); } } if ( (apy(i,j+1,k) != 0.0_rt && apz(i,j+1,k ) != 0.0_rt) || (apz(i,j ,k) != 0.0_rt && apy(i,j+1,k-1) != 0.0_rt) ) { flg.setConnected(0, 1, -1); - if (apx(i ,j+1,k-1) != 0.0_rt) flg.setConnected(-1, 1,-1); - if (apx(i+1,j+1,k-1) != 0.0_rt) flg.setConnected( 1, 1,-1); + if (apx(i ,j+1,k-1) != 0.0_rt) { flg.setConnected(-1, 1,-1); } + if (apx(i+1,j+1,k-1) != 0.0_rt) { flg.setConnected( 1, 1,-1); } } if ( (apy(i,j,k ) != 0.0_rt && apz(i,j-1,k+1) != 0.0_rt) || (apz(i,j,k+1) != 0.0_rt && apy(i,j ,k+1) != 0.0_rt) ) { flg.setConnected(0, -1, 1); - if (apx(i ,j-1,k+1) != 0.0_rt) flg.setConnected(-1,-1, 1); - if (apx(i+1,j-1,k+1) != 0.0_rt) flg.setConnected( 1,-1, 1); + if (apx(i ,j-1,k+1) != 0.0_rt) { flg.setConnected(-1,-1, 1); } + if (apx(i+1,j-1,k+1) != 0.0_rt) { flg.setConnected( 1,-1, 1); } } if ( (apy(i,j+1,k ) != 0.0_rt && apz(i,j+1,k+1) != 0.0_rt) || (apz(i,j ,k+1) != 0.0_rt && apy(i,j+1,k+1) != 0.0_rt) ) { flg.setConnected(0, 1, 1); - if (apx(i ,j+1,k+1) != 0.0_rt) flg.setConnected(-1, 1, 1); - if (apx(i+1,j+1,k+1) != 0.0_rt) flg.setConnected( 1, 1, 1); + if (apx(i ,j+1,k+1) != 0.0_rt) { flg.setConnected(-1, 1, 1); } + if (apx(i+1,j+1,k+1) != 0.0_rt) { flg.setConnected( 1, 1, 1); } } } diff --git a/Src/EB/AMReX_EB2_GeometryShop.H b/Src/EB/AMReX_EB2_GeometryShop.H index 6b5e255286e..974738e94f1 100644 --- a/Src/EB/AMReX_EB2_GeometryShop.H +++ b/Src/EB/AMReX_EB2_GeometryShop.H @@ -123,7 +123,7 @@ BrentRootFinder (GpuArray const& lo, } // Check whether in bounds - if (p > 0) q = -q; + if (p > 0) { q = -q; } p = std::abs(p); @@ -158,8 +158,8 @@ BrentRootFinder (GpuArray const& lo, } else { - if (xm < 0) bPt[rangedir] = bPt[rangedir] - tol1; - else bPt[rangedir] = bPt[rangedir] + tol1; + if (xm < 0) { bPt[rangedir] = bPt[rangedir] - tol1; } + else { bPt[rangedir] = bPt[rangedir] + tol1; } } fb = IF_f(f, bPt); @@ -220,7 +220,7 @@ public: } else { ++nfluid; } - if (nbody > 0 && nfluid > 0) return mixedcells; + if (nbody > 0 && nfluid > 0) { return mixedcells; } } } } diff --git a/Src/EB/AMReX_EB2_Level.cpp b/Src/EB/AMReX_EB2_Level.cpp index 9289f5d77f9..795338db135 100644 --- a/Src/EB/AMReX_EB2_Level.cpp +++ b/Src/EB/AMReX_EB2_Level.cpp @@ 
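// Illustrative sketch: build_cellflag_from_ap above encodes one rule -- a face whose
// area fraction is zero disconnects the two cells it separates (the 2D variant
// starts fully connected and calls setDisconnected; the 3D variant sets connections
// only where the fraction is nonzero). A toy 2D version with a plain bitmask flag;
// ToyCellFlag and the bit layout are placeholders, not the EBCellFlag API.
#include <array>

struct ToyCellFlag {
    unsigned bits = 0xF;                                   // bit 0:-x, 1:+x, 2:-y, 3:+y connected
    void setDisconnected (int b) { bits &= ~(1u << b); }
};

inline ToyCellFlag build_flag_from_ap (std::array<double,4> const& ap) // {-x,+x,-y,+y} face fractions
{
    ToyCellFlag flg;
    for (int f = 0; f < 4; ++f) {
        if (ap[f] == 0.0) { flg.setDisconnected(f); }      // zero face area => neighbor unreachable
    }
    return flg;
}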
-162,7 +162,7 @@ Level::coarsenFromFine (Level& fineLevel, bool fill_boundary) ParallelDescriptor::ReduceBoolOr(b); mvmc_error = b; } - if (mvmc_error) return mvmc_error; + if (mvmc_error) { return mvmc_error; } const int ng = 2; m_cellflag.define(m_grids, m_dmap, 1, ng); @@ -241,9 +241,9 @@ Level::coarsenFromFine (Level& fineLevel, bool fill_boundary) vfrac(i,j,k) = 0.0; cflag(i,j,k) = EBCellFlag::TheCoveredCell(); } - AMREX_D_TERM(if (xbx.contains(cell)) apx(i,j,k) = 0.0;, - if (ybx.contains(cell)) apy(i,j,k) = 0.0;, - if (zbx.contains(cell)) apz(i,j,k) = 0.0;); + AMREX_D_TERM(if (xbx.contains(cell)) { apx(i,j,k) = 0.0; }, + if (ybx.contains(cell)) { apy(i,j,k) = 0.0; }, + if (zbx.contains(cell)) { apz(i,j,k) = 0.0; }) }); } } @@ -483,7 +483,7 @@ void Level::fillVolFrac (MultiFab& vfrac, const Geometry& geom) const { vfrac.setVal(1.0); - if (isAllRegular()) return; + if (isAllRegular()) { return; } vfrac.ParallelCopy(m_volfrac,0,0,1,0,vfrac.nGrow(),geom.periodicity()); @@ -715,7 +715,7 @@ Level::fillAreaFrac (Array const& a_areafrac, const Ge a_areafrac[idim]->setVal(1.0); } - if (isAllRegular()) return; + if (isAllRegular()) { return; } for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { diff --git a/Src/EB/AMReX_EBFluxRegister.cpp b/Src/EB/AMReX_EBFluxRegister.cpp index ed2c42f758c..7ba67a562c7 100644 --- a/Src/EB/AMReX_EBFluxRegister.cpp +++ b/Src/EB/AMReX_EBFluxRegister.cpp @@ -166,7 +166,7 @@ EBFluxRegister::FineAdd (const MFIter& mfi, const int li = mfi.LocalIndex(); Vector& cfp_fabs = m_cfp_fab[li]; - if (cfp_fabs.empty()) return; + if (cfp_fabs.empty()) { return; } const Box& tbx = mfi.tilebox(); BL_ASSERT(tbx.cellCentered()); @@ -340,7 +340,7 @@ EBFluxRegister::Reflux (MultiFab& crse_state, const amrex::MultiFab& crse_vfrac, const Box& gdomain = m_crse_geom.growPeriodicDomain(1); MFItInfo info; - if (Gpu::notInLaunchRegion()) info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif diff --git a/Src/EB/AMReX_EBMultiFabUtil.cpp b/Src/EB/AMReX_EBMultiFabUtil.cpp index a5202c7d805..532653b6025 100644 --- a/Src/EB/AMReX_EBMultiFabUtil.cpp +++ b/Src/EB/AMReX_EBMultiFabUtil.cpp @@ -27,7 +27,7 @@ void EB_set_covered (MultiFab& mf, int icomp, int ncomp, int ngrow, Real val) { const auto *const factory = dynamic_cast(&(mf.Factory())); - if (factory == nullptr) return; + if (factory == nullptr) { return; } const auto& flags = factory->getMultiEBCellFlagFab(); AMREX_ALWAYS_ASSERT(mf.ixType().cellCentered() || mf.ixType().nodeCentered()); @@ -69,7 +69,7 @@ void EB_set_covered (MultiFab& mf, int icomp, int ncomp, int ngrow, const Vector& a_vals) { const auto *const factory = dynamic_cast(&(mf.Factory())); - if (factory == nullptr) return; + if (factory == nullptr) { return; } const auto& flags = factory->getMultiEBCellFlagFab(); AMREX_ALWAYS_ASSERT(mf.ixType().cellCentered() || mf.ixType().nodeCentered()); @@ -109,7 +109,7 @@ void EB_set_covered_faces (const Array& umac, Real val) { const auto *const factory = dynamic_cast(&(umac[0]->Factory())); - if (factory == nullptr) return; + if (factory == nullptr) { return; } const auto& area = factory->getAreaFrac(); const auto& flags = factory->getMultiEBCellFlagFab(); @@ -220,7 +220,7 @@ void EB_set_covered_faces (const Array& umac, const int scomp, const int ncomp, const Vector& a_vals ) { const auto *const factory = dynamic_cast(&(umac[0]->Factory())); - if (factory == nullptr) return; + if (factory == nullptr) { 
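// Illustrative sketch: several EBMultiFabUtil routines above open with the same
// guard -- dynamic_cast the MultiFab's factory to the EB-aware factory type and
// return early when the cast fails (the MultiFab then has no EB data to touch).
// The pattern in isolation; FabFactoryBase/EBFabFactory are placeholder types, the
// real code casts to the AMReX EB factory.
struct FabFactoryBase { virtual ~FabFactoryBase () = default; };
struct EBFabFactory : FabFactoryBase { /* EB metadata would live here */ };

inline void do_eb_only_work (FabFactoryBase const& factory)
{
    const auto* ebf = dynamic_cast<EBFabFactory const*>(&factory);
    if (ebf == nullptr) { return; }   // not EB-aware: nothing to do
    // ... the real routines go on to query flags, volume and area fractions ...
}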
return; } const auto& area = factory->getAreaFrac(); const auto& flags = factory->getMultiEBCellFlagFab(); @@ -299,7 +299,7 @@ EB_set_covered_faces (const Array& umac, const int sco for (int k = lo.z; k <= hi.z; ++k) { for (int j = lo.y; j <= hi.y; ++j) { for (int i = lo.x; i <= hi.x; ++i) { - if (ax(i,j,k) == 0.0) u(i,j,k,n) = vals[n]; + if (ax(i,j,k) == Real(0.0)) { u(i,j,k,n) = vals[n]; } }}} } } @@ -311,7 +311,7 @@ EB_set_covered_faces (const Array& umac, const int sco for (int k = lo.z; k <= hi.z; ++k) { for (int j = lo.y; j <= hi.y; ++j) { for (int i = lo.x; i <= hi.x; ++i) { - if (ay(i,j,k) == 0.0) v(i,j,k,n) = vals[n]; + if (ay(i,j,k) == Real(0.0)) { v(i,j,k,n) = vals[n]; } }}} } } @@ -323,7 +323,7 @@ EB_set_covered_faces (const Array& umac, const int sco for (int k = lo.z; k <= hi.z; ++k) { for (int j = lo.y; j <= hi.y; ++j) { for (int i = lo.x; i <= hi.x; ++i) { - if (az(i,j,k) == 0.0) w(i,j,k,n) = vals[n]; + if (az(i,j,k) == Real(0.0)) { w(i,j,k,n) = vals[n]; } }}} } } @@ -642,7 +642,7 @@ void EB_average_down_boundaries (const MultiFab& fine, MultiFab& crse, if (isMFIterSafe(fine, crse)) { MFItInfo info; - if (Gpu::notInLaunchRegion()) info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -712,7 +712,7 @@ void EB_computeDivergence (MultiFab& divu, const Array dxinv = geom.InvCellSizeArray(); MFItInfo info; - if (Gpu::notInLaunchRegion()) info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -774,7 +774,7 @@ void EB_computeDivergence (MultiFab& divu, const Array Real(1.e-8)) + if (std::abs(fcx(i,j,k,0)) > Real(1.e-8)) { jj = (fcx(i,j,k,0) < Real(0.0)) ? j - 1 : j + 1; - else if (apx(i,j-1,k) > Real(0.)) + } else if (apx(i,j-1,k) > Real(0.)) { jj = j-1; - else + } else { jj = j+1; + } - if (std::abs(fcx(i,j,k,1)) > Real(1.e-8)) + if (std::abs(fcx(i,j,k,1)) > Real(1.e-8)) { kk = (fcx(i,j,k,1) < Real(0.0)) ? 
k - 1 : k + 1; - else if (apx(i,j,k-1) > Real(0.)) + } else if (apx(i,j,k-1) > Real(0.)) { kk = k-1; - else + } else { kk = k+1; + } // If any of these cells has zero volume we don't want to use this stencil Real test_zero = cvol(i-1,jj,k ) * cvol(i-1,j ,kk) * cvol(i-1,jj,kk) * @@ -702,12 +704,11 @@ void eb_interp_centroid2facecent_x (Box const& ubx, Real test_zero_jkalt = cvol(i-1,jalt,k) * cvol(i-1,j,kalt) * cvol(i-1,jalt,kalt) * cvol(i ,jalt,k) * cvol(i ,j,kalt) * cvol(i ,jalt,kalt); - if (test_zero_jalt > Real(0.)) + if (test_zero_jalt > Real(0.)) { jj = jalt; - else if (test_zero_kalt > Real(0.)) + } else if (test_zero_kalt > Real(0.)) { kk = kalt; - else if (test_zero_jkalt > Real(0.)) - { + } else if (test_zero_jkalt > Real(0.)) { jj = jalt; kk = kalt; } @@ -766,15 +767,17 @@ void eb_interp_centroid2facecent_x (Box const& ubx, // This is the location of the face centroid relative to the central node // Recall fcx holds (y,z) of the x-face centroid as components ( /0/1) - if (j < jj) + if (j < jj) { y = Real(-0.5) + fcx(i,j,k,0); // (j,k) is in lower half of stencil so y < 0 - else + } else { y = Real(0.5) + fcx(i,j,k,0); // (j,k) is in upper half of stencil so y > 0 + } - if (k < kk) + if (k < kk) { z = Real(-0.5) + fcx(i,j,k,1); // (j,k) is in lower half of stencil so z < 0 - else + } else { z = Real(0.5) + fcx(i,j,k,1); // (j,k) is in upper half of stencil so z > 0 + } if (j < jj && k < kk) // (j,k) is lower left, (j+1,k+1) is upper right { @@ -889,19 +892,21 @@ void eb_interp_centroid2facecent_y (Box const& vbx, // We must add additional tests to avoid the case where fcy is very close to zero, but i-1/i+1 or k-1/k+1 // might be covered cells -- this can happen when the EB is exactly aligned with the grid planes int ii,kk; - if (std::abs(fcy(i,j,k,0)) > Real(1.e-8)) + if (std::abs(fcy(i,j,k,0)) > Real(1.e-8)) { ii = (fcy(i,j,k,0) < Real(0.0)) ? i - 1 : i + 1; - else if (apy(i-1,j,k) > Real(0.)) + } else if (apy(i-1,j,k) > Real(0.)) { ii = i-1; - else + } else { ii = i+1; + } - if (std::abs(fcy(i,j,k,1)) > Real(1.e-8)) + if (std::abs(fcy(i,j,k,1)) > Real(1.e-8)) { kk = (fcy(i,j,k,1) < Real(0.0)) ? 
k - 1 : k + 1; - else if (apy(i,j,k-1) > Real(0.)) + } else if (apy(i,j,k-1) > Real(0.)) { kk = k-1; - else + } else { kk = k+1; + } // If any of these cells has zero volume we don't want to use this stencil Real test_zero = cvol(ii,j-1,k) * cvol(i,j-1,kk) * cvol(ii,j-1,kk) * @@ -920,12 +925,11 @@ void eb_interp_centroid2facecent_y (Box const& vbx, Real test_zero_ikalt = cvol(ialt,j-1,k) * cvol(i,j-1,kalt) * cvol(ialt,j-1,kalt) * cvol(ialt,j ,k) * cvol(i,j ,kalt) * cvol(ialt,j ,kalt); - if (test_zero_ialt > Real(0.)) + if (test_zero_ialt > Real(0.)) { ii = ialt; - else if (test_zero_kalt > Real(0.)) + } else if (test_zero_kalt > Real(0.)) { kk = kalt; - else if (test_zero_ikalt > Real(0.)) - { + } else if (test_zero_ikalt > Real(0.)) { ii = ialt; kk = kalt; } @@ -981,15 +985,17 @@ void eb_interp_centroid2facecent_y (Box const& vbx, // This is the location of the face centroid relative to the central node // Recall fcy holds (x,z) of the x-face centroid as components (0/ /1) - if (i < ii) + if (i < ii) { x = Real(-0.5) + fcy(i,j,k,0); // (i,k) is in lower half of stencil so x < 0 - else + } else { x = Real(0.5) + fcy(i,j,k,0); // (i,k) is in upper half of stencil so x > 0 + } - if (k < kk) + if (k < kk) { z = Real(-0.5) + fcy(i,j,k,1); // (i,k) is in lower half of stencil so z < 0 - else + } else { z = Real(0.5) + fcy(i,j,k,1); // (i,k) is in upper half of stencil so z > 0 + } if (i < ii && k < kk) // (i,k) is lower left, (i+1,k+1) is upper right { @@ -1103,19 +1109,21 @@ void eb_interp_centroid2facecent_z (Box const& wbx, // We must add additional tests to avoid the case where fcz is very close to zero, but i-1/i+1 or j-1/j+1 // might be covered cells -- this can happen when the EB is exactly aligned with the grid planes int ii,jj; - if (std::abs(fcz(i,j,k,0)) > Real(1.e-8)) + if (std::abs(fcz(i,j,k,0)) > Real(1.e-8)) { ii = (fcz(i,j,k,0) < Real(0.0)) ? i - 1 : i + 1; - else if (apz(i-1,j,k) > Real(0.)) + } else if (apz(i-1,j,k) > Real(0.)) { ii = i-1; - else + } else { ii = i+1; + } - if (std::abs(fcz(i,j,k,1)) > Real(1.e-8)) + if (std::abs(fcz(i,j,k,1)) > Real(1.e-8)) { jj = (fcz(i,j,k,1) < Real(0.0)) ? 
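// Illustrative sketch: the eb_interp_centroid2facecent hunks keep the same
// neighbor-selection rule while adding braces -- take the second stencil cell on the
// side the face centroid leans toward, unless the centroid is essentially centered
// (|fc| below a small tolerance), in which case fall back to whichever side has a
// nonzero area fraction. Stand-alone form; the name, the default tolerance and the
// scalar arguments are placeholders for the Array4 accesses.
#include <cmath>

inline int pick_tangential_neighbor (int j, double fc, double ap_lo, double tol = 1.e-8)
{
    if (std::abs(fc) > tol) {
        return (fc < 0.0) ? j - 1 : j + 1;   // follow the centroid offset
    } else if (ap_lo > 0.0) {
        return j - 1;                        // centered: use the open low side
    } else {
        return j + 1;                        // otherwise use the high side
    }
}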
j - 1 : j + 1; - else if (apz(i,j-1,k) > Real(0.)) + } else if (apz(i,j-1,k) > Real(0.)) { jj = j-1; - else + } else { jj = j+1; + } // If any of these cells has zero volume we don't want to use this stencil Real test_zero = cvol(ii,j,k-1) * cvol(i,jj,k-1) * cvol(ii,jj,k-1) * @@ -1134,12 +1142,11 @@ void eb_interp_centroid2facecent_z (Box const& wbx, Real test_zero_ijalt = cvol(ialt,j,k-1) * cvol(i,jalt,k-1) * cvol(ialt,jalt,k-1) * cvol(ialt,j,k ) * cvol(i,jalt,k ) * cvol(ialt,jalt,k ); - if (test_zero_ialt > Real(0.)) + if (test_zero_ialt > Real(0.)) { ii = ialt; - else if (test_zero_jalt > Real(0.)) + } else if (test_zero_jalt > Real(0.)) { jj = jalt; - else if (test_zero_ijalt > Real(0.)) - { + } else if (test_zero_ijalt > Real(0.)) { ii = ialt; jj = jalt; } @@ -1195,15 +1202,17 @@ void eb_interp_centroid2facecent_z (Box const& wbx, // This is the location of the face centroid relative to the central node // Recall fcz holds (x,y) of the x-face centroid as components (0/1/ ) - if (i < ii) + if (i < ii) { x = Real(-0.5) + fcz(i,j,k,0); // (i,k) is in lower half of stencil so x < 0 - else + } else { x = Real(0.5) + fcz(i,j,k,0); // (i,k) is in upper half of stencil so x > 0 + } - if (j < jj) + if (j < jj) { y = Real(-0.5) + fcz(i,j,k,1); // (i,k) is in lower half of stencil so z < 0 - else + } else { y = Real(0.5) + fcz(i,j,k,1); // (i,k) is in upper half of stencil so z > 0 + } if (i < ii && j < jj) // (i,j) is lower left, (i+1,j+1) is upper right { diff --git a/Src/EB/AMReX_EBToPVD.cpp b/Src/EB/AMReX_EBToPVD.cpp index dc6e93d5fb2..3835af4d3df 100644 --- a/Src/EB/AMReX_EBToPVD.cpp +++ b/Src/EB/AMReX_EBToPVD.cpp @@ -246,12 +246,14 @@ void EBToPVD::reorder_polygon(const std::vector>& lpoints, int longest = 2; if(std::abs(lnormal[0]) > std::abs(lnormal[1])) { - if(std::abs(lnormal[0]) > std::abs(lnormal[2])) + if(std::abs(lnormal[0]) > std::abs(lnormal[2])) { longest = 0; + } } else { - if(std::abs(lnormal[1]) > std::abs(lnormal[2])) + if(std::abs(lnormal[1]) > std::abs(lnormal[2])) { longest = 1; + } } for(int i = 1; i <= lconnect[0]; ++i) { @@ -436,14 +438,15 @@ void EBToPVD::EBGridCoverage(const int myID, const Real* problo, const Real* dx, for(int j = lo.y; j <= hi.y; ++j) { for(int i = lo.x; i <= hi.x; ++i) { - if(flag(i,j,k).isSingleValued()) + if(flag(i,j,k).isSingleValued()) { lc1 = lc1 + 1; + } } } }; ++m_grid; - if(lc1 == 0) return; + if(lc1 == 0) { return; } std::stringstream ss; ss << std::setw(4) << std::setfill('0') << myID; diff --git a/Src/EB/AMReX_EB_FluxRedistribute.cpp b/Src/EB/AMReX_EB_FluxRedistribute.cpp index f0f38aee929..d5d2b22c0c7 100644 --- a/Src/EB/AMReX_EB_FluxRedistribute.cpp +++ b/Src/EB/AMReX_EB_FluxRedistribute.cpp @@ -102,17 +102,19 @@ amrex_flux_redistribute ( #if (AMREX_SPACEDIM == 2) int kk(0); #else - for (int kk = -1; kk <= 1; kk++) + for (int kk = -1; kk <= 1; kk++) { #endif - for (int jj = -1; jj <= 1; jj++) - for (int ii = -1; ii <= 1; ii++) - if ( (ii != 0 || jj != 0 || kk != 0) && flag(i,j,k).isConnected(ii,jj,kk) && - dbox.contains(IntVect(AMREX_D_DECL(i+ii,j+jj,k+kk)))) - { - Real wted_frac = vfrac(i+ii,j+jj,k+kk) * wt(i+ii,j+jj,k+kk) * mask(i+ii,j+jj,k+kk); - vtot += wted_frac; - divnc += wted_frac * divc(i+ii,j+jj,k+kk,n); - } + for (int jj = -1; jj <= 1; jj++) { + for (int ii = -1; ii <= 1; ii++) { + if ( (ii != 0 || jj != 0 || kk != 0) && flag(i,j,k).isConnected(ii,jj,kk) && + dbox.contains(IntVect(AMREX_D_DECL(i+ii,j+jj,k+kk)))) + { + Real wted_frac = vfrac(i+ii,j+jj,k+kk) * wt(i+ii,j+jj,k+kk) * mask(i+ii,j+jj,k+kk); + vtot += wted_frac; + 
divnc += wted_frac * divc(i+ii,j+jj,k+kk,n); + } + AMREX_D_TERM(},},}) + divnc /= vtot; // We need to multiply by mask to make sure optmp is zero for cells @@ -135,17 +137,19 @@ amrex_flux_redistribute ( #if (AMREX_SPACEDIM == 2) int kk(0); #else - for (int kk = -1; kk <= 1; kk++) + for (int kk = -1; kk <= 1; kk++) { #endif - for (int jj = -1; jj <= 1; jj++) - for (int ii = -1; ii <= 1; ii++) - if ( (ii != 0 || jj != 0 || kk != 0) && flag(i,j,k).isConnected(ii,jj,kk) && - dbox.contains(IntVect(AMREX_D_DECL(i+ii,j+jj,k+kk)))) - { - Real unwted_frac = vfrac(i+ii,j+jj,k+kk) * mask(i+ii,j+jj,k+kk); - vtot += unwted_frac; - divnc += unwted_frac*divc(i+ii,j+jj,k+kk,n); - } + for (int jj = -1; jj <= 1; jj++) { + for (int ii = -1; ii <= 1; ii++) { + if ( (ii != 0 || jj != 0 || kk != 0) && flag(i,j,k).isConnected(ii,jj,kk) && + dbox.contains(IntVect(AMREX_D_DECL(i+ii,j+jj,k+kk)))) + { + Real unwted_frac = vfrac(i+ii,j+jj,k+kk) * mask(i+ii,j+jj,k+kk); + vtot += unwted_frac; + divnc += unwted_frac*divc(i+ii,j+jj,k+kk,n); + } + AMREX_D_TERM(},},}) + divnc /= vtot; // We need to multiply by mask to make sure optmp is zero for cells @@ -172,14 +176,16 @@ amrex_flux_redistribute ( #if (AMREX_SPACEDIM == 2) int kk(0); #else - for (int kk = -1; kk <= 1; kk++) + for (int kk = -1; kk <= 1; kk++) { #endif - for (int jj = -1; jj <= 1; jj++) - for (int ii = -1; ii <= 1; ii++) - if ( (ii != 0 || jj != 0 || kk != 0) && flag(i,j,k).isConnected(ii,jj,kk) ) - { - wtot += vfrac(i+ii,j+jj,k+kk)*wt(i+ii,j+jj,k+kk)* mask(i+ii,j+jj,k+kk); - } + for (int jj = -1; jj <= 1; jj++) { + for (int ii = -1; ii <= 1; ii++) { + if ( (ii != 0 || jj != 0 || kk != 0) && flag(i,j,k).isConnected(ii,jj,kk) ) + { + wtot += vfrac(i+ii,j+jj,k+kk)*wt(i+ii,j+jj,k+kk)* mask(i+ii,j+jj,k+kk); + } + AMREX_D_TERM(},},}) + #ifdef AMREX_USE_FLOAT wtot = Real(1.0)/(wtot + Real(1.e-30)); #else @@ -212,71 +218,72 @@ amrex_flux_redistribute ( #else ( (i >= bx_ilo) && (i <= bx_ihi) && (j >= bx_jlo) && (j <= bx_jhi) && (k >= bx_klo) && (k <= bx_khi) ); #endif - if (inside) as_fine_valid_cell = true; + if (inside) { as_fine_valid_cell = true; } as_fine_ghost_cell = (levmsk(i,j,k) == level_mask_not_covered); // not covered by other grids } #if (AMREX_SPACEDIM == 2) kk = 0; #else - for (int kk = -1; kk <= 1; kk++) + for (int kk = -1; kk <= 1; kk++) { #endif - for (int jj = -1; jj <= 1; jj++) - for (int ii = -1; ii <= 1; ii++) - if ( (ii != 0 || jj != 0 || kk != 0) && flag(i,j,k).isConnected(ii,jj,kk) ) - { - int iii = i + ii; - int jjj = j + jj; - int kkk = k + kk; - - Real drho = delm(i,j,k,n)*wtot*wt(iii,jjj,kkk)* mask(iii,jjj,kkk) ; - Gpu::Atomic::Add(&optmp(iii,jjj,kkk,n), drho); - - valid_dst_cell = ( (iii >= bx_ilo) && (iii <= bx_ihi) && - (jjj >= bx_jlo) && (jjj <= bx_jhi) ); + for (int jj = -1; jj <= 1; jj++) { + for (int ii = -1; ii <= 1; ii++) { + if ( (ii != 0 || jj != 0 || kk != 0) && flag(i,j,k).isConnected(ii,jj,kk) ) + { + int iii = i + ii; + int jjj = j + jj; + int kkk = k + kk; + + Real drho = delm(i,j,k,n)*wtot*wt(iii,jjj,kkk)* mask(iii,jjj,kkk) ; + Gpu::Atomic::Add(&optmp(iii,jjj,kkk,n), drho); + + valid_dst_cell = ( (iii >= bx_ilo) && (iii <= bx_ihi) && + (jjj >= bx_jlo) && (jjj <= bx_jhi) ); #if (AMREX_SPACEDIM == 3) - valid_dst_cell &= ( (kkk >= bx_klo) && (kkk <= bx_khi) ); + valid_dst_cell &= ( (kkk >= bx_klo) && (kkk <= bx_khi) ); #endif - if (as_crse_crse_cell) - { - if ( (rr_flag_crse(iii,jjj,kkk) == amrex_yafluxreg_fine_cell) && - (vfrac(i,j,k) > reredistribution_threshold) ) - { - Gpu::Atomic::Add(&rr_drho_crse(i,j,k,n), - 
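// Illustrative sketch: the flux-redistribution loops above (now closed with explicit
// braces through AMREX_D_TERM so the 2D build still balances) compute the
// "non-conservative" divergence of a cut cell as a weighted average of divc over its
// connected neighbors. A 2D stand-alone version of that average; the callback
// parameters are placeholders for the flag/vfrac/wt/mask Array4s.
#include <functional>

inline double weighted_neighbor_average (int i, int j,
                                         std::function<bool(int,int)> const& connected,
                                         std::function<double(int,int)> const& weight,  // vfrac*wt*mask
                                         std::function<double(int,int)> const& divc)
{
    double vtot = 0.0, divnc = 0.0;
    for (int jj = -1; jj <= 1; ++jj) {
        for (int ii = -1; ii <= 1; ++ii) {
            if ((ii != 0 || jj != 0) && connected(i+ii, j+jj)) {
                double w = weight(i+ii, j+jj);
                vtot  += w;
                divnc += w * divc(i+ii, j+jj);
            }
        }
    }
    return (vtot > 0.0) ? divnc/vtot : 0.0;   // zero-weight guard added only in this sketch
}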
dt*drho*(vfrac(iii,jjj,kkk)/vfrac(i,j,k))); - } - } - - if (as_crse_covered_cell && valid_dst_cell) - { - if ( (rr_flag_crse(iii,jjj,kkk) == amrex_yafluxreg_crse_fine_boundary_cell) && - (vfrac(iii,jjj,kkk) > reredistribution_threshold) ) - { + if (as_crse_crse_cell) + { + if ( (rr_flag_crse(iii,jjj,kkk) == amrex_yafluxreg_fine_cell) && + (vfrac(i,j,k) > reredistribution_threshold) ) + { + Gpu::Atomic::Add(&rr_drho_crse(i,j,k,n), + dt*drho*(vfrac(iii,jjj,kkk)/vfrac(i,j,k))); + } + } + + if (as_crse_covered_cell && valid_dst_cell) + { + if ( (rr_flag_crse(iii,jjj,kkk) == amrex_yafluxreg_crse_fine_boundary_cell) && + (vfrac(iii,jjj,kkk) > reredistribution_threshold) ) + { // recipient is a crse/fine boundary cell - Gpu::Atomic::Add(&rr_drho_crse(iii,jjj,kkk,n), -dt*drho); - } - } - - if (as_fine_valid_cell && !valid_dst_cell) - { - Gpu::Atomic::Add(&dm_as_fine(iii,jjj,kkk,n), dt*drho*vfrac(iii,jjj,kkk)); - } - - if (as_fine_ghost_cell && valid_dst_cell) - { - Gpu::Atomic::Add(&dm_as_fine(i,j,k,n), -dt*drho*vfrac(iii,jjj,kkk)); - } - - } // isConnected + Gpu::Atomic::Add(&rr_drho_crse(iii,jjj,kkk,n), -dt*drho); + } + } + + if (as_fine_valid_cell && !valid_dst_cell) + { + Gpu::Atomic::Add(&dm_as_fine(iii,jjj,kkk,n), dt*drho*vfrac(iii,jjj,kkk)); + } + + if (as_fine_ghost_cell && valid_dst_cell) + { + Gpu::Atomic::Add(&dm_as_fine(i,j,k,n), -dt*drho*vfrac(iii,jjj,kkk)); + } + } // isConnected + AMREX_D_TERM(},},}) } // isSingleValued }); amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { - if (!flag(i,j,k).isCovered()) + if (!flag(i,j,k).isCovered()) { dqdt(i,j,k,icomp+n) = divc(i,j,k,n) + optmp(i,j,k,n); + } }); diff --git a/Src/EB/AMReX_EB_LeastSquares_2D_K.H b/Src/EB/AMReX_EB_LeastSquares_2D_K.H index fb2d6e0673b..7ebfff7e34d 100644 --- a/Src/EB/AMReX_EB_LeastSquares_2D_K.H +++ b/Src/EB/AMReX_EB_LeastSquares_2D_K.H @@ -38,10 +38,11 @@ void decomp_chol_np6(Array2D& aa) p[ii] = std::sqrt(sum1); } } else { - if (ising == 0) + if (ising == 0) { aa(jj,ii) = sum1 / p[ii]; - else + } else { aa(jj,ii) = 0.0; + } } } } @@ -66,9 +67,11 @@ void cholsol_np6(Array2D& Amatrix, Array1D& b) Array2D AtA; - for (int irow = 0; irow < neq; irow++) - for (int icol = 0; icol < neq; icol++) + for (int irow = 0; irow < neq; irow++) { + for (int icol = 0; icol < neq; icol++) { AtA(irow,icol) = 0.0; + } + } for (int irow = 0; irow < 12; irow++) { @@ -95,50 +98,56 @@ void cholsol_np6(Array2D& Amatrix, Array1D& b) AtA(5,5) += Amatrix(irow,5)*Amatrix(irow,5); // (y^2)^T (y^2) } - for (int irow = 0; irow < neq-1; irow++) - for (int icol = irow+1; icol < neq; icol++) + for (int irow = 0; irow < neq-1; irow++) { + for (int icol = irow+1; icol < neq; icol++) { AtA(icol,irow) = AtA(irow,icol); + } + } decomp_chol_np6(AtA); - if (AtA(0,0) > 0.) - b(0) = b(0) / AtA(0,0); - else - b(0) = 0.; + if (AtA(0,0) > 0.) { + b(0) = b(0) / AtA(0,0); + } else { + b(0) = 0.; + } for (int ii = 1; ii < neq; ii++) { - if (AtA(ii,ii) > 0.) - { - for (int jj = 0; jj < ii; jj++) - b(ii) = b(ii) - AtA(ii,jj)*b(jj); + if (AtA(ii,ii) > 0.) + { + for (int jj = 0; jj < ii; jj++) { + b(ii) = b(ii) - AtA(ii,jj)*b(jj); + } - b(ii) = b(ii) / AtA(ii,ii); - } - else - { - b(ii) = 0.0; - } + b(ii) = b(ii) / AtA(ii,ii); + } + else + { + b(ii) = 0.0; + } } - if (AtA(neq-1,neq-1) > 0.) - b(neq-1) = b(neq-1) / AtA(neq-1,neq-1); - else - b(neq-1) = 0.0; + if (AtA(neq-1,neq-1) > 0.) { + b(neq-1) = b(neq-1) / AtA(neq-1,neq-1); + } else { + b(neq-1) = 0.0; + } for (int ii = neq-2; ii >= 0; ii--) { - if (AtA(ii,ii) > 0.) 
- { - for (int jj = ii+1; jj < neq; jj++) - b(ii) = b(ii) - AtA(ii,jj)*b(jj); - - b(ii) = b(ii) / AtA(ii,ii); - } - else - { - b(ii) = 0.0; - } + if (AtA(ii,ii) > 0.) + { + for (int jj = ii+1; jj < neq; jj++) { + b(ii) = b(ii) - AtA(ii,jj)*b(jj); + } + + b(ii) = b(ii) / AtA(ii,ii); + } + else + { + b(ii) = 0.0; + } } } @@ -149,9 +158,11 @@ void cholsol_for_eb(Array2D& Amatrix, Array1D& b) Array2D AtA; - for (int irow = 0; irow < neq; irow++) - for (int icol = 0; icol < neq; icol++) + for (int irow = 0; irow < neq; irow++) { + for (int icol = 0; icol < neq; icol++) { AtA(irow,icol) = 0.0; + } + } for (int irow = 0; irow < 18; irow++) { @@ -178,50 +189,56 @@ void cholsol_for_eb(Array2D& Amatrix, Array1D& b) AtA(5,5) += Amatrix(irow,5)*Amatrix(irow,5); // (y^2)^T (y^2) } - for (int irow = 0; irow < neq-1; irow++) - for (int icol = irow+1; icol < neq; icol++) - AtA(icol,irow) = AtA(irow,icol); + for (int irow = 0; irow < neq-1; irow++) { + for (int icol = irow+1; icol < neq; icol++) { + AtA(icol,irow) = AtA(irow,icol); + } + } decomp_chol_np6(AtA); - if (AtA(0,0) > 0.) - b(0) = b(0) / AtA(0,0); - else - b(0) = 0.; + if (AtA(0,0) > 0.) { + b(0) = b(0) / AtA(0,0); + } else { + b(0) = 0.; + } for (int ii = 1; ii < neq; ii++) { - if (AtA(ii,ii) > 0.) - { - for (int jj = 0; jj < ii; jj++) - b(ii) = b(ii) - AtA(ii,jj)*b(jj); + if (AtA(ii,ii) > 0.) + { + for (int jj = 0; jj < ii; jj++) { + b(ii) = b(ii) - AtA(ii,jj)*b(jj); + } - b(ii) = b(ii) / AtA(ii,ii); - } - else - { - b(ii) = 0.0; - } + b(ii) = b(ii) / AtA(ii,ii); + } + else + { + b(ii) = 0.0; + } } - if (AtA(neq-1,neq-1) > 0.) - b(neq-1) = b(neq-1) / AtA(neq-1,neq-1); - else - b(neq-1) = 0.0; + if (AtA(neq-1,neq-1) > 0.) { + b(neq-1) = b(neq-1) / AtA(neq-1,neq-1); + } else { + b(neq-1) = 0.0; + } for (int ii = neq-2; ii >= 0; ii--) { - if (AtA(ii,ii) > 0.) - { - for (int jj = ii+1; jj < neq; jj++) - b(ii) = b(ii) - AtA(ii,jj)*b(jj); - - b(ii) = b(ii) / AtA(ii,ii); - } - else - { - b(ii) = 0.0; - } + if (AtA(ii,ii) > 0.) 
+ { + for (int jj = ii+1; jj < neq; jj++) { + b(ii) = b(ii) - AtA(ii,jj)*b(jj); + } + + b(ii) = b(ii) / AtA(ii,ii); + } + else + { + b(ii) = 0.0; + } } } @@ -242,12 +259,14 @@ Real grad_x_of_phi_on_centroids(int i,int j,int k,int n, // Order of column -- first six are cell centroids: (i,j-1) (i,j) (i,j+1) (i-1,j-1) (i-1,j) (i-1,j+1) // Order of column -- second six are EB centroids: (i,j-1) (i,j) (i,j+1) (i-1,j-1) (i-1,j) (i-1,j+1) - for (int irow = 0; irow < 12; irow++) - for (int icol = 0; icol < 6; icol++) + for (int irow = 0; irow < 12; irow++) { + for (int icol = 0; icol < 6; icol++) { Amatrix(irow,icol) = 0.0; + } + } // Columns: [e x y x*x x*y y*y] - for (int ii = i-1; ii <= i; ii++) // Normal to face + for (int ii = i-1; ii <= i; ii++) { // Normal to face for (int jj = j-1; jj <= j+1; jj++) // Tangential to face { if (!flag(ii,jj,k).isCovered()) @@ -279,13 +298,14 @@ Real grad_x_of_phi_on_centroids(int i,int j,int k,int n, } } } + } // Make the RHS = A^T v for (int irow = 0; irow < 6; irow++) { rhs(irow) = 0.; // Only non-zero when inhomogeneous Dirichlet - for (int ii = i-1; ii <= i; ii++) // Normal to face + for (int ii = i-1; ii <= i; ii++) { // Normal to face for (int jj = j-1; jj <= j+1; jj++) // Tangential to face { if (!flag(ii,jj,k).isCovered()) @@ -294,10 +314,12 @@ Real grad_x_of_phi_on_centroids(int i,int j,int k,int n, rhs(irow) += Amatrix(a_ind ,irow)* phi(ii,jj,k,n); if (flag(ii,jj,k).isSingleValued() && - is_eb_dirichlet && is_eb_inhomog) + is_eb_dirichlet && is_eb_inhomog) { rhs(irow) += Amatrix(a_ind+6,irow)*phieb(ii,jj,k,n); + } } } + } } cholsol_np6(Amatrix, rhs); @@ -322,12 +344,14 @@ Real grad_y_of_phi_on_centroids(int i,int j,int k,int n, // Order of column -- first six are cell centroids: (i-1,j) (i,j) (i+1,j) (i-1,j-1) (i,j-1) (i+1,j-1) // Order of column -- second six are EB centroids: (i-1,j) (i,j) (i+1,j) (i-1,j-1) (i,j-1) (i+1,j-1) - for (int irow = 0; irow < 12; irow++) - for (int icol = 0; icol < 6; icol++) + for (int irow = 0; irow < 12; irow++) { + for (int icol = 0; icol < 6; icol++) { Amatrix(irow,icol) = 0.0; + } + } // Columns: [e x y x*x x*y y*y] - for (int jj = j-1; jj <= j; jj++) // Normal to face + for (int jj = j-1; jj <= j; jj++) { // Normal to face for (int ii = i-1; ii <= i+1; ii++) // Tangential to face { if (!flag(ii,jj,k).isCovered()) @@ -356,23 +380,27 @@ Real grad_y_of_phi_on_centroids(int i,int j,int k,int n, } } } + } // Make the RHS = A^T v for (int irow = 0; irow < 6; irow++) { rhs(irow) = 0.; // Only non-zero when inhomogeneous Dirichlet - for (int jj = j-1; jj <= j; jj++) // Normal to face - for (int ii = i-1; ii <= i+1; ii++) // Tangential to face + for (int jj = j-1; jj <= j; jj++) { // Normal to face + for (int ii = i-1; ii <= i+1; ii++) { // Tangential to face if (!flag(ii,jj,k).isCovered()) { int a_ind = (ii-(i-1)) + 3*(jj-(j-1)); rhs(irow) += Amatrix(a_ind ,irow)* phi(ii,jj,k,n); if (flag(ii,jj,k).isSingleValued() && - is_eb_dirichlet && is_eb_inhomog) + is_eb_dirichlet && is_eb_inhomog) { rhs(irow) += Amatrix(a_ind+6,irow)*phieb(ii,jj,k,n); + } } + } + } } cholsol_np6(Amatrix, rhs); @@ -395,12 +423,14 @@ Real grad_eb_of_phi_on_centroids(int i,int j,int k,int n, // Order of column -- first 9 are cell centroids, next 9 are EB centroids - for (int irow = 0; irow < 18; irow++) - for (int icol = 0; icol < 6; icol++) + for (int irow = 0; irow < 18; irow++) { + for (int icol = 0; icol < 6; icol++) { Amatrix(irow,icol) = 0.0; + } + } // Column 0-2: [e x y] - for (int ii = i-1; ii <= i+1; ii++) + for (int ii = i-1; ii <= i+1; 
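// Illustrative sketch: the least-squares hunks above only reformat the loops that
// assemble the normal equations and the Cholesky solve (decomp_chol_np6 /
// cholsol_np6); the arithmetic is unchanged. The same idea for a generic small dense
// system, with a zero-pivot guard in the spirit of those routines (a non-positive
// pivot zeroes the corresponding component). The function name and std::vector
// storage are placeholders for the fixed-size Array2D/Array1D used in AMReX.
#include <cmath>
#include <vector>

inline std::vector<double>
normal_equations_solve (std::vector<std::vector<double>> const& A, std::vector<double> const& b)
{
    const int m = static_cast<int>(A.size());
    const int n = (m > 0) ? static_cast<int>(A[0].size()) : 0;

    std::vector<std::vector<double>> AtA(n, std::vector<double>(n, 0.0));
    std::vector<double> x(n, 0.0);                            // holds A^T b, then the solution
    for (int r = 0; r < m; ++r) {
        for (int i = 0; i < n; ++i) {
            x[i] += A[r][i] * b[r];                           // A^T b
            for (int j = i; j < n; ++j) { AtA[i][j] += A[r][i] * A[r][j]; }  // upper triangle of A^T A
        }
    }
    for (int i = 0; i < n; ++i) {
        for (int j = i+1; j < n; ++j) { AtA[j][i] = AtA[i][j]; }             // mirror to lower triangle
    }

    // In-place Cholesky: for j <= i, AtA[i][j] becomes L(i,j) with A^T A = L L^T.
    for (int i = 0; i < n; ++i) {
        for (int j = i; j < n; ++j) {
            double sum = AtA[j][i];
            for (int k = 0; k < i; ++k) { sum -= AtA[i][k] * AtA[j][k]; }
            if (j == i) { AtA[i][i] = (sum > 0.0) ? std::sqrt(sum) : 0.0; }
            else        { AtA[j][i] = (AtA[i][i] > 0.0) ? sum / AtA[i][i] : 0.0; }
        }
    }
    // Forward substitution: L y = A^T b (y overwrites x).
    for (int i = 0; i < n; ++i) {
        for (int k = 0; k < i; ++k) { x[i] -= AtA[i][k] * x[k]; }
        x[i] = (AtA[i][i] > 0.0) ? x[i] / AtA[i][i] : 0.0;
    }
    // Back substitution: L^T x = y.
    for (int i = n-1; i >= 0; --i) {
        for (int k = i+1; k < n; ++k) { x[i] -= AtA[k][i] * x[k]; }
        x[i] = (AtA[i][i] > 0.0) ? x[i] / AtA[i][i] : 0.0;
    }
    return x;
}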
ii++) { for (int jj = j-1; jj <= j+1; jj++) { if (!flag(ii,jj,k).isCovered()) @@ -425,6 +455,7 @@ Real grad_eb_of_phi_on_centroids(int i,int j,int k,int n, } } } + } // Columns 3 : [x*x x*y y*y] @@ -440,15 +471,18 @@ Real grad_eb_of_phi_on_centroids(int i,int j,int k,int n, { rhs(irow) = 0.; - for (int ii = i-1; ii <= i+1; ii++) - for (int jj = j-1; jj <= j+1; jj++) + for (int ii = i-1; ii <= i+1; ii++) { + for (int jj = j-1; jj <= j+1; jj++) { if (!flag(ii,jj,k).isCovered()) { int a_ind = (jj-(j-1)) + 3*(ii-(i-1)); rhs(irow) += Amatrix(a_ind,irow) * phi(ii,jj,k,n); - if (flag(ii,jj,k).isSingleValued() && is_eb_inhomog) + if (flag(ii,jj,k).isSingleValued() && is_eb_inhomog) { rhs(irow) += Amatrix(a_ind+9,irow)*phieb(ii,jj,k,n); + } } + } + } } cholsol_for_eb(Amatrix, rhs); @@ -477,15 +511,17 @@ Real grad_x_of_phi_on_centroids_extdir(int i,int j,int k,int n, // Order of column -- first six are cell centroids: (i,j-1) (i,j) (i,j+1) (i-1,j-1) (i-1,j) (i-1,j+1) // Order of column -- second six are EB centroids: (i,j-1) (i,j) (i,j+1) (i-1,j-1) (i-1,j) (i-1,j+1) - for (int irow = 0; irow < 18; irow++) - for (int icol = 0; icol < 6; icol++) + for (int irow = 0; irow < 18; irow++) { + for (int icol = 0; icol < 6; icol++) { Amatrix(irow,icol) = 0.0; + } + } const int im = (i > domhi_x) ? 2 : 1; const int ip = 2 - im; // Columns: [e x y x*x x*y y*y] - for (int ii = i-im; ii <= i+ip; ii++) // Normal to face + for (int ii = i-im; ii <= i+ip; ii++) { // Normal to face for (int jj = j-1; jj <= j+1; jj++) // Tangential to face { @@ -498,8 +534,9 @@ Real grad_x_of_phi_on_centroids_extdir(int i,int j,int k,int n, continue; } - if ( !phi.contains(ii,jj,k) ) + if ( !phi.contains(ii,jj,k) ) { continue; + } if (!flag(ii,jj,k).isCovered()) { @@ -513,17 +550,21 @@ Real grad_x_of_phi_on_centroids_extdir(int i,int j,int k,int n, Real y_off = static_cast(jj-j); if(on_x_face){ - if (ii < domlo_x && (vfrac(ii+1,jj,k) != 1.0 || vfrac(ii+2,jj,k) != 1.0) ) + if (ii < domlo_x && (vfrac(ii+1,jj,k) != 1.0 || vfrac(ii+2,jj,k) != 1.0) ) { continue; - if (ii > domhi_x && (vfrac(ii-1,jj,k) != 1.0 || vfrac(ii-2,jj,k) != 1.0)) + } + if (ii > domhi_x && (vfrac(ii-1,jj,k) != 1.0 || vfrac(ii-2,jj,k) != 1.0)) { continue; + } } if(on_y_face){ - if (jj < domlo_y && (vfrac(ii,jj+1,k) != 1.0 || vfrac(ii,jj+2,k) != 1.0) ) + if (jj < domlo_y && (vfrac(ii,jj+1,k) != 1.0 || vfrac(ii,jj+2,k) != 1.0) ) { continue; - if (jj > domhi_y && (vfrac(ii,jj-1,k) != 1.0 || vfrac(ii,jj-2,k) != 1.0) ) + } + if (jj > domhi_y && (vfrac(ii,jj-1,k) != 1.0 || vfrac(ii,jj-2,k) != 1.0) ) { continue; + } } Amatrix(a_ind,0) = 1.0; @@ -551,16 +592,18 @@ Real grad_x_of_phi_on_centroids_extdir(int i,int j,int k,int n, } } } + } for (int irow = 0; irow < 6; irow++) { rhs(irow) = 0.; // Only non-zero when inhomogeneous Dirichlet - for (int ii = i-im; ii <= i+ip; ii++) // Normal to face + for (int ii = i-im; ii <= i+ip; ii++) { // Normal to face for (int jj = j-1; jj <= j+1; jj++) // Tangential to face { - if ( !phi.contains(ii,jj,k) ) + if ( !phi.contains(ii,jj,k) ) { continue; + } if (!flag(ii,jj,k).isCovered()) { @@ -572,10 +615,12 @@ Real grad_x_of_phi_on_centroids_extdir(int i,int j,int k,int n, rhs(irow) += Amatrix(a_ind ,irow)*phi_val; if (flag(ii,jj,k).isSingleValued() && - is_eb_dirichlet && is_eb_inhomog && Amatrix(a_ind+9,irow) != 0.0) + is_eb_dirichlet && is_eb_inhomog && Amatrix(a_ind+9,irow) != 0.0) { rhs(irow) += Amatrix(a_ind+9,irow)*phieb(ii,jj,k,n); + } } } + } } cholsol_for_eb(Amatrix, rhs); @@ -603,15 +648,17 @@ Real 
grad_y_of_phi_on_centroids_extdir(int i,int j,int k,int n, // Order of column -- first six are cell centroids: (i-1,j) (i,j) (i+1,j) (i-1,j-1) (i,j-1) (i+1,j-1) // Order of column -- second six are EB centroids: (i-1,j) (i,j) (i+1,j) (i-1,j-1) (i,j-1) (i+1,j-1) - for (int irow = 0; irow < 18; irow++) - for (int icol = 0; icol < 6; icol++) + for (int irow = 0; irow < 18; irow++) { + for (int icol = 0; icol < 6; icol++) { Amatrix(irow,icol) = 0.0; + } + } const int jm = (j > domhi_y) ? 2 : 1; const int jp = 2 - jm; // Columns: [e x y x*x x*y y*y] - for (int jj = j-jm; jj <= j+jp; jj++) // Normal to face + for (int jj = j-jm; jj <= j+jp; jj++) { // Normal to face for (int ii = i-1; ii <= i+1; ii++) // Tangential to face { @@ -620,11 +667,12 @@ Real grad_y_of_phi_on_centroids_extdir(int i,int j,int k,int n, ((on_x_face && ii > domhi_x) && (on_y_face && jj > domhi_y)) || ((on_x_face && ii < domlo_x) && (on_y_face && jj > domhi_y)) || ((on_x_face && ii > domhi_x) && (on_y_face && jj < domlo_y))) { - continue; + continue; } - if ( !phi.contains(ii,jj,k) ) - continue; + if ( !phi.contains(ii,jj,k) ) { + continue; + } if (!flag(ii,jj,k).isCovered()) { @@ -635,17 +683,21 @@ Real grad_y_of_phi_on_centroids_extdir(int i,int j,int k,int n, Real y_off = static_cast(jj-j) + 0.5; if(on_x_face){ - if (ii < domlo_x && (vfrac(ii+1,jj,k) != 1.0 || vfrac(ii+2,jj,k) != 1.0) ) + if (ii < domlo_x && (vfrac(ii+1,jj,k) != 1.0 || vfrac(ii+2,jj,k) != 1.0) ) { continue; - if (ii > domhi_x && (vfrac(ii-1,jj,k) != 1.0 || vfrac(ii-2,jj,k) != 1.0)) + } + if (ii > domhi_x && (vfrac(ii-1,jj,k) != 1.0 || vfrac(ii-2,jj,k) != 1.0)) { continue; + } } if(on_y_face){ - if (jj < domlo_y && (vfrac(ii,jj+1,k) != 1.0 || vfrac(ii,jj+2,k) != 1.0) ) + if (jj < domlo_y && (vfrac(ii,jj+1,k) != 1.0 || vfrac(ii,jj+2,k) != 1.0) ) { continue; - if (jj > domhi_y && (vfrac(ii,jj-1,k) != 1.0 || vfrac(ii,jj-2,k) != 1.0) ) + } + if (jj > domhi_y && (vfrac(ii,jj-1,k) != 1.0 || vfrac(ii,jj-2,k) != 1.0) ) { continue; + } } Amatrix(a_ind,0) = 1.0; @@ -672,6 +724,7 @@ Real grad_y_of_phi_on_centroids_extdir(int i,int j,int k,int n, } } } + } // Make the RHS = A^T v @@ -679,10 +732,11 @@ Real grad_y_of_phi_on_centroids_extdir(int i,int j,int k,int n, { rhs(irow) = 0.; // Only non-zero when inhomogeneous Dirichlet - for (int jj = j-jm; jj <= j+jp; jj++) // Normal to face + for (int jj = j-jm; jj <= j+jp; jj++) { // Normal to face for (int ii = i-1; ii <= i+1; ii++) {// Tangential to face - if ( !phi.contains(ii,jj,k) ) + if ( !phi.contains(ii,jj,k) ) { continue; + } if (!flag(ii,jj,k).isCovered()) { int a_ind = (ii-(i-1)) + 3*(jj-(j-jm)); @@ -694,10 +748,11 @@ Real grad_y_of_phi_on_centroids_extdir(int i,int j,int k,int n, if (flag(ii,jj,k).isSingleValued() && is_eb_dirichlet && is_eb_inhomog && Amatrix(a_ind+9,irow) != Real(0.0)){ - rhs(irow) += Amatrix(a_ind+9,irow)*phieb(ii,jj,k,n); + rhs(irow) += Amatrix(a_ind+9,irow)*phieb(ii,jj,k,n); } } } + } } @@ -724,12 +779,14 @@ Real grad_eb_of_phi_on_centroids_extdir(int i,int j,int k,int n, // Order of column -- first 9 are cell centroids, next 9 are EB centroids - for (int irow = 0; irow < 18; irow++) - for (int icol = 0; icol < 6; icol++) + for (int irow = 0; irow < 18; irow++) { + for (int icol = 0; icol < 6; icol++) { Amatrix(irow,icol) = 0.0; + } + } // Column 0-2: [e x y] - for (int ii = i-1; ii <= i+1; ii++) + for (int ii = i-1; ii <= i+1; ii++) { for (int jj = j-1; jj <= j+1; jj++) { @@ -738,7 +795,7 @@ Real grad_eb_of_phi_on_centroids_extdir(int i,int j,int k,int n, // by removing the on_?_face 
restrictions. if (((on_x_face && ii < domlo_x) && (on_y_face && jj < domlo_y)) || ((on_x_face && ii > domhi_x) && (on_y_face && jj > domhi_y))){ - continue; + continue; } @@ -750,17 +807,21 @@ Real grad_eb_of_phi_on_centroids_extdir(int i,int j,int k,int n, Real y_off = static_cast(jj-j); if(on_x_face){ - if (ii < domlo_x && (vfrac(ii+1,jj,k) != 1.0 || vfrac(ii+2,jj,k) != 1.0) ) + if (ii < domlo_x && (vfrac(ii+1,jj,k) != 1.0 || vfrac(ii+2,jj,k) != 1.0) ) { continue; - if (ii > domhi_x && (vfrac(ii-1,jj,k) != 1.0 || vfrac(ii-2,jj,k) != 1.0)) + } + if (ii > domhi_x && (vfrac(ii-1,jj,k) != 1.0 || vfrac(ii-2,jj,k) != 1.0)) { continue; + } } if(on_y_face){ - if (jj < domlo_y && (vfrac(ii,jj+1,k) != 1.0 || vfrac(ii,jj+2,k) != 1.0) ) + if (jj < domlo_y && (vfrac(ii,jj+1,k) != 1.0 || vfrac(ii,jj+2,k) != 1.0) ) { continue; - if (jj > domhi_y && (vfrac(ii,jj-1,k) != 1.0 || vfrac(ii,jj-2,k) != 1.0) ) + } + if (jj > domhi_y && (vfrac(ii,jj-1,k) != 1.0 || vfrac(ii,jj-2,k) != 1.0) ) { continue; + } } @@ -781,14 +842,15 @@ Real grad_eb_of_phi_on_centroids_extdir(int i,int j,int k,int n, } } } + } // Columns 3 : [x*x x*y y*y] for (int irow = 0; irow < 18; irow++) { - Amatrix(irow,3) = Amatrix(irow,1) * Amatrix(irow,1); - Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,2); - Amatrix(irow,5) = Amatrix(irow,2) * Amatrix(irow,2); + Amatrix(irow,3) = Amatrix(irow,1) * Amatrix(irow,1); + Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,2); + Amatrix(irow,5) = Amatrix(irow,2) * Amatrix(irow,2); } // Make the RHS = A^T v @@ -796,10 +858,11 @@ Real grad_eb_of_phi_on_centroids_extdir(int i,int j,int k,int n, { rhs(irow) = 0.; - for (int ii = i-1; ii <= i+1; ii++) + for (int ii = i-1; ii <= i+1; ii++) { for (int jj = j-1; jj <= j+1; jj++) { - if ( !phi.contains(ii,jj,k) ) + if ( !phi.contains(ii,jj,k) ) { continue; + } if (!flag(ii,jj,k).isCovered()) { int a_ind = (jj-(j-1)) + 3*(ii-(i-1)); @@ -810,10 +873,12 @@ Real grad_eb_of_phi_on_centroids_extdir(int i,int j,int k,int n, rhs(irow) += Amatrix(a_ind,irow) * phi_val; - if (flag(ii,jj,k).isSingleValued() && is_eb_inhomog && Amatrix(a_ind+9,irow) != 0.0) + if (flag(ii,jj,k).isSingleValued() && is_eb_inhomog && Amatrix(a_ind+9,irow) != 0.0) { rhs(irow) += Amatrix(a_ind+9,irow)*phieb(ii,jj,k,n); + } } } + } } cholsol_for_eb(Amatrix, rhs); diff --git a/Src/EB/AMReX_EB_LeastSquares_3D_K.H b/Src/EB/AMReX_EB_LeastSquares_3D_K.H index 83c0f509842..bbdd20bdba8 100644 --- a/Src/EB/AMReX_EB_LeastSquares_3D_K.H +++ b/Src/EB/AMReX_EB_LeastSquares_3D_K.H @@ -38,10 +38,11 @@ void decomp_chol_np10(Array2D& aa) p[ii] = std::sqrt(sum1); } } else { - if (ising == 0) + if (ising == 0) { aa(jj,ii) = sum1 / p[ii]; - else + } else { aa(jj,ii) = Real(0.0); + } } } } @@ -66,9 +67,11 @@ void cholsol_np10(Array2D& Amatrix, Array1D& b) Array2D AtA; - for (int irow = 0; irow < neq; irow++) - for (int icol = 0; icol < neq; icol++) + for (int irow = 0; irow < neq; irow++) { + for (int icol = 0; icol < neq; icol++) { AtA(irow,icol) = Real(0.0); + } + } for (int irow = 0; irow < 36; irow++) { @@ -138,50 +141,56 @@ void cholsol_np10(Array2D& Amatrix, Array1D& b) AtA(9,9) += Amatrix(irow,9)*Amatrix(irow,9); // (z^2)^T z^2 } - for (int irow = 0; irow < neq-1; irow++) - for (int icol = irow+1; icol < neq; icol++) - AtA(icol,irow) = AtA(irow,icol); + for (int irow = 0; irow < neq-1; irow++) { + for (int icol = irow+1; icol < neq; icol++) { + AtA(icol,irow) = AtA(irow,icol); + } + } decomp_chol_np10(AtA); - if (AtA(0,0) > 0.) - b(0) = b(0) / AtA(0,0); - else - b(0) = 0.; + if (AtA(0,0) > 0.) 
{ + b(0) = b(0) / AtA(0,0); + } else { + b(0) = 0.; + } for (int ii = 1; ii < neq; ii++) { - if (AtA(ii,ii) > 0.) - { - for (int jj = 0; jj < ii; jj++) - b(ii) = b(ii) - AtA(ii,jj)*b(jj); - - b(ii) = b(ii) / AtA(ii,ii); - } - else - { - b(ii) = Real(0.0); - } + if (AtA(ii,ii) > 0.) + { + for (int jj = 0; jj < ii; jj++) { + b(ii) = b(ii) - AtA(ii,jj)*b(jj); + } + + b(ii) = b(ii) / AtA(ii,ii); + } + else + { + b(ii) = Real(0.0); + } } - if (AtA(neq-1,neq-1) > 0.) - b(neq-1) = b(neq-1) / AtA(neq-1,neq-1); - else - b(neq-1) = Real(0.0); + if (AtA(neq-1,neq-1) > 0.) { + b(neq-1) = b(neq-1) / AtA(neq-1,neq-1); + } else { + b(neq-1) = Real(0.0); + } for (int ii = neq-2; ii >= 0; ii--) { - if (AtA(ii,ii) > 0.) - { - for (int jj = ii+1; jj < neq; jj++) - b(ii) = b(ii) - AtA(ii,jj)*b(jj); - - b(ii) = b(ii) / AtA(ii,ii); - } - else - { - b(ii) = Real(0.0); - } + if (AtA(ii,ii) > 0.) + { + for (int jj = ii+1; jj < neq; jj++) { + b(ii) = b(ii) - AtA(ii,jj)*b(jj); + } + + b(ii) = b(ii) / AtA(ii,ii); + } + else + { + b(ii) = Real(0.0); + } } } @@ -193,122 +202,130 @@ void cholsol_for_eb(Array2D& Amatrix, Array1D& b) Array2D AtA; - for (int irow = 0; irow < neq; irow++) - for (int icol = 0; icol < neq; icol++) + for (int irow = 0; irow < neq; irow++) { + for (int icol = 0; icol < neq; icol++) { AtA(irow,icol) = Real(0.0); + } + } for (int irow = 0; irow < 54; irow++) { - AtA(0,0) += Amatrix(irow,0)*Amatrix(irow,0); // e^T e - AtA(0,1) += Amatrix(irow,0)*Amatrix(irow,1); // e^T x - AtA(0,2) += Amatrix(irow,0)*Amatrix(irow,2); // e^T y - AtA(0,3) += Amatrix(irow,0)*Amatrix(irow,3); // e^T z - AtA(0,4) += Amatrix(irow,0)*Amatrix(irow,4); // e^T x^2 - AtA(0,5) += Amatrix(irow,0)*Amatrix(irow,5); // e^T x*y - AtA(0,6) += Amatrix(irow,0)*Amatrix(irow,6); // e^T y^2 - AtA(0,7) += Amatrix(irow,0)*Amatrix(irow,7); // e^T x*z - AtA(0,8) += Amatrix(irow,0)*Amatrix(irow,8); // e^T y*z - AtA(0,9) += Amatrix(irow,0)*Amatrix(irow,9); // e^T z^2 - - AtA(1,1) += Amatrix(irow,1)*Amatrix(irow,1); // x^T x - AtA(1,2) += Amatrix(irow,1)*Amatrix(irow,2); // x^T y - AtA(1,3) += Amatrix(irow,1)*Amatrix(irow,3); // x^T y - AtA(1,4) += Amatrix(irow,1)*Amatrix(irow,4); // x^T (x^2) - AtA(1,5) += Amatrix(irow,1)*Amatrix(irow,5); // x^T (xy) - AtA(1,6) += Amatrix(irow,1)*Amatrix(irow,6); // x^T (y^2) - AtA(1,7) += Amatrix(irow,1)*Amatrix(irow,7); // x^T x*z - AtA(1,8) += Amatrix(irow,1)*Amatrix(irow,8); // x^T y*z - AtA(1,9) += Amatrix(irow,1)*Amatrix(irow,9); // x^T z^2 - - AtA(2,2) += Amatrix(irow,2)*Amatrix(irow,2); // y^T y - AtA(2,3) += Amatrix(irow,2)*Amatrix(irow,3); // y^T z - AtA(2,4) += Amatrix(irow,2)*Amatrix(irow,4); // y^T (x^2) - AtA(2,5) += Amatrix(irow,2)*Amatrix(irow,5); // y^T (xy) - AtA(2,6) += Amatrix(irow,2)*Amatrix(irow,6); // y^T (y^2) - AtA(2,7) += Amatrix(irow,2)*Amatrix(irow,7); // y^T x*z - AtA(2,8) += Amatrix(irow,2)*Amatrix(irow,8); // y^T y*z - AtA(2,9) += Amatrix(irow,2)*Amatrix(irow,9); // y^T z^2 - - AtA(3,3) += Amatrix(irow,3)*Amatrix(irow,3); // z^T z - AtA(3,4) += Amatrix(irow,3)*Amatrix(irow,4); // z^T (x^2) - AtA(3,5) += Amatrix(irow,3)*Amatrix(irow,5); // z^T (xy) - AtA(3,6) += Amatrix(irow,3)*Amatrix(irow,6); // z^T (y^2) - AtA(3,7) += Amatrix(irow,3)*Amatrix(irow,7); // z^T x*z - AtA(3,8) += Amatrix(irow,3)*Amatrix(irow,8); // z^T y*z - AtA(3,9) += Amatrix(irow,3)*Amatrix(irow,9); // z^T z^2 - - AtA(4,4) += Amatrix(irow,4)*Amatrix(irow,4); // (x^2)^T (x^2) - AtA(4,5) += Amatrix(irow,4)*Amatrix(irow,5); // (x^2)^T (xy) - AtA(4,6) += Amatrix(irow,4)*Amatrix(irow,6); // (x^2)^T (y^2) - AtA(4,7) 
+= Amatrix(irow,4)*Amatrix(irow,7); // (x^2)^T x*z - AtA(4,8) += Amatrix(irow,4)*Amatrix(irow,8); // (x^2)^T y*z - AtA(4,9) += Amatrix(irow,4)*Amatrix(irow,9); // (x^2)^T z^2 - - AtA(5,5) += Amatrix(irow,5)*Amatrix(irow,5); // (xy)^T (xy) - AtA(5,6) += Amatrix(irow,5)*Amatrix(irow,6); // (xy)^T (y^2) - AtA(5,7) += Amatrix(irow,5)*Amatrix(irow,7); // (xy)^T x*z - AtA(5,8) += Amatrix(irow,5)*Amatrix(irow,8); // (xy)^T y*z - AtA(5,9) += Amatrix(irow,5)*Amatrix(irow,9); // (xy)^T z^2 - - AtA(6,6) += Amatrix(irow,6)*Amatrix(irow,6); // (y^2)^T (y^2) - AtA(6,7) += Amatrix(irow,6)*Amatrix(irow,7); // (y^2)^T x*z - AtA(6,8) += Amatrix(irow,6)*Amatrix(irow,8); // (y^2)^T y*z - AtA(6,9) += Amatrix(irow,6)*Amatrix(irow,9); // (y^2)^T z^2 - - AtA(7,7) += Amatrix(irow,7)*Amatrix(irow,7); // (xz)^T x*z - AtA(7,8) += Amatrix(irow,7)*Amatrix(irow,8); // (xz)^T y*z - AtA(7,9) += Amatrix(irow,7)*Amatrix(irow,9); // (xz)^T z^2 - - AtA(8,8) += Amatrix(irow,8)*Amatrix(irow,8); // (yz)^T y*z - AtA(8,9) += Amatrix(irow,8)*Amatrix(irow,9); // (yz)^T z^2 - - AtA(9,9) += Amatrix(irow,9)*Amatrix(irow,9); // (z^2)^T z^2 + AtA(0,0) += Amatrix(irow,0)*Amatrix(irow,0); // e^T e + AtA(0,1) += Amatrix(irow,0)*Amatrix(irow,1); // e^T x + AtA(0,2) += Amatrix(irow,0)*Amatrix(irow,2); // e^T y + AtA(0,3) += Amatrix(irow,0)*Amatrix(irow,3); // e^T z + AtA(0,4) += Amatrix(irow,0)*Amatrix(irow,4); // e^T x^2 + AtA(0,5) += Amatrix(irow,0)*Amatrix(irow,5); // e^T x*y + AtA(0,6) += Amatrix(irow,0)*Amatrix(irow,6); // e^T y^2 + AtA(0,7) += Amatrix(irow,0)*Amatrix(irow,7); // e^T x*z + AtA(0,8) += Amatrix(irow,0)*Amatrix(irow,8); // e^T y*z + AtA(0,9) += Amatrix(irow,0)*Amatrix(irow,9); // e^T z^2 + + AtA(1,1) += Amatrix(irow,1)*Amatrix(irow,1); // x^T x + AtA(1,2) += Amatrix(irow,1)*Amatrix(irow,2); // x^T y + AtA(1,3) += Amatrix(irow,1)*Amatrix(irow,3); // x^T y + AtA(1,4) += Amatrix(irow,1)*Amatrix(irow,4); // x^T (x^2) + AtA(1,5) += Amatrix(irow,1)*Amatrix(irow,5); // x^T (xy) + AtA(1,6) += Amatrix(irow,1)*Amatrix(irow,6); // x^T (y^2) + AtA(1,7) += Amatrix(irow,1)*Amatrix(irow,7); // x^T x*z + AtA(1,8) += Amatrix(irow,1)*Amatrix(irow,8); // x^T y*z + AtA(1,9) += Amatrix(irow,1)*Amatrix(irow,9); // x^T z^2 + + AtA(2,2) += Amatrix(irow,2)*Amatrix(irow,2); // y^T y + AtA(2,3) += Amatrix(irow,2)*Amatrix(irow,3); // y^T z + AtA(2,4) += Amatrix(irow,2)*Amatrix(irow,4); // y^T (x^2) + AtA(2,5) += Amatrix(irow,2)*Amatrix(irow,5); // y^T (xy) + AtA(2,6) += Amatrix(irow,2)*Amatrix(irow,6); // y^T (y^2) + AtA(2,7) += Amatrix(irow,2)*Amatrix(irow,7); // y^T x*z + AtA(2,8) += Amatrix(irow,2)*Amatrix(irow,8); // y^T y*z + AtA(2,9) += Amatrix(irow,2)*Amatrix(irow,9); // y^T z^2 + + AtA(3,3) += Amatrix(irow,3)*Amatrix(irow,3); // z^T z + AtA(3,4) += Amatrix(irow,3)*Amatrix(irow,4); // z^T (x^2) + AtA(3,5) += Amatrix(irow,3)*Amatrix(irow,5); // z^T (xy) + AtA(3,6) += Amatrix(irow,3)*Amatrix(irow,6); // z^T (y^2) + AtA(3,7) += Amatrix(irow,3)*Amatrix(irow,7); // z^T x*z + AtA(3,8) += Amatrix(irow,3)*Amatrix(irow,8); // z^T y*z + AtA(3,9) += Amatrix(irow,3)*Amatrix(irow,9); // z^T z^2 + + AtA(4,4) += Amatrix(irow,4)*Amatrix(irow,4); // (x^2)^T (x^2) + AtA(4,5) += Amatrix(irow,4)*Amatrix(irow,5); // (x^2)^T (xy) + AtA(4,6) += Amatrix(irow,4)*Amatrix(irow,6); // (x^2)^T (y^2) + AtA(4,7) += Amatrix(irow,4)*Amatrix(irow,7); // (x^2)^T x*z + AtA(4,8) += Amatrix(irow,4)*Amatrix(irow,8); // (x^2)^T y*z + AtA(4,9) += Amatrix(irow,4)*Amatrix(irow,9); // (x^2)^T z^2 + + AtA(5,5) += Amatrix(irow,5)*Amatrix(irow,5); // (xy)^T (xy) + AtA(5,6) += 
Amatrix(irow,5)*Amatrix(irow,6); // (xy)^T (y^2) + AtA(5,7) += Amatrix(irow,5)*Amatrix(irow,7); // (xy)^T x*z + AtA(5,8) += Amatrix(irow,5)*Amatrix(irow,8); // (xy)^T y*z + AtA(5,9) += Amatrix(irow,5)*Amatrix(irow,9); // (xy)^T z^2 + + AtA(6,6) += Amatrix(irow,6)*Amatrix(irow,6); // (y^2)^T (y^2) + AtA(6,7) += Amatrix(irow,6)*Amatrix(irow,7); // (y^2)^T x*z + AtA(6,8) += Amatrix(irow,6)*Amatrix(irow,8); // (y^2)^T y*z + AtA(6,9) += Amatrix(irow,6)*Amatrix(irow,9); // (y^2)^T z^2 + + AtA(7,7) += Amatrix(irow,7)*Amatrix(irow,7); // (xz)^T x*z + AtA(7,8) += Amatrix(irow,7)*Amatrix(irow,8); // (xz)^T y*z + AtA(7,9) += Amatrix(irow,7)*Amatrix(irow,9); // (xz)^T z^2 + + AtA(8,8) += Amatrix(irow,8)*Amatrix(irow,8); // (yz)^T y*z + AtA(8,9) += Amatrix(irow,8)*Amatrix(irow,9); // (yz)^T z^2 + + AtA(9,9) += Amatrix(irow,9)*Amatrix(irow,9); // (z^2)^T z^2 } - for (int irow = 0; irow < neq-1; irow++) - for (int icol = irow+1; icol < neq; icol++) - AtA(icol,irow) = AtA(irow,icol); + for (int irow = 0; irow < neq-1; irow++) { + for (int icol = irow+1; icol < neq; icol++) { + AtA(icol,irow) = AtA(irow,icol); + } + } decomp_chol_np10(AtA); - if (AtA(0,0) > 0.) - b(0) = b(0) / AtA(0,0); - else - b(0) = 0.; + if (AtA(0,0) > 0.) { + b(0) = b(0) / AtA(0,0); + } else { + b(0) = 0.; + } for (int ii = 1; ii < neq; ii++) { - if (AtA(ii,ii) > 0.) - { - for (int jj = 0; jj < ii; jj++) - b(ii) = b(ii) - AtA(ii,jj)*b(jj); - - b(ii) = b(ii) / AtA(ii,ii); - } - else - { - b(ii) = Real(0.0); - } + if (AtA(ii,ii) > 0.) + { + for (int jj = 0; jj < ii; jj++) { + b(ii) = b(ii) - AtA(ii,jj)*b(jj); + } + + b(ii) = b(ii) / AtA(ii,ii); + } + else + { + b(ii) = Real(0.0); + } } - if (AtA(neq-1,neq-1) > 0.) - b(neq-1) = b(neq-1) / AtA(neq-1,neq-1); - else - b(neq-1) = Real(0.0); + if (AtA(neq-1,neq-1) > 0.) { + b(neq-1) = b(neq-1) / AtA(neq-1,neq-1); + } else { + b(neq-1) = Real(0.0); + } for (int ii = neq-2; ii >= 0; ii--) { - if (AtA(ii,ii) > 0.) - { - for (int jj = ii+1; jj < neq; jj++) - b(ii) = b(ii) - AtA(ii,jj)*b(jj); - - b(ii) = b(ii) / AtA(ii,ii); - } - else - { - b(ii) = Real(0.0); - } + if (AtA(ii,ii) > 0.) 
+ { + for (int jj = ii+1; jj < neq; jj++) { + b(ii) = b(ii) - AtA(ii,jj)*b(jj); + } + + b(ii) = b(ii) / AtA(ii,ii); + } + else + { + b(ii) = Real(0.0); + } } } @@ -328,45 +345,50 @@ Real grad_x_of_phi_on_centroids(int i,int j,int k,int n, // Order of column -- first 9 are cell centroids, second 9 are EB centroids - for (int irow = 0; irow < 36; irow++) - for (int icol = 0; icol < 10; icol++) + for (int irow = 0; irow < 36; irow++) { + for (int icol = 0; icol < 10; icol++) { Amatrix(irow,icol) = Real(0.0); + } + } // Columns 0-3: [e x y z] - for (int ii = i-1; ii <= i; ii++) // Normal to face - for (int kk = k-1; kk <= k+1; kk++) // Tangential to face - for (int jj = j-1; jj <= j+1; jj++) // Tangential to face - if (!flag(ii,jj,kk).isCovered()) - { - int a_ind = (jj-(j-1)) + 3*(kk-(k-1)) + 9*(ii-(i-1)); - - Real x_off = static_cast(ii-i) + Real(0.5); - Real y_off = static_cast(jj-j); - Real z_off = static_cast(kk-k); - - Amatrix(a_ind,0) = Real(1.0); - Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0); - Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1) - yloc_on_xface; - Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2) - zloc_on_xface; - - if (!flag(ii,jj,kk).isRegular()) + for (int ii = i-1; ii <= i; ii++) { // Normal to face + for (int kk = k-1; kk <= k+1; kk++) { // Tangential to face + for (int jj = j-1; jj <= j+1; jj++) { // Tangential to face + if (!flag(ii,jj,kk).isCovered()) { - Amatrix(a_ind+18,0) = Real(1.0); - Amatrix(a_ind+18,1) = x_off + bcent(ii,jj,kk,0); - Amatrix(a_ind+18,2) = y_off + bcent(ii,jj,kk,1) - yloc_on_xface; - Amatrix(a_ind+18,3) = z_off + bcent(ii,jj,kk,2) - zloc_on_xface; + int a_ind = (jj-(j-1)) + 3*(kk-(k-1)) + 9*(ii-(i-1)); + + Real x_off = static_cast(ii-i) + Real(0.5); + Real y_off = static_cast(jj-j); + Real z_off = static_cast(kk-k); + + Amatrix(a_ind,0) = Real(1.0); + Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0); + Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1) - yloc_on_xface; + Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2) - zloc_on_xface; + + if (!flag(ii,jj,kk).isRegular()) + { + Amatrix(a_ind+18,0) = Real(1.0); + Amatrix(a_ind+18,1) = x_off + bcent(ii,jj,kk,0); + Amatrix(a_ind+18,2) = y_off + bcent(ii,jj,kk,1) - yloc_on_xface; + Amatrix(a_ind+18,3) = z_off + bcent(ii,jj,kk,2) - zloc_on_xface; + } } } + } + } // Columns 4-9 : [x*x x*y y*y x*z y*z z*z] for (int irow = 0; irow < 36; irow++) { - Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); - Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); - Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); - Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); - Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); - Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); + Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); + Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); + Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); + Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); + Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); + Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); } // Make the RHS = A^T v @@ -374,18 +396,22 @@ Real grad_x_of_phi_on_centroids(int i,int j,int k,int n, { rhs(irow) = 0.; - for (int ii = i-1; ii <= i; ii++) // Normal to face - for (int kk = k-1; kk <= k+1; kk++) // Tangential to face - for (int jj = j-1; jj <= j+1; jj++) // Tangential to face - if (!flag(ii,jj,kk).isCovered()) - { - int a_ind = (jj-(j-1)) + 3*(kk-(k-1)) + 9*(ii-(i-1)); + for (int ii = i-1; ii <= i; ii++) { // Normal to face + for (int kk = k-1; kk <= k+1; kk++) { // Tangential to face + for (int jj = j-1; jj <= 
j+1; jj++) { // Tangential to face + if (!flag(ii,jj,kk).isCovered()) + { + int a_ind = (jj-(j-1)) + 3*(kk-(k-1)) + 9*(ii-(i-1)); - rhs(irow) += Amatrix(a_ind,irow)* phi(ii,jj,kk,n); + rhs(irow) += Amatrix(a_ind,irow)* phi(ii,jj,kk,n); - if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog) - rhs(irow) += Amatrix(a_ind+18,irow)*phieb(ii,jj,kk,n); - } + if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog) { + rhs(irow) += Amatrix(a_ind+18,irow)*phieb(ii,jj,kk,n); + } + } + } + } + } } cholsol_np10(Amatrix, rhs); @@ -409,45 +435,50 @@ Real grad_y_of_phi_on_centroids(int i,int j,int k,int n, // Order of column -- first 9 are cell centroids, second 9 are EB centroids - for (int irow = 0; irow < 36; irow++) - for (int icol = 0; icol < 10; icol++) + for (int irow = 0; irow < 36; irow++) { + for (int icol = 0; icol < 10; icol++) { Amatrix(irow,icol) = Real(0.0); + } + } // Columns 0-2: [e x y] - for (int jj = j-1; jj <= j; jj++) // Normal to face - for (int kk = k-1; kk <= k+1; kk++) // Tangential to face - for (int ii = i-1; ii <= i+1; ii++) // Tangential to face - if (!flag(ii,jj,kk).isCovered()) - { - int a_ind = (ii-(i-1)) + 3*(kk-(k-1)) + 9*(jj-(j-1)); - - Real x_off = static_cast(ii-i); - Real y_off = static_cast(jj-j) + Real(0.5); - Real z_off = static_cast(kk-k); - - Amatrix(a_ind,0) = Real(1.0); - Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0) - xloc_on_yface; - Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1); - Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2) - zloc_on_yface; - - if (!flag(ii,jj,kk).isRegular()) + for (int jj = j-1; jj <= j; jj++) { // Normal to face + for (int kk = k-1; kk <= k+1; kk++) { // Tangential to face + for (int ii = i-1; ii <= i+1; ii++) { // Tangential to face + if (!flag(ii,jj,kk).isCovered()) { - Amatrix(a_ind+18,0) = Real(1.0); - Amatrix(a_ind+18,1) = x_off + bcent(ii,jj,kk,0) - xloc_on_yface; - Amatrix(a_ind+18,2) = y_off + bcent(ii,jj,kk,1); - Amatrix(a_ind+18,3) = z_off + bcent(ii,jj,kk,2) - zloc_on_yface; + int a_ind = (ii-(i-1)) + 3*(kk-(k-1)) + 9*(jj-(j-1)); + + Real x_off = static_cast(ii-i); + Real y_off = static_cast(jj-j) + Real(0.5); + Real z_off = static_cast(kk-k); + + Amatrix(a_ind,0) = Real(1.0); + Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0) - xloc_on_yface; + Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1); + Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2) - zloc_on_yface; + + if (!flag(ii,jj,kk).isRegular()) + { + Amatrix(a_ind+18,0) = Real(1.0); + Amatrix(a_ind+18,1) = x_off + bcent(ii,jj,kk,0) - xloc_on_yface; + Amatrix(a_ind+18,2) = y_off + bcent(ii,jj,kk,1); + Amatrix(a_ind+18,3) = z_off + bcent(ii,jj,kk,2) - zloc_on_yface; + } } } + } + } // Columns 4-9 : [x*x x*y y*y x*z y*z z*z] for (int irow = 0; irow < 36; irow++) { - Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); - Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); - Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); - Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); - Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); - Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); + Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); + Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); + Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); + Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); + Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); + Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); } // Make the RHS = A^T v @@ -455,18 +486,22 @@ Real grad_y_of_phi_on_centroids(int i,int j,int k,int n, { rhs(irow) = 0.; - for (int jj = j-1; jj <= j; jj++) // Normal to face - for 
(int kk = k-1; kk <= k+1; kk++) // Tangential to face - for (int ii = i-1; ii <= i+1; ii++) // Tangential to face - if (!flag(ii,jj,kk).isCovered()) - { - int a_ind = (ii-(i-1)) + 3*(kk-(k-1)) + 9*(jj-(j-1)); + for (int jj = j-1; jj <= j; jj++) { // Normal to face + for (int kk = k-1; kk <= k+1; kk++) { // Tangential to face + for (int ii = i-1; ii <= i+1; ii++) { // Tangential to face + if (!flag(ii,jj,kk).isCovered()) + { + int a_ind = (ii-(i-1)) + 3*(kk-(k-1)) + 9*(jj-(j-1)); - rhs(irow) += Amatrix(a_ind,irow)* phi(ii,jj,kk,n); + rhs(irow) += Amatrix(a_ind,irow)* phi(ii,jj,kk,n); - if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog) - rhs(irow) += Amatrix(a_ind+18,irow)*phieb(ii,jj,kk,n); + if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog) { + rhs(irow) += Amatrix(a_ind+18,irow)*phieb(ii,jj,kk,n); + } + } } + } + } } cholsol_np10(Amatrix, rhs); @@ -490,49 +525,52 @@ Real grad_z_of_phi_on_centroids(int i,int j,int k,int n, // Order of column -- first 9 are cell centroids, second 9 are EB centroids - for (int irow = 0; irow < 36; irow++) - for (int icol = 0; icol < 10; icol++) + for (int irow = 0; irow < 36; irow++) { + for (int icol = 0; icol < 10; icol++) { Amatrix(irow,icol) = Real(0.0); + } + } // Columns 0-3: [e x y z] for (int kk = k-1; kk <= k; kk++) // Normal to face { - for (int jj = j-1; jj <= j+1; jj++) // Tangential to face - for (int ii = i-1; ii <= i+1; ii++) // Tangential to face - { - if (!flag(ii,jj,kk).isCovered()) + for (int jj = j-1; jj <= j+1; jj++) { // Tangential to face + for (int ii = i-1; ii <= i+1; ii++) // Tangential to face { - int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-1)); - - Real x_off = static_cast(ii-i); - Real y_off = static_cast(jj-j); - Real z_off = static_cast(kk-k) + Real(0.5); - - Amatrix(a_ind,0) = Real(1.0); - Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0) - xloc_on_zface; - Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1) - yloc_on_zface; - Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2); - - if (!flag(ii,jj,kk).isRegular()) + if (!flag(ii,jj,kk).isCovered()) { - Amatrix(a_ind+18,0) = Real(1.0); - Amatrix(a_ind+18,1) = x_off + bcent(ii,jj,kk,0) - xloc_on_zface; - Amatrix(a_ind+18,2) = y_off + bcent(ii,jj,kk,1) - yloc_on_zface; - Amatrix(a_ind+18,3) = z_off + bcent(ii,jj,kk,2); + int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-1)); + + Real x_off = static_cast(ii-i); + Real y_off = static_cast(jj-j); + Real z_off = static_cast(kk-k) + Real(0.5); + + Amatrix(a_ind,0) = Real(1.0); + Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0) - xloc_on_zface; + Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1) - yloc_on_zface; + Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2); + + if (!flag(ii,jj,kk).isRegular()) + { + Amatrix(a_ind+18,0) = Real(1.0); + Amatrix(a_ind+18,1) = x_off + bcent(ii,jj,kk,0) - xloc_on_zface; + Amatrix(a_ind+18,2) = y_off + bcent(ii,jj,kk,1) - yloc_on_zface; + Amatrix(a_ind+18,3) = z_off + bcent(ii,jj,kk,2); + } } } - } + } } // Columns 4-9 : [x*x x*y y*y x*z y*z z*z] for (int irow = 0; irow < 36; irow++) { - Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); - Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); - Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); - Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); - Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); - Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); + Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); + Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); + Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); + Amatrix(irow,7) = Amatrix(irow,1) * 
Amatrix(irow,3); + Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); + Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); } // Make the RHS = A^T v @@ -540,18 +578,22 @@ Real grad_z_of_phi_on_centroids(int i,int j,int k,int n, { rhs(irow) = 0.; - for (int kk = k-1; kk <= k; kk++) // Normal to face - for (int jj = j-1; jj <= j+1; jj++) // Tangential to face - for (int ii = i-1; ii <= i+1; ii++) // Tangential to face - if (!flag(ii,jj,kk).isCovered()) - { - int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-1)); + for (int kk = k-1; kk <= k; kk++) { // Normal to face + for (int jj = j-1; jj <= j+1; jj++) { // Tangential to face + for (int ii = i-1; ii <= i+1; ii++) { // Tangential to face + if (!flag(ii,jj,kk).isCovered()) + { + int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-1)); - rhs(irow) += Amatrix(a_ind,irow)* phi(ii,jj,kk,n); + rhs(irow) += Amatrix(a_ind,irow)* phi(ii,jj,kk,n); - if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog) - rhs(irow) += Amatrix(a_ind+18,irow)*phieb(ii,jj,kk,n); + if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog) { + rhs(irow) += Amatrix(a_ind+18,irow)*phieb(ii,jj,kk,n); + } + } } + } + } } cholsol_np10(Amatrix, rhs); @@ -574,47 +616,51 @@ Real grad_eb_of_phi_on_centroids(int i,int j,int k,int n, // Order of column -- first 27 are cell centroids, second 27 are EB centroids - for (int irow = 0; irow < 54; irow++) - for (int icol = 0; icol < 10; icol++) + for (int irow = 0; irow < 54; irow++) { + for (int icol = 0; icol < 10; icol++) { Amatrix(irow,icol) = Real(0.0); + } + } // Columns 0-3: [e x y z] - for (int kk = k-1; kk <= k+1; kk++) - for (int jj = j-1; jj <= j+1; jj++) - for (int ii = i-1; ii <= i+1; ii++) - { - if (!flag(ii,jj,kk).isCovered()) + for (int kk = k-1; kk <= k+1; kk++) { + for (int jj = j-1; jj <= j+1; jj++) { + for (int ii = i-1; ii <= i+1; ii++) { - int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-1)); - - Real x_off = static_cast(ii-i) - bcent(i,j,k,0); - Real y_off = static_cast(jj-j) - bcent(i,j,k,1); - Real z_off = static_cast(kk-k) - bcent(i,j,k,2); - - Amatrix(a_ind,0) = Real(1.0); - Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0); - Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1); - Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2); - - if (!flag(ii,jj,kk).isRegular()) + if (!flag(ii,jj,kk).isCovered()) { - Amatrix(a_ind+27,0) = Real(1.0); - Amatrix(a_ind+27,1) = x_off + bcent(ii,jj,kk,0); - Amatrix(a_ind+27,2) = y_off + bcent(ii,jj,kk,1); - Amatrix(a_ind+27,3) = z_off + bcent(ii,jj,kk,2); + int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-1)); + + Real x_off = static_cast(ii-i) - bcent(i,j,k,0); + Real y_off = static_cast(jj-j) - bcent(i,j,k,1); + Real z_off = static_cast(kk-k) - bcent(i,j,k,2); + + Amatrix(a_ind,0) = Real(1.0); + Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0); + Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1); + Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2); + + if (!flag(ii,jj,kk).isRegular()) + { + Amatrix(a_ind+27,0) = Real(1.0); + Amatrix(a_ind+27,1) = x_off + bcent(ii,jj,kk,0); + Amatrix(a_ind+27,2) = y_off + bcent(ii,jj,kk,1); + Amatrix(a_ind+27,3) = z_off + bcent(ii,jj,kk,2); + } } } - } + } + } // Columns 4-9 : [x*x x*y y*y x*z y*z z*z] for (int irow = 0; irow < 54; irow++) { - Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); - Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); - Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); - Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); - Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); - Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); + 
Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); + Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); + Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); + Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); + Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); + Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); } // Make the RHS = A^T v @@ -622,18 +668,22 @@ Real grad_eb_of_phi_on_centroids(int i,int j,int k,int n, { rhs(irow) = 0.; - for (int kk = k-1; kk <= k+1; kk++) - for (int jj = j-1; jj <= j+1; jj++) - for (int ii = i-1; ii <= i+1; ii++) - if (!flag(ii,jj,kk).isCovered()) - { - int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-1)); + for (int kk = k-1; kk <= k+1; kk++) { + for (int jj = j-1; jj <= j+1; jj++) { + for (int ii = i-1; ii <= i+1; ii++) { + if (!flag(ii,jj,kk).isCovered()) + { + int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-1)); - rhs(irow) += Amatrix(a_ind,irow)* phi(ii,jj,kk,n); + rhs(irow) += Amatrix(a_ind,irow)* phi(ii,jj,kk,n); - if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog) - rhs(irow) += Amatrix(a_ind+27,irow)*phieb(ii,jj,kk,n); + if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog) { + rhs(irow) += Amatrix(a_ind+27,irow)*phieb(ii,jj,kk,n); + } + } } + } + } } cholsol_for_eb(Amatrix, rhs); @@ -663,97 +713,108 @@ Real grad_x_of_phi_on_centroids_extdir(int i,int j,int k,int n, // Order of column -- first 9 are cell centroids, second 9 are EB centroids - for (int irow = 0; irow < 54; irow++) - for (int icol = 0; icol < 10; icol++) + for (int irow = 0; irow < 54; irow++) { + for (int icol = 0; icol < 10; icol++) { Amatrix(irow,icol) = Real(0.0); + } + } const int im = (i > domhi_x) ? 2 : 1; const int ip = 2 - im; // Columns 0-3: [e x y z] - for (int ii = i-im; ii <= i+ip; ii++) // Normal to face - for (int kk = k-1; kk <= k+1; kk++) // Tangential to face - for (int jj = j-1; jj <= j+1; jj++) // Tangential to face - { - // Don't include corner cells. Could make this even more strict - // by removing the on_?_face restrictions. - if (((on_x_face && ii < domlo_x) && (on_y_face && jj < domlo_y)) || - ((on_x_face && ii < domlo_x) && (on_y_face && jj > domhi_y)) || - ((on_x_face && ii > domhi_x) && (on_y_face && jj < domlo_y)) || - ((on_x_face && ii > domhi_x) && (on_y_face && jj > domhi_y)) || - ((on_y_face && jj < domlo_y) && (on_z_face && kk < domlo_z)) || - ((on_y_face && jj < domlo_y) && (on_z_face && kk > domhi_z)) || - ((on_y_face && jj > domhi_y) && (on_z_face && kk < domlo_z)) || - ((on_y_face && jj > domhi_y) && (on_z_face && kk > domhi_z)) || - ((on_x_face && ii < domlo_x) && (on_z_face && kk < domlo_z)) || - ((on_x_face && ii < domlo_x) && (on_z_face && kk > domhi_z)) || - ((on_x_face && ii > domhi_x) && (on_z_face && kk < domlo_z)) || - ((on_x_face && ii > domhi_x) && (on_z_face && kk > domhi_z))) { - continue; - } - - if ( !phi.contains(ii,jj,kk) ) - continue; - - if (!flag(ii,jj,kk).isCovered()) + for (int ii = i-im; ii <= i+ip; ii++) { // Normal to face + for (int kk = k-1; kk <= k+1; kk++) { // Tangential to face + for (int jj = j-1; jj <= j+1; jj++) // Tangential to face { - - int a_ind = (jj-(j-1)) + 3*(kk-(k-1)) + 9*(ii-(i-im)); - - Real x_off = static_cast(ii-i) + Real(0.5); - Real y_off = static_cast(jj-j); - Real z_off = static_cast(kk-k); - - if(on_x_face){ - if (ii < domlo_x && (vfrac(ii+1,jj,kk) != Real(1.0) || vfrac(ii+2,jj,kk) != Real(1.0)) ) - continue; - if (ii > domhi_x && (vfrac(ii-1,jj,kk) != Real(1.0) || vfrac(ii-2,jj,kk) != Real(1.0))) - continue; + // Don't include corner cells. 
Could make this even more strict + // by removing the on_?_face restrictions. + if (((on_x_face && ii < domlo_x) && (on_y_face && jj < domlo_y)) || + ((on_x_face && ii < domlo_x) && (on_y_face && jj > domhi_y)) || + ((on_x_face && ii > domhi_x) && (on_y_face && jj < domlo_y)) || + ((on_x_face && ii > domhi_x) && (on_y_face && jj > domhi_y)) || + ((on_y_face && jj < domlo_y) && (on_z_face && kk < domlo_z)) || + ((on_y_face && jj < domlo_y) && (on_z_face && kk > domhi_z)) || + ((on_y_face && jj > domhi_y) && (on_z_face && kk < domlo_z)) || + ((on_y_face && jj > domhi_y) && (on_z_face && kk > domhi_z)) || + ((on_x_face && ii < domlo_x) && (on_z_face && kk < domlo_z)) || + ((on_x_face && ii < domlo_x) && (on_z_face && kk > domhi_z)) || + ((on_x_face && ii > domhi_x) && (on_z_face && kk < domlo_z)) || + ((on_x_face && ii > domhi_x) && (on_z_face && kk > domhi_z))) { + continue; } - if(on_y_face){ - if (jj < domlo_y && (vfrac(ii,jj+1,kk) != Real(1.0) || vfrac(ii,jj+2,kk) != Real(1.0)) ) - continue; - if (jj > domhi_y && (vfrac(ii,jj-1,kk) != Real(1.0) || vfrac(ii,jj-2,kk) != Real(1.0)) ) - continue; + if ( !phi.contains(ii,jj,kk) ) { + continue; } - if(on_z_face){ - if (kk < domlo_z && (vfrac(ii,jj,kk+1) != Real(1.0) || vfrac(ii,jj,kk+2) != Real(1.0)) ) - continue; - if (kk > domhi_z && (vfrac(ii,jj,kk-1) != Real(1.0) || vfrac(ii,jj,kk-2) != Real(1.0)) ) - continue; - } + if (!flag(ii,jj,kk).isCovered()) + { - Amatrix(a_ind,0) = Real(1.0); - Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0); - Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1) - yloc_on_xface; - Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2) - zloc_on_xface; + int a_ind = (jj-(j-1)) + 3*(kk-(k-1)) + 9*(ii-(i-im)); - // Add in information about the location of the EB. Exclude - // EBs that are outside the domain. - if (flag(ii,jj,kk).isSingleValued() && - domlo_x <= ii && ii <= domhi_x && - domlo_y <= jj && jj <= domhi_y && - domlo_z <= kk && kk <= domhi_z) - { - Amatrix(a_ind+27,0) = Real(1.0); - Amatrix(a_ind+27,1) = x_off + bcent(ii,jj,kk,0); - Amatrix(a_ind+27,2) = y_off + bcent(ii,jj,kk,1) - yloc_on_xface; - Amatrix(a_ind+27,3) = z_off + bcent(ii,jj,kk,2) - zloc_on_xface; + Real x_off = static_cast(ii-i) + Real(0.5); + Real y_off = static_cast(jj-j); + Real z_off = static_cast(kk-k); + + if(on_x_face){ + if (ii < domlo_x && (vfrac(ii+1,jj,kk) != Real(1.0) || vfrac(ii+2,jj,kk) != Real(1.0)) ) { + continue; + } + if (ii > domhi_x && (vfrac(ii-1,jj,kk) != Real(1.0) || vfrac(ii-2,jj,kk) != Real(1.0))) { + continue; + } + } + + if(on_y_face){ + if (jj < domlo_y && (vfrac(ii,jj+1,kk) != Real(1.0) || vfrac(ii,jj+2,kk) != Real(1.0)) ) { + continue; + } + if (jj > domhi_y && (vfrac(ii,jj-1,kk) != Real(1.0) || vfrac(ii,jj-2,kk) != Real(1.0)) ) { + continue; + } + } + + if(on_z_face){ + if (kk < domlo_z && (vfrac(ii,jj,kk+1) != Real(1.0) || vfrac(ii,jj,kk+2) != Real(1.0)) ) { + continue; + } + if (kk > domhi_z && (vfrac(ii,jj,kk-1) != Real(1.0) || vfrac(ii,jj,kk-2) != Real(1.0)) ) { + continue; + } + } + + Amatrix(a_ind,0) = Real(1.0); + Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0); + Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1) - yloc_on_xface; + Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2) - zloc_on_xface; + + // Add in information about the location of the EB. Exclude + // EBs that are outside the domain. 
+ if (flag(ii,jj,kk).isSingleValued() && + domlo_x <= ii && ii <= domhi_x && + domlo_y <= jj && jj <= domhi_y && + domlo_z <= kk && kk <= domhi_z) + { + Amatrix(a_ind+27,0) = Real(1.0); + Amatrix(a_ind+27,1) = x_off + bcent(ii,jj,kk,0); + Amatrix(a_ind+27,2) = y_off + bcent(ii,jj,kk,1) - yloc_on_xface; + Amatrix(a_ind+27,3) = z_off + bcent(ii,jj,kk,2) - zloc_on_xface; + } } } - } + } + } // Columns 4-9 : [x*x x*y y*y x*z y*z z*z] for (int irow = 0; irow < 54; irow++) { - Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); - Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); - Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); - Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); - Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); - Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); + Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); + Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); + Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); + Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); + Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); + Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); } // Make the RHS = A^T v @@ -761,23 +822,27 @@ Real grad_x_of_phi_on_centroids_extdir(int i,int j,int k,int n, { rhs(irow) = 0.; - for (int ii = i-im; ii <= i+ip; ii++) // Normal to face - for (int kk = k-1; kk <= k+1; kk++) // Tangential to face - for (int jj = j-1; jj <= j+1; jj++) { // Tangential to face - if ( !phi.contains(ii,jj,kk) ) - continue; + for (int ii = i-im; ii <= i+ip; ii++) { // Normal to face + for (int kk = k-1; kk <= k+1; kk++) { // Tangential to face + for (int jj = j-1; jj <= j+1; jj++) { // Tangential to face + if ( !phi.contains(ii,jj,kk) ) { + continue; + } - if (!flag(ii,jj,kk).isCovered()) - { - int a_ind = (jj-(j-1)) + 3*(kk-(k-1)) + 9*(ii-(i-im)); - Real phi_val = Amatrix(a_ind,0)*phi(ii,jj,kk,n); + if (!flag(ii,jj,kk).isCovered()) + { + int a_ind = (jj-(j-1)) + 3*(kk-(k-1)) + 9*(ii-(i-im)); + Real phi_val = Amatrix(a_ind,0)*phi(ii,jj,kk,n); - rhs(irow) += Amatrix(a_ind,irow)* phi_val; + rhs(irow) += Amatrix(a_ind,irow)* phi_val; - if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog && Amatrix(a_ind+27,irow) != Real(0.0)) - rhs(irow) += Amatrix(a_ind+27,irow)*phieb(ii,jj,kk,n); - } + if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog && Amatrix(a_ind+27,irow) != Real(0.0)) { + rhs(irow) += Amatrix(a_ind+27,irow)*phieb(ii,jj,kk,n); + } + } + } } + } } cholsol_for_eb(Amatrix, rhs); @@ -805,96 +870,107 @@ Real grad_y_of_phi_on_centroids_extdir(int i,int j,int k,int n, // Order of column -- first 9 are cell centroids, second 9 are EB centroids - for (int irow = 0; irow < 54; irow++) - for (int icol = 0; icol < 10; icol++) + for (int irow = 0; irow < 54; irow++) { + for (int icol = 0; icol < 10; icol++) { Amatrix(irow,icol) = Real(0.0); + } + } const int jm = (j > domhi_y) ? 2 : 1; const int jp = 2 - jm; // Columns 0-2: [e x y] - for (int jj = j-jm; jj <= j+jp; jj++) // Normal to face - for (int kk = k-1; kk <= k+1; kk++) // Tangential to face - for (int ii = i-1; ii <= i+1; ii++) // Tangential to face - { - // Don't include corner cells. Could make this even more strict - // by removing the on_?_face restrictions. 
- if (((on_x_face && ii < domlo_x) && (on_y_face && jj < domlo_y)) || - ((on_x_face && ii < domlo_x) && (on_y_face && jj > domhi_y)) || - ((on_x_face && ii > domhi_x) && (on_y_face && jj < domlo_y)) || - ((on_x_face && ii > domhi_x) && (on_y_face && jj > domhi_y)) || - ((on_y_face && jj < domlo_y) && (on_z_face && kk < domlo_z)) || - ((on_y_face && jj < domlo_y) && (on_z_face && kk > domhi_z)) || - ((on_y_face && jj > domhi_y) && (on_z_face && kk < domlo_z)) || - ((on_y_face && jj > domhi_y) && (on_z_face && kk > domhi_z)) || - ((on_x_face && ii < domlo_x) && (on_z_face && kk < domlo_z)) || - ((on_x_face && ii < domlo_x) && (on_z_face && kk > domhi_z)) || - ((on_x_face && ii > domhi_x) && (on_z_face && kk < domlo_z)) || - ((on_x_face && ii > domhi_x) && (on_z_face && kk > domhi_z))) { - continue; - } - - if ( !phi.contains(ii,jj,kk) ) - continue; - - if (!flag(ii,jj,kk).isCovered()) + for (int jj = j-jm; jj <= j+jp; jj++) { // Normal to face + for (int kk = k-1; kk <= k+1; kk++) { // Tangential to face + for (int ii = i-1; ii <= i+1; ii++) // Tangential to face { - int a_ind = (ii-(i-1)) + 3*(kk-(k-1)) + 9*(jj-(j-jm)); - - Real x_off = static_cast(ii-i); - Real y_off = static_cast(jj-j) + Real(0.5); - Real z_off = static_cast(kk-k); - - if(on_x_face){ - if (ii < domlo_x && (vfrac(ii+1,jj,kk) != Real(1.0) || vfrac(ii+2,jj,kk) != Real(1.0)) ) - continue; - if (ii > domhi_x && (vfrac(ii-1,jj,kk) != Real(1.0) || vfrac(ii-2,jj,kk) != Real(1.0))) - continue; + // Don't include corner cells. Could make this even more strict + // by removing the on_?_face restrictions. + if (((on_x_face && ii < domlo_x) && (on_y_face && jj < domlo_y)) || + ((on_x_face && ii < domlo_x) && (on_y_face && jj > domhi_y)) || + ((on_x_face && ii > domhi_x) && (on_y_face && jj < domlo_y)) || + ((on_x_face && ii > domhi_x) && (on_y_face && jj > domhi_y)) || + ((on_y_face && jj < domlo_y) && (on_z_face && kk < domlo_z)) || + ((on_y_face && jj < domlo_y) && (on_z_face && kk > domhi_z)) || + ((on_y_face && jj > domhi_y) && (on_z_face && kk < domlo_z)) || + ((on_y_face && jj > domhi_y) && (on_z_face && kk > domhi_z)) || + ((on_x_face && ii < domlo_x) && (on_z_face && kk < domlo_z)) || + ((on_x_face && ii < domlo_x) && (on_z_face && kk > domhi_z)) || + ((on_x_face && ii > domhi_x) && (on_z_face && kk < domlo_z)) || + ((on_x_face && ii > domhi_x) && (on_z_face && kk > domhi_z))) { + continue; } - if(on_y_face){ - if (jj < domlo_y && (vfrac(ii,jj+1,kk) != Real(1.0) || vfrac(ii,jj+2,kk) != Real(1.0)) ) - continue; - if (jj > domhi_y && (vfrac(ii,jj-1,kk) != Real(1.0) || vfrac(ii,jj-2,kk) != Real(1.0)) ) - continue; + if ( !phi.contains(ii,jj,kk) ) { + continue; } - if(on_z_face){ - if (kk < domlo_z && (vfrac(ii,jj,kk+1) != Real(1.0) || vfrac(ii,jj,kk+2) != Real(1.0)) ) - continue; - if (kk > domhi_z && (vfrac(ii,jj,kk-1) != Real(1.0) || vfrac(ii,jj,kk-2) != Real(1.0)) ) - continue; - } - - Amatrix(a_ind,0) = Real(1.0); - Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0) - xloc_on_yface; - Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1); - Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2) - zloc_on_yface; - - // Add in information about the location of the EB. Exclude - // EBs that are outside the domain. 
- if (flag(ii,jj,kk).isSingleValued() && - domlo_x <= ii && ii <= domhi_x && - domlo_y <= jj && jj <= domhi_y && - domlo_z <= kk && kk <= domhi_z) + if (!flag(ii,jj,kk).isCovered()) { - Amatrix(a_ind+27,0) = Real(1.0); - Amatrix(a_ind+27,1) = x_off + bcent(ii,jj,kk,0) - xloc_on_yface; - Amatrix(a_ind+27,2) = y_off + bcent(ii,jj,kk,1); - Amatrix(a_ind+27,3) = z_off + bcent(ii,jj,kk,2) - zloc_on_yface; + int a_ind = (ii-(i-1)) + 3*(kk-(k-1)) + 9*(jj-(j-jm)); + + Real x_off = static_cast(ii-i); + Real y_off = static_cast(jj-j) + Real(0.5); + Real z_off = static_cast(kk-k); + + if(on_x_face){ + if (ii < domlo_x && (vfrac(ii+1,jj,kk) != Real(1.0) || vfrac(ii+2,jj,kk) != Real(1.0)) ) { + continue; + } + if (ii > domhi_x && (vfrac(ii-1,jj,kk) != Real(1.0) || vfrac(ii-2,jj,kk) != Real(1.0))) { + continue; + } + } + + if(on_y_face){ + if (jj < domlo_y && (vfrac(ii,jj+1,kk) != Real(1.0) || vfrac(ii,jj+2,kk) != Real(1.0)) ) { + continue; + } + if (jj > domhi_y && (vfrac(ii,jj-1,kk) != Real(1.0) || vfrac(ii,jj-2,kk) != Real(1.0)) ) { + continue; + } + } + + if(on_z_face){ + if (kk < domlo_z && (vfrac(ii,jj,kk+1) != Real(1.0) || vfrac(ii,jj,kk+2) != Real(1.0)) ) { + continue; + } + if (kk > domhi_z && (vfrac(ii,jj,kk-1) != Real(1.0) || vfrac(ii,jj,kk-2) != Real(1.0)) ) { + continue; + } + } + + Amatrix(a_ind,0) = Real(1.0); + Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0) - xloc_on_yface; + Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1); + Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2) - zloc_on_yface; + + // Add in information about the location of the EB. Exclude + // EBs that are outside the domain. + if (flag(ii,jj,kk).isSingleValued() && + domlo_x <= ii && ii <= domhi_x && + domlo_y <= jj && jj <= domhi_y && + domlo_z <= kk && kk <= domhi_z) + { + Amatrix(a_ind+27,0) = Real(1.0); + Amatrix(a_ind+27,1) = x_off + bcent(ii,jj,kk,0) - xloc_on_yface; + Amatrix(a_ind+27,2) = y_off + bcent(ii,jj,kk,1); + Amatrix(a_ind+27,3) = z_off + bcent(ii,jj,kk,2) - zloc_on_yface; + } } } } + } // Columns 4-9 : [x*x x*y y*y x*z y*z z*z] for (int irow = 0; irow < 54; irow++) { - Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); - Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); - Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); - Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); - Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); - Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); + Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); + Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); + Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); + Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); + Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); + Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); } // Make the RHS = A^T v @@ -902,23 +978,27 @@ Real grad_y_of_phi_on_centroids_extdir(int i,int j,int k,int n, { rhs(irow) = 0.; - for (int jj = j-jm; jj <= j+jp; jj++) // Normal to face - for (int kk = k-1; kk <= k+1; kk++) // Tangential to face - for (int ii = i-1; ii <= i+1; ii++) {// Tangential to face - if ( !phi.contains(ii,jj,kk) ) - continue; + for (int jj = j-jm; jj <= j+jp; jj++) { // Normal to face + for (int kk = k-1; kk <= k+1; kk++) { // Tangential to face + for (int ii = i-1; ii <= i+1; ii++) {// Tangential to face + if ( !phi.contains(ii,jj,kk) ) { + continue; + } - if (!flag(ii,jj,kk).isCovered()) - { - int a_ind = (ii-(i-1)) + 3*(kk-(k-1)) + 9*(jj-(j-jm)); - Real phi_val = Amatrix(a_ind,0) * phi(ii,jj,kk,n); + if (!flag(ii,jj,kk).isCovered()) + { + int a_ind = (ii-(i-1)) + 3*(kk-(k-1)) 
+ 9*(jj-(j-jm)); + Real phi_val = Amatrix(a_ind,0) * phi(ii,jj,kk,n); - rhs(irow) += Amatrix(a_ind,irow)* phi_val; + rhs(irow) += Amatrix(a_ind,irow)* phi_val; - if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog && Amatrix(a_ind+27,irow) != Real(0.0)) - rhs(irow) += Amatrix(a_ind+27,irow)*phieb(ii,jj,kk,n); + if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog && Amatrix(a_ind+27,irow) != Real(0.0)) { + rhs(irow) += Amatrix(a_ind+27,irow)*phieb(ii,jj,kk,n); + } + } } - } + } + } } cholsol_for_eb(Amatrix, rhs); @@ -946,9 +1026,11 @@ Real grad_z_of_phi_on_centroids_extdir(int i,int j,int k,int n, // Order of column -- first 9 are cell centroids, second 9 are EB centroids - for (int irow = 0; irow < 54; irow++) - for (int icol = 0; icol < 10; icol++) + for (int irow = 0; irow < 54; irow++) { + for (int icol = 0; icol < 10; icol++) { Amatrix(irow,icol) = Real(0.0); + } + } const int km = (k > domhi_z) ? 2 : 1; const int kp = 2 - km; @@ -956,88 +1038,96 @@ Real grad_z_of_phi_on_centroids_extdir(int i,int j,int k,int n, // Columns 0-3: [e x y z] for (int kk = k-km; kk <= k+kp; kk++) // Normal to face { - for (int jj = j-1; jj <= j+1; jj++) // Tangential to face - for (int ii = i-1; ii <= i+1; ii++) // Tangential to face - { - // Don't include corner cells. Could make this even more strict - // by removing the on_?_face restrictions. - if (((on_x_face && ii < domlo_x) && (on_y_face && jj < domlo_y)) || - ((on_x_face && ii < domlo_x) && (on_y_face && jj > domhi_y)) || - ((on_x_face && ii > domhi_x) && (on_y_face && jj < domlo_y)) || - ((on_x_face && ii > domhi_x) && (on_y_face && jj > domhi_y)) || - ((on_y_face && jj < domlo_y) && (on_z_face && kk < domlo_z)) || - ((on_y_face && jj < domlo_y) && (on_z_face && kk > domhi_z)) || - ((on_y_face && jj > domhi_y) && (on_z_face && kk < domlo_z)) || - ((on_y_face && jj > domhi_y) && (on_z_face && kk > domhi_z)) || - ((on_x_face && ii < domlo_x) && (on_z_face && kk < domlo_z)) || - ((on_x_face && ii < domlo_x) && (on_z_face && kk > domhi_z)) || - ((on_x_face && ii > domhi_x) && (on_z_face && kk < domlo_z)) || - ((on_x_face && ii > domhi_x) && (on_z_face && kk > domhi_z))) { - continue; - } - - if (!phi.contains(ii,jj,kk)) - continue; - - if (!flag(ii,jj,kk).isCovered()) + for (int jj = j-1; jj <= j+1; jj++) { // Tangential to face + for (int ii = i-1; ii <= i+1; ii++) // Tangential to face { - int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-km)); - - Real x_off = static_cast(ii-i); - Real y_off = static_cast(jj-j); - Real z_off = static_cast(kk-k) + Real(0.5); - - if(on_x_face){ - if (ii < domlo_x && (vfrac(ii+1,jj,kk) != Real(1.0) || vfrac(ii+2,jj,kk) != Real(1.0)) ) - continue; - if (ii > domhi_x && (vfrac(ii-1,jj,kk) != Real(1.0) || vfrac(ii-2,jj,kk) != Real(1.0))) - continue; - } - - if(on_y_face){ - if (jj < domlo_y && (vfrac(ii,jj+1,kk) != Real(1.0) || vfrac(ii,jj+2,kk) != Real(1.0)) ) - continue; - if (jj > domhi_y && (vfrac(ii,jj-1,kk) != Real(1.0) || vfrac(ii,jj-2,kk) != Real(1.0)) ) - continue; + // Don't include corner cells. Could make this even more strict + // by removing the on_?_face restrictions. 
+ if (((on_x_face && ii < domlo_x) && (on_y_face && jj < domlo_y)) || + ((on_x_face && ii < domlo_x) && (on_y_face && jj > domhi_y)) || + ((on_x_face && ii > domhi_x) && (on_y_face && jj < domlo_y)) || + ((on_x_face && ii > domhi_x) && (on_y_face && jj > domhi_y)) || + ((on_y_face && jj < domlo_y) && (on_z_face && kk < domlo_z)) || + ((on_y_face && jj < domlo_y) && (on_z_face && kk > domhi_z)) || + ((on_y_face && jj > domhi_y) && (on_z_face && kk < domlo_z)) || + ((on_y_face && jj > domhi_y) && (on_z_face && kk > domhi_z)) || + ((on_x_face && ii < domlo_x) && (on_z_face && kk < domlo_z)) || + ((on_x_face && ii < domlo_x) && (on_z_face && kk > domhi_z)) || + ((on_x_face && ii > domhi_x) && (on_z_face && kk < domlo_z)) || + ((on_x_face && ii > domhi_x) && (on_z_face && kk > domhi_z))) { + continue; } - if(on_z_face){ - if (kk < domlo_z && (vfrac(ii,jj,kk+1) != Real(1.0) || vfrac(ii,jj,kk+2) != Real(1.0)) ) - continue; - if (kk > domhi_z && (vfrac(ii,jj,kk-1) != Real(1.0) || vfrac(ii,jj,kk-2) != Real(1.0)) ) - continue; + if (!phi.contains(ii,jj,kk)) { + continue; } - Amatrix(a_ind,0) = Real(1.0); - Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0) - xloc_on_zface; - Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1) - yloc_on_zface ; - Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2); - - // Add in information about the location of the EB. Exclude - // EBs that are outside the domain. - if (flag(ii,jj,kk).isSingleValued() && - domlo_x <= ii && ii <= domhi_x && - domlo_y <= jj && jj <= domhi_y && - domlo_z <= kk && kk <= domhi_z) + if (!flag(ii,jj,kk).isCovered()) { - Amatrix(a_ind+27,0) = Real(1.0); - Amatrix(a_ind+27,1) = x_off + bcent(ii,jj,kk,0) - xloc_on_zface; - Amatrix(a_ind+27,2) = y_off + bcent(ii,jj,kk,1) - yloc_on_zface; - Amatrix(a_ind+27,3) = z_off + bcent(ii,jj,kk,2); + int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-km)); + + Real x_off = static_cast(ii-i); + Real y_off = static_cast(jj-j); + Real z_off = static_cast(kk-k) + Real(0.5); + + if(on_x_face){ + if (ii < domlo_x && (vfrac(ii+1,jj,kk) != Real(1.0) || vfrac(ii+2,jj,kk) != Real(1.0)) ) { + continue; + } + if (ii > domhi_x && (vfrac(ii-1,jj,kk) != Real(1.0) || vfrac(ii-2,jj,kk) != Real(1.0))) { + continue; + } + } + + if(on_y_face){ + if (jj < domlo_y && (vfrac(ii,jj+1,kk) != Real(1.0) || vfrac(ii,jj+2,kk) != Real(1.0)) ) { + continue; + } + if (jj > domhi_y && (vfrac(ii,jj-1,kk) != Real(1.0) || vfrac(ii,jj-2,kk) != Real(1.0)) ) { + continue; + } + } + + if(on_z_face){ + if (kk < domlo_z && (vfrac(ii,jj,kk+1) != Real(1.0) || vfrac(ii,jj,kk+2) != Real(1.0)) ) { + continue; + } + if (kk > domhi_z && (vfrac(ii,jj,kk-1) != Real(1.0) || vfrac(ii,jj,kk-2) != Real(1.0)) ) { + continue; + } + } + + Amatrix(a_ind,0) = Real(1.0); + Amatrix(a_ind,1) = x_off + ccent(ii,jj,kk,0) - xloc_on_zface; + Amatrix(a_ind,2) = y_off + ccent(ii,jj,kk,1) - yloc_on_zface ; + Amatrix(a_ind,3) = z_off + ccent(ii,jj,kk,2); + + // Add in information about the location of the EB. Exclude + // EBs that are outside the domain. 
+ if (flag(ii,jj,kk).isSingleValued() && + domlo_x <= ii && ii <= domhi_x && + domlo_y <= jj && jj <= domhi_y && + domlo_z <= kk && kk <= domhi_z) + { + Amatrix(a_ind+27,0) = Real(1.0); + Amatrix(a_ind+27,1) = x_off + bcent(ii,jj,kk,0) - xloc_on_zface; + Amatrix(a_ind+27,2) = y_off + bcent(ii,jj,kk,1) - yloc_on_zface; + Amatrix(a_ind+27,3) = z_off + bcent(ii,jj,kk,2); + } } } - } + } } // Columns 4-9 : [x*x x*y y*y x*z y*z z*z] for (int irow = 0; irow < 54; irow++) { - Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); - Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); - Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); - Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); - Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); - Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); + Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); + Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); + Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); + Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); + Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); + Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); } // Make the RHS = A^T v @@ -1045,23 +1135,27 @@ Real grad_z_of_phi_on_centroids_extdir(int i,int j,int k,int n, { rhs(irow) = 0.; - for (int kk = k-km; kk <= k+kp; kk++) // Normal to face - for (int jj = j-1; jj <= j+1; jj++) // Tangential to face - for (int ii = i-1; ii <= i+1; ii++) {// Tangential to face - if ( !phi.contains(ii,jj,kk) ) - continue; + for (int kk = k-km; kk <= k+kp; kk++) { // Normal to face + for (int jj = j-1; jj <= j+1; jj++) { // Tangential to face + for (int ii = i-1; ii <= i+1; ii++) {// Tangential to face + if ( !phi.contains(ii,jj,kk) ) { + continue; + } - if (!flag(ii,jj,kk).isCovered()) - { - int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-km)); - Real phi_val = Amatrix(a_ind,0) * phi(ii,jj,kk,n); + if (!flag(ii,jj,kk).isCovered()) + { + int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-km)); + Real phi_val = Amatrix(a_ind,0) * phi(ii,jj,kk,n); - rhs(irow) += Amatrix(a_ind,irow)* phi_val; + rhs(irow) += Amatrix(a_ind,irow)* phi_val; - if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog && Amatrix(a_ind+27,irow) != Real(0.0)) - rhs(irow) += Amatrix(a_ind+27,irow)*phieb(ii,jj,kk,n); + if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog && Amatrix(a_ind+27,irow) != Real(0.0)) { + rhs(irow) += Amatrix(a_ind+27,irow)*phieb(ii,jj,kk,n); + } + } } - } + } + } } cholsol_for_eb(Amatrix, rhs); @@ -1088,37 +1182,40 @@ Real grad_eb_of_phi_on_centroids_extdir(int i,int j,int k,int n, // Order of column -- first 27 are cell centroids, second 27 are EB centroids - for (int irow = 0; irow < 54; irow++) - for (int icol = 0; icol < 10; icol++) + for (int irow = 0; irow < 54; irow++) { + for (int icol = 0; icol < 10; icol++) { Amatrix(irow,icol) = Real(0.0); + } + } // Columns 0-3: [e x y z] - for (int kk = k-1; kk <= k+1; kk++) - for (int jj = j-1; jj <= j+1; jj++) - for (int ii = i-1; ii <= i+1; ii++) - { - // This is likely overkill for EB grads. - // Don't include corner cells. Could make this even more strict - // by removing the on_?_face restrictions. 
- if (((on_x_face && ii < domlo_x) && (on_y_face && jj < domlo_y)) || - ((on_x_face && ii < domlo_x) && (on_y_face && jj > domhi_y)) || - ((on_x_face && ii > domhi_x) && (on_y_face && jj < domlo_y)) || - ((on_x_face && ii > domhi_x) && (on_y_face && jj > domhi_y)) || - ((on_y_face && jj < domlo_y) && (on_z_face && kk < domlo_z)) || - ((on_y_face && jj < domlo_y) && (on_z_face && kk > domhi_z)) || - ((on_y_face && jj > domhi_y) && (on_z_face && kk < domlo_z)) || - ((on_y_face && jj > domhi_y) && (on_z_face && kk > domhi_z)) || - ((on_x_face && ii < domlo_x) && (on_z_face && kk < domlo_z)) || - ((on_x_face && ii < domlo_x) && (on_z_face && kk > domhi_z)) || - ((on_x_face && ii > domhi_x) && (on_z_face && kk < domlo_z)) || - ((on_x_face && ii > domhi_x) && (on_z_face && kk > domhi_z))) { - continue; - } + for (int kk = k-1; kk <= k+1; kk++) { + for (int jj = j-1; jj <= j+1; jj++) { + for (int ii = i-1; ii <= i+1; ii++) + { + // This is likely overkill for EB grads. + // Don't include corner cells. Could make this even more strict + // by removing the on_?_face restrictions. + if (((on_x_face && ii < domlo_x) && (on_y_face && jj < domlo_y)) || + ((on_x_face && ii < domlo_x) && (on_y_face && jj > domhi_y)) || + ((on_x_face && ii > domhi_x) && (on_y_face && jj < domlo_y)) || + ((on_x_face && ii > domhi_x) && (on_y_face && jj > domhi_y)) || + ((on_y_face && jj < domlo_y) && (on_z_face && kk < domlo_z)) || + ((on_y_face && jj < domlo_y) && (on_z_face && kk > domhi_z)) || + ((on_y_face && jj > domhi_y) && (on_z_face && kk < domlo_z)) || + ((on_y_face && jj > domhi_y) && (on_z_face && kk > domhi_z)) || + ((on_x_face && ii < domlo_x) && (on_z_face && kk < domlo_z)) || + ((on_x_face && ii < domlo_x) && (on_z_face && kk > domhi_z)) || + ((on_x_face && ii > domhi_x) && (on_z_face && kk < domlo_z)) || + ((on_x_face && ii > domhi_x) && (on_z_face && kk > domhi_z))) { + continue; + } - if ( !phi.contains(ii,jj,kk) ) - continue; + if ( !phi.contains(ii,jj,kk) ) { + continue; + } - if (!flag(ii,jj,kk).isCovered()) + if (!flag(ii,jj,kk).isCovered()) { int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-1)); @@ -1127,24 +1224,30 @@ Real grad_eb_of_phi_on_centroids_extdir(int i,int j,int k,int n, Real z_off = static_cast(kk-k) - bcent(i,j,k,2); if(on_x_face){ - if (ii < domlo_x && (vfrac(ii+1,jj,kk) != Real(1.0) || vfrac(ii+2,jj,kk) != Real(1.0)) ) + if (ii < domlo_x && (vfrac(ii+1,jj,kk) != Real(1.0) || vfrac(ii+2,jj,kk) != Real(1.0)) ) { continue; - if (ii > domhi_x && (vfrac(ii-1,jj,kk) != Real(1.0) || vfrac(ii-2,jj,kk) != Real(1.0))) + } + if (ii > domhi_x && (vfrac(ii-1,jj,kk) != Real(1.0) || vfrac(ii-2,jj,kk) != Real(1.0))) { continue; + } } if(on_y_face){ - if (jj < domlo_y && (vfrac(ii,jj+1,kk) != Real(1.0) || vfrac(ii,jj+2,kk) != Real(1.0)) ) + if (jj < domlo_y && (vfrac(ii,jj+1,kk) != Real(1.0) || vfrac(ii,jj+2,kk) != Real(1.0)) ) { continue; - if (jj > domhi_y && (vfrac(ii,jj-1,kk) != Real(1.0) || vfrac(ii,jj-2,kk) != Real(1.0)) ) + } + if (jj > domhi_y && (vfrac(ii,jj-1,kk) != Real(1.0) || vfrac(ii,jj-2,kk) != Real(1.0)) ) { continue; + } } if(on_z_face){ - if (kk < domlo_z && (vfrac(ii,jj,kk+1) != Real(1.0) || vfrac(ii,jj,kk+2) != Real(1.0)) ) + if (kk < domlo_z && (vfrac(ii,jj,kk+1) != Real(1.0) || vfrac(ii,jj,kk+2) != Real(1.0)) ) { continue; - if (kk > domhi_z && (vfrac(ii,jj,kk-1) != Real(1.0) || vfrac(ii,jj,kk-2) != Real(1.0)) ) + } + if (kk > domhi_z && (vfrac(ii,jj,kk-1) != Real(1.0) || vfrac(ii,jj,kk-2) != Real(1.0)) ) { continue; + } } Amatrix(a_ind,0) = Real(1.0); @@ -1163,17 +1266,19 @@ 
Real grad_eb_of_phi_on_centroids_extdir(int i,int j,int k,int n, Amatrix(a_ind+27,3) = z_off + bcent(ii,jj,kk,2); } } - } + } + } + } // Columns 4-9 : [x*x x*y y*y x*z y*z z*z] for (int irow = 0; irow < 54; irow++) { - Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); - Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); - Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); - Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); - Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); - Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); + Amatrix(irow,4) = Amatrix(irow,1) * Amatrix(irow,1); + Amatrix(irow,5) = Amatrix(irow,1) * Amatrix(irow,2); + Amatrix(irow,6) = Amatrix(irow,2) * Amatrix(irow,2); + Amatrix(irow,7) = Amatrix(irow,1) * Amatrix(irow,3); + Amatrix(irow,8) = Amatrix(irow,2) * Amatrix(irow,3); + Amatrix(irow,9) = Amatrix(irow,3) * Amatrix(irow,3); } // Make the RHS = A^T v @@ -1181,24 +1286,28 @@ Real grad_eb_of_phi_on_centroids_extdir(int i,int j,int k,int n, { rhs(irow) = 0.; - for (int kk = k-1; kk <= k+1; kk++) - for (int jj = j-1; jj <= j+1; jj++) - for (int ii = i-1; ii <= i+1; ii++) { - if ( !phi.contains(ii,jj,kk) ) - continue; + for (int kk = k-1; kk <= k+1; kk++) { + for (int jj = j-1; jj <= j+1; jj++) { + for (int ii = i-1; ii <= i+1; ii++) { + if ( !phi.contains(ii,jj,kk) ) { + continue; + } - if (!flag(ii,jj,kk).isCovered()) - { - int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-1)); + if (!flag(ii,jj,kk).isCovered()) + { + int a_ind = (ii-(i-1)) + 3*(jj-(j-1)) + 9*(kk-(k-1)); - Real phi_val = Amatrix(a_ind,0) * phi(ii,jj,kk,n); + Real phi_val = Amatrix(a_ind,0) * phi(ii,jj,kk,n); - rhs(irow) += Amatrix(a_ind,irow)* phi_val; + rhs(irow) += Amatrix(a_ind,irow)* phi_val; - if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog && Amatrix(a_ind+27,irow) != Real(0.0)) - rhs(irow) += Amatrix(a_ind+27,irow)*phieb(ii,jj,kk,n); + if (flag(ii,jj,kk).isSingleValued() && is_eb_inhomog && Amatrix(a_ind+27,irow) != Real(0.0)) { + rhs(irow) += Amatrix(a_ind+27,irow)*phieb(ii,jj,kk,n); + } + } } - } + } + } } cholsol_for_eb(Amatrix, rhs); diff --git a/Src/EB/AMReX_EB_Redistribution.cpp b/Src/EB/AMReX_EB_Redistribution.cpp index 28fd8b70776..54fcea3889d 100644 --- a/Src/EB/AMReX_EB_Redistribution.cpp +++ b/Src/EB/AMReX_EB_Redistribution.cpp @@ -31,12 +31,14 @@ namespace amrex { const Real* dx = geom.CellSize(); #if (AMREX_SPACEDIM == 2) - if (! amrex::almostEqual(dx[0], dx[1])) + if (! amrex::almostEqual(dx[0], dx[1])) { amrex::Abort("apply_eb_redistribution(): grid spacing must be uniform"); + } #elif (AMREX_SPACEDIM == 3) if( ! amrex::almostEqual(dx[0],dx[1]) || - ! amrex::almostEqual(dx[1],dx[2]) ) + ! 
amrex::almostEqual(dx[1],dx[2]) ) { amrex::Abort("apply_eb_redistribution(): grid spacing must be uniform"); + } #endif // diff --git a/Src/EB/AMReX_EB_RedistributionApply.cpp b/Src/EB/AMReX_EB_RedistributionApply.cpp index d7c26a69378..d1900a435c9 100644 --- a/Src/EB/AMReX_EB_RedistributionApply.cpp +++ b/Src/EB/AMReX_EB_RedistributionApply.cpp @@ -91,29 +91,30 @@ void ApplyRedistribution ( Box const& bx, int ncomp, Array4 cent_hat_const = cent_hat_fab.const_array(); Box domain_per_grown = lev_geom.Domain(); - AMREX_D_TERM(if (lev_geom.isPeriodic(0)) domain_per_grown.grow(0,1);, - if (lev_geom.isPeriodic(1)) domain_per_grown.grow(1,1);, - if (lev_geom.isPeriodic(2)) domain_per_grown.grow(2,1);); + AMREX_D_TERM(if (lev_geom.isPeriodic(0)) { domain_per_grown.grow(0,1); }, + if (lev_geom.isPeriodic(1)) { domain_per_grown.grow(1,1); }, + if (lev_geom.isPeriodic(2)) { domain_per_grown.grow(2,1); }) // At any external Dirichlet domain boundaries we need to set dUdt_in to 0 // in the cells just outside the domain because those values will be used // in the slope computation in state redistribution. We assume here that // the ext_dir values of U_in itself have already been set. - if (!domain_per_grown.contains(bxg1)) + if (!domain_per_grown.contains(bxg1)) { amrex::ParallelFor(bxg1,ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept - { - if (!domain_per_grown.contains(IntVect(AMREX_D_DECL(i,j,k)))) - dUdt_in(i,j,k,n) = 0.; - }); + { + if (!domain_per_grown.contains(IntVect(AMREX_D_DECL(i,j,k)))) { + dUdt_in(i,j,k,n) = 0.; + } + }); + } amrex::ParallelFor(Box(scratch), ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept - { - const Real scale = (srd_update_scale) ? srd_update_scale(i,j,k) : Real(1.0); - scratch(i,j,k,n) = U_in(i,j,k,n) + dt * dUdt_in(i,j,k,n) / scale; - } - ); + { + const Real scale = (srd_update_scale) ? srd_update_scale(i,j,k) : Real(1.0); + scratch(i,j,k,n) = U_in(i,j,k,n) + dt * dUdt_in(i,j,k,n) / scale; + }); MakeITracker(bx, AMREX_D_DECL(apx, apy, apz), vfrac, itr, lev_geom, target_volfrac); @@ -127,40 +128,38 @@ void ApplyRedistribution ( Box const& bx, int ncomp, amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + { + // Only update the values which actually changed -- this makes + // the results insensitive to tiling -- otherwise cells that aren't + // changed but are in a tile on which StateRedistribute gets called + // will have precision-level changes due to adding/subtracting U_in + // and multiplying/dividing by dt. Here we test on whether (i,j,k) + // has at least one neighbor and/or whether (i,j,k) is in the + // neighborhood of another cell -- if either of those is true the + // value may have changed + + if (itr(i,j,k,0) > 0 || nrs(i,j,k) > 1.) { - // Only update the values which actually changed -- this makes - // the results insensitive to tiling -- otherwise cells that aren't - // changed but are in a tile on which StateRedistribute gets called - // will have precision-level changes due to adding/subtracting U_in - // and multiplying/dividing by dt. Here we test on whether (i,j,k) - // has at least one neighbor and/or whether (i,j,k) is in the - // neighborhood of another cell -- if either of those is true the - // value may have changed - - if (itr(i,j,k,0) > 0 || nrs(i,j,k) > 1.) - { - const Real scale = (srd_update_scale) ? srd_update_scale(i,j,k) : Real(1.0); + const Real scale = (srd_update_scale) ? 
srd_update_scale(i,j,k) : Real(1.0); - dUdt_out(i,j,k,n) = scale * (dUdt_out(i,j,k,n) - U_in(i,j,k,n)) / dt; + dUdt_out(i,j,k,n) = scale * (dUdt_out(i,j,k,n) - U_in(i,j,k,n)) / dt; - } - else - { - dUdt_out(i,j,k,n) = dUdt_in(i,j,k,n); - } } - ); + else + { + dUdt_out(i,j,k,n) = dUdt_in(i,j,k,n); + } + }); } else if (redistribution_type == "NoRedist") { amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept - { - dUdt_out(i,j,k,n) = dUdt_in(i,j,k,n); - } - ); + { + dUdt_out(i,j,k,n) = dUdt_in(i,j,k,n); + }); } else { - amrex::Error("Not a legit redist_type"); + amrex::Error("Not a legit redist_type"); } } @@ -238,7 +237,7 @@ ApplyMLRedistribution ( Box const& bx, int ncomp, ); } else { - amrex::Error("Not a legit redist_type in ApplyML"); + amrex::Error("Not a legit redist_type in ApplyML"); } } diff --git a/Src/EB/AMReX_EB_Slopes_2D_K.H b/Src/EB/AMReX_EB_Slopes_2D_K.H index 7bc346c04a7..06f1e6cba6a 100644 --- a/Src/EB/AMReX_EB_Slopes_2D_K.H +++ b/Src/EB/AMReX_EB_Slopes_2D_K.H @@ -440,95 +440,93 @@ amrex_calc_slopes_extdir_eb (int i, int j, int k, int n, } else { - amrex::Real A[dim_a][AMREX_SPACEDIM]; + amrex::Real A[dim_a][AMREX_SPACEDIM]; - int lc=0; - int kk = 0; - { - for(int jj(-1); jj<=1; jj++) - for(int ii(-1); ii<=1; ii++) - { + int lc=0; + int kk = 0; + + for(int jj(-1); jj<=1; jj++) { + for(int ii(-1); ii<=1; ii++) + { if( flag(i,j,k).isConnected(ii,jj,kk) && ! (ii==0 && jj==0 && kk==0)) + { + bool ilo_test = ( edlo_x && (i == domlo_x) && ii == -1); + bool ihi_test = ( edhi_x && (i == domhi_x) && ii == 1); + + bool jlo_test = ( edlo_y && (j == domlo_y) && jj == -1); + bool jhi_test = ( edhi_y && (j == domhi_y) && jj == 1); + + bool klo_test = false; + bool khi_test = false; + + // These are the default values if no physical boundary + A[lc][0] = amrex::Real(ii) + ccent(i+ii,j+jj,k+kk,0); + A[lc][1] = amrex::Real(jj) + ccent(i+ii,j+jj,k+kk,1); + // Do corrections for entire x-face + if (ilo_test) + { + if (!jlo_test && !jhi_test && !klo_test && !khi_test) + { + A[lc][1] = amrex::Real(jj) + fcx(i ,j+jj,k+kk,0); + } + A[lc][0] = amrex::Real(-0.5) ; + } else if (ihi_test) { + + if (!jlo_test && !jhi_test && !klo_test && !khi_test) + { + A[lc][1] = amrex::Real(jj) + fcx(i+ii,j+jj,k+kk,0); + } + A[lc][0] = amrex::Real(0.5) ; + } + + // Do corrections for entire y-face + if (jlo_test) { - bool ilo_test = ( edlo_x && (i == domlo_x) && ii == -1); - bool ihi_test = ( edhi_x && (i == domhi_x) && ii == 1); - - bool jlo_test = ( edlo_y && (j == domlo_y) && jj == -1); - bool jhi_test = ( edhi_y && (j == domhi_y) && jj == 1); - - bool klo_test = false; - bool khi_test = false; - - // These are the default values if no physical boundary - A[lc][0] = amrex::Real(ii) + ccent(i+ii,j+jj,k+kk,0); - A[lc][1] = amrex::Real(jj) + ccent(i+ii,j+jj,k+kk,1); - // Do corrections for entire x-face - if (ilo_test) - { - if (!jlo_test && !jhi_test && !klo_test && !khi_test) - { - A[lc][1] = amrex::Real(jj) + fcx(i ,j+jj,k+kk,0); - } - A[lc][0] = amrex::Real(-0.5) ; - } else if (ihi_test) { - - if (!jlo_test && !jhi_test && !klo_test && !khi_test) - { - A[lc][1] = amrex::Real(jj) + fcx(i+ii,j+jj,k+kk,0); - } - A[lc][0] = amrex::Real(0.5) ; - } - - // Do corrections for entire y-face - if (jlo_test) - { - if (!ilo_test && !ihi_test && !klo_test && !khi_test) - { - A[lc][0] = amrex::Real(ii) + fcy(i+ii,j ,k+kk,0); - } - A[lc][1] = amrex::Real(-0.5) ; - - } else if (jhi_test) { - - if (!ilo_test && !ihi_test && !klo_test && !khi_test) - { - A[lc][0] = amrex::Real(ii) + 
fcy(i+ii,j+jj,k+kk,0); - } - A[lc][1] = amrex::Real(0.5) ; + if (!ilo_test && !ihi_test && !klo_test && !khi_test) + { + A[lc][0] = amrex::Real(ii) + fcy(i+ii,j ,k+kk,0); } + A[lc][1] = amrex::Real(-0.5) ; - A[lc][0] -= ccent(i,j,k,0); - A[lc][1] -= ccent(i,j,k,1); + } else if (jhi_test) { - } else { - A[lc][0] = amrex::Real(0.0); - A[lc][1] = amrex::Real(0.0); + if (!ilo_test && !ihi_test && !klo_test && !khi_test) + { + A[lc][0] = amrex::Real(ii) + fcy(i+ii,j+jj,k+kk,0); + } + A[lc][1] = amrex::Real(0.5) ; } - lc++; - } // i,j - } // k - const auto& slopes = amrex_calc_slopes_eb_given_A (i,j,k,n,A,state,flag); - xslope = slopes[0]; - yslope = slopes[1]; + A[lc][0] -= ccent(i,j,k,0); + A[lc][1] -= ccent(i,j,k,1); - // This will over-write the values of xslope and yslope if appropriate - amrex_overwrite_with_regular_slopes_extdir(i,j,k,n,xslope,yslope,state,vfrac, - edlo_x,edlo_y,edhi_x,edhi_y, - domlo_x,domlo_y,domhi_x,domhi_y,max_order); + } else { + A[lc][0] = amrex::Real(0.0); + A[lc][1] = amrex::Real(0.0); + } + lc++; + }} // i,j - } // end of needs_bndry_stencil + const auto& slopes = amrex_calc_slopes_eb_given_A (i,j,k,n,A,state,flag); + xslope = slopes[0]; + yslope = slopes[1]; - // Zero out slopes outside of an extdir (or hoextrap) boundary - // TODO: is this the right thing to do at a HOEXTRAP boundary?? - if ( (edlo_x && i < domlo_x) || (edhi_x && i > domhi_x) || - (edlo_y && j < domlo_y) || (edhi_y && j > domhi_y) ) - { - xslope = 0.; - yslope = 0.; + // This will over-write the values of xslope and yslope if appropriate + amrex_overwrite_with_regular_slopes_extdir(i,j,k,n,xslope,yslope,state,vfrac, + edlo_x,edlo_y,edhi_x,edhi_y, + domlo_x,domlo_y,domhi_x,domhi_y,max_order); + + // Zero out slopes outside of an extdir (or hoextrap) boundary + // TODO: is this the right thing to do at a HOEXTRAP boundary?? 
+ if ( (edlo_x && i < domlo_x) || (edhi_x && i > domhi_x) || + (edlo_y && j < domlo_y) || (edhi_y && j > domhi_y) ) + { + xslope = 0.; + yslope = 0.; + } + return {xslope,yslope}; } - return {xslope,yslope}; } // amrex_calc_slopes_extdir_eb_grown calculates the slope in each coordinate direction using a @@ -574,94 +572,92 @@ amrex_calc_slopes_extdir_eb_grown (int i, int j, int k, int n, int nx, int ny, } else { - amrex::Real A[dim_a][AMREX_SPACEDIM]; + amrex::Real A[dim_a][AMREX_SPACEDIM]; - int lc=0; - int kk = 0; - { - for(int jj(-ny); jj<=ny; jj++) - for(int ii(-nx); ii<=nx; ii++) - { + int lc=0; + int kk = 0; + + for(int jj(-ny); jj<=ny; jj++) { + for(int ii(-nx); ii<=nx; ii++) + { if ( !flag(i+ii,j+jj,k).isCovered() && !(ii==0 && jj==0 && kk==0) ) - { - bool ilo_test = ( edlo_x && (i == domlo_x) && ii == -1); - bool ihi_test = ( edhi_x && (i == domhi_x) && ii == 1); + { + bool ilo_test = ( edlo_x && (i == domlo_x) && ii == -1); + bool ihi_test = ( edhi_x && (i == domhi_x) && ii == 1); - bool jlo_test = ( edlo_y && (j == domlo_y) && jj == -1); - bool jhi_test = ( edhi_y && (j == domhi_y) && jj == 1); + bool jlo_test = ( edlo_y && (j == domlo_y) && jj == -1); + bool jhi_test = ( edhi_y && (j == domhi_y) && jj == 1); - bool klo_test = false; - bool khi_test = false; + bool klo_test = false; + bool khi_test = false; - // These are the default values if no physical boundary - A[lc][0] = amrex::Real(ii) + ccent(i+ii,j+jj,k+kk,0); - A[lc][1] = amrex::Real(jj) + ccent(i+ii,j+jj,k+kk,1); - // Do corrections for entire x-face - if (ilo_test) + // These are the default values if no physical boundary + A[lc][0] = amrex::Real(ii) + ccent(i+ii,j+jj,k+kk,0); + A[lc][1] = amrex::Real(jj) + ccent(i+ii,j+jj,k+kk,1); + // Do corrections for entire x-face + if (ilo_test) + { + if (!jlo_test && !jhi_test && !klo_test && !khi_test) { - if (!jlo_test && !jhi_test && !klo_test && !khi_test) - { - A[lc][1] = amrex::Real(jj) + fcx(i ,j+jj,k+kk,0); - } - A[lc][0] = amrex::Real(-0.5) ; - } else if (ihi_test) { - - if (!jlo_test && !jhi_test && !klo_test && !khi_test) - { - A[lc][1] = amrex::Real(jj) + fcx(i+ii,j+jj,k+kk,0); - } - A[lc][0] = amrex::Real(0.5) ; + A[lc][1] = amrex::Real(jj) + fcx(i ,j+jj,k+kk,0); } + A[lc][0] = amrex::Real(-0.5) ; + } else if (ihi_test) { - // Do corrections for entire y-face - if (jlo_test) + if (!jlo_test && !jhi_test && !klo_test && !khi_test) { - if (!ilo_test && !ihi_test && !klo_test && !khi_test) - { - A[lc][0] = amrex::Real(ii) + fcy(i+ii,j ,k+kk,0); - } - A[lc][1] = amrex::Real(-0.5) ; - - } else if (jhi_test) { - - if (!ilo_test && !ihi_test && !klo_test && !khi_test) - { - A[lc][0] = amrex::Real(ii) + fcy(i+ii,j+jj,k+kk,0); - } - A[lc][1] = amrex::Real(0.5) ; - } - - A[lc][0] -= ccent(i,j,k,0); - A[lc][1] -= ccent(i,j,k,1); - - } else { - A[lc][0] = amrex::Real(0.0); - A[lc][1] = amrex::Real(0.0); + A[lc][1] = amrex::Real(jj) + fcx(i+ii,j+jj,k+kk,0); + } + A[lc][0] = amrex::Real(0.5) ; } - lc++; - } // i,j - } // k - const auto& slopes = amrex_calc_slopes_eb_given_A_grown (i,j,k,n,nx,ny,A,state,flag); - xslope = slopes[0]; - yslope = slopes[1]; + // Do corrections for entire y-face + if (jlo_test) + { + if (!ilo_test && !ihi_test && !klo_test && !khi_test) + { + A[lc][0] = amrex::Real(ii) + fcy(i+ii,j ,k+kk,0); + } + A[lc][1] = amrex::Real(-0.5) ; - // This will over-write the values of xslope and yslope if appropriate - amrex_overwrite_with_regular_slopes_extdir(i,j,k,n,xslope,yslope,state,vfrac, - edlo_x,edlo_y,edhi_x,edhi_y, - 
domlo_x,domlo_y,domhi_x,domhi_y,max_order); + } else if (jhi_test) { - } // end of needs_bndry_stencil + if (!ilo_test && !ihi_test && !klo_test && !khi_test) + { + A[lc][0] = amrex::Real(ii) + fcy(i+ii,j+jj,k+kk,0); + } + A[lc][1] = amrex::Real(0.5) ; + } - // Zero out slopes outside of an extdir (or hoextrap) boundary - // TODO: is this the right thing to do at a HOEXTRAP boundary?? - if ( (edlo_x && i < domlo_x) || (edhi_x && i > domhi_x) || - (edlo_y && j < domlo_y) || (edhi_y && j > domhi_y) ) - { - xslope = 0.; - yslope = 0.; + A[lc][0] -= ccent(i,j,k,0); + A[lc][1] -= ccent(i,j,k,1); + + } else { + A[lc][0] = amrex::Real(0.0); + A[lc][1] = amrex::Real(0.0); + } + lc++; + }} // i,j + + const auto& slopes = amrex_calc_slopes_eb_given_A_grown (i,j,k,n,nx,ny,A,state,flag); + xslope = slopes[0]; + yslope = slopes[1]; + + // This will over-write the values of xslope and yslope if appropriate + amrex_overwrite_with_regular_slopes_extdir(i,j,k,n,xslope,yslope,state,vfrac, + edlo_x,edlo_y,edhi_x,edhi_y, + domlo_x,domlo_y,domhi_x,domhi_y,max_order); + + // Zero out slopes outside of an extdir (or hoextrap) boundary + // TODO: is this the right thing to do at a HOEXTRAP boundary?? + if ( (edlo_x && i < domlo_x) || (edhi_x && i > domhi_x) || + (edlo_y && j < domlo_y) || (edhi_y && j > domhi_y) ) + { + xslope = 0.; + yslope = 0.; + } + return {xslope,yslope}; } - return {xslope,yslope}; } AMREX_GPU_DEVICE AMREX_FORCE_INLINE @@ -705,8 +701,8 @@ amrex_calc_alpha_limiter(int i, int j, int k, int n, for(int ii(-1); ii<=1; ii++){ if( flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0)) { - if ((ii==-1 || ii==1) && jj==0) cuts_x++; - if ((jj==-1 || jj==1) && ii==0) cuts_y++; + if ((ii==-1 || ii==1) && jj==0) { cuts_x++; } + if ((jj==-1 || jj==1) && ii==0) { cuts_y++; } } } } @@ -716,101 +712,101 @@ amrex_calc_alpha_limiter(int i, int j, int k, int n, //Reconstruct values at the face centroids and compute the limiter if(flag(i,j,k).isConnected(0,1,0)) { - amrex::Real xf = fcy(i,j+1,k,0); // local (x,z) of centroid of y-face we are extrapolating to + amrex::Real xf = fcy(i,j+1,k,0); // local (x,z) of centroid of y-face we are extrapolating to - amrex::Real delta_x = xf - xc; - amrex::Real delta_y = amrex::Real(0.5) - yc; + amrex::Real delta_x = xf - xc; + amrex::Real delta_y = amrex::Real(0.5) - yc; - amrex::Real q_hat = state(i,j,k,n) + delta_x * slopes[0] + delta_y * slopes[1]; + amrex::Real q_hat = state(i,j,k,n) + delta_x * slopes[0] + delta_y * slopes[1]; - amrex::Real q_min = state(i,j,k,n); - amrex::Real q_max = state(i,j,k,n); + amrex::Real q_min = state(i,j,k,n); + amrex::Real q_max = state(i,j,k,n); - // Compute max and min values in a 3x2 stencil - for(int jj(0); jj<=1; jj++){ - for(int ii(-1); ii<=1; ii++){ - if ( flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0) ) { - if (state(i+ii,j+jj,k+kk,n) > q_max) q_max = state(i+ii,j+jj,k+kk,n); - if (state(i+ii,j+jj,k+kk,n) < q_min) q_min = state(i+ii,j+jj,k+kk,n); + // Compute max and min values in a 3x2 stencil + for(int jj(0); jj<=1; jj++){ + for(int ii(-1); ii<=1; ii++){ + if ( flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0) ) { + if (state(i+ii,j+jj,k+kk,n) > q_max) { q_max = state(i+ii,j+jj,k+kk,n); } + if (state(i+ii,j+jj,k+kk,n) < q_min) { q_min = state(i+ii,j+jj,k+kk,n); } } - } - } + } + } - alpha = amrex_calc_alpha_stencil(q_hat, q_max, q_min, state(i,j,k,n), alpha); + alpha = amrex_calc_alpha_stencil(q_hat, q_max, q_min, state(i,j,k,n), alpha); } if (flag(i,j,k).isConnected(0,-1,0)){ - amrex::Real xf = fcy(i,j,k,0); // 
local (x,z) of centroid of y-face we are extrapolating to + amrex::Real xf = fcy(i,j,k,0); // local (x,z) of centroid of y-face we are extrapolating to - amrex::Real delta_x = xf - xc; - amrex::Real delta_y = amrex::Real(0.5) + yc; + amrex::Real delta_x = xf - xc; + amrex::Real delta_y = amrex::Real(0.5) + yc; - amrex::Real q_hat = state(i,j,k,n) + delta_x * slopes[0] - delta_y * slopes[1]; + amrex::Real q_hat = state(i,j,k,n) + delta_x * slopes[0] - delta_y * slopes[1]; - amrex::Real q_min = state(i,j,k,n); - amrex::Real q_max = state(i,j,k,n); + amrex::Real q_min = state(i,j,k,n); + amrex::Real q_max = state(i,j,k,n); - // Compute max and min values in a 3x2 stencil - for(int jj(-1); jj<=0; jj++){ - for(int ii(-1); ii<=1; ii++){ - if ( flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0) ) { - if (state(i+ii,j+jj,k+kk,n) > q_max) q_max = state(i+ii,j+jj,k+kk,n); - if (state(i+ii,j+jj,k+kk,n) < q_min) q_min = state(i+ii,j+jj,k+kk,n); + // Compute max and min values in a 3x2 stencil + for(int jj(-1); jj<=0; jj++){ + for(int ii(-1); ii<=1; ii++){ + if ( flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0) ) { + if (state(i+ii,j+jj,k+kk,n) > q_max) { q_max = state(i+ii,j+jj,k+kk,n); } + if (state(i+ii,j+jj,k+kk,n) < q_min) { q_min = state(i+ii,j+jj,k+kk,n); } } - } - } + } + } - alpha = amrex_calc_alpha_stencil(q_hat, q_max, q_min, state(i,j,k,n), alpha); + alpha = amrex_calc_alpha_stencil(q_hat, q_max, q_min, state(i,j,k,n), alpha); } if(flag(i,j,k).isConnected(1,0,0)) { - amrex::Real yf = fcx(i+1,j,k,0); // local (y,z) of centroid of x-face we are extrapolating to + amrex::Real yf = fcx(i+1,j,k,0); // local (y,z) of centroid of x-face we are extrapolating to - amrex::Real delta_x = amrex::Real(0.5) - xc; - amrex::Real delta_y = yf - yc; + amrex::Real delta_x = amrex::Real(0.5) - xc; + amrex::Real delta_y = yf - yc; - amrex::Real q_hat = state(i,j,k,n) + delta_x * slopes[0] + delta_y * slopes[1]; + amrex::Real q_hat = state(i,j,k,n) + delta_x * slopes[0] + delta_y * slopes[1]; - amrex::Real q_min = state(i,j,k,n); - amrex::Real q_max = state(i,j,k,n); + amrex::Real q_min = state(i,j,k,n); + amrex::Real q_max = state(i,j,k,n); - for(int jj(-1); jj<=1; jj++){ - for(int ii(0); ii<=1; ii++){ - if ( flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0) ) { - if (state(i+ii,j+jj,k+kk,n) > q_max) q_max = state(i+ii,j+jj,k+kk,n); - if (state(i+ii,j+jj,k+kk,n) < q_min) q_min = state(i+ii,j+jj,k+kk,n); + for(int jj(-1); jj<=1; jj++){ + for(int ii(0); ii<=1; ii++){ + if ( flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0) ) { + if (state(i+ii,j+jj,k+kk,n) > q_max) { q_max = state(i+ii,j+jj,k+kk,n); } + if (state(i+ii,j+jj,k+kk,n) < q_min) { q_min = state(i+ii,j+jj,k+kk,n); } } - } - } + } + } - alpha = amrex_calc_alpha_stencil(q_hat, q_max, q_min, state(i,j,k,n), alpha); + alpha = amrex_calc_alpha_stencil(q_hat, q_max, q_min, state(i,j,k,n), alpha); } if(flag(i,j,k).isConnected(-1,0,0)) { - amrex::Real yf = fcx(i,j,k,0); // local (y,z) of centroid of x-face we are extrapolating to + amrex::Real yf = fcx(i,j,k,0); // local (y,z) of centroid of x-face we are extrapolating to - amrex::Real delta_x = amrex::Real(0.5) + xc; - amrex::Real delta_y = yf - yc; + amrex::Real delta_x = amrex::Real(0.5) + xc; + amrex::Real delta_y = yf - yc; - amrex::Real q_hat = state(i,j,k,n) - delta_x * slopes[0] + delta_y * slopes[1]; + amrex::Real q_hat = state(i,j,k,n) - delta_x * slopes[0] + delta_y * slopes[1]; - amrex::Real q_min = state(i,j,k,n); - amrex::Real q_max = state(i,j,k,n); + amrex::Real q_min = 
state(i,j,k,n); + amrex::Real q_max = state(i,j,k,n); - for(int jj(-1); jj<=1; jj++){ - for(int ii(-1); ii<=0; ii++){ - if( flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0)) { - if (state(i+ii,j+jj,k+kk,n) > q_max) q_max = state(i+ii,j+jj,k+kk,n); - if (state(i+ii,j+jj,k+kk,n) < q_min) q_min = state(i+ii,j+jj,k+kk,n); + for(int jj(-1); jj<=1; jj++){ + for(int ii(-1); ii<=0; ii++){ + if( flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0)) { + if (state(i+ii,j+jj,k+kk,n) > q_max) { q_max = state(i+ii,j+jj,k+kk,n); } + if (state(i+ii,j+jj,k+kk,n) < q_min) { q_min = state(i+ii,j+jj,k+kk,n); } } - } - } - alpha = amrex_calc_alpha_stencil(q_hat, q_max, q_min, state(i,j,k,n), alpha); + } + } + alpha = amrex_calc_alpha_stencil(q_hat, q_max, q_min, state(i,j,k,n), alpha); } amrex::Real xalpha = alpha; amrex::Real yalpha = alpha; //Zeroing out the slopes in the direction where a covered face exists. - if (cuts_x<2) xalpha = 0; - if (cuts_y<2) yalpha = 0; + if (cuts_x<2) { xalpha = 0; } + if (cuts_y<2) { yalpha = 0; } return {xalpha,yalpha}; } @@ -838,11 +834,13 @@ amrex_lim_slopes_eb (int i, int j, int k, int n, // Setting limiter to 1 for stencils that just consists of non-EB cells because // amrex_calc_slopes_eb routine will call the slope routine for non-EB stencils that has already a limiter - if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i-1,j,k) == 1. && vfrac(i+1,j,k) == 1.) - alpha_lim[0] = 1.0; + if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i-1,j,k) == 1. && vfrac(i+1,j,k) == 1.) { + alpha_lim[0] = 1.0; + } - if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i,j-1,k) == 1. && vfrac(i,j+1,k) == 1.) - alpha_lim[1] = 1.0; + if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i,j-1,k) == 1. && vfrac(i,j+1,k) == 1.) { + alpha_lim[1] = 1.0; + } return {alpha_lim[0]*slopes[0],alpha_lim[1]*slopes[1]}; } @@ -873,11 +871,13 @@ amrex_lim_slopes_extdir_eb (int i, int j, int k, int n, // Setting limiter to 1 for stencils that just consists of non-EB cells because // amrex_calc_slopes_extdir_eb routine will call the slope routine for non-EB stencils that has already a limiter - if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i-1,j,k) == 1. && vfrac(i+1,j,k) == 1.) + if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i-1,j,k) == 1. && vfrac(i+1,j,k) == 1.) { alpha_lim[0] = 1.0; + } - if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i,j-1,k) == 1. && vfrac(i,j+1,k) == 1.) - alpha_lim[1] = 1.0; + if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i,j-1,k) == 1. && vfrac(i,j+1,k) == 1.) 
{ + alpha_lim[1] = 1.0; + } return {alpha_lim[0]*slopes[0],alpha_lim[1]*slopes[1]}; } diff --git a/Src/EB/AMReX_EB_Slopes_3D_K.H b/Src/EB/AMReX_EB_Slopes_3D_K.H index a7e91c7b905..1bbd6ab2ce2 100644 --- a/Src/EB/AMReX_EB_Slopes_3D_K.H +++ b/Src/EB/AMReX_EB_Slopes_3D_K.H @@ -296,23 +296,23 @@ amrex_calc_slopes_eb (int i, int j, int k, int n, amrex::Real A[dim_a][AMREX_SPACEDIM]; int lc=0; - for(int kk(-1); kk<=1; kk++) - for(int jj(-1); jj<=1; jj++) - for(int ii(-1); ii<=1; ii++) - { + for(int kk(-1); kk<=1; kk++) { + for(int jj(-1); jj<=1; jj++) { + for(int ii(-1); ii<=1; ii++) + { + if (flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0 && kk==0)) + { + A[lc][0] = amrex::Real(ii) + ccent(i+ii,j+jj,k+kk,0) - ccent(i,j,k,0); + A[lc][1] = amrex::Real(jj) + ccent(i+ii,j+jj,k+kk,1) - ccent(i,j,k,1); + A[lc][2] = amrex::Real(kk) + ccent(i+ii,j+jj,k+kk,2) - ccent(i,j,k,2); + } else { + A[lc][0] = amrex::Real(0.0); + A[lc][1] = amrex::Real(0.0); + A[lc][2] = amrex::Real(0.0); + } + lc++; + }}} - if (flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0 && kk==0)) - { - A[lc][0] = amrex::Real(ii) + ccent(i+ii,j+jj,k+kk,0) - ccent(i,j,k,0); - A[lc][1] = amrex::Real(jj) + ccent(i+ii,j+jj,k+kk,1) - ccent(i,j,k,1); - A[lc][2] = amrex::Real(kk) + ccent(i+ii,j+jj,k+kk,2) - ccent(i,j,k,2); - } else { - A[lc][0] = amrex::Real(0.0); - A[lc][1] = amrex::Real(0.0); - A[lc][2] = amrex::Real(0.0); - } - lc++; - } // ii // // These slopes use the EB stencil without testing whether it is actually needed // @@ -355,29 +355,29 @@ amrex_calc_slopes_eb_grown (int i, int j, int k, int n, int nx, int ny, int nz, // Make sure to zero all the entries in A (since the loop below may not cover all 125) int lc=0; - for(int kk(-2); kk<=2; kk++) - for(int jj(-2); jj<=2; jj++) - for(int ii(-2); ii<=2; ii++) - { - A[lc][0] = amrex::Real(0.0); - A[lc][1] = amrex::Real(0.0); + for(int kk(-2); kk<=2; kk++) { + for(int jj(-2); jj<=2; jj++) { + for(int ii(-2); ii<=2; ii++) + { + A[lc][0] = amrex::Real(0.0); + A[lc][1] = amrex::Real(0.0); A[lc][2] = amrex::Real(0.0); lc++; - } + }}} lc=0; - for(int kk(-nz); kk<=nz; kk++) - for(int jj(-ny); jj<=ny; jj++) - for(int ii(-nx); ii<=nx; ii++) - { - if (!flag(i+ii,j+jj,k+kk).isCovered() && !(ii==0 && jj==0 && kk==0)) - { - A[lc][0] = amrex::Real(ii) + ccent(i+ii,j+jj,k+kk,0) - ccent(i,j,k,0); - A[lc][1] = amrex::Real(jj) + ccent(i+ii,j+jj,k+kk,1) - ccent(i,j,k,1); - A[lc][2] = amrex::Real(kk) + ccent(i+ii,j+jj,k+kk,2) - ccent(i,j,k,2); - } - lc++; - } // ii + for(int kk(-nz); kk<=nz; kk++) { + for(int jj(-ny); jj<=ny; jj++) { + for(int ii(-nx); ii<=nx; ii++) + { + if (!flag(i+ii,j+jj,k+kk).isCovered() && !(ii==0 && jj==0 && kk==0)) + { + A[lc][0] = amrex::Real(ii) + ccent(i+ii,j+jj,k+kk,0) - ccent(i,j,k,0); + A[lc][1] = amrex::Real(jj) + ccent(i+ii,j+jj,k+kk,1) - ccent(i,j,k,1); + A[lc][2] = amrex::Real(kk) + ccent(i+ii,j+jj,k+kk,2) - ccent(i,j,k,2); + } + lc++; + }}} // // These slopes use the EB stencil without testing whether it is actually needed // @@ -522,111 +522,109 @@ amrex_calc_slopes_extdir_eb (int i, int j, int k, int n, } else { - amrex::Real A[dim_a][AMREX_SPACEDIM]; - - int lc=0; - for(int kk(-1); kk<=1; kk++) - { - for(int jj(-1); jj<=1; jj++) - for(int ii(-1); ii<=1; ii++) - { - if (flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0 && kk==0)) - { - bool ilo_test = ( edlo_x && (i == domlo_x) && ii == -1); - bool ihi_test = ( edhi_x && (i == domhi_x) && ii == 1); - - bool jlo_test = ( edlo_y && (j == domlo_y) && jj == -1); - bool jhi_test = ( edhi_y && (j == domhi_y) && 
jj == 1); - - bool klo_test = ( edlo_z && (k == domlo_z) && kk == -1); - bool khi_test = ( edhi_z && (k == domhi_z) && kk == 1); + amrex::Real A[dim_a][AMREX_SPACEDIM]; - // These are the default values if no physical boundary - A[lc][0] = amrex::Real(ii) + ccent(i+ii,j+jj,k+kk,0); - A[lc][1] = amrex::Real(jj) + ccent(i+ii,j+jj,k+kk,1); - A[lc][2] = amrex::Real(kk) + ccent(i+ii,j+jj,k+kk,2); - // Do corrections for entire x-face - if (ilo_test) - { - if (!jlo_test && !jhi_test && !klo_test && !khi_test) - { - A[lc][1] = amrex::Real(jj) + fcx(i ,j+jj,k+kk,0); - A[lc][2] = amrex::Real(kk) + fcx(i ,j+jj,k+kk,1); - } - A[lc][0] = -amrex::Real(0.5); - } else if (ihi_test) { - - if (!jlo_test && !jhi_test && !klo_test && !khi_test) - { - A[lc][1] = amrex::Real(jj) + fcx(i+ii,j+jj,k+kk,0); - A[lc][2] = amrex::Real(kk) + fcx(i+ii,j+jj,k+kk,1); - } - A[lc][0] = amrex::Real(0.5); - } + int lc=0; + for(int kk(-1); kk<=1; kk++) { + for(int jj(-1); jj<=1; jj++) { + for(int ii(-1); ii<=1; ii++) + { + if (flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0 && kk==0)) + { + bool ilo_test = ( edlo_x && (i == domlo_x) && ii == -1); + bool ihi_test = ( edhi_x && (i == domhi_x) && ii == 1); + + bool jlo_test = ( edlo_y && (j == domlo_y) && jj == -1); + bool jhi_test = ( edhi_y && (j == domhi_y) && jj == 1); + + bool klo_test = ( edlo_z && (k == domlo_z) && kk == -1); + bool khi_test = ( edhi_z && (k == domhi_z) && kk == 1); + + // These are the default values if no physical boundary + A[lc][0] = amrex::Real(ii) + ccent(i+ii,j+jj,k+kk,0); + A[lc][1] = amrex::Real(jj) + ccent(i+ii,j+jj,k+kk,1); + A[lc][2] = amrex::Real(kk) + ccent(i+ii,j+jj,k+kk,2); + // Do corrections for entire x-face + if (ilo_test) + { + if (!jlo_test && !jhi_test && !klo_test && !khi_test) + { + A[lc][1] = amrex::Real(jj) + fcx(i ,j+jj,k+kk,0); + A[lc][2] = amrex::Real(kk) + fcx(i ,j+jj,k+kk,1); + } + A[lc][0] = -amrex::Real(0.5); + } else if (ihi_test) { - // Do corrections for entire y-face - if (jlo_test) - { - if (!ilo_test && !ihi_test && !klo_test && !khi_test) - { - A[lc][0] = amrex::Real(ii) + fcy(i+ii,j ,k+kk,0); - A[lc][2] = amrex::Real(kk) + fcy(i+ii,j ,k+kk,1); - } - A[lc][1] = -amrex::Real(0.5); + if (!jlo_test && !jhi_test && !klo_test && !khi_test) + { + A[lc][1] = amrex::Real(jj) + fcx(i+ii,j+jj,k+kk,0); + A[lc][2] = amrex::Real(kk) + fcx(i+ii,j+jj,k+kk,1); + } + A[lc][0] = amrex::Real(0.5); + } + + // Do corrections for entire y-face + if (jlo_test) + { + if (!ilo_test && !ihi_test && !klo_test && !khi_test) + { + A[lc][0] = amrex::Real(ii) + fcy(i+ii,j ,k+kk,0); + A[lc][2] = amrex::Real(kk) + fcy(i+ii,j ,k+kk,1); + } + A[lc][1] = -amrex::Real(0.5); - } else if (jhi_test) { + } else if (jhi_test) { - if (!ilo_test && !ihi_test && !klo_test && !khi_test) - { - A[lc][0] = amrex::Real(ii) + fcy(i+ii,j+jj,k+kk,0); - A[lc][2] = amrex::Real(kk) + fcy(i+ii,j+jj,k+kk,1); - } - A[lc][1] = amrex::Real(0.5); + if (!ilo_test && !ihi_test && !klo_test && !khi_test) + { + A[lc][0] = amrex::Real(ii) + fcy(i+ii,j+jj,k+kk,0); + A[lc][2] = amrex::Real(kk) + fcy(i+ii,j+jj,k+kk,1); } + A[lc][1] = amrex::Real(0.5); + } + + // Do corrections for entire z-face + if (klo_test) + { + if (!ilo_test && !ihi_test && !jlo_test && !jhi_test) + { + A[lc][0] = amrex::Real(ii) + fcz(i+ii,j+jj,k ,0); + A[lc][1] = amrex::Real(jj) + fcz(i+ii,j+jj,k ,1); + } + A[lc][2] = -amrex::Real(0.5); - // Do corrections for entire z-face - if (klo_test) - { - if (!ilo_test && !ihi_test && !jlo_test && !jhi_test) - { - A[lc][0] = amrex::Real(ii) + fcz(i+ii,j+jj,k ,0); 
- A[lc][1] = amrex::Real(jj) + fcz(i+ii,j+jj,k ,1); - } - A[lc][2] = -amrex::Real(0.5); - - } else if (khi_test) { - if (!ilo_test && !ihi_test && !jlo_test && !jhi_test) - { - A[lc][0] = amrex::Real(ii) + fcz(i+ii,j+jj,k+kk,0); - A[lc][1] = amrex::Real(jj) + fcz(i+ii,j+jj,k+kk,1); - } - A[lc][2] = amrex::Real(0.5); - } + } else if (khi_test) { + if (!ilo_test && !ihi_test && !jlo_test && !jhi_test) + { + A[lc][0] = amrex::Real(ii) + fcz(i+ii,j+jj,k+kk,0); + A[lc][1] = amrex::Real(jj) + fcz(i+ii,j+jj,k+kk,1); + } + A[lc][2] = amrex::Real(0.5); + } - A[lc][0] -= ccent(i,j,k,0); - A[lc][1] -= ccent(i,j,k,1); - A[lc][2] -= ccent(i,j,k,2); + A[lc][0] -= ccent(i,j,k,0); + A[lc][1] -= ccent(i,j,k,1); + A[lc][2] -= ccent(i,j,k,2); - } else { + } else { - A[lc][0] = amrex::Real(0.0); - A[lc][1] = amrex::Real(0.0); - A[lc][2] = amrex::Real(0.0); - } - lc++; - } // i,j - } // k + A[lc][0] = amrex::Real(0.0); + A[lc][1] = amrex::Real(0.0); + A[lc][2] = amrex::Real(0.0); + } + lc++; + }}} - const auto& slopes = amrex_calc_slopes_eb_given_A (i,j,k,n,A,state,flag); - xslope = slopes[0]; - yslope = slopes[1]; - zslope = slopes[2]; + const auto& slopes = amrex_calc_slopes_eb_given_A (i,j,k,n,A,state,flag); + xslope = slopes[0]; + yslope = slopes[1]; + zslope = slopes[2]; - // This will over-write the values of xslope and yslope if appropriate - amrex_overwrite_with_regular_slopes_extdir(i,j,k,n,xslope,yslope,zslope,state,vfrac, - edlo_x,edlo_y,edlo_z,edhi_x,edhi_y,edhi_z, - domlo_x,domlo_y,domlo_z,domhi_x,domhi_y,domhi_z, - max_order); + // This will over-write the values of xslope and yslope if appropriate + amrex_overwrite_with_regular_slopes_extdir(i,j,k,n,xslope,yslope,zslope,state,vfrac, + edlo_x,edlo_y,edlo_z,edhi_x,edhi_y,edhi_z, + domlo_x,domlo_y,domlo_z,domhi_x,domhi_y,domhi_z, + max_order); } // end of needs_bndry_stencil @@ -696,7 +694,7 @@ amrex_calc_slopes_extdir_eb_grown (int i, int j, int k, int n, int lc=0; for(int kk(-nz); kk<=nz; kk++) { - for(int jj(-ny); jj<=ny; jj++) + for(int jj(-ny); jj<=ny; jj++) { for(int ii(-nx); ii<=nx; ii++) { if (!flag(i+ii,j+jj,k+kk).isCovered() && !(ii==0 && jj==0 && kk==0)) @@ -783,6 +781,7 @@ amrex_calc_slopes_extdir_eb_grown (int i, int j, int k, int n, } lc++; } // i,j +} } // k const auto& slopes = amrex_calc_slopes_eb_given_A_grown (i,j,k,n,nx,ny,nz,A,state,flag); @@ -852,9 +851,9 @@ amrex_calc_alpha_limiter(int i, int j, int k, int n, for(int ii(-1); ii<=1; ii++){ if (flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0 && kk==0)) { - if ((ii==-1 || ii==1) && jj==0 && kk==0) cuts_x++; - if ((jj==-1 || jj==1) && ii==0 && kk==0) cuts_y++; - if ((kk==-1 || kk==1) && ii==0 && jj==0) cuts_z++; + if ((ii==-1 || ii==1) && jj==0 && kk==0) { cuts_x++; } + if ((jj==-1 || jj==1) && ii==0 && kk==0) { cuts_y++; } + if ((kk==-1 || kk==1) && ii==0 && jj==0) { cuts_z++; } } } } @@ -885,8 +884,8 @@ amrex_calc_alpha_limiter(int i, int j, int k, int n, for(int jj(0); jj<=1; jj++){ for(int ii(-1); ii<=1; ii++){ if (flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0 && kk==0)) { - if (state(i+ii,j+jj,k+kk,n) > q_max) q_max = state(i+ii,j+jj,k+kk,n); - if (state(i+ii,j+jj,k+kk,n) < q_min) q_min = state(i+ii,j+jj,k+kk,n); + if (state(i+ii,j+jj,k+kk,n) > q_max) { q_max = state(i+ii,j+jj,k+kk,n); } + if (state(i+ii,j+jj,k+kk,n) < q_min) { q_min = state(i+ii,j+jj,k+kk,n); } } } } @@ -913,8 +912,8 @@ amrex_calc_alpha_limiter(int i, int j, int k, int n, for(int jj(-1); jj<=0; jj++){ for(int ii(-1); ii<=1; ii++){ if (flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0 && 
kk==0)) { - if (state(i+ii,j+jj,k+kk,n) > q_max) q_max = state(i+ii,j+jj,k+kk,n); - if (state(i+ii,j+jj,k+kk,n) < q_min) q_min = state(i+ii,j+jj,k+kk,n); + if (state(i+ii,j+jj,k+kk,n) > q_max) { q_max = state(i+ii,j+jj,k+kk,n); } + if (state(i+ii,j+jj,k+kk,n) < q_min) { q_min = state(i+ii,j+jj,k+kk,n); } } } } @@ -941,8 +940,8 @@ amrex_calc_alpha_limiter(int i, int j, int k, int n, for(int jj(-1); jj<=1; jj++){ for(int ii(0); ii<=1; ii++){ if (flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0 && kk==0)) { - if (state(i+ii,j+jj,k+kk,n) > q_max) q_max = state(i+ii,j+jj,k+kk,n); - if (state(i+ii,j+jj,k+kk,n) < q_min) q_min = state(i+ii,j+jj,k+kk,n); + if (state(i+ii,j+jj,k+kk,n) > q_max) { q_max = state(i+ii,j+jj,k+kk,n); } + if (state(i+ii,j+jj,k+kk,n) < q_min) { q_min = state(i+ii,j+jj,k+kk,n); } } } } @@ -969,8 +968,8 @@ amrex_calc_alpha_limiter(int i, int j, int k, int n, for(int jj(-1); jj<=1; jj++){ for(int ii(-1); ii<=0; ii++){ if (flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0 && kk==0)) { - if (state(i+ii,j+jj,k+kk,n) > q_max) q_max = state(i+ii,j+jj,k+kk,n); - if (state(i+ii,j+jj,k+kk,n) < q_min) q_min = state(i+ii,j+jj,k+kk,n); + if (state(i+ii,j+jj,k+kk,n) > q_max) { q_max = state(i+ii,j+jj,k+kk,n); } + if (state(i+ii,j+jj,k+kk,n) < q_min) { q_min = state(i+ii,j+jj,k+kk,n); } } } } @@ -996,8 +995,8 @@ amrex_calc_alpha_limiter(int i, int j, int k, int n, for(int jj(-1); jj<=1; jj++){ for(int ii(-1); ii<=1; ii++){ if (flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0 && kk==0)) { - if (state(i+ii,j+jj,k+kk,n) > q_max) q_max = state(i+ii,j+jj,k+kk,n); - if (state(i+ii,j+jj,k+kk,n) < q_min) q_min = state(i+ii,j+jj,k+kk,n); + if (state(i+ii,j+jj,k+kk,n) > q_max) { q_max = state(i+ii,j+jj,k+kk,n); } + if (state(i+ii,j+jj,k+kk,n) < q_min) { q_min = state(i+ii,j+jj,k+kk,n); } } } } @@ -1023,8 +1022,8 @@ amrex_calc_alpha_limiter(int i, int j, int k, int n, for(int jj(-1); jj<=1; jj++){ for(int ii(-1); ii<=1; ii++){ if (flag(i,j,k).isConnected(ii,jj,kk) && !(ii==0 && jj==0 && kk==0)) { - if (state(i+ii,j+jj,k+kk,n) > q_max) q_max = state(i+ii,j+jj,k+kk,n); - if (state(i+ii,j+jj,k+kk,n) < q_min) q_min = state(i+ii,j+jj,k+kk,n); + if (state(i+ii,j+jj,k+kk,n) > q_max) { q_max = state(i+ii,j+jj,k+kk,n); } + if (state(i+ii,j+jj,k+kk,n) < q_min) { q_min = state(i+ii,j+jj,k+kk,n); } } } } @@ -1037,9 +1036,9 @@ amrex_calc_alpha_limiter(int i, int j, int k, int n, amrex::Real zalpha = alpha; //Zeroing out the slopes in the direction where a covered face exists. - if (cuts_x<2) xalpha = 0; - if (cuts_y<2) yalpha = 0; - if (cuts_z<2) zalpha = 0; + if (cuts_x<2) { xalpha = 0; } + if (cuts_y<2) { yalpha = 0; } + if (cuts_z<2) { zalpha = 0; } return {xalpha,yalpha,zalpha}; } @@ -1068,14 +1067,17 @@ amrex_lim_slopes_eb (int i, int j, int k, int n, // Setting limiter to 1 for stencils that just consists of non-EB cells because // amrex_calc_slopes_eb routine will call the slope routine for non-EB stencils that has already a limiter - if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i-1,j,k) == 1. && vfrac(i+1,j,k) == 1.) - alpha_lim[0] = 1.0; + if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i-1,j,k) == 1. && vfrac(i+1,j,k) == 1.) { + alpha_lim[0] = 1.0; + } - if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i,j-1,k) == 1. && vfrac(i,j+1,k) == 1.) - alpha_lim[1] = 1.0; + if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i,j-1,k) == 1. && vfrac(i,j+1,k) == 1.) { + alpha_lim[1] = 1.0; + } - if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i,j,k-1) == 1. && vfrac(i,j,k+1) == 1.) 
- alpha_lim[2] = 1.0; + if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i,j,k-1) == 1. && vfrac(i,j,k+1) == 1.) { + alpha_lim[2] = 1.0; + } return {alpha_lim[0]*slopes[0],alpha_lim[1]*slopes[1],alpha_lim[2]*slopes[2]}; } @@ -1109,14 +1111,17 @@ amrex_lim_slopes_extdir_eb (int i, int j, int k, int n, // Setting limiter to 1 for stencils that just consists of non-EB cells because // amrex_calc_slopes_extdir_eb routine will call the slope routine for non-EB stencils that has already a limiter - if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i-1,j,k) == 1. && vfrac(i+1,j,k) == 1.) + if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i-1,j,k) == 1. && vfrac(i+1,j,k) == 1.) { alpha_lim[0] = 1.0; + } - if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i,j-1,k) == 1. && vfrac(i,j+1,k) == 1.) - alpha_lim[1] = 1.0; + if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i,j-1,k) == 1. && vfrac(i,j+1,k) == 1.) { + alpha_lim[1] = 1.0; + } - if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i,j,k-1) == 1. && vfrac(i,j,k+1) == 1.) - alpha_lim[2] = 1.0; + if ( max_order > 0 && vfrac(i,j,k) == 1. && vfrac(i,j,k-1) == 1. && vfrac(i,j,k+1) == 1.) { + alpha_lim[2] = 1.0; + } return {alpha_lim[0]*slopes[0],alpha_lim[1]*slopes[1],alpha_lim[2]*slopes[2]}; } diff --git a/Src/EB/AMReX_EB_StateRedistItracker.cpp b/Src/EB/AMReX_EB_StateRedistItracker.cpp index 50c2360161b..538a917c72a 100644 --- a/Src/EB/AMReX_EB_StateRedistItracker.cpp +++ b/Src/EB/AMReX_EB_StateRedistItracker.cpp @@ -53,8 +53,8 @@ MakeITracker ( Box const& bx, }); Box domain_per_grown = domain; - if (is_periodic_x) domain_per_grown.grow(0,4); - if (is_periodic_y) domain_per_grown.grow(1,4); + if (is_periodic_x) { domain_per_grown.grow(0,4); } + if (is_periodic_y) { domain_per_grown.grow(1,4); } Box const& bxg4 = amrex::grow(bx,4); Box bx_per_g4= domain_per_grown & bxg4; @@ -78,16 +78,18 @@ MakeITracker ( Box const& bx, // As a first pass, choose just based on the normal if (std::abs(nx) > std::abs(ny)) { - if (nx > 0) + if (nx > 0) { itracker(i,j,k,1) = 5; - else + } else { itracker(i,j,k,1) = 4; + } } else { - if (ny > 0) + if (ny > 0) { itracker(i,j,k,1) = 7; - else + } else { itracker(i,j,k,1) = 2; + } } bool xdir_mns_ok = (is_periodic_x || (i > domain.smallEnd(0))); @@ -115,8 +117,9 @@ MakeITracker ( Box const& bx, int joff = jmap[itracker(i,j,k,1)]; // Sanity check - if (vfrac(i+ioff,j+joff,k) == 0.) + if (vfrac(i+ioff,j+joff,k) == 0.) 
{ amrex::Abort(" Trying to merge with covered cell"); + } Real sum_vol = vfrac(i,j,k) + vfrac(i+ioff,j+joff,k); @@ -184,14 +187,15 @@ MakeITracker ( Box const& bx, ioff = imap[itracker(i,j,k,1)] + imap[itracker(i,j,k,2)]; joff = jmap[itracker(i,j,k,1)] + jmap[itracker(i,j,k,2)]; - if (ioff > 0 && joff > 0) + if (ioff > 0 && joff > 0) { itracker(i,j,k,3) = 8; - else if (ioff < 0 && joff > 0) + } else if (ioff < 0 && joff > 0) { itracker(i,j,k,3) = 6; - else if (ioff > 0 && joff < 0) + } else if (ioff > 0 && joff < 0) { itracker(i,j,k,3) = 3; - else + } else { itracker(i,j,k,3) = 1; + } // (i,j) merges with at least three cells now itracker(i,j,k,0) += 1; @@ -270,10 +274,10 @@ MakeITracker ( Box const& bx, }); Box domain_per_grown = domain; - if (is_periodic_x) domain_per_grown.grow(0,4); - if (is_periodic_y) domain_per_grown.grow(1,4); + if (is_periodic_x) { domain_per_grown.grow(0,4); } + if (is_periodic_y) { domain_per_grown.grow(1,4); } #if (AMREX_SPACEDIM == 3) - if (is_periodic_z) domain_per_grown.grow(2,4); + if (is_periodic_z) { domain_per_grown.grow(2,4); } #endif Box const& bxg4 = amrex::grow(bx,4); @@ -312,54 +316,59 @@ MakeITracker ( Box const& bx, if ( (std::abs(nx) > std::abs(ny)) && (std::abs(nx) > std::abs(nz)) ) { - if (nx > 0) + if (nx > 0) { itracker(i,j,k,1) = 5; - else + } else { itracker(i,j,k,1) = 4; + } // y-component of normal is greatest } else if ( (std::abs(ny) >= std::abs(nx)) && (std::abs(ny) > std::abs(nz)) ) { - if (ny > 0) + if (ny > 0) { itracker(i,j,k,1) = 7; - else - + } else { itracker(i,j,k,1) = 2; + } // z-component of normal is greatest } else { - if (nz > 0) + if (nz > 0) { itracker(i,j,k,1) = 22; - else + } else { itracker(i,j,k,1) = 13; + } } // Override above logic if trying to reach outside a domain boundary (and non-periodic) if ( (!xdir_mns_ok && (itracker(i,j,k,1) == 4)) || (!xdir_pls_ok && (itracker(i,j,k,1) == 5)) ) { - if ( (std::abs(ny) > std::abs(nz)) ) + if ( (std::abs(ny) > std::abs(nz)) ) { itracker(i,j,k,1) = (ny > 0) ? 7 : 2; - else + } else { itracker(i,j,k,1) = (nz > 0) ? 22 : 13; + } } if ( (!ydir_mns_ok && (itracker(i,j,k,1) == 2)) || (!ydir_pls_ok && (itracker(i,j,k,1) == 7)) ) { - if ( (std::abs(nx) > std::abs(nz)) ) + if ( (std::abs(nx) > std::abs(nz)) ) { itracker(i,j,k,1) = (nx > 0) ? 5 : 4; - else + } else { itracker(i,j,k,1) = (nz > 0) ? 22 : 13; + } } if ( (!zdir_mns_ok && (itracker(i,j,k,1) == 13)) || (!zdir_pls_ok && (itracker(i,j,k,1) == 22)) ) { - if ( (std::abs(nx) > std::abs(ny)) ) + if ( (std::abs(nx) > std::abs(ny)) ) { itracker(i,j,k,1) = (nx > 0) ? 5 : 4; - else + } else { itracker(i,j,k,1) = (ny > 0) ? 
7 : 2; + } } // (i,j,k) merges with at least one cell now @@ -466,36 +475,39 @@ MakeITracker ( Box const& bx, // Both nbors are in the koff=0 plane if (koff == 0) { - if (ioff > 0 && joff > 0) + if (ioff > 0 && joff > 0) { itracker(i,j,k,3) = 8; - else if (ioff < 0 && joff > 0) + } else if (ioff < 0 && joff > 0) { itracker(i,j,k,3) = 6; - else if (ioff > 0 && joff < 0) + } else if (ioff > 0 && joff < 0) { itracker(i,j,k,3) = 3; - else + } else { itracker(i,j,k,3) = 1; + } // Both nbors are in the joff=0 plane } else if (joff == 0) { - if (ioff > 0 && koff > 0) + if (ioff > 0 && koff > 0) { itracker(i,j,k,3) = 23; - else if (ioff < 0 && koff > 0) + } else if (ioff < 0 && koff > 0) { itracker(i,j,k,3) = 21; - else if (ioff > 0 && koff < 0) + } else if (ioff > 0 && koff < 0) { itracker(i,j,k,3) = 14; - else + } else { itracker(i,j,k,3) = 12; + } // Both nbors are in the ioff=0 plane } else { - if (joff > 0 && koff > 0) + if (joff > 0 && koff > 0) { itracker(i,j,k,3) = 25; - else if (joff < 0 && koff > 0) + } else if (joff < 0 && koff > 0) { itracker(i,j,k,3) = 19; - else if (joff > 0 && koff < 0) + } else if (joff > 0 && koff < 0) { itracker(i,j,k,3) = 16; - else + } else { itracker(i,j,k,3) = 10; + } } // (i,j,k) merges with at least three cells now @@ -539,14 +551,16 @@ MakeITracker ( Box const& bx, { itracker(i,j,k,4) = 22; - if (ioff > 0) + if (ioff > 0) { itracker(i,j,k,5) = 23; - else + } else { itracker(i,j,k,5) = 21; - if (joff > 0) + } + if (joff > 0) { itracker(i,j,k,6) = 25; - else + } else { itracker(i,j,k,6) = 19; + } if (ioff > 0 && joff > 0) { itracker(i,j,k,7) = 26; @@ -561,14 +575,16 @@ MakeITracker ( Box const& bx, itracker(i,j,k,4) = 13; - if (ioff > 0) + if (ioff > 0) { itracker(i,j,k,5) = 14; - else + } else { itracker(i,j,k,5) = 12; - if (joff > 0) + } + if (joff > 0) { itracker(i,j,k,6) = 16; - else + } else { itracker(i,j,k,6) = 10; + } if (ioff > 0 && joff > 0) { itracker(i,j,k,7) = 17; @@ -585,14 +601,16 @@ MakeITracker ( Box const& bx, { itracker(i,j,k,4) = 7; - if (ioff > 0) + if (ioff > 0) { itracker(i,j,k,5) = 8; - else + } else { itracker(i,j,k,5) = 6; - if (koff > 0) + } + if (koff > 0) { itracker(i,j,k,6) = 25; - else + } else { itracker(i,j,k,6) = 16; + } if (ioff > 0 && koff > 0) { itracker(i,j,k,7) = 26; @@ -608,14 +626,16 @@ MakeITracker ( Box const& bx, itracker(i,j,k,4) = 2; - if (ioff > 0) + if (ioff > 0) { itracker(i,j,k,5) = 3; - else + } else { itracker(i,j,k,5) = 1; - if (koff > 0) + } + if (koff > 0) { itracker(i,j,k,6) = 19; - else + } else { itracker(i,j,k,6) = 10; + } if (ioff > 0 && koff > 0) { itracker(i,j,k,7) = 20; @@ -633,14 +653,16 @@ MakeITracker ( Box const& bx, { itracker(i,j,k,4) = 5; - if (joff > 0) + if (joff > 0) { itracker(i,j,k,5) = 8; - else + } else { itracker(i,j,k,5) = 3; - if (koff > 0) + } + if (koff > 0) { itracker(i,j,k,6) = 23; - else + } else { itracker(i,j,k,6) = 14; + } if (joff > 0 && koff > 0) { itracker(i,j,k,7) = 26; @@ -655,14 +677,16 @@ MakeITracker ( Box const& bx, itracker(i,j,k,4) = 4; - if (joff > 0) + if (joff > 0) { itracker(i,j,k,5) = 6; - else + } else { itracker(i,j,k,5) = 1; - if (koff > 0) + } + if (koff > 0) { itracker(i,j,k,6) = 21; - else + } else { itracker(i,j,k,6) = 12; + } if (joff > 0 && koff > 0) { itracker(i,j,k,7) = 24; diff --git a/Src/EB/AMReX_EB_StateRedistSlopeLimiter_K.H b/Src/EB/AMReX_EB_StateRedistSlopeLimiter_K.H index 7ce6c5cdd0a..90ce4a07016 100644 --- a/Src/EB/AMReX_EB_StateRedistSlopeLimiter_K.H +++ b/Src/EB/AMReX_EB_StateRedistSlopeLimiter_K.H @@ -74,10 +74,10 @@ 
amrex_calc_centroid_limiter(int i, int j, int k, int n, { Real new_lim = amrex_calc_alpha_stencil(q_hat, q_max, q_min, state(i,j,k,n)); - if (amrex::Math::abs(delta_x) > epsilon) xalpha = amrex::min(xalpha,new_lim); - if (amrex::Math::abs(delta_y) > epsilon) yalpha = amrex::min(yalpha,new_lim); + if (amrex::Math::abs(delta_x) > epsilon) { xalpha = amrex::min(xalpha,new_lim); } + if (amrex::Math::abs(delta_y) > epsilon) { yalpha = amrex::min(yalpha,new_lim); } #if (AMREX_SPACEDIM == 3) - if (amrex::Math::abs(delta_z) > epsilon) zalpha = amrex::min(zalpha,new_lim); + if (amrex::Math::abs(delta_z) > epsilon) { zalpha = amrex::min(zalpha,new_lim); } #endif } } diff --git a/Src/EB/AMReX_EB_StateRedistUtils.cpp b/Src/EB/AMReX_EB_StateRedistUtils.cpp index 80539761516..6c819beb860 100644 --- a/Src/EB/AMReX_EB_StateRedistUtils.cpp +++ b/Src/EB/AMReX_EB_StateRedistUtils.cpp @@ -62,10 +62,10 @@ MakeStateRedistUtils ( Box const& bx, const Box domain = lev_geom.Domain(); Box domain_per_grown = domain; - if (is_periodic_x) domain_per_grown.grow(0,2); - if (is_periodic_y) domain_per_grown.grow(1,2); + if (is_periodic_x) { domain_per_grown.grow(0,2); } + if (is_periodic_y) { domain_per_grown.grow(1,2); } #if (AMREX_SPACEDIM == 3) - if (is_periodic_z) domain_per_grown.grow(2,2); + if (is_periodic_z) { domain_per_grown.grow(2,2); } #endif amrex::ParallelFor(bxg3, @@ -114,8 +114,9 @@ MakeStateRedistUtils ( Box const& bx, vol_of_nbors += vfrac(r,s,t); } - if (itracker(i,j,k,0) > 0) + if (itracker(i,j,k,0) > 0) { alpha(i,j,k,1) = (target_vol - vfrac(i,j,k)) / vol_of_nbors; + } } else { nbhd_vol(i,j,k) = 0.; diff --git a/Src/EB/AMReX_EB_StateRedistribute.cpp b/Src/EB/AMReX_EB_StateRedistribute.cpp index af6c255bf63..023c8fe0716 100644 --- a/Src/EB/AMReX_EB_StateRedistribute.cpp +++ b/Src/EB/AMReX_EB_StateRedistribute.cpp @@ -78,10 +78,10 @@ StateRedistribute ( Box const& bx, int ncomp, Box const& bxg3 = amrex::grow(bx,3); Box domain_per_grown = domain; - if (is_periodic_x) domain_per_grown.grow(0,2); - if (is_periodic_y) domain_per_grown.grow(1,2); + if (is_periodic_x) { domain_per_grown.grow(0,2); } + if (is_periodic_y) { domain_per_grown.grow(1,2); } #if (AMREX_SPACEDIM == 3) - if (is_periodic_z) domain_per_grown.grow(2,2); + if (is_periodic_z) { domain_per_grown.grow(2,2); } #endif // Solution at the centroid of my nbhd @@ -96,15 +96,17 @@ StateRedistribute ( Box const& bx, int ncomp, amrex::ParallelFor(bxg3, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept { - for (int n = 0; n < ncomp; n++) + for (int n = 0; n < ncomp; n++) { soln_hat(i,j,k,n) = U_in(i,j,k,n); + } if (vfrac(i,j,k) > 0.0 && bxg2.contains(IntVect(AMREX_D_DECL(i,j,k))) && domain_per_grown.contains(IntVect(AMREX_D_DECL(i,j,k)))) { // Start with U_in(i,j,k) itself - for (int n = 0; n < ncomp; n++) + for (int n = 0; n < ncomp; n++) { soln_hat(i,j,k,n) = U_in(i,j,k,n) * alpha(i,j,k,0) * vfrac(i,j,k); + } // This loops over the neighbors of (i,j,k), and doesn't include (i,j,k) itself for (int i_nbor = 1; i_nbor <= itracker(i,j,k,0); i_nbor++) @@ -115,12 +117,14 @@ StateRedistribute ( Box const& bx, int ncomp, if (domain_per_grown.contains(IntVect(AMREX_D_DECL(r,s,t)))) { - for (int n = 0; n < ncomp; n++) + for (int n = 0; n < ncomp; n++) { soln_hat(i,j,k,n) += U_in(r,s,t,n) * alpha(i,j,k,1) * vfrac(r,s,t) / nrs(r,s,t); + } } } - for (int n = 0; n < ncomp; n++) + for (int n = 0; n < ncomp; n++) { soln_hat(i,j,k,n) /= nbhd_vol(i,j,k); + } } }); @@ -135,8 +139,9 @@ StateRedistribute ( Box const& bx, int ncomp, { if 
(bx.contains(IntVect(AMREX_D_DECL(i,j,k)))) { - for (int n = 0; n < ncomp; n++) + for (int n = 0; n < ncomp; n++) { amrex::Gpu::Atomic::Add(&U_out(i,j,k,n),alpha(i,j,k,0)*nrs(i,j,k)*soln_hat(i,j,k,n)); + } } } else { @@ -170,11 +175,10 @@ StateRedistribute ( Box const& bx, int ncomp, #if (AMREX_SPACEDIM == 2) int kk = 0; #elif (AMREX_SPACEDIM == 3) - for(int kk(-1); kk<=1; kk++) + for(int kk(-1); kk<=1; kk++) { #endif - { - for(int jj(-1); jj<=1; jj++) - for(int ii(-1); ii<=1; ii++) + for(int jj(-1); jj<=1; jj++) { + for(int ii(-1); ii<=1; ii++) { if (flag(i,j,k).isConnected(ii,jj,kk)) { int r = i+ii; int s = j+jj; int t = k+kk; @@ -188,13 +192,13 @@ StateRedistribute ( Box const& bx, int ncomp, z_min = amrex::min(z_min, cent_hat(r,s,t,2)+static_cast(kk)); #endif } - } + AMREX_D_TERM(},},}) // If we need to grow the stencil, we let it be -nx:nx in the x-direction, // for example. Note that nx,ny,nz are either 1 or 2 - if ( (x_max-x_min) < slope_stencil_min_width ) nx = 2; - if ( (y_max-y_min) < slope_stencil_min_width ) ny = 2; + if ( (x_max-x_min) < slope_stencil_min_width ) { nx = 2; } + if ( (y_max-y_min) < slope_stencil_min_width ) { ny = 2; } #if (AMREX_SPACEDIM == 3) - if ( (z_max-z_min) < slope_stencil_min_width ) nz = 2; + if ( (z_max-z_min) < slope_stencil_min_width ) { nz = 2; } #endif amrex::GpuArray slopes_eb; diff --git a/Src/EB/AMReX_EB_chkpt_file.cpp b/Src/EB/AMReX_EB_chkpt_file.cpp index 3521a47d638..1acb6b4764d 100644 --- a/Src/EB/AMReX_EB_chkpt_file.cpp +++ b/Src/EB/AMReX_EB_chkpt_file.cpp @@ -40,8 +40,9 @@ ChkptFile::writeHeader (const BoxArray& cut_ba, const BoxArray& covered_ba, std::ofstream::trunc | std::ofstream::binary); - if ( ! HeaderFile.good() ) + if ( ! HeaderFile.good() ) { FileOpenFailed(HeaderFileName); + } HeaderFile.precision(17); @@ -51,17 +52,20 @@ ChkptFile::writeHeader (const BoxArray& cut_ba, const BoxArray& covered_ba, HeaderFile << nlevels << "\n"; // Geometry - for (int i = 0; i < AMREX_SPACEDIM; ++i) + for (int i = 0; i < AMREX_SPACEDIM; ++i) { HeaderFile << geom.ProbLo(i) << ' '; + } HeaderFile << '\n'; - for (int i = 0; i < AMREX_SPACEDIM; ++i) + for (int i = 0; i < AMREX_SPACEDIM; ++i) { HeaderFile << geom.ProbHi(i) << ' '; + } HeaderFile << '\n'; // ngrow - for (int i = 0; i < AMREX_SPACEDIM; ++i) + for (int i = 0; i < AMREX_SPACEDIM; ++i) { HeaderFile << ngrow[i] << ' '; + } HeaderFile << '\n'; // extend domain face @@ -117,7 +121,7 @@ ChkptFile::read_from_chkpt_file (BoxArray& cut_grids, BoxArray& covered_grids, std::string File(m_restart_file + "/Header"); - if (amrex::Verbose()) amrex::Print() << "file=" << File << std::endl; + if (amrex::Verbose()) { amrex::Print() << "file=" << File << std::endl; } VisMF::IO_Buffer io_buffer(VisMF::GetIOBufferSize()); @@ -191,12 +195,12 @@ ChkptFile::read_from_chkpt_file (BoxArray& cut_grids, BoxArray& covered_grids, AMREX_ALWAYS_ASSERT_WITH_MESSAGE(max_grid_size == mgs_chkptfile, "EB2::ChkptFile cannot read from different max_grid_size"); - if (amrex::Verbose()) amrex::Print() << "Loading cut_grids\n"; + if (amrex::Verbose()) { amrex::Print() << "Loading cut_grids\n"; } cut_grids.readFrom(is); gotoNextLine(is); if (is.peek() != EOF) { - if (amrex::Verbose()) amrex::Print() << "Loading covered_grids\n"; + if (amrex::Verbose()) { amrex::Print() << "Loading covered_grids\n"; } covered_grids.readFrom(is); gotoNextLine(is); } @@ -205,7 +209,7 @@ ChkptFile::read_from_chkpt_file (BoxArray& cut_grids, BoxArray& covered_grids, // volfrac { - if (amrex::Verbose()) amrex::Print() << " Loading " << m_volfrac_name 
<< std::endl; + if (amrex::Verbose()) { amrex::Print() << " Loading " << m_volfrac_name << std::endl; } volfrac.define(cut_grids, dmap, 1, ng_gfab); @@ -215,7 +219,7 @@ ChkptFile::read_from_chkpt_file (BoxArray& cut_grids, BoxArray& covered_grids, // centroid { - if (amrex::Verbose()) amrex::Print() << " Loading " << m_centroid_name << std::endl; + if (amrex::Verbose()) { amrex::Print() << " Loading " << m_centroid_name << std::endl; } centroid.define(cut_grids, dmap, AMREX_SPACEDIM, ng_gfab); @@ -225,7 +229,7 @@ ChkptFile::read_from_chkpt_file (BoxArray& cut_grids, BoxArray& covered_grids, // bndryarea { - if (amrex::Verbose()) amrex::Print() << " Loading " << m_bndryarea_name << std::endl; + if (amrex::Verbose()) { amrex::Print() << " Loading " << m_bndryarea_name << std::endl; } bndryarea.define(cut_grids, dmap, 1, ng_gfab); @@ -235,7 +239,7 @@ ChkptFile::read_from_chkpt_file (BoxArray& cut_grids, BoxArray& covered_grids, // bndrycent { - if (amrex::Verbose()) amrex::Print() << " Loading " << m_bndrycent_name << std::endl; + if (amrex::Verbose()) { amrex::Print() << " Loading " << m_bndrycent_name << std::endl; } bndrycent.define(cut_grids, dmap, AMREX_SPACEDIM, ng_gfab); @@ -245,7 +249,7 @@ ChkptFile::read_from_chkpt_file (BoxArray& cut_grids, BoxArray& covered_grids, // bndrynorm { - if (amrex::Verbose()) amrex::Print() << " Loading " << m_bndrynorm_name << std::endl; + if (amrex::Verbose()) { amrex::Print() << " Loading " << m_bndrynorm_name << std::endl; } bndrynorm.define(cut_grids, dmap, AMREX_SPACEDIM, ng_gfab); @@ -256,7 +260,7 @@ ChkptFile::read_from_chkpt_file (BoxArray& cut_grids, BoxArray& covered_grids, for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { // areafrac { - if (amrex::Verbose()) amrex::Print() << " Loading " << m_areafrac_name[idim] << std::endl; + if (amrex::Verbose()) { amrex::Print() << " Loading " << m_areafrac_name[idim] << std::endl; } areafrac[idim].define(convert(cut_grids, IntVect::TheDimensionVector(idim)), dmap, 1, ng_gfab); @@ -266,7 +270,7 @@ ChkptFile::read_from_chkpt_file (BoxArray& cut_grids, BoxArray& covered_grids, // facecent { - if (amrex::Verbose()) amrex::Print() << " Loading " << m_facecent_name[idim] << std::endl; + if (amrex::Verbose()) { amrex::Print() << " Loading " << m_facecent_name[idim] << std::endl; } facecent[idim].define(convert(cut_grids, IntVect::TheDimensionVector(idim)), dmap, AMREX_SPACEDIM-1, ng_gfab); @@ -276,7 +280,7 @@ ChkptFile::read_from_chkpt_file (BoxArray& cut_grids, BoxArray& covered_grids, // edgecent { - if (amrex::Verbose()) amrex::Print() << " Loading " << m_edgecent_name[idim] << std::endl; + if (amrex::Verbose()) { amrex::Print() << " Loading " << m_edgecent_name[idim] << std::endl; } IntVect edge_type{1}; edge_type[idim] = 0; edgecent[idim].define(convert(cut_grids, edge_type), dmap, 1, ng_gfab); @@ -288,7 +292,7 @@ ChkptFile::read_from_chkpt_file (BoxArray& cut_grids, BoxArray& covered_grids, // levelset { - if (amrex::Verbose()) amrex::Print() << " Loading " << m_levelset_name << std::endl; + if (amrex::Verbose()) { amrex::Print() << " Loading " << m_levelset_name << std::endl; } levelset.define(convert(cut_grids,IntVect::TheNodeVector()), dmap, 1, ng_gfab); diff --git a/Src/EB/AMReX_EB_utils.cpp b/Src/EB/AMReX_EB_utils.cpp index ff8c16f5d76..948a3d5db20 100644 --- a/Src/EB/AMReX_EB_utils.cpp +++ b/Src/EB/AMReX_EB_utils.cpp @@ -139,19 +139,19 @@ facets_nearest_pt (IntVect const& ind_pt, IntVect const& ind_loop, RealVect cons if ( std::abs(edge_v[0]) > eps ) { cx_lo = -( edge_p0[0] - static_cast( 
ind_loop[0] ) * dx[0] ) / edge_v[0]; cx_hi = -( edge_p0[0] - static_cast( ind_loop[0] + 1 ) * dx[0] ) / edge_v[0]; - if ( edge_v[0] < 0._rt ) amrex::Swap(cx_lo, cx_hi); + if ( edge_v[0] < 0._rt ) { amrex::Swap(cx_lo, cx_hi); } } // if ( std::abs(edge_v[1]) > eps ) { cy_lo = -( edge_p0[1] - static_cast( ind_loop[1] ) * dx[1] ) / edge_v[1]; cy_hi = -( edge_p0[1] - static_cast( ind_loop[1] + 1 ) * dx[1] ) / edge_v[1]; - if ( edge_v[1] < 0._rt ) amrex::Swap(cy_lo, cy_hi); + if ( edge_v[1] < 0._rt ) { amrex::Swap(cy_lo, cy_hi); } } // if ( std::abs(edge_v[2]) > eps ) { cz_lo = -( edge_p0[2] - static_cast( ind_loop[2] ) * dx[2] ) / edge_v[2]; cz_hi = -( edge_p0[2] - static_cast( ind_loop[2] + 1 ) * dx[2] ) / edge_v[2]; - if ( edge_v[2] < 0._rt ) amrex::Swap(cz_lo, cz_hi); + if ( edge_v[2] < 0._rt ) { amrex::Swap(cz_lo, cz_hi); } } // Real lambda_min = amrex::max(cx_lo, cy_lo, cz_lo); diff --git a/Src/EB/AMReX_WriteEBSurface.cpp b/Src/EB/AMReX_WriteEBSurface.cpp index 52b50d6c182..75a0421c94d 100644 --- a/Src/EB/AMReX_WriteEBSurface.cpp +++ b/Src/EB/AMReX_WriteEBSurface.cpp @@ -28,7 +28,7 @@ void WriteEBSurface (const BoxArray & ba, const DistributionMapping & dmap, cons const Box & bx = mfi.validbox(); if (my_flag.getType(bx) == FabType::covered || - my_flag.getType(bx) == FabType::regular) continue; + my_flag.getType(bx) == FabType::regular) { continue; } std::array areafrac; const MultiCutFab * bndrycent; @@ -62,7 +62,7 @@ void WriteEBSurface (const BoxArray & ba, const DistributionMapping & dmap, cons const Box & bx = mfi.validbox(); if (my_flag.getType(bx) == FabType::covered || - my_flag.getType(bx) == FabType::regular) continue; + my_flag.getType(bx) == FabType::regular) { continue; } eb_to_pvd.EBGridCoverage(cpu, problo, dx, bx, my_flag.const_array()); } diff --git a/Src/EB/AMReX_algoim.cpp b/Src/EB/AMReX_algoim.cpp index 08a4e2f5d54..254e15dab0f 100644 --- a/Src/EB/AMReX_algoim.cpp +++ b/Src/EB/AMReX_algoim.cpp @@ -30,7 +30,7 @@ compute_integrals (MultiFab& intgmf, IntVect nghost) const auto& flags = my_factory.getMultiEBCellFlagFab(); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if(Gpu::notInLaunchRegion()) @@ -73,7 +73,7 @@ compute_integrals (MultiFab& intgmf, IntVect nghost) if (ebflag.isRegular()) { set_regular(i,j,k,intg); } else if (ebflag.isCovered()) { - for (int n = 0; n < numIntgs; ++n) intg(i,j,k,n) = 0.0; + for (int n = 0; n < numIntgs; ++n) { intg(i,j,k,n) = 0.0; } } else { EBPlane phi(bc(i,j,k,0),bc(i,j,k,1),bc(i,j,k,2), bn(i,j,k,0),bn(i,j,k,1),bn(i,j,k,2)); @@ -125,15 +125,15 @@ compute_integrals (MultiFab& intgmf, IntVect nghost) { const auto lo = amrex::lbound(bx); const auto hi = amrex::ubound(bx); - for (int k = lo.z; k <= hi.z; ++k) - for (int j = lo.y; j <= hi.y; ++j) + for (int k = lo.z; k <= hi.z; ++k) { + for (int j = lo.y; j <= hi.y; ++j) { for (int i = lo.x; i <= hi.x; ++i) { const auto ebflag = fg(i,j,k); if (ebflag.isRegular()) { set_regular(i,j,k,intg); } else if (ebflag.isCovered()) { - for (int n = 0; n < numIntgs; ++n) intg(i,j,k,n) = 0.0; + for (int n = 0; n < numIntgs; ++n) { intg(i,j,k,n) = 0.0; } } else { EBPlane phi(bc(i,j,k,0),bc(i,j,k,1),bc(i,j,k,2), bn(i,j,k,0),bn(i,j,k,1),bn(i,j,k,2)); @@ -179,7 +179,7 @@ compute_integrals (MultiFab& intgmf, IntVect nghost) intg(i,j,k,i_S_xyz ) = q.eval([](Real x, Real y, Real z) noexcept { return x*y*z; }); } - } + }}} } } } @@ -213,7 +213,7 @@ 
compute_surface_integrals (MultiFab& sintgmf, IntVect nghost) const auto& barea = my_factory.getBndryArea(); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if(Gpu::notInLaunchRegion()) @@ -260,12 +260,12 @@ compute_surface_integrals (MultiFab& sintgmf, IntVect nghost) if (ebflag.isRegular()) { set_regular_surface(i,j,k,sintg); } else if (ebflag.isCovered()) { - for (int n = 0; n < numSurfIntgs; ++n) sintg(i,j,k,n) = 0.0; + for (int n = 0; n < numSurfIntgs; ++n) { sintg(i,j,k,n) = 0.0; } } else { constexpr Real almostone = Real(1.) - Real(100.)*std::numeric_limits::epsilon(); if (vf(i,j,k) >= almostone) { - for(int n = 0; n < numSurfIntgs; ++n) sintg(i,j,k,n) = 0.0; + for(int n = 0; n < numSurfIntgs; ++n) { sintg(i,j,k,n) = 0.0; } Real apxm = apx(i ,j ,k ); Real apxp = apx(i+1,j ,k ); @@ -317,20 +317,20 @@ compute_surface_integrals (MultiFab& sintgmf, IntVect nghost) { const auto lo = amrex::lbound(bx); const auto hi = amrex::ubound(bx); - for (int k = lo.z; k <= hi.z; ++k) - for (int j = lo.y; j <= hi.y; ++j) + for (int k = lo.z; k <= hi.z; ++k) { + for (int j = lo.y; j <= hi.y; ++j) { for (int i = lo.x; i <= hi.x; ++i) { const auto ebflag = fg(i,j,k); if (ebflag.isRegular()) { set_regular_surface(i,j,k,sintg); } else if (ebflag.isCovered()) { - for (int n = 0; n < numSurfIntgs; ++n) sintg(i,j,k,n) = 0.0; + for (int n = 0; n < numSurfIntgs; ++n) { sintg(i,j,k,n) = 0.0; } } else { constexpr Real almostone = Real(1.) - Real(100.)*std::numeric_limits::epsilon(); if (vf(i,j,k) >= almostone) { - for(int n = 0; n < numSurfIntgs; ++n) sintg(i,j,k,n) = 0.0; + for(int n = 0; n < numSurfIntgs; ++n) { sintg(i,j,k,n) = 0.0; } Real apxm = apx(i ,j ,k ); Real apxp = apx(i+1,j ,k ); @@ -376,7 +376,7 @@ compute_surface_integrals (MultiFab& sintgmf, IntVect nghost) { return x*y*z; }); } } - } + }}} } } } diff --git a/Src/EB/AMReX_algoim_K.H b/Src/EB/AMReX_algoim_K.H index c8a47614a08..09b7f1bd715 100644 --- a/Src/EB/AMReX_algoim_K.H +++ b/Src/EB/AMReX_algoim_K.H @@ -442,7 +442,7 @@ struct ImplicitIntegral // Loop over segments of divided interval for (int i = 0; i < nroots - 1; ++i) { - if (roots[i+1] - roots[i] < tol) continue; + if (roots[i+1] - roots[i] < tol) { continue; } // Evaluate sign of phi within segment and check for consistency with psi bool okay = true; @@ -457,7 +457,7 @@ struct ImplicitIntegral bool new_ok = (phi(x) > 0.0) ? (psi[j].sign() >= 0) : (psi[j].sign() <= 0); okay = okay && new_ok; } - if (!okay) continue; + if (!okay) { continue; } for (int j = 0; j < p; ++j) { @@ -521,8 +521,9 @@ struct ImplicitIntegral // integral domain is the entire hyperrectangle. 
if (psiCount == 0) { - if (!S) + if (!S) { tensorProductIntegral(); + } return; } diff --git a/Src/Extern/HDF5/AMReX_ParticleHDF5.H b/Src/Extern/HDF5/AMReX_ParticleHDF5.H index db58f07d6a3..6bd18ab9227 100644 --- a/Src/Extern/HDF5/AMReX_ParticleHDF5.H +++ b/Src/Extern/HDF5/AMReX_ParticleHDF5.H @@ -155,10 +155,10 @@ ParticleContainer_impl AMREX_ASSERT( int_comp_names.size() == NStructInt + NumIntComps() ); Vector write_real_comp; - for (int i = 0; i < NStructReal + NumRealComps(); ++i) write_real_comp.push_back(1); + for (int i = 0; i < NStructReal + NumRealComps(); ++i) { write_real_comp.push_back(1); } Vector write_int_comp; - for (int i = 0; i < NStructInt + NumIntComps(); ++i) write_int_comp.push_back(1); + for (int i = 0; i < NStructInt + NumIntComps(); ++i) { write_int_comp.push_back(1); } WriteHDF5ParticleData(dir, name, write_real_comp, write_int_comp, @@ -312,10 +312,10 @@ ParticleContainer_impl AMREX_ASSERT( int_comp_names.size() == NStructInt + NArrayInt ); Vector write_real_comp; - for (int i = 0; i < NStructReal + NumRealComps(); ++i) write_real_comp.push_back(1); + for (int i = 0; i < NStructReal + NumRealComps(); ++i) { write_real_comp.push_back(1); } Vector write_int_comp; - for (int i = 0; i < NStructInt + NumIntComps(); ++i) write_int_comp.push_back(1); + for (int i = 0; i < NStructInt + NumIntComps(); ++i) { write_int_comp.push_back(1); } WriteHDF5ParticleData(dir, name, write_real_comp, write_int_comp, @@ -335,10 +335,10 @@ ParticleContainer_impl AMREX_ASSERT(real_comp_names.size() == NStructReal + NumRealComps()); Vector write_real_comp; - for (int i = 0; i < NStructReal + NumRealComps(); ++i) write_real_comp.push_back(1); + for (int i = 0; i < NStructReal + NumRealComps(); ++i) { write_real_comp.push_back(1); } Vector write_int_comp; - for (int i = 0; i < NStructInt + NumIntComps(); ++i) write_int_comp.push_back(1); + for (int i = 0; i < NStructInt + NumIntComps(); ++i) { write_int_comp.push_back(1); } Vector int_comp_names; for (int i = 0; i < NStructInt + NumIntComps(); ++i ) @@ -604,8 +604,9 @@ ParticleContainer_impl hsize_t chunk_dim = 1024; chunk_env = getenv("HDF5_CHUNK_SIZE"); - if (chunk_env != NULL) + if (chunk_env != NULL) { chunk_dim = atoi(chunk_env); + } H5Pset_chunk(dcpl_int, 1, &chunk_dim); H5Pset_chunk(dcpl_real, 1, &chunk_dim); @@ -626,7 +627,7 @@ ParticleContainer_impl pos = compression.find("ZFP"); if (pos != std::string::npos) { ret = H5Z_zfp_initialize(); - if (ret < 0) amrex::Abort("ZFP initialize failed!"); + if (ret < 0) { amrex::Abort("ZFP initialize failed!"); } } #endif @@ -815,22 +816,25 @@ ParticleContainer_impl H5Sclose(real_dset_space); real_file_offset = 0; - for (int i = 0; i < ParallelDescriptor::MyProc(); i++) + for (int i = 0; i < ParallelDescriptor::MyProc(); i++) { real_file_offset += all_mfi_real_total_size[i]; + } my_real_offset = real_file_offset; my_real_count = 0; int max_mfi_count = 0, write_count = 0; - for (int i = 0; i < ParallelDescriptor::MyProc(); i++) - if (max_mfi_count < all_mfi_cnt[i]) + for (int i = 0; i < ParallelDescriptor::MyProc(); i++) { + if (max_mfi_count < all_mfi_cnt[i]) { max_mfi_count = all_mfi_cnt[i]; + } + } for (MFIter mfi(state); mfi.isValid(); ++mfi) { const int grid = mfi.index(); - if (count[grid] == 0) continue; + if (count[grid] == 0) { continue; } Vector istuff; Vector rstuff; @@ -851,7 +855,7 @@ ParticleContainer_impl #else ret = H5Dwrite(int_dset_id, H5T_NATIVE_INT, int_mem_space, int_dset_space, dxpl_col, istuff.dataPtr()); #endif - if (ret < 0) amrex::Abort("H5Dwrite int_dset failed!"); + 
if (ret < 0) { amrex::Abort("H5Dwrite int_dset failed!"); } H5Sclose(int_dset_space); H5Sclose(int_mem_space); @@ -864,20 +868,21 @@ ParticleContainer_impl /* my_real_offset << ", my_real_count = " << my_real_count << ", total_real_size = " << total_real_size << '\n'; */ real_dset_space = H5Screate_simple(1, &total_real_size, NULL); H5Sselect_hyperslab (real_dset_space, H5S_SELECT_SET, &my_real_offset, NULL, &my_real_count, NULL); - if (sizeof(typename ParticleType::RealType) == 4) + if (sizeof(typename ParticleType::RealType) == 4) { #ifdef AMREX_USE_HDF5_ASYNC ret = H5Dwrite_async(real_dset_id, H5T_NATIVE_FLOAT, real_mem_space, real_dset_space, dxpl_col, rstuff.dataPtr(), es_par_g); #else ret = H5Dwrite(real_dset_id, H5T_NATIVE_FLOAT, real_mem_space, real_dset_space, dxpl_col, rstuff.dataPtr()); #endif - else + } else { #ifdef AMREX_USE_HDF5_ASYNC ret = H5Dwrite_async(real_dset_id, H5T_NATIVE_DOUBLE, real_mem_space, real_dset_space, dxpl_col, rstuff.dataPtr(), es_par_g); #else ret = H5Dwrite(real_dset_id, H5T_NATIVE_DOUBLE, real_mem_space, real_dset_space, dxpl_col, rstuff.dataPtr()); #endif + } - if (ret < 0) amrex::Abort("H5Dwrite real_dset failed!"); + if (ret < 0) { amrex::Abort("H5Dwrite real_dset failed!"); } H5Sclose(real_mem_space); H5Sclose(real_dset_space); @@ -896,16 +901,18 @@ ParticleContainer_impl #ifdef AMREX_USE_HDF5_ASYNC H5Dwrite_async(int_dset_id, H5T_NATIVE_INT, int_dset_space, int_dset_space, dxpl_col, NULL, es_par_g); - if (sizeof(typename ParticleType::RealType) == 4) + if (sizeof(typename ParticleType::RealType) == 4) { H5Dwrite_async(real_dset_id, H5T_NATIVE_FLOAT, real_dset_space, real_dset_space, dxpl_col, NULL, es_par_g); - else + } else { H5Dwrite_async(real_dset_id, H5T_NATIVE_DOUBLE, real_dset_space, real_dset_space, dxpl_col, NULL, es_par_g); + } #else H5Dwrite(int_dset_id, H5T_NATIVE_INT, int_dset_space, int_dset_space, dxpl_col, NULL); - if (sizeof(typename ParticleType::RealType) == 4) + if (sizeof(typename ParticleType::RealType) == 4) { H5Dwrite(real_dset_id, H5T_NATIVE_FLOAT, real_dset_space, real_dset_space, dxpl_col, NULL); - else + } else { H5Dwrite(real_dset_id, H5T_NATIVE_DOUBLE, real_dset_space, real_dset_space, dxpl_col, NULL); + } #endif H5Sclose(int_dset_space); @@ -931,8 +938,9 @@ ParticleContainer_impl #endif my_int_offset = 0; - for (int i = 0; i < ParallelDescriptor::MyProc(); i++) + for (int i = 0; i < ParallelDescriptor::MyProc(); i++) { my_int_offset += all_mfi_cnt[i]; + } my_int_count = my_mfi_cnt; int_mem_space = H5Screate_simple(1, &my_int_count, NULL); /* std::cout << "Rank " << ParallelDescriptor::MyProc() << ": my_int_offset = " << */ @@ -944,7 +952,7 @@ ParticleContainer_impl #else ret = H5Dwrite(offset_id, H5T_NATIVE_INT, int_mem_space, offset_space, dxpl_col, &(my_nparticles[0])); #endif - if (ret < 0) amrex::Abort("H5Dwrite offset failed!"); + if (ret < 0) { amrex::Abort("H5Dwrite offset failed!"); } H5Pclose(dcpl_int); H5Pclose(dcpl_real); @@ -986,8 +994,9 @@ ParticleContainer_impl const auto strttime = amrex::second(); std::string fullname = dir; - if (!fullname.empty() && fullname[fullname.size()-1] != '/') + if (!fullname.empty() && fullname[fullname.size()-1] != '/') { fullname += '/'; + } fullname += file; fullname += ".h5"; @@ -1097,8 +1106,9 @@ ParticleContainer_impl msg += aname; amrex::Abort(msg.c_str()); } - if (ni != NStructInt + NumIntComps()) + if (ni != NStructInt + NumIntComps()) { amrex::Abort("ParticleContainer::Restart(): ni != NStructInt"); + } aname = "nparticles"; Long nparticles; @@ -1285,7 +1295,7 @@ 
ParticleContainer_impl const int rank = ParallelDescriptor::MyProc(); const int NReaders = MaxReaders(); - if (rank >= NReaders) return; + if (rank >= NReaders) { return; } const int Navg = ngrids[lev] / NReaders; const int Nleft = ngrids[lev] - Navg * NReaders; @@ -1378,10 +1388,11 @@ ParticleContainer_impl hsize_t real_offset = offset*rChunkSize; real_dspace = H5Screate_simple(1, &real_cnt, NULL); H5Sselect_hyperslab (real_fspace, H5S_SELECT_SET, &real_offset, NULL, &real_cnt, NULL); - if (sizeof(RTYPE) == 4) + if (sizeof(RTYPE) == 4) { H5Dread(real_dset, H5T_NATIVE_FLOAT, real_dspace, real_fspace, H5P_DEFAULT, rstuff.dataPtr()); - else + } else { H5Dread(real_dset, H5T_NATIVE_DOUBLE, real_dspace, real_fspace, H5P_DEFAULT, rstuff.dataPtr()); + } H5Sclose(real_fspace); H5Sclose(real_dspace); diff --git a/Src/Extern/HDF5/AMReX_PlotFileUtilHDF5.cpp b/Src/Extern/HDF5/AMReX_PlotFileUtilHDF5.cpp index 13d62b7f790..eb0c1af1c33 100644 --- a/Src/Extern/HDF5/AMReX_PlotFileUtilHDF5.cpp +++ b/Src/Extern/HDF5/AMReX_PlotFileUtilHDF5.cpp @@ -470,7 +470,7 @@ void WriteMultiLevelPlotfileHDF5SingleDset (const std::string& plotfilename, pos = compression.find("ZFP"); if (pos != std::string::npos) { ret = H5Z_zfp_initialize(); - if (ret < 0) amrex::Abort("ZFP initialize failed!"); + if (ret < 0) { amrex::Abort("ZFP initialize failed!"); } } #endif @@ -912,7 +912,7 @@ void WriteMultiLevelPlotfileHDF5MultiDset (const std::string& plotfilename, pos = compression.find("ZFP"); if (pos != std::string::npos) { ret = H5Z_zfp_initialize(); - if (ret < 0) amrex::Abort("ZFP initialize failed!"); + if (ret < 0) { amrex::Abort("ZFP initialize failed!"); } } #endif @@ -920,7 +920,7 @@ void WriteMultiLevelPlotfileHDF5MultiDset (const std::string& plotfilename, pos = compression.find("SZ"); if (pos != std::string::npos) { ret = H5Z_SZ_Init((char*)value_env.c_str()); - if (ret < 0) amrex::Abort("ZFP initialize failed, check SZ config file!"); + if (ret < 0) { amrex::Abort("ZFP initialize failed, check SZ config file!"); } } #endif @@ -1161,24 +1161,26 @@ void WriteMultiLevelPlotfileHDF5MultiDset (const std::string& plotfilename, snprintf(dataname, sizeof dataname, "data:datatype=%d", jj); #ifdef AMREX_USE_HDF5_ASYNC dataset = H5Dcreate_async(grp, dataname, H5T_NATIVE_DOUBLE, dataspace, H5P_DEFAULT, lev_dcpl_id, H5P_DEFAULT, es_id_g); - if(dataset < 0) std::cout << ParallelDescriptor::MyProc() << "create data failed! ret = " << dataset << std::endl; + if(dataset < 0) { std::cout << ParallelDescriptor::MyProc() << "create data failed! ret = " << dataset << std::endl; } - if (hs_procsize[0] == 0) + if (hs_procsize[0] == 0) { H5Sselect_none(dataspace); - else + } else { H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, ch_offset, NULL, hs_procsize, NULL); + } ret = H5Dwrite_async(dataset, H5T_NATIVE_DOUBLE, memdataspace, dataspace, dxpl_col, a_buffer_ind.dataPtr(), es_id_g); if(ret < 0) { std::cout << ParallelDescriptor::MyProc() << "Write data failed! ret = " << ret << std::endl; break; } H5Dclose_async(dataset, es_id_g); #else dataset = H5Dcreate(grp, dataname, H5T_NATIVE_DOUBLE, dataspace, H5P_DEFAULT, lev_dcpl_id, H5P_DEFAULT); - if(dataset < 0) std::cout << ParallelDescriptor::MyProc() << "create data failed! ret = " << dataset << std::endl; + if(dataset < 0) { std::cout << ParallelDescriptor::MyProc() << "create data failed! 
ret = " << dataset << std::endl; } - if (hs_procsize[0] == 0) + if (hs_procsize[0] == 0) { H5Sselect_none(dataspace); - else + } else { H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, ch_offset, NULL, hs_procsize, NULL); + } ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memdataspace, dataspace, dxpl_col, a_buffer_ind.dataPtr()); if(ret < 0) { std::cout << ParallelDescriptor::MyProc() << "Write data failed! ret = " << ret << std::endl; break; } diff --git a/Src/Extern/HDF5/AMReX_WriteBinaryParticleDataHDF5.H b/Src/Extern/HDF5/AMReX_WriteBinaryParticleDataHDF5.H index d3869ad2453..e808e01e33e 100644 --- a/Src/Extern/HDF5/AMReX_WriteBinaryParticleDataHDF5.H +++ b/Src/Extern/HDF5/AMReX_WriteBinaryParticleDataHDF5.H @@ -147,7 +147,7 @@ void WriteHDF5ParticleDataSync (PC const& pc, #endif std::string pdir = dir; - if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') pdir += '/'; + if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') { pdir += '/'; } pdir += name; if ( ! pc.GetLevelDirectoriesCreated()) { @@ -250,7 +250,7 @@ void WriteHDF5ParticleDataSync (PC const& pc, } fid = H5Fcreate(HDF5FileName.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - if (fid < 0) amrex::FileOpenFailed(HDF5FileName.c_str()); + if (fid < 0) { amrex::FileOpenFailed(HDF5FileName.c_str()); } // // First thing written is our Checkpoint/Restart version string. @@ -259,20 +259,23 @@ void WriteHDF5ParticleDataSync (PC const& pc, // particles so that we can Restart from the checkpoint files. // std::string versionName = is_checkpoint ? PC::CheckpointVersion() : PC::PlotfileVersion(); - if (sizeof(typename PC::ParticleType::RealType) == 4) + if (sizeof(typename PC::ParticleType::RealType) == 4) { versionName += "_single"; - else + } else { versionName += "_double"; + } CreateWriteHDF5AttrString(fid, "version_name", versionName.c_str()); int num_output_real = 0; - for (int i = 0; i < pc.NumRealComps() + NStructReal; ++i) - if (write_real_comp[i]) ++num_output_real; + for (int i = 0; i < pc.NumRealComps() + NStructReal; ++i) { + if (write_real_comp[i]) { ++num_output_real; } + } int num_output_int = 0; - for (int i = 0; i < pc.NumIntComps() + NStructInt; ++i) - if (write_int_comp[i]) ++num_output_int; + for (int i = 0; i < pc.NumIntComps() + NStructInt; ++i) { + if (write_int_comp[i]) { ++num_output_int; } + } // AMREX_SPACEDIM and N for sanity checking. 
int ndim = AMREX_SPACEDIM; @@ -392,8 +395,9 @@ void WriteHDF5ParticleDataSync (PC const& pc, #else fid = H5Fopen(HDF5FileName.c_str(), H5F_ACC_RDWR, fapl); #endif - if (fid < 0) + if (fid < 0) { FileOpenFailed(HDF5FileName.c_str()); + } char level_name[64]; for (int lev = 0; lev <= pc.finestLevel(); lev++) @@ -406,10 +410,11 @@ void WriteHDF5ParticleDataSync (PC const& pc, #endif bool gotsome; - if(pc.usePrePost) + if(pc.usePrePost) { gotsome = (pc.nParticlesAtLevelPrePost[lev] > 0); - else + } else { gotsome = (pc.NumberOfParticlesAtLevel(lev) > 0); + } MFInfo info; info.SetAlloc(false); @@ -423,8 +428,9 @@ void WriteHDF5ParticleDataSync (PC const& pc, Vector count(state.size(),0); Vector where(state.size(),0); - if(pc.usePrePost) + if(pc.usePrePost) { pc.filePrefixPrePost[lev] = HDF5FileName; + } if (gotsome) { diff --git a/Src/Extern/HYPRE/AMReX_HypreIJIface.cpp b/Src/Extern/HYPRE/AMReX_HypreIJIface.cpp index aaf1ecea689..0466343d7db 100644 --- a/Src/Extern/HYPRE/AMReX_HypreIJIface.cpp +++ b/Src/Extern/HYPRE/AMReX_HypreIJIface.cpp @@ -102,9 +102,10 @@ void HypreIJIface::run_hypre_setup () { if (m_need_setup || m_recompute_preconditioner) { BL_PROFILE("HypreIJIface::run_hypre_setup()"); - if (m_has_preconditioner) + if (m_has_preconditioner) { m_solverPrecondPtr( m_solver, m_precondSolvePtr, m_precondSetupPtr, m_precond); + } m_solverSetupPtr(m_solver, m_parA, m_parRhs, m_parSln); m_need_setup = false; @@ -137,8 +138,9 @@ void HypreIJIface::solve ( m_solverSetTolPtr(m_solver, rel_tol); m_solverSetMaxIterPtr(m_solver, max_iter); - if ((abs_tol > 0.0) && (m_solverSetAbsTolPtr != nullptr)) + if ((abs_tol > 0.0) && (m_solverSetAbsTolPtr != nullptr)) { m_solverSetAbsTolPtr(m_solver, abs_tol); + } // setup run_hypre_setup(); @@ -156,14 +158,15 @@ void HypreIJIface::solve ( HYPRE_IJVectorPrint(m_sln, slnfile.c_str()); // Increment counter if the user has requested output of multiple solves - if (!m_overwrite_files) ++m_write_counter; + if (!m_overwrite_files) { ++m_write_counter; } } - if (m_verbose > 1) + if (m_verbose > 1) { amrex::Print() << "HYPRE " << m_solver_name << ": Num. 
iterations = " << m_num_iterations << "; Relative residual = " << m_final_res_norm << std::endl; + } } void HypreIJIface::parse_inputs (const std::string& prefix) @@ -177,10 +180,11 @@ void HypreIJIface::parse_inputs (const std::string& prefix) pp.queryAdd("overwrite_existing_matrix_files", m_overwrite_files); pp.queryAdd("adjust_singular_matrix", m_adjust_singular_matrix); - if (m_verbose > 2) + if (m_verbose > 2) { amrex::Print() << "HYPRE: solver = " << m_solver_name << "; preconditioner = " << m_preconditioner_name << std::endl; + } if (m_preconditioner_name == "none") { m_has_preconditioner = false; @@ -230,8 +234,9 @@ void HypreIJIface::init_solver ( void HypreIJIface::boomeramg_precond_configure (const std::string& prefix) { - if (m_verbose > 2) + if (m_verbose > 2) { amrex::Print() << "Creating BoomerAMG preconditioner" << std::endl; + } HYPRE_BoomerAMGCreate(&m_precond); // Setup the pointers @@ -291,14 +296,16 @@ void HypreIJIface::boomeramg_precond_configure (const std::string& prefix) hpp.pp.getarr("bamg_non_galerkin_level_levels", levels); hpp.pp.getarr("bamg_non_galerkin_level_tols", tols); - if (levels.size() != tols.size()) + if (levels.size() != tols.size()) { amrex::Abort( "HypreIJIface: Invalid sizes for non-Galerkin level " "tolerances"); + } - for (size_t i = 0; i < levels.size(); ++i) + for (size_t i = 0; i < levels.size(); ++i) { HYPRE_BoomerAMGSetLevelNonGalerkinTol( m_precond, tols[i], levels[i]); + } } } @@ -418,14 +425,16 @@ void HypreIJIface::boomeramg_solver_configure (const std::string& prefix) bool use_old_default = true; hpp.pp.queryAdd("bamg_use_old_default", use_old_default); - if (use_old_default) + if (use_old_default) { HYPRE_BoomerAMGSetOldDefault(m_solver); + } } void HypreIJIface::gmres_solver_configure (const std::string& prefix) { - if (m_verbose > 2) + if (m_verbose > 2) { amrex::Print() << "Creating GMRES solver" << std::endl; + } HYPRE_ParCSRGMRESCreate(m_comm, &m_solver); // Setup pointers diff --git a/Src/Extern/PETSc/AMReX_PETSc.cpp b/Src/Extern/PETSc/AMReX_PETSc.cpp index 5b6f82cec85..6d6835e6808 100644 --- a/Src/Extern/PETSc/AMReX_PETSc.cpp +++ b/Src/Extern/PETSc/AMReX_PETSc.cpp @@ -20,7 +20,7 @@ namespace amrex { struct amrex_KSP { amrex_KSP () = default; - ~amrex_KSP () { if (a) KSPDestroy(&a); } + ~amrex_KSP () { if (a) { KSPDestroy(&a); } } amrex_KSP (amrex_KSP const&) = delete; amrex_KSP (amrex_KSP &&) = delete; amrex_KSP& operator= (amrex_KSP const&) = delete; @@ -31,7 +31,7 @@ struct amrex_KSP struct amrex_Mat { amrex_Mat () = default; - ~amrex_Mat () { if (a) MatDestroy(&a); } + ~amrex_Mat () { if (a) { MatDestroy(&a); } } amrex_Mat (amrex_Mat const&) = delete; amrex_Mat (amrex_Mat &&) = delete; amrex_Mat& operator= (amrex_Mat const&) = delete; @@ -42,7 +42,7 @@ struct amrex_Mat struct amrex_Vec { amrex_Vec () = default; - ~amrex_Vec () { if (a) VecDestroy(&a); } + ~amrex_Vec () { if (a) { VecDestroy(&a); } } amrex_Vec (amrex_Vec const&) = delete; amrex_Vec (amrex_Vec &&) = delete; amrex_Vec& operator= (amrex_Vec const&) = delete; diff --git a/Src/Extern/ProfParser/AMReX_ProfParserBatch.cpp b/Src/Extern/ProfParser/AMReX_ProfParserBatch.cpp index 4c0e5e25413..388b2366a2f 100644 --- a/Src/Extern/ProfParser/AMReX_ProfParserBatch.cpp +++ b/Src/Extern/ProfParser/AMReX_ProfParserBatch.cpp @@ -106,23 +106,23 @@ bool ProfParserBatchFunctions(int argc, char *argv[], bool runDefault, if(argc > 2) { // parse the command line int ia(1); while(ia < argc-1) { - if(bIOP) cout << "argv[" << ia << "] = " << argv[ia] << endl; + if(bIOP) { std::cout 
<< "argv[" << ia << "] = " << argv[ia] << endl; } if(strcmp(argv[ia], "-v") == 0) { if(ia < argc-2) { verbose = atoi(argv[ia+1]); } - if(bIOP) cout << "*** verbose = " << verbose << endl; + if(bIOP) { std::cout << "*** verbose = " << verbose << endl; } ++ia; } else if(strcmp(argv[ia], "-ws") == 0) { bWriteSummary = true; } else if(strcmp(argv[ia], "-check") == 0) { // ---- commprof options - if(bIOP) cout << "*** data integrity check." << endl; + if(bIOP) { std::cout << "*** data integrity check." << endl; } runCheck = true; } else if(strcmp(argv[ia], "-stats") == 0) { - if(bIOP) cout << "*** print database statistics." << endl; + if(bIOP) { std::cout << "*** print database statistics." << endl; } runStats = true; } else if(strcmp(argv[ia], "-timelinepf") == 0) { - if(bIOP) cout << "*** output a timeline plotfile." << endl; + if(bIOP) { std::cout << "*** output a timeline plotfile." << endl; } runTimelinePF = true; runStats = true; } else if(strcmp(argv[ia], "-actpf") == 0) { @@ -134,59 +134,59 @@ bool ProfParserBatchFunctions(int argc, char *argv[], bool runDefault, << actFNames[actFNames.size() - 1] << endl; ++ia; } else if(strcmp(argv[ia], "-sr") == 0) { - if(bIOP) cout << "*** send receive pairing." << endl; + if(bIOP) { std::cout << "*** send receive pairing." << endl; } runSendRecv = true; } else if(strcmp(argv[ia], "-srlist") == 0) { - if(bIOP) cout << "*** send receive pairing list." << endl; + if(bIOP) { std::cout << "*** send receive pairing list." << endl; } runSendRecvList = true; } else if(strcmp(argv[ia], "-sendspf") == 0) { - if(bIOP) cout << "*** sendspf." << endl; + if(bIOP) { std::cout << "*** sendspf." << endl; } runSendsPF = true; } else if(strcmp(argv[ia], "-gl") == 0) { - if(bIOP) cout << "*** grdlog." << endl; + if(bIOP) { std::cout << "*** grdlog." << endl; } glOnly = true; } else if(strcmp(argv[ia], "-tce") == 0) { - if(bIOP) cout << "*** topolcoords for edison." << endl; + if(bIOP) { std::cout << "*** topolcoords for edison." << endl; } tcEdisonOnly = true; } else if(strcmp(argv[ia], "-spd") == 0) { - if(bIOP) cout << "*** sync point data." << endl; + if(bIOP) { std::cout << "*** sync point data." << endl; } runSyncPointData = true; } else if(strcmp(argv[ia], "-redist") == 0) { - if(bIOP) cout << "*** redist." << endl; + if(bIOP) { std::cout << "*** redist." 
<< endl; } runRedist = true; } else if(strcmp(argv[ia], "-msil") == 0) { if(ia < argc-2) { maxSmallImageLength = atoi(argv[ia+1]); } - if(bIOP) cout << "*** msil = " << maxSmallImageLength << endl; + if(bIOP) { std::cout << "*** msil = " << maxSmallImageLength << endl; } ++ia; } else if(strcmp(argv[ia], "-rra") == 0) { if(ia < argc-2) { refRatioAll = atoi(argv[ia+1]); } - if(bIOP) cout << "*** rra = " << refRatioAll << endl; + if(bIOP) { std::cout << "*** rra = " << refRatioAll << endl; } ++ia; } else if(strcmp(argv[ia], "-nts") == 0) { if(ia < argc-2) { nTimeSlots = atoi(argv[ia+1]); } - if(bIOP) cout << "*** nts = " << nTimeSlots << endl; + if(bIOP) { std::cout << "*** nts = " << nTimeSlots << endl; } ++ia; } else if(strcmp(argv[ia], "-proc") == 0) { if(ia < argc-2) { whichProc = atoi(argv[ia+1]); } - if(bIOP) cout << "*** whichProc = " << whichProc << endl; + if(bIOP) { std::cout << "*** whichProc = " << whichProc << endl; } ++ia; } else if(strcmp(argv[ia], "-of") == 0) { if(ia < argc-2) { outfileName = argv[ia+1]; filenameSet = true; } - if(bIOP) cout << "*** outfileName = " << outfileName << endl; + if(bIOP) { std::cout << "*** outfileName = " << outfileName << endl; } ++ia; } else if(strcmp(argv[ia], "-proxmap") == 0) { - if(bIOP) cout << "*** proxmap." << endl; + if(bIOP) { std::cout << "*** proxmap." << endl; } proxMap = true; } else if(strcmp(argv[ia], "-mff") == 0) { // ---- region and trace options @@ -210,9 +210,9 @@ bool ProfParserBatchFunctions(int argc, char *argv[], bool runDefault, Real gpct(atof(argv[ia+1])); if(gpct >= 0.0 && gpct <= 100.0) { RegionsProfStats::SetGPercent(gpct); - if(bIOP) cout << "*** gpct = " << gpct << endl; + if(bIOP) { std::cout << "*** gpct = " << gpct << endl; } } else { - if(bIOP) cout << "*** gpct must be in range [0.0, 100.0]" << endl; + if(bIOP) { std::cout << "*** gpct must be in range [0.0, 100.0]" << endl; } } } ++ia; @@ -221,10 +221,10 @@ bool ProfParserBatchFunctions(int argc, char *argv[], bool runDefault, } else if(strcmp(argv[ia], "-prof") == 0) { bParserProf = true; } else if(strcmp(argv[ia], "-dispatch") == 0) { - if(bIOP) cout << "*** using dispatch interface." << endl; + if(bIOP) { std::cout << "*** using dispatch interface." 
<< endl; } bUseDispatch = true; } else { - if(bIOP) cerr << "*** Error: bad command line arg: " << argv[ia] << endl; + if(bIOP) { std::cerr << "*** Error: bad command line arg: " << argv[ia] << endl; } } ++ia; } diff --git a/Src/Extern/SUNDIALS/AMReX_NVector_MultiFab.cpp b/Src/Extern/SUNDIALS/AMReX_NVector_MultiFab.cpp index 1674a83343a..8408f75c41d 100644 --- a/Src/Extern/SUNDIALS/AMReX_NVector_MultiFab.cpp +++ b/Src/Extern/SUNDIALS/AMReX_NVector_MultiFab.cpp @@ -29,7 +29,7 @@ N_Vector N_VNewEmpty_MultiFab(sunindextype length, ::sundials::Context* sunctx) { /* Create vector */ N_Vector v = N_VNewEmpty(*sunctx); - if (v == nullptr) return(nullptr); + if (v == nullptr) { return(nullptr); } v->ops->nvclone = N_VClone_MultiFab; v->ops->nvcloneempty = N_VCloneEmpty_MultiFab; @@ -85,7 +85,7 @@ N_Vector N_VNew_MultiFab(sunindextype length, ::sundials::Context* sunctx) { N_Vector v = N_VNewEmpty_MultiFab(length, sunctx); - if (v == nullptr) return(nullptr); + if (v == nullptr) { return(nullptr); } // Create and attach new MultiFab if (length > 0) @@ -107,7 +107,7 @@ N_Vector N_VMake_MultiFab(sunindextype length, amrex::MultiFab *v_mf, ::sundials::Context* sunctx) { N_Vector v = N_VNewEmpty_MultiFab(length, sunctx); - if (v == nullptr) return(nullptr); + if (v == nullptr) { return(nullptr); } if (length > 0) { @@ -157,11 +157,11 @@ int N_VGetOwnMF_MultiFab(N_Vector v) N_Vector N_VCloneEmpty_MultiFab(N_Vector w) { - if (w == nullptr) return(nullptr); + if (w == nullptr) { return(nullptr); } /* Create vector and copy operations */ N_Vector v = N_VNewEmpty(w->sunctx); - if (v == nullptr) return(nullptr); + if (v == nullptr) { return(nullptr); } N_VCopyOps(w, v); /* Create content */ @@ -182,7 +182,7 @@ N_Vector N_VCloneEmpty_MultiFab(N_Vector w) N_Vector N_VClone_MultiFab(N_Vector w) { N_Vector v = N_VCloneEmpty_MultiFab(w); - if (v == nullptr) return(nullptr); + if (v == nullptr) { return(nullptr); } sunindextype length = amrex::sundials::N_VGetLength_MultiFab(w); diff --git a/Src/Extern/SUNDIALS/AMReX_SUNMemory.cpp b/Src/Extern/SUNDIALS/AMReX_SUNMemory.cpp index 043c44b0634..285cdb2f17b 100644 --- a/Src/Extern/SUNDIALS/AMReX_SUNMemory.cpp +++ b/Src/Extern/SUNDIALS/AMReX_SUNMemory.cpp @@ -37,7 +37,7 @@ namespace { { SUNMemory mem = SUNMemoryNewEmpty(); - if (mem == nullptr) return -1; + if (mem == nullptr) { return -1; } mem->ptr = nullptr; mem->own = SUNTRUE; mem->type = mem_type; @@ -48,7 +48,7 @@ namespace { return 0; } else { - free(mem); + std::free(mem); memptr = nullptr; return -1; } @@ -59,22 +59,22 @@ namespace { int Dealloc(SUNMemoryHelper, SUNMemory mem, void* /*queue*/) { - if (mem == nullptr) return 0; + if (mem == nullptr) { return 0; } auto* arena = getArena(mem->type); if (arena) { if(mem->own) { arena->free(mem->ptr); - free(mem); + std::free(mem); return 0; } } else { - free(mem); + std::free(mem); return -1; } - free(mem); + std::free(mem); return 0; } @@ -93,8 +93,8 @@ namespace { void ActuallyDestroySUNMemoryHelper(SUNMemoryHelper helper) { - if (helper->ops) free(helper->ops); - free(helper); + if (helper->ops) { std::free(helper->ops); } + std::free(helper); } SUNMemoryHelper CreateMemoryHelper(::sundials::Context* sunctx) @@ -173,7 +173,7 @@ void MemoryHelper::Initialize(int nthreads) std::fill(the_sunmemory_helper.begin(), the_sunmemory_helper.end(), nullptr); } for (int i = 0; i < nthreads; i++) { - if (initialized[i]) continue; + if (initialized[i]) { continue; } initialized[i] = 1; BL_ASSERT(the_sunmemory_helper[i] == nullptr); the_sunmemory_helper[i] = new 
MemoryHelper(The_Sundials_Context(i)); diff --git a/Src/Extern/SUNDIALS/AMReX_SundialsIntegrator.H b/Src/Extern/SUNDIALS/AMReX_SundialsIntegrator.H index 8981cfed3c4..85e4cab9220 100644 --- a/Src/Extern/SUNDIALS/AMReX_SundialsIntegrator.H +++ b/Src/Extern/SUNDIALS/AMReX_SundialsIntegrator.H @@ -493,17 +493,19 @@ public: if(use_mri_strategy_test) { - if(use_erk3) + if(use_erk3) { inner_mem = ARKStepCreate(SundialsUserFun::f0, nullptr, time, nv_S, sunctx); // explicit bc (explicit f, implicit f, time, data) - else + } else { inner_mem = ARKStepCreate(nullptr, SundialsUserFun::f0, time, nv_S, sunctx); // implicit + } } else { - if(use_erk3) + if(use_erk3) { inner_mem = ARKStepCreate(SundialsUserFun::f_fast, nullptr, time, nv_S, sunctx); - else + } else { inner_mem = ARKStepCreate(nullptr, SundialsUserFun::f_fast, time, nv_S, sunctx); + } } ARKStepSetFixedStep(inner_mem, hfixed_mri); // Specify fixed time step size @@ -549,8 +551,9 @@ public: B->b[0] = 1.0; B->q=1; B->p=0; - } else + } else { amrex::Error("MRI method not implemented"); + } return B; }; @@ -592,11 +595,12 @@ public: LS = SUNLinSol_SPGMR(nv_S, PREC_NONE, 10, sunctx); NLS = SUNNonlinSol_FixedPoint(nv_S, 50, sunctx); - if (use_implicit_inner) ARKStepSetNonlinearSolver(inner_mem, NLS); - if(use_linear) + if (use_implicit_inner) { ARKStepSetNonlinearSolver(inner_mem, NLS); } + if(use_linear) { MRIStepSetLinearSolver(mristep_mem, LS, nullptr); - else + } else { MRIStepSetNonlinearSolver(mristep_mem, NLS); + } MRIStepSetUserData(mristep_mem, &udata); /* Pass udata to user functions */ MRIStepSetPostprocessStageFn(mristep_mem, SundialsUserFun::ProcessStage); diff --git a/Src/Extern/SUNDIALS/AMReX_Sundials_Core.cpp b/Src/Extern/SUNDIALS/AMReX_Sundials_Core.cpp index 80a3a33b18d..286662058a3 100644 --- a/Src/Extern/SUNDIALS/AMReX_Sundials_Core.cpp +++ b/Src/Extern/SUNDIALS/AMReX_Sundials_Core.cpp @@ -22,7 +22,7 @@ void Initialize(int nthreads) std::fill(the_sundials_context.begin(), the_sundials_context.end(), nullptr); } for (int i = 0; i < nthreads; i++) { - if (initialized[i]) continue; + if (initialized[i]) { continue; } initialized[i] = 1; BL_ASSERT(the_sundials_context[i] == nullptr); the_sundials_context[i] = new ::sundials::Context(); diff --git a/Src/F_Interfaces/AmrCore/AMReX_FlashFluxRegister.cpp b/Src/F_Interfaces/AmrCore/AMReX_FlashFluxRegister.cpp index 8f5a04dfc39..abe74c41125 100644 --- a/Src/F_Interfaces/AmrCore/AMReX_FlashFluxRegister.cpp +++ b/Src/F_Interfaces/AmrCore/AMReX_FlashFluxRegister.cpp @@ -68,7 +68,7 @@ void FlashFluxRegister::define (const BoxArray& fba, const BoxArray& cba, bl[dir].push_back(amrex::coarsen(amrex::bdryNode(ccbx,face.first), ref_ratio)); procmap[dir].push_back(fdm[i]); - if (fdm[i] == myproc) my_global_indices[dir].push_back(i); + if (fdm[i] == myproc) { my_global_indices[dir].push_back(i); } } } @@ -140,7 +140,7 @@ void FlashFluxRegister::define (const BoxArray& fba, const BoxArray& cba, const int dir = face.coordDir(); bl[dir].push_back(amrex::bdryNode(ccbx,face)); procmap[dir].push_back(cdm[i]); - if (cdm[i] == myproc) my_global_indices[dir].push_back(i); + if (cdm[i] == myproc) { my_global_indices[dir].push_back(i); } } } diff --git a/Src/F_Interfaces/AmrCore/AMReX_fluxregister_fi.cpp b/Src/F_Interfaces/AmrCore/AMReX_fluxregister_fi.cpp index edbc3970bad..2ce6399a695 100644 --- a/Src/F_Interfaces/AmrCore/AMReX_fluxregister_fi.cpp +++ b/Src/F_Interfaces/AmrCore/AMReX_fluxregister_fi.cpp @@ -31,8 +31,9 @@ extern "C" bx.shiftHalf(dir,-1); BL_ASSERT(flux_reg->nComp() == nfluxes); - if 
(zeroFirst)
+ if (zeroFirst) {
flux_reg->FineSetVal(dir, boxno, 0, flux_reg->nComp(), 0.0, RunOn::Cpu);
+ }
const FArrayBox fab(bx, nfluxes, const_cast(fabdata));
flux_reg->FineAdd(fab, dir, boxno, 0, 0, flux_reg->nComp(), scale, RunOn::Cpu);
}
diff --git a/Src/F_Interfaces/Base/AMReX_distromap_fi.cpp b/Src/F_Interfaces/Base/AMReX_distromap_fi.cpp
index 66d744cde5c..e50031a5887 100644
--- a/Src/F_Interfaces/Base/AMReX_distromap_fi.cpp
+++ b/Src/F_Interfaces/Base/AMReX_distromap_fi.cpp
@@ -32,8 +32,9 @@ extern "C" {
{
Long dmsize = dm->size();
AMREX_ASSERT(plen >= dmsize);
- for (int i = 0; i < dmsize && i < plen; ++i)
+ for (int i = 0; i < dmsize && i < plen; ++i) {
pmap[i] = (*dm)[i];
+ }
}
void amrex_fi_print_distromap (const DistributionMapping* dm)
diff --git a/Src/F_Interfaces/Base/AMReX_geometry_fi.cpp b/Src/F_Interfaces/Base/AMReX_geometry_fi.cpp
index d50f22c2220..bafc9d0f5e0 100644
--- a/Src/F_Interfaces/Base/AMReX_geometry_fi.cpp
+++ b/Src/F_Interfaces/Base/AMReX_geometry_fi.cpp
@@ -19,8 +19,9 @@ extern "C"
void amrex_fi_geometry_get_pmask (int is_per[3])
{
Geometry* gg = AMReX::top()->getDefaultGeometry();
- for (int i = 0; i < BL_SPACEDIM; ++i)
+ for (int i = 0; i < BL_SPACEDIM; ++i) {
is_per[i] = gg->isPeriodic(i);
+ }
}
void amrex_fi_geometry_get_probdomain (Real problo[3], Real probhi[3])
diff --git a/Src/F_Interfaces/LinearSolvers/AMReX_abeclaplacian_fi.cpp b/Src/F_Interfaces/LinearSolvers/AMReX_abeclaplacian_fi.cpp
index 673192dd84b..010951c60f8 100644
--- a/Src/F_Interfaces/LinearSolvers/AMReX_abeclaplacian_fi.cpp
+++ b/Src/F_Interfaces/LinearSolvers/AMReX_abeclaplacian_fi.cpp
@@ -11,9 +11,9 @@ extern "C" {
int max_coarsening_level)
{
LPInfo info;
- if (metric_term >= 0) info.setMetricTerm(metric_term);
- if (agglomeration >= 0) info.setAgglomeration(agglomeration);
- if (consolidation >= 0) info.setConsolidation(consolidation);
+ if (metric_term >= 0) { info.setMetricTerm(metric_term); }
+ if (agglomeration >= 0) { info.setAgglomeration(agglomeration); }
+ if (consolidation >= 0) { info.setConsolidation(consolidation); }
info.setMaxCoarseningLevel(max_coarsening_level);
Vector g;
Vector b;
diff --git a/Src/F_Interfaces/LinearSolvers/AMReX_poisson_fi.cpp b/Src/F_Interfaces/LinearSolvers/AMReX_poisson_fi.cpp
index 54351a33864..4680451022a 100644
--- a/Src/F_Interfaces/LinearSolvers/AMReX_poisson_fi.cpp
+++ b/Src/F_Interfaces/LinearSolvers/AMReX_poisson_fi.cpp
@@ -11,9 +11,9 @@ extern "C" {
int max_coarsening_level)
{
LPInfo info;
- if (metric_term >= 0) info.setMetricTerm(metric_term);
- if (agglomeration >= 0) info.setAgglomeration(agglomeration);
- if (consolidation >= 0) info.setConsolidation(consolidation);
+ if (metric_term >= 0) { info.setMetricTerm(metric_term); }
+ if (agglomeration >= 0) { info.setAgglomeration(agglomeration); }
+ if (consolidation >= 0) { info.setConsolidation(consolidation); }
info.setMaxCoarseningLevel(max_coarsening_level);
Vector g;
Vector b;
diff --git a/Src/LinearSolvers/MLMG/AMReX_MLABecLap_2D_K.H b/Src/LinearSolvers/MLMG/AMReX_MLABecLap_2D_K.H
index 56f040ef528..8aec299d903 100644
--- a/Src/LinearSolvers/MLMG/AMReX_MLABecLap_2D_K.H
+++ b/Src/LinearSolvers/MLMG/AMReX_MLABecLap_2D_K.H
@@ -255,12 +255,12 @@ void abec_gsrb_with_line_solve (
// int idir = 1;
// This should be moved outside the kernel!
- if (dhy <= dhx) amrex::Abort("dhy is supposed to be much larger than dhx");
+ if (dhy <= dhx) { amrex::Abort("dhy is supposed to be much larger than dhx"); }
int ilen = hi.y - lo.y + 1;
// This should be moved outside the kernel!
- if (ilen > 32) amrex::Abort("abec_gsrb_with_line_solve is hard-wired to be no longer than 32"); + if (ilen > 32) { amrex::Abort("abec_gsrb_with_line_solve is hard-wired to be no longer than 32"); } Array1D a_ls; Array1D b_ls; @@ -295,10 +295,12 @@ void abec_gsrb_with_line_solve ( + bX(i+1,j,0,n)*phi(i+1,j,0,n) ); // We have already accounted for this external boundary in the coefficient of phi(i,j,k,n) - if (i == vlo.x && m0(vlo.x-1,j,0) > 0) + if (i == vlo.x && m0(vlo.x-1,j,0) > 0) { rho -= dhx*bX(i ,j,0,n)*phi(i-1,j,0,n); - if (i == vhi.x && m3(vhi.x+1,j,0) > 0) + } + if (i == vhi.x && m3(vhi.x+1,j,0) > 0) { rho -= dhx*bX(i+1,j,0,n)*phi(i+1,j,0,n); + } a_ls(j-lo.y) = -dhy*bY(i,j,0,n); b_ls(j-lo.y) = g_m_d; @@ -308,11 +310,11 @@ void abec_gsrb_with_line_solve ( if (j == lo.y) { a_ls(j-lo.y) = T(0.); - if (!(m1(i,vlo.y-1,0) > 0)) r_ls(j-lo.y) += dhy*bY(i,j,0,n)*phi(i,j-1,0,n); + if (!(m1(i,vlo.y-1,0) > 0)) { r_ls(j-lo.y) += dhy*bY(i,j,0,n)*phi(i,j-1,0,n); } } if (j == hi.y) { c_ls(j-lo.y) = T(0.); - if (!(m3(i,vhi.y+1,0) > 0)) r_ls(j-lo.y) += dhy*bY(i,j+1,0,n)*phi(i,j+1,0,n); + if (!(m3(i,vhi.y+1,0) > 0)) { r_ls(j-lo.y) += dhy*bY(i,j+1,0,n)*phi(i,j+1,0,n); } } } // This is the tridiagonal solve @@ -323,7 +325,7 @@ void abec_gsrb_with_line_solve ( for (int jj = 1; jj <= ilen-1; jj++) { gam(jj) = c_ls(jj-1) / bet; bet = b_ls(jj) - a_ls(jj)*gam(jj); - if (bet == 0) amrex::Abort(">>>TRIDIAG FAILED"); + if (bet == 0) { amrex::Abort(">>>TRIDIAG FAILED"); } u_ls(jj) = (r_ls(jj)-a_ls(jj)*u_ls(jj-1)) / bet; } diff --git a/Src/LinearSolvers/MLMG/AMReX_MLABecLap_3D_K.H b/Src/LinearSolvers/MLMG/AMReX_MLABecLap_3D_K.H index 2573e9fbd93..7501153a656 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLABecLap_3D_K.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLABecLap_3D_K.H @@ -339,7 +339,7 @@ void tridiagonal_solve (Array1D& a_ls, Array1D& b_ls, Array1D>>TRIDIAG FAILED"); + if (bet == 0) { amrex::Abort(">>>TRIDIAG FAILED"); } u_ls(i) = (r_ls(i)-a_ls(i)*u_ls(i-1)) / bet; } for (int i = ilen-2; i >= 0; i--) { @@ -388,7 +388,7 @@ void abec_gsrb_with_line_solve ( } // This assertion should be moved outside the kernel for performance! 
- if (ilen > 32) amrex::Abort("abec_gsrb_with_line_solve is hard-wired to be no longer than 32"); + if (ilen > 32) { amrex::Abort("abec_gsrb_with_line_solve is hard-wired to be no longer than 32"); } Array1D a_ls; Array1D b_ls; @@ -435,14 +435,18 @@ void abec_gsrb_with_line_solve ( + bY(i,j+1,k,n)*phi(i,j+1,k,n) ); // We have already accounted for this external boundary in the coefficient of phi(i,j,k,n) - if (i == vlo.x && m0(vlo.x-1,j,k) > 0) + if (i == vlo.x && m0(vlo.x-1,j,k) > 0) { rho -= dhx*bX(i ,j,k,n)*phi(i-1,j,k,n); - if (i == vhi.x && m3(vhi.x+1,j,k) > 0) + } + if (i == vhi.x && m3(vhi.x+1,j,k) > 0) { rho -= dhx*bX(i+1,j,k,n)*phi(i+1,j,k,n); - if (j == vlo.y && m1(i,vlo.y-1,k) > 0) + } + if (j == vlo.y && m1(i,vlo.y-1,k) > 0) { rho -= dhy*bY(i,j ,k,n)*phi(i,j-1,k,n); - if (j == vhi.y && m4(i,vhi.y+1,k) > 0) + } + if (j == vhi.y && m4(i,vhi.y+1,k) > 0) { rho -= dhy*bY(i,j+1,k,n)*phi(i,j+1,k,n); + } a_ls(k-lo.z) = -dhz*bZ(i,j,k,n); b_ls(k-lo.z) = g_m_d; @@ -454,12 +458,12 @@ void abec_gsrb_with_line_solve ( if (k == lo.z) { a_ls(k-lo.z) = T(0.); - if (!(m2(i,j,vlo.z-1) > 0)) r_ls(k-lo.z) += dhz*bZ(i,j,k,n)*phi(i,j,k-1,n); + if (!(m2(i,j,vlo.z-1) > 0)) { r_ls(k-lo.z) += dhz*bZ(i,j,k,n)*phi(i,j,k-1,n); } } if (k == hi.z) { c_ls(k-lo.z) = T(0.); - if (!(m5(i,j,vhi.z+1) > 0)) r_ls(k-lo.z) += dhz*bZ(i,j,k+1,n)*phi(i,j,k+1,n); + if (!(m5(i,j,vhi.z+1) > 0)) { r_ls(k-lo.z) += dhz*bZ(i,j,k+1,n)*phi(i,j,k+1,n); } } } @@ -511,14 +515,18 @@ void abec_gsrb_with_line_solve ( + bZ(i,j,k+1,n)*phi(i,j,k+1,n) ); // We have already accounted for this external boundary in the coefficient of phi(i,j,k,n) - if (i == vlo.x && m0(vlo.x-1,j,k) > 0) + if (i == vlo.x && m0(vlo.x-1,j,k) > 0) { rho -= dhx*bX(i ,j,k,n)*phi(i-1,j,k,n); - if (i == vhi.x && m3(vhi.x+1,j,k) > 0) + } + if (i == vhi.x && m3(vhi.x+1,j,k) > 0) { rho -= dhx*bX(i+1,j,k,n)*phi(i+1,j,k,n); - if (k == vlo.z && m2(i,j,vlo.z-1) > 0) + } + if (k == vlo.z && m2(i,j,vlo.z-1) > 0) { rho -= dhz*bZ(i,j ,k,n)*phi(i,j,k-1,n); - if (k == vhi.z && m5(i,j,vhi.z+1) > 0) + } + if (k == vhi.z && m5(i,j,vhi.z+1) > 0) { rho -= dhz*bZ(i,j,k+1,n)*phi(i,j,k+1,n); + } a_ls(j-lo.y) = -dhy*bY(i,j,k,n); b_ls(j-lo.y) = g_m_d; @@ -529,12 +537,12 @@ void abec_gsrb_with_line_solve ( if (j == lo.y) { a_ls(j-lo.y) = T(0.); - if (!(m1(i,vlo.y-1,k) > 0)) r_ls(j-lo.y) += dhy*bY(i,j,k,n)*phi(i,j-1,k,n); + if (!(m1(i,vlo.y-1,k) > 0)) { r_ls(j-lo.y) += dhy*bY(i,j,k,n)*phi(i,j-1,k,n); } } if (j == hi.y) { c_ls(j-lo.y) = T(0.); - if (!(m4(i,vhi.y+1,k) > 0)) r_ls(j-lo.y) += dhy*bY(i,j+1,k,n)*phi(i,j+1,k,n); + if (!(m4(i,vhi.y+1,k) > 0)) { r_ls(j-lo.y) += dhy*bY(i,j+1,k,n)*phi(i,j+1,k,n); } } } @@ -586,14 +594,18 @@ void abec_gsrb_with_line_solve ( + bZ(i,j,k+1,n)*phi(i,j,k+1,n) ); // We have already accounted for this external boundary in the coefficient of phi(i,j,k,n) - if (j == vlo.y && m1(i,vlo.y-1,k) > 0) + if (j == vlo.y && m1(i,vlo.y-1,k) > 0) { rho -= dhy*bY(i,j ,k,n)*phi(i,j-1,k,n); - if (j == vhi.y && m4(i,vhi.y+1,k) > 0) + } + if (j == vhi.y && m4(i,vhi.y+1,k) > 0) { rho -= dhy*bY(i,j+1,k,n)*phi(i,j+1,k,n); - if (k == vlo.z && m2(i,j,vlo.z-1) > 0) + } + if (k == vlo.z && m2(i,j,vlo.z-1) > 0) { rho -= dhz*bZ(i,j ,k,n)*phi(i,j,k-1,n); - if (k == vhi.z && m5(i,j,vhi.z+1) > 0) + } + if (k == vhi.z && m5(i,j,vhi.z+1) > 0) { rho -= dhz*bZ(i,j,k+1,n)*phi(i,j,k+1,n); + } a_ls(i-lo.x) = -dhx*bX(i,j,k,n); b_ls(i-lo.x) = g_m_d; @@ -604,12 +616,12 @@ void abec_gsrb_with_line_solve ( if (i == lo.x) { a_ls(i-lo.x) = T(0.); - if (!(m0(vlo.x-1,j,k) > 0)) r_ls(i-lo.x) += 
dhx*bX(i,j,k,n)*phi(i-1,j,k,n); + if (!(m0(vlo.x-1,j,k) > 0)) { r_ls(i-lo.x) += dhx*bX(i,j,k,n)*phi(i-1,j,k,n); } } if (i == hi.x) { c_ls(i-lo.x) = T(0.); - if (!(m3(vhi.x+1,j,k) > 0)) r_ls(i-lo.x) += dhx*bX(i+1,j,k,n)*phi(i+1,j,k,n); + if (!(m3(vhi.x+1,j,k) > 0)) { r_ls(i-lo.x) += dhx*bX(i+1,j,k,n)*phi(i+1,j,k,n); } } } diff --git a/Src/LinearSolvers/MLMG/AMReX_MLABecLaplacian.H b/Src/LinearSolvers/MLMG/AMReX_MLABecLaplacian.H index 5384f534107..af0d1218d1f 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLABecLaplacian.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLABecLaplacian.H @@ -478,7 +478,7 @@ template void MLABecLaplacianT::applyRobinBCTermsCoeffs () { - if (!(this->hasRobinBC())) return; + if (!(this->hasRobinBC())) { return; } const int ncomp = this->getNComp(); bool reset_alpha = false; @@ -500,7 +500,7 @@ MLABecLaplacianT::applyRobinBCTermsCoeffs () } MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) @@ -515,7 +515,7 @@ MLABecLaplacianT::applyRobinBCTermsCoeffs () const Box& bhi = amrex::adjCellHi(vbx,idim); bool outside_domain_lo = !(domain.contains(blo)); bool outside_domain_hi = !(domain.contains(bhi)); - if ((!outside_domain_lo) && (!outside_domain_hi)) continue; + if ((!outside_domain_lo) && (!outside_domain_hi)) { continue; } for (int icomp = 0; icomp < ncomp; ++icomp) { auto const& rbc = (*(this->m_robin_bcval[amrlev]))[mfi].const_array(icomp*3); if (this->m_lobc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_lo) @@ -914,7 +914,7 @@ MLABecLaplacianT::Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, in #endif { MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) @@ -1166,7 +1166,7 @@ template std::unique_ptr> MLABecLaplacianT::makeNLinOp (int /*grid_size*/) const { - if (this->m_overset_mask[0][0] == nullptr) return nullptr; + if (this->m_overset_mask[0][0] == nullptr) { return nullptr; } const Geometry& geom = this->m_geom[0].back(); const BoxArray& ba = this->m_grids[0].back(); @@ -1265,7 +1265,7 @@ template void MLABecLaplacianT::copyNSolveSolution (MF& dst, MF const& src) const { - if (this->m_overset_mask[0].back() == nullptr) return; + if (this->m_overset_mask[0].back() == nullptr) { return; } const int ncomp = dst.nComp(); diff --git a/Src/LinearSolvers/MLMG/AMReX_MLALaplacian.H b/Src/LinearSolvers/MLMG/AMReX_MLALaplacian.H index dc1dfb53e38..dc95e39d9cd 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLALaplacian.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLALaplacian.H @@ -257,7 +257,7 @@ template void MLALaplacianT::update () { - if (MLCellABecLapT::needsUpdate()) MLCellABecLapT::update(); + if (MLCellABecLapT::needsUpdate()) { MLCellABecLapT::update(); } averageDownCoeffs(); updateSingularFlag(); m_needs_update = false; @@ -453,7 +453,7 @@ MLALaplacianT::Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int r const RT alpha = m_a_scalar; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) diff --git a/Src/LinearSolvers/MLMG/AMReX_MLCGSolver.H b/Src/LinearSolvers/MLMG/AMReX_MLCGSolver.H index 0b54c0309fd..3764fa38f8a 100644 --- 
a/Src/LinearSolvers/MLMG/AMReX_MLCGSolver.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLCGSolver.H @@ -182,7 +182,7 @@ MLCGSolverT::solve_bicgstab (MF& sol, const MF& rhs, RT eps_rel, RT eps_abs) << rnorm/(rnorm0) << '\n'; } - if ( rnorm < eps_rel*rnorm0 || rnorm < eps_abs ) break; + if ( rnorm < eps_rel*rnorm0 || rnorm < eps_abs ) { break; } sh.LocalCopy(s,0,0,ncomp,nghost); Lp.apply(amrlev, mglev, t, sh, MLLinOpT::BCMode::Homogeneous, MLLinOpT::StateMode::Correction); @@ -219,7 +219,7 @@ MLCGSolverT::solve_bicgstab (MF& sol, const MF& rhs, RT eps_rel, RT eps_abs) << rnorm/(rnorm0) << '\n'; } - if ( rnorm < eps_rel*rnorm0 || rnorm < eps_abs ) break; + if ( rnorm < eps_rel*rnorm0 || rnorm < eps_abs ) { break; } if ( omega == 0 ) { @@ -238,8 +238,9 @@ MLCGSolverT::solve_bicgstab (MF& sol, const MF& rhs, RT eps_rel, RT eps_abs) if ( ret == 0 && rnorm > eps_rel*rnorm0 && rnorm > eps_abs) { - if ( verbose > 0 && ParallelDescriptor::IOProcessor() ) + if ( verbose > 0 && ParallelDescriptor::IOProcessor() ) { amrex::Warning("MLCGSolver_BiCGStab:: failed to converge!"); + } ret = 8; } @@ -355,7 +356,7 @@ MLCGSolverT::solve_cg (MF& sol, const MF& rhs, RT eps_rel, RT eps_abs) << rnorm/(rnorm0) << '\n'; } - if ( rnorm < eps_rel*rnorm0 || rnorm < eps_abs ) break; + if ( rnorm < eps_rel*rnorm0 || rnorm < eps_abs ) { break; } rho_1 = rho; } @@ -370,8 +371,9 @@ MLCGSolverT::solve_cg (MF& sol, const MF& rhs, RT eps_rel, RT eps_abs) if ( ret == 0 && rnorm > eps_rel*rnorm0 && rnorm > eps_abs ) { - if ( verbose > 0 && ParallelDescriptor::IOProcessor() ) + if ( verbose > 0 && ParallelDescriptor::IOProcessor() ) { amrex::Warning("MLCGSolver_cg: failed to converge!"); + } ret = 8; } diff --git a/Src/LinearSolvers/MLMG/AMReX_MLCellABecLap.H b/Src/LinearSolvers/MLMG/AMReX_MLCellABecLap.H index f58137ca9b6..6262064f550 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLCellABecLap.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLCellABecLap.H @@ -237,7 +237,7 @@ template void MLCellABecLapT::update () { - if (MLCellLinOpT::needsUpdate()) MLCellLinOpT::update(); + if (MLCellLinOpT::needsUpdate()) { MLCellLinOpT::update(); } } template @@ -277,7 +277,7 @@ MLCellABecLapT::applyInhomogNeumannTerm (int amrlev, MF& rhs) const bool has_inhomog_neumann = this->hasInhomogNeumannBC(); bool has_robin = this->hasRobinBC(); - if (!has_inhomog_neumann && !has_robin) return; + if (!has_inhomog_neumann && !has_robin) { return; } int ncomp = this->getNComp(); const int mglev = 0; @@ -302,7 +302,7 @@ MLCellABecLapT::applyInhomogNeumannTerm (int amrlev, MF& rhs) const const auto& bndry = *(this->m_bndry_sol[amrlev]); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) @@ -329,7 +329,7 @@ MLCellABecLapT::applyInhomogNeumannTerm (int amrlev, MF& rhs) const const auto& bvhi = bndry.bndryValues(ohi).array(mfi); bool outside_domain_lo = !(domain.contains(blo)); bool outside_domain_hi = !(domain.contains(bhi)); - if ((!outside_domain_lo) && (!outside_domain_hi)) continue; + if ((!outside_domain_lo) && (!outside_domain_hi)) { continue; } for (int icomp = 0; icomp < ncomp; ++icomp) { const BoundCond bctlo = bdcv[icomp][olo]; const BoundCond bcthi = bdcv[icomp][ohi]; @@ -509,7 +509,7 @@ MLCellABecLapT::addInhomogNeumannFlux ( bool has_inhomog_neumann = this->hasInhomogNeumannBC(); bool has_robin = this->hasRobinBC(); - if (!has_inhomog_neumann && !has_robin) return; + if (!has_inhomog_neumann && !has_robin) 
{ return; } int ncomp = this->getNComp(); const int mglev = 0; @@ -525,7 +525,7 @@ MLCellABecLapT::addInhomogNeumannFlux ( const auto& bndry = *this->m_bndry_sol[amrlev]; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) @@ -633,7 +633,7 @@ MLCellABecLapT::applyOverset (int amrlev, MF& rhs) const auto const& osm = m_overset_mask[amrlev][0]->const_array(mfi); AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n, { - if (osm(i,j,k) == 0) rfab(i,j,k,n) = RT(0.0); + if (osm(i,j,k) == 0) { rfab(i,j,k,n) = RT(0.0); } }); } } diff --git a/Src/LinearSolvers/MLMG/AMReX_MLCellLinOp.H b/Src/LinearSolvers/MLMG/AMReX_MLCellLinOp.H index 9fa69d8de12..175f34ae54b 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLCellLinOp.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLCellLinOp.H @@ -504,7 +504,7 @@ MLCellLinOpT::setLevelBC (int amrlev, const MF* a_levelbcdata, const MF* rob MF zero; IntVect ng(1); - if (this->hasHiddenDimension()) ng[this->hiddenDirection()] = 0; + if (this->hasHiddenDimension()) { ng[this->hiddenDirection()] = 0; } if (a_levelbcdata == nullptr) { zero.define(this->m_grids[amrlev][0], this->m_dmap[amrlev][0], ncomp, ng); zero.setVal(RT(0.0)); @@ -574,7 +574,7 @@ MLCellLinOpT::setLevelBC (int amrlev, const MF* a_levelbcdata, const MF* rob ncomp*3, 1); const Box& domain = this->m_geom[amrlev][0].Domain(); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -588,7 +588,7 @@ MLCellLinOpT::setLevelBC (int amrlev, const MF* a_levelbcdata, const MF* rob const Box& bhi = amrex::adjCellHi(vbx, idim); bool outside_domain_lo = !(domain.contains(blo)); bool outside_domain_hi = !(domain.contains(bhi)); - if ((!outside_domain_lo) && (!outside_domain_hi)) continue; + if ((!outside_domain_lo) && (!outside_domain_hi)) { continue; } for (int icomp = 0; icomp < ncomp; ++icomp) { Array4 const& rbc = (*m_robin_bcval[amrlev])[mfi].array(icomp*3); if (this->m_lobc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_lo) @@ -619,7 +619,7 @@ template void MLCellLinOpT::update () { - if (MLLinOpT::needsUpdate()) MLLinOpT::update(); + if (MLLinOpT::needsUpdate()) { MLLinOpT::update(); } } template @@ -681,7 +681,7 @@ MLCellLinOpT::applyBC (int amrlev, int mglev, MF& in, BCMode bc_mode, StateM const auto& foo = foofab.const_array(); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); } AMREX_ALWAYS_ASSERT_WITH_MESSAGE(cross || tensorop || Gpu::notInLaunchRegion(), "non-cross stencil not support for gpu"); @@ -775,7 +775,7 @@ MLCellLinOpT::applyBC (int amrlev, int mglev, MF& in, BCMode bc_mode, StateM for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if (hidden_direction == idim) continue; + if (hidden_direction == idim) { continue; } const Orientation olo(idim,Orientation::low); const Orientation ohi(idim,Orientation::high); const Box blo = amrex::adjCellLo(vbx, idim); @@ -1005,7 +1005,7 @@ MLCellLinOpT::interpAssign (int amrlev, int fmglev, MF& fine, MF& crse) cons #endif MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if 
(Gpu::notInLaunchRegion()) #endif @@ -1075,7 +1075,7 @@ MLCellLinOpT::interpolationAmr (int famrlev, MF& fine, const MF& crse, #endif MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -1253,15 +1253,15 @@ MLCellLinOpT::reflux (int crse_amrlev, MF& res, const MF& crse_sol, const MF m_bndry_sol[fine_amrlev].get()); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif { Array flux; - Array pflux {{ AMREX_D_DECL(&flux[0], &flux[1], &flux[2]) }}; - Array cpflux {{ AMREX_D_DECL(&flux[0], &flux[1], &flux[2]) }}; + Array pflux {{ AMREX_D_DECL(flux.data(), flux.data()+1, flux.data()+2) }}; + Array cpflux {{ AMREX_D_DECL(flux.data(), flux.data()+1, flux.data()+2) }}; for (MFIter mfi(crse_sol, mfi_info); mfi.isValid(); ++mfi) { @@ -1317,14 +1317,14 @@ MLCellLinOpT::compFlux (int amrlev, const Array& fluxes, m_bndry_sol[amrlev].get()); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif { Array flux; - Array pflux {{ AMREX_D_DECL(&flux[0], &flux[1], &flux[2]) }}; + Array pflux {{ AMREX_D_DECL(flux.data(), flux.data()+1, flux.data()+2) }}; for (MFIter mfi(sol, mfi_info); mfi.isValid(); ++mfi) { const Box& tbx = mfi.tilebox(); @@ -1408,7 +1408,7 @@ MLCellLinOpT::applyMetricTerm (int amrlev, int mglev, MF& rhs) const { amrex::ignore_unused(amrlev,mglev,rhs); #if (AMREX_SPACEDIM != 3) - if (!m_has_metric_term) return; + if (!m_has_metric_term) { return; } const int ncomp = rhs.nComp(); @@ -1464,7 +1464,7 @@ MLCellLinOpT::unapplyMetricTerm (int amrlev, int mglev, MF& rhs) const { amrex::ignore_unused(amrlev,mglev,rhs); #if (AMREX_SPACEDIM != 3) - if (!m_has_metric_term) return; + if (!m_has_metric_term) { return; } const int ncomp = rhs.nComp(); @@ -1808,7 +1808,7 @@ MLCellLinOpT::prepareForSolve () #endif for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if (idim == hidden_direction) continue; + if (idim == hidden_direction) { continue; } const Orientation olo(idim,Orientation::low); const Orientation ohi(idim,Orientation::high); const Box blo = amrex::adjCellLo(vbx, idim); @@ -1904,7 +1904,7 @@ template void MLCellLinOpT::computeVolInv () const { - if (!m_volinv.empty()) return; + if (!m_volinv.empty()) { return; } m_volinv.resize(this->m_num_amr_levels); for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) { @@ -2051,7 +2051,7 @@ MLCellLinOpT::normInf (int amrlev, MF const& mf, bool local) const -> RT } } - if (!local) ParallelAllReduce::Max(norm, ParallelContext::CommunicatorSub()); + if (!local) { ParallelAllReduce::Max(norm, ParallelContext::CommunicatorSub()); } return norm; } diff --git a/Src/LinearSolvers/MLMG/AMReX_MLEBABecLap.cpp b/Src/LinearSolvers/MLMG/AMReX_MLEBABecLap.cpp index 170ed49f6d4..b37537645e6 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLEBABecLap.cpp +++ b/Src/LinearSolvers/MLMG/AMReX_MLEBABecLap.cpp @@ -211,7 +211,7 @@ MLEBABecLap::setEBDirichlet (int amrlev, const MultiFab& phi, const MultiFab& be const FabArray* flags = (factory) ? 
&(factory->getMultiEBCellFlagFab()) : nullptr; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -257,8 +257,9 @@ MLEBABecLap::setEBDirichlet (int amrlev, const MultiFab& phi, const MultiFab& be } } - if (phi_on_centroid) - m_eb_phi[amrlev]->FillBoundary(m_geom[amrlev][0].periodicity()); + if (phi_on_centroid) { + m_eb_phi[amrlev]->FillBoundary(m_geom[amrlev][0].periodicity()); + } } void @@ -287,7 +288,7 @@ MLEBABecLap::setEBDirichlet (int amrlev, const MultiFab& phi, Real beta) const FabArray* flags = (factory) ? &(factory->getMultiEBCellFlagFab()) : nullptr; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -319,8 +320,9 @@ MLEBABecLap::setEBDirichlet (int amrlev, const MultiFab& phi, Real beta) } } - if (phi_on_centroid) - m_eb_phi[amrlev]->FillBoundary(m_geom[amrlev][0].periodicity()); + if (phi_on_centroid) { + m_eb_phi[amrlev]->FillBoundary(m_geom[amrlev][0].periodicity()); + } } void @@ -353,7 +355,7 @@ MLEBABecLap::setEBDirichlet (int amrlev, const MultiFab& phi, Vector const Real const* beta = dv_beta.data(); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -385,8 +387,9 @@ MLEBABecLap::setEBDirichlet (int amrlev, const MultiFab& phi, Vector const } } - if (phi_on_centroid) - m_eb_phi[amrlev]->FillBoundary(m_geom[amrlev][0].periodicity()); + if (phi_on_centroid) { + m_eb_phi[amrlev]->FillBoundary(m_geom[amrlev][0].periodicity()); + } } void @@ -417,7 +420,7 @@ MLEBABecLap::setEBHomogDirichlet (int amrlev, const MultiFab& beta) const FabArray* flags = (factory) ? &(factory->getMultiEBCellFlagFab()) : nullptr; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -461,8 +464,9 @@ MLEBABecLap::setEBHomogDirichlet (int amrlev, const MultiFab& beta) } } - if (phi_on_centroid) - m_eb_phi[amrlev]->FillBoundary(m_geom[amrlev][0].periodicity()); + if (phi_on_centroid) { + m_eb_phi[amrlev]->FillBoundary(m_geom[amrlev][0].periodicity()); + } } void @@ -491,7 +495,7 @@ MLEBABecLap::setEBHomogDirichlet (int amrlev, Real beta) const FabArray* flags = (factory) ? 
&(factory->getMultiEBCellFlagFab()) : nullptr; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -523,8 +527,9 @@ MLEBABecLap::setEBHomogDirichlet (int amrlev, Real beta) } } - if (phi_on_centroid) - m_eb_phi[amrlev]->FillBoundary(m_geom[amrlev][0].periodicity()); + if (phi_on_centroid) { + m_eb_phi[amrlev]->FillBoundary(m_geom[amrlev][0].periodicity()); + } } void @@ -557,7 +562,7 @@ MLEBABecLap::setEBHomogDirichlet (int amrlev, Vector const& hv_beta) Real const* beta = dv_beta.data(); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -589,8 +594,9 @@ MLEBABecLap::setEBHomogDirichlet (int amrlev, Vector const& hv_beta) } } - if (phi_on_centroid) - m_eb_phi[amrlev]->FillBoundary(m_geom[amrlev][0].periodicity()); + if (phi_on_centroid) { + m_eb_phi[amrlev]->FillBoundary(m_geom[amrlev][0].periodicity()); + } } void @@ -740,7 +746,7 @@ MLEBABecLap::compGrad (int amrlev, const Array& grad, Array{AMREX_D_DECL(nullptr, nullptr, nullptr)}; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -796,7 +802,7 @@ MLEBABecLap::compGrad (int amrlev, const Array& grad, bool phi_on_centroid = (m_phi_loc == Location::CellCentroid); - if (phi_on_centroid) amrex::Abort("phi_on_centroid is still a WIP"); + if (phi_on_centroid) { amrex::Abort("phi_on_centroid is still a WIP"); } AMREX_LAUNCH_HOST_DEVICE_LAMBDA_DIM ( fbx, txbx, @@ -877,7 +883,7 @@ MLEBABecLap::normalize (int amrlev, int mglev, MultiFab& mf) const const int ncomp = getNComp(); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling(); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling(); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -1039,7 +1045,7 @@ MLEBABecLap::applyBC (int amrlev, int mglev, MultiFab& in, BCMode bc_mode, State const auto& foo = foofab.array(); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) @@ -1175,7 +1181,7 @@ MLEBABecLap::apply (int amrlev, int mglev, MultiFab& out, MultiFab& in, BCMode b void MLEBABecLap::update () { - if (MLCellABecLap::needsUpdate()) MLCellABecLap::update(); + if (MLCellABecLap::needsUpdate()) { MLCellABecLap::update(); } averageDownCoeffs(); @@ -1237,7 +1243,7 @@ MLEBABecLap::getEBFluxes (const Vector& a_flux, const Vector foo; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif diff --git a/Src/LinearSolvers/MLMG/AMReX_MLEBABecLap_2D_K.H b/Src/LinearSolvers/MLMG/AMReX_MLEBABecLap_2D_K.H index f2bac77be77..557b14f7a4d 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLEBABecLap_2D_K.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLEBABecLap_2D_K.H @@ -210,44 +210,48 @@ void mlebabeclap_adotx (Box const& box, 
Array4 const& y, if (apxm != Real(0.0) && apxm != Real(1.0)) { int jj = j + static_cast(std::copysign(Real(1.0),fcx(i,j,k))); Real fracy = (ccm(i-1,jj,k) || ccm(i,jj,k)) ? std::abs(fcx(i,j,k)) : Real(0.0); - if (beta_on_center && phi_on_center) + if (beta_on_center && phi_on_center) { fxm = (Real(1.0)-fracy)*fxm + fracy*bX(i,jj,k,n)*(x(i,jj,k,n)-x(i-1,jj,k,n)); - else if (beta_on_centroid && phi_on_center) + } else if (beta_on_centroid && phi_on_center) { fxm = bX(i,j,k,n) * ( (Real(1.0)-fracy)*(x(i, j,k,n)-x(i-1, j,k,n)) + fracy *(x(i,jj,k,n)-x(i-1,jj,k,n)) ); + } } Real fxp = bX(i+1,j,k,n)*(x(i+1,j,k,n)-x(i,j,k,n)); if (apxp != Real(0.0) && apxp != Real(1.0)) { int jj = j + static_cast(std::copysign(Real(1.0),fcx(i+1,j,k))); Real fracy = (ccm(i,jj,k) || ccm(i+1,jj,k)) ? std::abs(fcx(i+1,j,k)) : Real(0.0); - if (beta_on_center && phi_on_center) + if (beta_on_center && phi_on_center) { fxp = (Real(1.0)-fracy)*fxp + fracy*bX(i+1,jj,k,n)*(x(i+1,jj,k,n)-x(i,jj,k,n)); - else if (beta_on_centroid && phi_on_center) + } else if (beta_on_centroid && phi_on_center) { fxp = bX(i+1,j,k,n) * ( (Real(1.0)-fracy)*(x(i+1, j,k,n)-x(i, j,k,n)) + fracy *(x(i+1,jj,k,n)-x(i,jj,k,n)) ); + } } Real fym = bY(i,j,k,n)*(x(i,j,k,n)-x(i,j-1,k,n)); if (apym != Real(0.0) && apym != Real(1.0)) { int ii = i + static_cast(std::copysign(Real(1.0),fcy(i,j,k))); Real fracx = (ccm(ii,j-1,k) || ccm(ii,j,k)) ? std::abs(fcy(i,j,k)) : Real(0.0); - if (beta_on_center && phi_on_center) + if (beta_on_center && phi_on_center) { fym = (Real(1.0)-fracx)*fym + fracx*bY(ii,j,k,n)*(x(ii,j,k,n)-x(ii,j-1,k,n)); - else if (beta_on_centroid && phi_on_center) + } else if (beta_on_centroid && phi_on_center) { fym = bY(i,j,k,n) * ( (Real(1.0)-fracx)*(x( i,j,k,n)-x( i,j-1,k,n)) + fracx *(x(ii,j,k,n)-x(ii,j-1,k,n)) ); + } } Real fyp = bY(i,j+1,k,n)*(x(i,j+1,k,n)-x(i,j,k,n)); if (apyp != Real(0.0) && apyp != Real(1.0)) { int ii = i + static_cast(std::copysign(Real(1.0),fcy(i,j+1,k))); Real fracx = (ccm(ii,j,k) || ccm(ii,j+1,k)) ? std::abs(fcy(i,j+1,k)) : Real(0.0); - if (beta_on_center && phi_on_center) + if (beta_on_center && phi_on_center) { fyp = (Real(1.0)-fracx)*fyp + fracx*bY(ii,j+1,k,n)*(x(ii,j+1,k,n)-x(ii,j,k,n)); - else if (beta_on_centroid && phi_on_center) + } else if (beta_on_centroid && phi_on_center) { fyp = bY(i,j+1,k,n) * ( (Real(1.0)-fracx)*(x( i,j+1,k,n)-x( i,j,k,n)) + fracx *(x(ii,j+1,k,n)-x(ii,j,k,n)) ); + } } Real feb = Real(0.0); @@ -603,9 +607,10 @@ void mlebabeclap_flux_x (Box const& box, Array4 const& fx, Array4 const& fy, Array4 const& gx, Array4(std::copysign(Real(1.0),fcx(i,j,k))); Real fracy = (ccm(i-1,jj,k) || ccm(i,jj,k)) ? std::abs(fcx(i,j,k)) : Real(0.0); - if (!phi_on_centroid) + if (!phi_on_centroid) { gxm = (Real(1.0)-fracy)*gxm + fracy*(sol(i,jj,k,n)-sol(i-1,jj,k,n)); + } gx(i,j,k,n) = gxm*dxi; } }); @@ -721,8 +728,9 @@ void mlebabeclap_grad_y (Box const& box, Array4 const& gy, Array4(std::copysign(Real(1.0),fcy(i,j,k))); Real fracx = (ccm(ii,j-1,k) || ccm(ii,j,k)) ? 
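
Annotation: in mlebabeclap_adotx every arm of the if / else-if chain is braced, which keeps the chain uniform once any arm needs a block. A reduced, self-contained sketch of the branch shape (the variable names mirror the hunk; the stencil arithmetic is trimmed, so treat it as illustrative only):

    double corrected_flux (bool beta_on_center, bool beta_on_centroid,
                           bool phi_on_center, double fxm, double fracy,
                           double b, double dphi0, double dphi1)
    {
        if (beta_on_center && phi_on_center) {
            fxm = (1.0-fracy)*fxm + fracy*b*dphi1;        // blend the two face fluxes
        } else if (beta_on_centroid && phi_on_center) {
            fxm = b * ((1.0-fracy)*dphi0 + fracy*dphi1);  // blend the solution differences first
        }
        return fxm;
    }
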
std::abs(fcy(i,j,k)) : Real(0.0); - if (!phi_on_centroid) + if (!phi_on_centroid) { gym = (Real(1.0)-fracx)*gym + fracx*(sol(ii,j,k,n)-sol(ii,j-1,k,n)); + } gy(i,j,k,n) = gym*dyi; } }); diff --git a/Src/LinearSolvers/MLMG/AMReX_MLEBABecLap_F.cpp b/Src/LinearSolvers/MLMG/AMReX_MLEBABecLap_F.cpp index 68f94cc0915..89258b75fe9 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLEBABecLap_F.cpp +++ b/Src/LinearSolvers/MLMG/AMReX_MLEBABecLap_F.cpp @@ -58,7 +58,7 @@ MLEBABecLap::Fapply (int amrlev, int mglev, MultiFab& out, const MultiFab& in) c const bool extdir_z = !(m_geom[amrlev][mglev].isPeriodic(2));); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -216,7 +216,7 @@ MLEBABecLap::Fsmooth (int amrlev, int mglev, MultiFab& sol, const MultiFab& rhs, Array4 foo; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -295,7 +295,7 @@ MLEBABecLap::Fsmooth (int amrlev, int mglev, MultiFab& sol, const MultiFab& rhs, bool beta_on_centroid = (m_beta_loc == Location::FaceCentroid); bool phi_on_centroid = (m_phi_loc == Location::CellCentroid); - if (phi_on_centroid) amrex::Abort("phi_on_centroid is still a WIP"); + if (phi_on_centroid) { amrex::Abort("phi_on_centroid is still a WIP"); } AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( vbx, thread_box, { @@ -385,7 +385,7 @@ MLEBABecLap::FFlux (int amrlev, const MFIter& mfi, const Array x{-bcl * dxinv, Real(0.5), Real(1.5), Real(2.5)}; Array2D coef{}; for (int r = 0; r <= maxorder-2; ++r) { - poly_interp_coeff(-Real(0.5), &x[0], r+2, &(coef(0,r))); + poly_interp_coeff(-Real(0.5), x.data(), r+2, &(coef(0,r))); } for (int k = lo.z; k <= hi.z; ++k) { for (int j = lo.y; j <= hi.y; ++j) { @@ -84,7 +84,7 @@ void mlebabeclap_apply_bc_x (int side, Box const& box, int blen, break; } } - if (has_cutfaces) order = amrex::min(2,order); + if (has_cutfaces) { order = amrex::min(2,order); } if (order == 1) { if (inhomog) { phi(i,j,k,icomp) = bcval(i,j,k,icomp); @@ -152,7 +152,7 @@ void mlebabeclap_apply_bc_y (int side, Box const& box, int blen, GpuArray x{-bcl * dyinv, Real(0.5), Real(1.5), Real(2.5)}; Array2D coef{}; for (int r = 0; r <= maxorder-2; ++r) { - poly_interp_coeff(-Real(0.5), &x[0], r+2, &(coef(0,r))); + poly_interp_coeff(-Real(0.5), x.data(), r+2, &(coef(0,r))); } for (int k = lo.z; k <= hi.z; ++k) { for (int i = lo.x; i <= hi.x; ++i) { @@ -170,7 +170,7 @@ void mlebabeclap_apply_bc_y (int side, Box const& box, int blen, break; } } - if (has_cutfaces) order = amrex::min(2,order); + if (has_cutfaces) { order = amrex::min(2,order); } if (order == 1) { if (inhomog) { phi(i,j,k,icomp) = bcval(i,j,k,icomp); @@ -238,7 +238,7 @@ void mlebabeclap_apply_bc_z (int side, Box const& box, int blen, GpuArray x{-bcl * dzinv, Real(0.5), Real(1.5), Real(2.5)}; Array2D coef{}; for (int r = 0; r <= maxorder-2; ++r) { - poly_interp_coeff(-Real(0.5), &x[0], r+2, &(coef(0,r))); + poly_interp_coeff(-Real(0.5), x.data(), r+2, &(coef(0,r))); } for (int j = lo.y; j <= hi.y; ++j) { for (int i = lo.x; i <= hi.x; ++i) { @@ -256,7 +256,7 @@ void mlebabeclap_apply_bc_z (int side, Box const& box, int blen, break; } } - if (has_cutfaces) order = amrex::min(2,order); + if (has_cutfaces) { order = amrex::min(2,order); } if (order == 1) { if 
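
Annotation: alongside the brace changes, these hunks replace &x[0] with x.data() at the poly_interp_coeff call sites (the kind of rewrite clang-tidy's readability-container-data-pointer check suggests, though which check drove it here is an assumption). For a non-empty container the two expressions are the same pointer; data() is also well-defined for an empty one and states the intent directly. A minimal standalone illustration:

    #include <array>
    #include <cassert>

    int main ()
    {
        std::array<double,4> x{-0.25, 0.5, 1.5, 2.5};
        // Same pointer for a non-empty array, but data() stays valid when the
        // container is empty and reads as "give me the underlying buffer".
        assert(&x[0] == x.data());
        return 0;
    }
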
(inhomog) { phi(i,j,k,icomp) = bcval(i,j,k,icomp); @@ -285,4 +285,3 @@ void mlebabeclap_apply_bc_z (int side, Box const& box, int blen, } #endif - diff --git a/Src/LinearSolvers/MLMG/AMReX_MLEBTensorOp.cpp b/Src/LinearSolvers/MLMG/AMReX_MLEBTensorOp.cpp index 0538421683b..c180d951909 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLEBTensorOp.cpp +++ b/Src/LinearSolvers/MLMG/AMReX_MLEBTensorOp.cpp @@ -203,7 +203,7 @@ MLEBTensorOp::apply (int amrlev, int mglev, MultiFab& out, MultiFab& in, BCMode BL_PROFILE("MLEBTensorOp::apply()"); MLEBABecLap::apply(amrlev, mglev, out, in, bc_mode, s_mode, bndry); - if (mglev >= m_kappa[amrlev].size()) return; + if (mglev >= m_kappa[amrlev].size()) { return; } applyBCTensor(amrlev, mglev, in, bc_mode, s_mode, bndry); @@ -228,7 +228,7 @@ MLEBTensorOp::apply (int amrlev, int mglev, MultiFab& out, MultiFab& in, BCMode compCrossTerms(amrlev, mglev, in, bndry); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -237,7 +237,7 @@ MLEBTensorOp::apply (int amrlev, int mglev, MultiFab& out, MultiFab& in, BCMode const Box& bx = mfi.tilebox(); auto fabtyp = (flags) ? (*flags)[mfi].getType(bx) : FabType::regular; - if (fabtyp == FabType::covered) continue; + if (fabtyp == FabType::covered) { continue; } Array4 const axfab = out.array(mfi); AMREX_D_TERM(Array4 const fxfab = fluxmf[0].const_array(mfi);, @@ -311,7 +311,7 @@ MLEBTensorOp::compCrossTerms(int amrlev, int mglev, MultiFab const& mf, Array& fluxmf = m_tauflux[amrlev][mglev]; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -504,14 +504,15 @@ MLEBTensorOp::compFlux (int amrlev, const Array& fluxe { BL_PROFILE("MLEBTensorOp::compFlux()"); - if ( !(loc==Location::FaceCenter || loc==Location::FaceCentroid) ) + if ( !(loc==Location::FaceCenter || loc==Location::FaceCentroid) ) { amrex::Abort("MLEBTensorOp::compFlux() unknown location for fluxes."); + } const int mglev = 0; const int ncomp = getNComp(); MLEBABecLap::compFlux(amrlev, fluxes, sol, loc); - if (mglev >= m_kappa[amrlev].size()) return; + if (mglev >= m_kappa[amrlev].size()) { return; } applyBCTensor(amrlev, mglev, sol, BCMode::Inhomogeneous, StateMode::Solution, m_bndry_sol[amrlev].get()); @@ -526,7 +527,7 @@ MLEBTensorOp::compFlux (int amrlev, const Array& fluxe compCrossTerms(amrlev, mglev, sol, m_bndry_sol[amrlev].get()); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -535,7 +536,7 @@ MLEBTensorOp::compFlux (int amrlev, const Array& fluxe const Box& bx = mfi.tilebox(); auto fabtyp = (flags) ? (*flags)[mfi].getType(bx) : FabType::regular; - if (fabtyp == FabType::covered) continue; + if (fabtyp == FabType::covered) { continue; } if (fabtyp == FabType::regular) { @@ -648,7 +649,7 @@ MLEBTensorOp::compVelGrad (int amrlev, const Box& bx = mfi.tilebox(); auto fabtyp = (flags) ? 
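
Annotation: control-flow keywords get the same treatment as plain statements, e.g. the covered-fab guards above become `if (fabtyp == FabType::covered) { continue; }` and the early exits become `if (...) { return; }`. A generic, self-contained sketch of the braced-skip pattern:

    #include <vector>

    int sum_positive (const std::vector<int>& v)
    {
        int s = 0;
        for (int x : v) {
            if (x <= 0) { continue; }   // was: if (x <= 0) continue;
            s += x;
        }
        return s;
    }
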
(*flags)[mfi].getType(bx) : FabType::regular; - if (fabtyp == FabType::covered) continue; + if (fabtyp == FabType::covered) { continue; } Array4 const vfab = sol.const_array(mfi); AMREX_D_TERM(Box const xbx = mfi.nodaltilebox(0);, diff --git a/Src/LinearSolvers/MLMG/AMReX_MLEBTensorOp_bc.cpp b/Src/LinearSolvers/MLMG/AMReX_MLEBTensorOp_bc.cpp index 77050040091..4c316eab2f8 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLEBTensorOp_bc.cpp +++ b/Src/LinearSolvers/MLMG/AMReX_MLEBTensorOp_bc.cpp @@ -24,7 +24,7 @@ MLEBTensorOp::applyBCTensor (int amrlev, int mglev, MultiFab& vel, const FabArray* flags = (factory) ? &(factory->getMultiEBCellFlagFab()) : nullptr; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif diff --git a/Src/LinearSolvers/MLMG/AMReX_MLEBTensor_2D_K.H b/Src/LinearSolvers/MLMG/AMReX_MLEBTensor_2D_K.H index e153dee4a47..12cf9db8a54 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLEBTensor_2D_K.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLEBTensor_2D_K.H @@ -371,8 +371,9 @@ void mlebtensor_flux_0 (Box const& box, AMREX_PRAGMA_SIMD for (int i = lo.x; i <= hi.x; ++i) { if (ap(i,j,k) != Real(0.0)) { - for (int n=0; n const& Ax, { if (!face_only || lof == i || hif == i) { if (apx(i,j,k) == Real(1.0)) { - for (int n=0; n const& Ay, { if (!face_only || lof == j || hif == j) { if (apy(i,j,k) == Real(1.0)) { - for (int n=0; n const& Ax, { if (!face_only || lof == i || hif == i) { if (apx(i,j,k) == Real(1.0)) { - for (int n=0; n const& Ay, { if (!face_only || lof == j || hif == j) { if (apy(i,j,k) == Real(1.0)) { - for (int n=0; n const& Az, { if (!face_only || lof == k || hif == k) { if (apz(i,j,k) == Real(1.0)) { - for (int n=0; n::define (const Vector& a_geom, #ifdef AMREX_USE_GPU if (Gpu::notInLaunchRegion()) { - if (info.agg_grid_size <= 0) info.agg_grid_size = AMREX_D_PICK(32, 16, 8); - if (info.con_grid_size <= 0) info.con_grid_size = AMREX_D_PICK(32, 16, 8); + if (info.agg_grid_size <= 0) { info.agg_grid_size = AMREX_D_PICK(32, 16, 8); } + if (info.con_grid_size <= 0) { info.con_grid_size = AMREX_D_PICK(32, 16, 8); } } else #endif { - if (info.agg_grid_size <= 0) info.agg_grid_size = LPInfo::getDefaultAgglomerationGridSize(); - if (info.con_grid_size <= 0) info.con_grid_size = LPInfo::getDefaultConsolidationGridSize(); + if (info.agg_grid_size <= 0) { info.agg_grid_size = LPInfo::getDefaultAgglomerationGridSize(); } + if (info.con_grid_size <= 0) { info.con_grid_size = LPInfo::getDefaultConsolidationGridSize(); } } #ifdef AMREX_USE_EB @@ -780,10 +780,10 @@ MLLinOpT::defineGrids (const Vector& a_geom, const Box& dom = a_geom[amrlev].Domain(); for (int i = 0; i < 2; ++i) { - if (!dom.coarsenable(rr)) amrex::Abort("MLLinOp: Uncoarsenable domain"); + if (!dom.coarsenable(rr)) { amrex::Abort("MLLinOp: Uncoarsenable domain"); } const Box& cdom = amrex::coarsen(dom,rr); - if (cdom == a_geom[amrlev-1].Domain()) break; + if (cdom == a_geom[amrlev-1].Domain()) { break; } ++(m_num_mg_levels[amrlev]); @@ -824,7 +824,7 @@ MLLinOpT::defineGrids (const Vector& a_geom, m_domain_covered[0] = (npts0 == compactify(m_geom[0][0].Domain()).numPts()); for (int amrlev = 1; amrlev < m_num_amr_levels; ++amrlev) { - if (!m_domain_covered[amrlev-1]) break; + if (!m_domain_covered[amrlev-1]) { break; } m_domain_covered[amrlev] = (m_grids[amrlev][0].numPts() == compactify(m_geom[amrlev][0].Domain()).numPts()); } diff --git 
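
Annotation: single-statement for loops are braced the same way, as in the mlebtensor flux loops here and the mlndlap_set_connection hunks further down (for (int n = 0; n < 6; ++n) { conn(i,j,0,n) = Real(1.); }). A reduced standalone version of that shape:

    #include <array>

    void reset_connections (std::array<double,6>& conn)
    {
        for (double& c : conn) { c = 1.0; }   // braces even for a one-line body
    }
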
a/Src/LinearSolvers/MLMG/AMReX_MLLinOp_K.H b/Src/LinearSolvers/MLMG/AMReX_MLLinOp_K.H index 37edff20049..a6bc6517366 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLLinOp_K.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLLinOp_K.H @@ -50,7 +50,7 @@ void mllinop_apply_bc_x (int side, Box const& box, int blen, const int NX = amrex::min(blen+1, maxorder); GpuArray x{{-bcl * dxinv, T(0.5), T(1.5), T(2.5)}}; GpuArray coef{}; - poly_interp_coeff(-T(0.5), &x[0], NX, &coef[0]); + poly_interp_coeff(-T(0.5), x.data(), NX, coef.data()); for (int k = lo.z; k <= hi.z; ++k) { for (int j = lo.y; j <= hi.y; ++j) { if (mask(i,j,k) > 0) { @@ -98,7 +98,7 @@ void mllinop_apply_bc_x (int side, int i, int j, int k, int blen, const int NX = amrex::min(blen+1, maxorder); GpuArray x{{-bcl * dxinv, T(0.5), T(1.5), T(2.5)}}; GpuArray coef{}; - poly_interp_coeff(-T(0.5), &x[0], NX, &coef[0]); + poly_interp_coeff(-T(0.5), x.data(), NX, coef.data()); T tmp = T(0.0); for (int m = 1; m < NX; ++m) { tmp += phi(i+m*s,j,k,icomp) * coef[m]; @@ -155,7 +155,7 @@ void mllinop_apply_bc_y (int side, Box const& box, int blen, const int NX = amrex::min(blen+1, maxorder); GpuArray x{{-bcl * dyinv, T(0.5), T(1.5), T(2.5)}}; GpuArray coef{}; - poly_interp_coeff(-T(0.5), &x[0], NX, &coef[0]); + poly_interp_coeff(-T(0.5), x.data(), NX, coef.data()); for (int k = lo.z; k <= hi.z; ++k) { for (int i = lo.x; i <= hi.x; ++i) { if (mask(i,j,k) > 0) { @@ -203,7 +203,7 @@ void mllinop_apply_bc_y (int side, int i, int j, int k, int blen, const int NX = amrex::min(blen+1, maxorder); GpuArray x{{-bcl * dyinv, T(0.5), T(1.5), T(2.5)}}; GpuArray coef{}; - poly_interp_coeff(-T(0.5), &x[0], NX, &coef[0]); + poly_interp_coeff(-T(0.5), x.data(), NX, coef.data()); T tmp = T(0.0); for (int m = 1; m < NX; ++m) { tmp += phi(i,j+m*s,k,icomp) * coef[m]; @@ -260,7 +260,7 @@ void mllinop_apply_bc_z (int side, Box const& box, int blen, const int NX = amrex::min(blen+1, maxorder); GpuArray x{{-bcl * dzinv, T(0.5), T(1.5), T(2.5)}}; GpuArray coef{}; - poly_interp_coeff(-T(0.5), &x[0], NX, &coef[0]); + poly_interp_coeff(-T(0.5), x.data(), NX, coef.data()); for (int j = lo.y; j <= hi.y; ++j) { for (int i = lo.x; i <= hi.x; ++i) { if (mask(i,j,k) > 0) { @@ -308,7 +308,7 @@ void mllinop_apply_bc_z (int side, int i, int j, int k, int blen, const int NX = amrex::min(blen+1, maxorder); GpuArray x{{-bcl * dzinv, T(0.5), T(1.5), T(2.5)}}; GpuArray coef{}; - poly_interp_coeff(-T(0.5), &x[0], NX, &coef[0]); + poly_interp_coeff(-T(0.5), x.data(), NX, coef.data()); T tmp = T(0.0); for (int m = 1; m < NX; ++m) { tmp += phi(i,j,k+m*s,icomp) * coef[m]; @@ -360,7 +360,7 @@ void mllinop_comp_interp_coef0_x (int side, Box const& box, int blen, const int NX = amrex::min(blen+1, maxorder); GpuArray x{{-bcl * dxinv, T(0.5), T(1.5), T(2.5)}}; GpuArray coef{}; - poly_interp_coeff(-T(0.5), &x[0], NX, &coef[0]); + poly_interp_coeff(-T(0.5), x.data(), NX, coef.data()); for (int k = lo.z; k <= hi.z; ++k) { for (int j = lo.y; j <= hi.y; ++j) { f(ii,j,k,icomp) = (mask(ib,j,k) > 0) ? coef[1] : T(0.0); @@ -397,7 +397,7 @@ void mllinop_comp_interp_coef0_x (int side, int i, int j, int k, int blen, const int NX = amrex::min(blen+1, maxorder); GpuArray x{{-bcl * dxinv, T(0.5), T(1.5), T(2.5)}}; GpuArray coef{}; - poly_interp_coeff(-T(0.5), &x[0], NX, &coef[0]); + poly_interp_coeff(-T(0.5), x.data(), NX, coef.data()); f(ii,j,k,icomp) = (mask(i,j,k) > 0) ? 
coef[1] : T(0.0); break; } @@ -441,7 +441,7 @@ void mllinop_comp_interp_coef0_y (int side, Box const& box, int blen, const int NX = amrex::min(blen+1, maxorder); GpuArray x{{-bcl * dyinv, T(0.5), T(1.5), T(2.5)}}; GpuArray coef{}; - poly_interp_coeff(-T(0.5), &x[0], NX, &coef[0]); + poly_interp_coeff(-T(0.5), x.data(), NX, coef.data()); for (int k = lo.z; k <= hi.z; ++k) { for (int i = lo.x; i <= hi.x; ++i) { f(i,ji,k,icomp) = (mask(i,jb,k) > 0) ? coef[1] : T(0.0); @@ -478,7 +478,7 @@ void mllinop_comp_interp_coef0_y (int side, int i, int j, int k, int blen, const int NX = amrex::min(blen+1, maxorder); GpuArray x{{-bcl * dyinv, T(0.5), T(1.5), T(2.5)}}; GpuArray coef{}; - poly_interp_coeff(-T(0.5), &x[0], NX, &coef[0]); + poly_interp_coeff(-T(0.5), x.data(), NX, coef.data()); f(i,ji,k,icomp) = (mask(i,j,k) > 0) ? coef[1] : T(0.0); break; } @@ -522,7 +522,7 @@ void mllinop_comp_interp_coef0_z (int side, Box const& box, int blen, const int NX = amrex::min(blen+1, maxorder); GpuArray x{{-bcl * dzinv, T(0.5), T(1.5), T(2.5)}}; GpuArray coef{}; - poly_interp_coeff(-T(0.5), &x[0], NX, &coef[0]); + poly_interp_coeff(-T(0.5), x.data(), NX, coef.data()); for (int j = lo.y; j <= hi.y; ++j) { for (int i = lo.x; i <= hi.x; ++i) { f(i,j,ki,icomp) = (mask(i,j,kb) > 0) ? coef[1] : T(0.0); @@ -559,7 +559,7 @@ void mllinop_comp_interp_coef0_z (int side, int i, int j, int k, int blen, const int NX = amrex::min(blen+1, maxorder); GpuArray x{{-bcl * dzinv, T(0.5), T(1.5), T(2.5)}}; GpuArray coef{}; - poly_interp_coeff(-T(0.5), &x[0], NX, &coef[0]); + poly_interp_coeff(-T(0.5), x.data(), NX, coef.data()); f(i,j,ki,icomp) = (mask(i,j,k) > 0) ? coef[1] : T(0.0); break; } @@ -607,7 +607,7 @@ void mllinop_comp_interp_coef0_x_eb (int side, Box const& box, int blen, GpuArray x{{-bcl * dxinv, Real(0.5), Real(1.5), Real(2.5)}}; Array2D coef{}; for (int r = 0; r <= maxorder-2; ++r) { - poly_interp_coeff(-Real(0.5), &x[0], r+2, &coef(0,r)); + poly_interp_coeff(-Real(0.5), x.data(), r+2, &coef(0,r)); } for (int k = lo.z; k <= hi.z; ++k) { for (int j = lo.y; j <= hi.y; ++j) { @@ -625,7 +625,7 @@ void mllinop_comp_interp_coef0_x_eb (int side, Box const& box, int blen, break; } } - if (has_cutfaces) order = amrex::min(2,order); + if (has_cutfaces) { order = amrex::min(2,order); } } f(ii,j,k,icomp) = (order==1) ? Real(0.0) : coef(1,order-2); } @@ -663,7 +663,7 @@ void mllinop_comp_interp_coef0_x_eb (int side, int i, int j, int k, int blen, GpuArray x{{-bcl * dxinv, Real(0.5), Real(1.5), Real(2.5)}}; Array2D coef{}; for (int r = 0; r <= maxorder-2; ++r) { - poly_interp_coeff(-Real(0.5), &x[0], r+2, &coef(0,r)); + poly_interp_coeff(-Real(0.5), x.data(), r+2, &coef(0,r)); } int order = 1; if (mask(i,j,k) > 0) { @@ -679,7 +679,7 @@ void mllinop_comp_interp_coef0_x_eb (int side, int i, int j, int k, int blen, break; } } - if (has_cutfaces) order = amrex::min(2,order); + if (has_cutfaces) { order = amrex::min(2,order); } } f(ii,j,k,icomp) = (order==1) ? 
Real(0.0) : coef(1,order-2); break; @@ -726,7 +726,7 @@ void mllinop_comp_interp_coef0_y_eb (int side, Box const& box, int blen, GpuArray x{{-bcl * dyinv, Real(0.5), Real(1.5), Real(2.5)}}; Array2D coef{}; for (int r = 0; r <= maxorder-2; ++r) { - poly_interp_coeff(-Real(0.5), &x[0], r+2, &coef(0,r)); + poly_interp_coeff(-Real(0.5), x.data(), r+2, &coef(0,r)); } for (int k = lo.z; k <= hi.z; ++k) { for (int i = lo.x; i <= hi.x; ++i) { @@ -744,7 +744,7 @@ void mllinop_comp_interp_coef0_y_eb (int side, Box const& box, int blen, break; } } - if (has_cutfaces) order = amrex::min(2,order); + if (has_cutfaces) { order = amrex::min(2,order); } } f(i,ji,k,icomp) = (order==1) ? Real(0.0) : coef(1,order-2); } @@ -782,7 +782,7 @@ void mllinop_comp_interp_coef0_y_eb (int side, int i, int j, int k, int blen, GpuArray x{{-bcl * dyinv, Real(0.5), Real(1.5), Real(2.5)}}; Array2D coef{}; for (int r = 0; r <= maxorder-2; ++r) { - poly_interp_coeff(-Real(0.5), &x[0], r+2, &coef(0,r)); + poly_interp_coeff(-Real(0.5), x.data(), r+2, &coef(0,r)); } int order = 1; if (mask(i,j,k) > 0) { @@ -798,7 +798,7 @@ void mllinop_comp_interp_coef0_y_eb (int side, int i, int j, int k, int blen, break; } } - if (has_cutfaces) order = amrex::min(2,order); + if (has_cutfaces) { order = amrex::min(2,order); } } f(i,ji,k,icomp) = (order==1) ? Real(0.0) : coef(1,order-2); break; @@ -845,7 +845,7 @@ void mllinop_comp_interp_coef0_z_eb (int side, Box const& box, int blen, GpuArray x{{-bcl * dzinv, Real(0.5), Real(1.5), Real(2.5)}}; Array2D coef{}; for (int r = 0; r <= maxorder-2; ++r) { - poly_interp_coeff(-Real(0.5), &x[0], r+2, &coef(0,r)); + poly_interp_coeff(-Real(0.5), x.data(), r+2, &coef(0,r)); } for (int j = lo.y; j <= hi.y; ++j) { for (int i = lo.x; i <= hi.x; ++i) { @@ -863,7 +863,7 @@ void mllinop_comp_interp_coef0_z_eb (int side, Box const& box, int blen, break; } } - if (has_cutfaces) order = amrex::min(2,order); + if (has_cutfaces) { order = amrex::min(2,order); } } f(i,j,ki,icomp) = (order==1) ? Real(0.0) : coef(1,order-2); } @@ -901,7 +901,7 @@ void mllinop_comp_interp_coef0_z_eb (int side, int i, int j, int k, int blen, GpuArray x{{-bcl * dzinv, Real(0.5), Real(1.5), Real(2.5)}}; Array2D coef{}; for (int r = 0; r <= maxorder-2; ++r) { - poly_interp_coeff(-Real(0.5), &x[0], r+2, &coef(0,r)); + poly_interp_coeff(-Real(0.5), x.data(), r+2, &coef(0,r)); } int order = 1; if (mask(i,j,k) > 0) { @@ -917,7 +917,7 @@ void mllinop_comp_interp_coef0_z_eb (int side, int i, int j, int k, int blen, break; } } - if (has_cutfaces) order = amrex::min(2,order); + if (has_cutfaces) { order = amrex::min(2,order); } } f(i,j,ki,icomp) = (order==1) ? 
Real(0.0) : coef(1,order-2); break; diff --git a/Src/LinearSolvers/MLMG/AMReX_MLMG.H b/Src/LinearSolvers/MLMG/AMReX_MLMG.H index ccc54128f7c..7fc822afa23 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLMG.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLMG.H @@ -426,7 +426,7 @@ MLMGT::solve (const Vector& a_sol, const Vector& a_rhs, // Test convergence on the fine amr level computeResidual(finest_amr_lev); - if (is_nsolve) continue; + if (is_nsolve) { continue; } RT fine_norminf = ResNormInf(finest_amr_lev); m_iter_fine_resnorm0.push_back(fine_norminf); @@ -916,7 +916,7 @@ MLMGT::apply (const Vector& out, const Vector& a_in) #endif for (int alev = 0; alev <= finest_amr_lev; ++alev) { - if (cf_strategy == CFStrategy::ghostnodes) nghost = linop.getNGrow(alev); + if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); } out[alev]->negate(nghost); } } @@ -1417,7 +1417,7 @@ MLMGT::actualBottomSolve () { BL_PROFILE("MLMG::actualBottomSolve()"); - if (!linop.isBottomActive()) return; + if (!linop.isBottomActive()) { return; } auto bottom_start_time = amrex::second(); @@ -1572,7 +1572,9 @@ MLMGT::computeResWithCrseSolFineCor (int calev, int falev) BL_PROFILE("MLMG::computeResWithCrseSolFineCor()"); IntVect nghost(0); - if (cf_strategy == CFStrategy::ghostnodes) nghost = IntVect(std::min(linop.getNGrow(falev),linop.getNGrow(calev))); + if (cf_strategy == CFStrategy::ghostnodes) { + nghost = IntVect(std::min(linop.getNGrow(falev),linop.getNGrow(calev))); + } MF& crse_sol = sol[calev]; const MF& crse_rhs = rhs[calev]; @@ -1603,7 +1605,9 @@ MLMGT::computeResWithCrseCorFineCor (int falev) BL_PROFILE("MLMG::computeResWithCrseCorFineCor()"); IntVect nghost(0); - if (cf_strategy == CFStrategy::ghostnodes) nghost = IntVect(linop.getNGrow(falev)); + if (cf_strategy == CFStrategy::ghostnodes) { + nghost = IntVect(linop.getNGrow(falev)); + } const MF& crse_cor = cor[falev-1][0]; @@ -1625,7 +1629,9 @@ MLMGT::interpCorrection (int alev) BL_PROFILE("MLMG::interpCorrection_1"); IntVect nghost(0); - if (cf_strategy == CFStrategy::ghostnodes) nghost = IntVect(linop.getNGrow(alev)); + if (cf_strategy == CFStrategy::ghostnodes) { + nghost = IntVect(linop.getNGrow(alev)); + } MF const& crse_cor = cor[alev-1][0]; MF & fine_cor = cor[alev ][0]; @@ -1724,7 +1730,7 @@ MLMGT::MLResNormInf (int alevmax, bool local) -> RT { r = std::max(r, ResNormInf(alev,true)); } - if (!local) ParallelAllReduce::Max(r, ParallelContext::CommunicatorSub()); + if (!local) { ParallelAllReduce::Max(r, ParallelContext::CommunicatorSub()); } return r; } @@ -1739,7 +1745,7 @@ MLMGT::MLRhsNormInf (bool local) -> RT auto t = linop.normInf(alev, rhs[alev], true); r = std::max(r, t); } - if (!local) ParallelAllReduce::Max(r, ParallelContext::CommunicatorSub()); + if (!local) { ParallelAllReduce::Max(r, ParallelContext::CommunicatorSub()); } return r; } diff --git a/Src/LinearSolvers/MLMG/AMReX_MLNodeLap_2D_K.H b/Src/LinearSolvers/MLMG/AMReX_MLNodeLap_2D_K.H index d34290c0162..1de55f8a63d 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLNodeLap_2D_K.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLNodeLap_2D_K.H @@ -186,11 +186,11 @@ void mlndlap_bc_doit (Box const& vbx, Array4 const& a, Box const& domain, { Box gdomain = domain; for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if (! bflo[idim]) gdomain.growLo(idim,1); - if (! bfhi[idim]) gdomain.growHi(idim,1); + if (! bflo[idim]) { gdomain.growLo(idim,1); } + if (! 
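
Annotation: the MLResNormInf / MLRhsNormInf hunks brace a guard around a global reduction that is skipped when the caller only wants the rank-local value. A self-contained sketch of that pattern with plain MPI standing in for AMReX's ParallelAllReduce::Max wrapper (an assumption for illustration, not the library call used above):

    #include <mpi.h>
    #include <algorithm>
    #include <cmath>

    // Per-rank max norm, reduced to a global max unless `local` is requested.
    double max_norm (const double* v, int n, bool local, MPI_Comm comm)
    {
        double r = 0.0;
        for (int i = 0; i < n; ++i) { r = std::max(r, std::abs(v[i])); }
        if (!local) {
            double g = r;
            MPI_Allreduce(&r, &g, 1, MPI_DOUBLE, MPI_MAX, comm);
            r = g;
        }
        return r;
    }
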
bfhi[idim]) { gdomain.growHi(idim,1); } } - if (gdomain.strictly_contains(vbx)) return; + if (gdomain.strictly_contains(vbx)) { return; } const int offset = domain.cellCentered() ? 0 : 1; @@ -667,7 +667,7 @@ void tridiagonal_solve (Array1D& a_ls, Array1D& b_ls, Arra for (int i = 1; i <= ilen - 1; i++) { gam(i) = c_ls(i-1) / bet; bet = b_ls(i) - a_ls(i)*gam(i); - if (bet == 0) amrex::Abort(">>>TRIDIAG FAILED"); + if (bet == 0) { amrex::Abort(">>>TRIDIAG FAILED"); } u_ls(i) = (r_ls(i)-a_ls(i)*u_ls(i-1)) / bet; } for (int i = ilen-2; i >= 0; i--) { @@ -707,7 +707,9 @@ void mlndlap_gauss_seidel_with_line_solve_aa (Box const& bx, Array4 const& ilen = hi.x - lo.x + 1; } - if (ilen > 32) amrex::Abort("mlndlap_gauss_seidel_with_line_solve_aa is hard-wired to be no longer than 32"); + if (ilen > 32) { + amrex::Abort("mlndlap_gauss_seidel_with_line_solve_aa is hard-wired to be no longer than 32"); + } Array1D a_ls,b_ls,c_ls,u_ls,r_ls,gam; @@ -1079,13 +1081,25 @@ void mlndlap_divu (int i, int j, int k, Array4 const& rhs, Array4 const& resid, GpuArray const& bchi, bool neumann_doubling) noexcept { - if ((msk(i-1,j-1,k ) == 0 || + if ( msk(i-1,j-1,k ) == 0 || msk(i ,j-1,k ) == 0 || msk(i-1,j ,k ) == 0 || - msk(i ,j ,k ) == 0) && - (msk(i-1,j-1,k ) == 0 || - msk(i ,j-1,k ) == 0 || - msk(i-1,j ,k ) == 0 || - msk(i ,j ,k ) == 0)) + msk(i ,j ,k ) == 0 ) { Real fac = Real(1.0); if (neumann_doubling) { @@ -1982,9 +1992,9 @@ void mlndlap_set_connection (int i, int j, int, Array4 const& conn, Array4 const& flag) noexcept { if (flag(i,j,0).isCovered()) { - for (int n = 0; n < 6; ++n) conn(i,j,0,n) = Real(0.); + for (int n = 0; n < 6; ++n) { conn(i,j,0,n) = Real(0.); } } else if (flag(i,j,0).isRegular() || vol(i,j,0) >= almostone) { - for (int n = 0; n < 6; ++n) conn(i,j,0,n) = Real(1.); + for (int n = 0; n < 6; ++n) { conn(i,j,0,n) = Real(1.); } } else { // Note that these are normalized so that they equal 1 in the case of a regular cell @@ -2038,13 +2048,25 @@ void mlndlap_divu_eb (int i, int j, int, Array4 const& rhs, Array4 const& a, Box const& doma { Box gdomain = domain; for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if (! bflo[idim]) gdomain.growLo(idim,1); - if (! bfhi[idim]) gdomain.growHi(idim,1); + if (! bflo[idim]) { gdomain.growLo(idim,1); } + if (! bfhi[idim]) { gdomain.growHi(idim,1); } } - if (gdomain.strictly_contains(vbx)) return; + if (gdomain.strictly_contains(vbx)) { return; } const int offset = domain.cellCentered() ? 
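
Annotation: the braced Abort in tridiagonal_solve sits inside a standard Thomas sweep; the recurrence in the hunk (gam, bet, forward elimination, then back substitution) is the textbook algorithm. A self-contained sketch with std::vector, assuming the usual convention a = sub-diagonal, b = main diagonal, c = super-diagonal, r = right-hand side:

    #include <vector>
    #include <stdexcept>

    std::vector<double> thomas (const std::vector<double>& a, const std::vector<double>& b,
                                const std::vector<double>& c, const std::vector<double>& r)
    {
        const int n = static_cast<int>(b.size());
        std::vector<double> gam(n), u(n);
        double bet = b[0];
        if (bet == 0.0) { throw std::runtime_error("tridiag failed"); }
        u[0] = r[0] / bet;
        for (int i = 1; i < n; ++i) {                       // forward elimination
            gam[i] = c[i-1] / bet;
            bet = b[i] - a[i]*gam[i];
            if (bet == 0.0) { throw std::runtime_error("tridiag failed"); } // the braced abort above
            u[i] = (r[i] - a[i]*u[i-1]) / bet;
        }
        for (int i = n-2; i >= 0; --i) {                    // back substitution
            u[i] -= gam[i+1]*u[i+1];
        }
        return u;
    }
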
0 : 1; @@ -1342,7 +1342,7 @@ void tridiagonal_solve (Array1D& a_ls, Array1D& b_ls, Arra for (int i = 1; i <= ilen - 1; i++) { gam(i) = c_ls(i-1) / bet; bet = b_ls(i) - a_ls(i)*gam(i); - if (bet == 0) amrex::Abort(">>>TRIDIAG FAILED"); + if (bet == 0) { amrex::Abort(">>>TRIDIAG FAILED"); } u_ls(i) = (r_ls(i)-a_ls(i)*u_ls(i-1)) / bet; } for (int i = ilen-2; i >= 0; i--) { @@ -1386,7 +1386,9 @@ void mlndlap_gauss_seidel_with_line_solve_aa (Box const& bx, Array4 const& ilen = hi.x - lo.x + 1; } - if (ilen > 32) amrex::Abort("mlndlap_gauss_seidel_with_line_solve_aa is hard-wired to be no longer than 32"); + if (ilen > 32) { + amrex::Abort("mlndlap_gauss_seidel_with_line_solve_aa is hard-wired to be no longer than 32"); + } Array1D a_ls,b_ls,c_ls,u_ls,r_ls,gam; @@ -2103,17 +2105,35 @@ void mlndlap_divu (int i, int j, int k, Array4 const& rhs, Array4 const& resid, GpuArray const& bchi, bool neumann_doubling) noexcept { - if ((msk(i-1,j-1,k-1) == 0 || + if ( msk(i-1,j-1,k-1) == 0 || msk(i ,j-1,k-1) == 0 || msk(i-1,j ,k-1) == 0 || msk(i ,j ,k-1) == 0 || msk(i-1,j-1,k ) == 0 || msk(i ,j-1,k ) == 0 || msk(i-1,j ,k ) == 0 || - msk(i ,j ,k ) == 0) && - (msk(i-1,j-1,k-1) == 0 || - msk(i ,j-1,k-1) == 0 || - msk(i-1,j ,k-1) == 0 || - msk(i ,j ,k-1) == 0 || - msk(i-1,j-1,k ) == 0 || - msk(i ,j-1,k ) == 0 || - msk(i-1,j ,k ) == 0 || - msk(i ,j ,k ) == 0)) + msk(i ,j ,k ) == 0 ) { Real fac = Real(1.0); if (neumann_doubling) { @@ -6448,9 +6460,9 @@ void mlndlap_set_connection (int i, int j, int k, Array4 const& conn, Array4 const& flag) noexcept { if (flag(i,j,k).isCovered()) { - for (int n = 0; n < n_conn; ++n) conn(i,j,k,n) = Real(0.); + for (int n = 0; n < n_conn; ++n) { conn(i,j,k,n) = Real(0.); } } else if (flag(i,j,k).isRegular() || vol(i,j,k) >= almostone) { - for (int n = 0; n < n_conn; ++n) conn(i,j,k,n) = Real(1.); + for (int n = 0; n < n_conn; ++n) { conn(i,j,k,n) = Real(1.); } } else { // Scaled by 9 conn(i,j,k,i_c_xmym) = Real(0.5625)*vol(i,j,k) @@ -6689,17 +6701,35 @@ void mlndlap_divu_eb (int i, int j, int k, Array4 const& rhs, Array4 const& msk, i for (int k = lo.z; k <= hi.z; ++k) { for (int j = lo.y; j <= hi.y; ++j) { for (int i = lo.x; i <= hi.x; ++i) { - if (msk(i,j,k) == fine_flag) return true; + if (msk(i,j,k) == fine_flag) { return true; } }}} return false; } diff --git a/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian.H b/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian.H index d284eff33ee..7ec9f13ce73 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian.H @@ -81,7 +81,7 @@ public : void setMapped (bool flag) noexcept { m_use_mapped = flag; } void setCoarseningStrategy (CoarseningStrategy cs) noexcept { - if (m_const_sigma == Real(0.0)) m_coarsening_strategy = cs; + if (m_const_sigma == Real(0.0)) { m_coarsening_strategy = cs; } } void setSmoothNumSweeps (int nsweeps) noexcept { diff --git a/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian.cpp b/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian.cpp index e33bdfdb21d..27e0f6c4b62 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian.cpp +++ b/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian.cpp @@ -150,7 +150,7 @@ MLNodeLaplacian::unimposeNeumannBC (int amrlev, MultiFab& rhs) const const auto hibc = HiBC(); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -383,14 +383,16 @@ 
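
Annotation: not every hunk here is a pure brace insertion; the nodal-mask tests in the 2D and 3D hunks drop a literally duplicated disjunction that had been &&-ed with itself. A self-contained before/after, where m0..m3 stand in for the corner masks msk(i-1,j-1,k), ..., msk(i,j,k) (four corners in 2D, eight in 3D):

    // Before: the same disjunction appears twice.
    bool needs_fix_before (int m0, int m1, int m2, int m3)
    {
        return (m0 == 0 || m1 == 0 || m2 == 0 || m3 == 0) &&
               (m0 == 0 || m1 == 0 || m2 == 0 || m3 == 0);
    }

    // After: one copy gives the identical result.
    bool needs_fix_after (int m0, int m1, int m2, int m3)
    {
        return m0 == 0 || m1 == 0 || m2 == 0 || m3 == 0;
    }
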
MLNodeLaplacian::setSigma (int amrlev, const MultiFab& a_sigma) if (a_sigma.nComp() > 1) { AMREX_ALWAYS_ASSERT(a_sigma.nComp() == AMREX_SPACEDIM); - for (int idim = 1; idim < AMREX_SPACEDIM; idim++) + for (int idim = 1; idim < AMREX_SPACEDIM; idim++) { m_sigma[amrlev][0][idim] = std::make_unique(m_grids[amrlev][0], m_dmap[amrlev][0], 1, 1, MFInfo()); + } setMapped(true); - for (int idim = 0; idim < AMREX_SPACEDIM; idim++) + for (int idim = 0; idim < AMREX_SPACEDIM; idim++) { MultiFab::Copy(*m_sigma[amrlev][0][idim], a_sigma, idim, 0, 1, 0); + } } else { MultiFab::Copy(*m_sigma[amrlev][0][0], a_sigma, 0, 0, 1, 0); @@ -411,7 +413,7 @@ MLNodeLaplacian::FillBoundaryCoeff (MultiFab& sigma, const Geometry& geom) const auto hibc = HiBC(); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -426,7 +428,7 @@ MLNodeLaplacian::FillBoundaryCoeff (MultiFab& sigma, const Geometry& geom) void MLNodeLaplacian::fixUpResidualMask (int amrlev, iMultiFab& resmsk) { - if (!m_masks_built) buildMasks(); + if (!m_masks_built) { buildMasks(); } const iMultiFab& cfmask = *m_nd_fine_mask[amrlev]; @@ -440,7 +442,7 @@ MLNodeLaplacian::fixUpResidualMask (int amrlev, iMultiFab& resmsk) Array4 const& fmsk = cfmask.const_array(mfi); AMREX_HOST_DEVICE_PARALLEL_FOR_3D ( bx, i, j, k, { - if (fmsk(i,j,k) == crse_fine_node) rmsk(i,j,k) = 1; + if (fmsk(i,j,k) == crse_fine_node) { rmsk(i,j,k) = 1; } }); } } @@ -458,7 +460,7 @@ MLNodeLaplacian::prepareForSolve () #ifdef AMREX_USE_EB buildIntegral(); - if (m_build_surface_integral) buildSurfaceIntegral(); + if (m_build_surface_integral) { buildSurfaceIntegral(); } #endif buildStencil(); @@ -813,7 +815,7 @@ MLNodeLaplacian::restrictInteriorNodes (int camrlev, MultiFab& crhs, MultiFab& a const auto& has_fine_bndry = *m_has_fine_bndry[camrlev]; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -827,7 +829,7 @@ MLNodeLaplacian::restrictInteriorNodes (int camrlev, MultiFab& crhs, MultiFab& a Array4 const& mfab = c_nd_mask.const_array(mfi); AMREX_HOST_DEVICE_PARALLEL_FOR_3D ( bx, i, j, k, { - if (mfab(i,j,k) == fine_node) dfab(i,j,k) = sfab(i,j,k); + if (mfab(i,j,k) == fine_node) { dfab(i,j,k) = sfab(i,j,k); } }); } } @@ -838,7 +840,7 @@ MLNodeLaplacian::normalize (int amrlev, int mglev, MultiFab& mf) const { BL_PROFILE("MLNodeLaplacian::normalize()"); - if (m_sigma[0][0][0] == nullptr) return; + if (m_sigma[0][0][0] == nullptr) { return; } const auto& sigma = m_sigma[amrlev][mglev]; const auto& stencil = m_stencil[amrlev][mglev]; @@ -1031,7 +1033,7 @@ MLNodeLaplacian::setEBInflowVelocity (int amrlev, const MultiFab& eb_vel) const auto *ebfactory = dynamic_cast(m_factory[amrlev][mglev].get()); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif diff --git a/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_eb.cpp b/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_eb.cpp index 67ebc27b884..49f80ad4c11 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_eb.cpp +++ b/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_eb.cpp @@ -14,7 
+14,7 @@ namespace amrex { void MLNodeLaplacian::buildIntegral () { - if (m_integral_built) return; + if (m_integral_built) { return; } BL_PROFILE("MLNodeLaplacian::buildIntegral()"); @@ -35,7 +35,7 @@ MLNodeLaplacian::buildIntegral () const auto& bcent = factory->getBndryCent(); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -83,7 +83,7 @@ MLNodeLaplacian::buildIntegral () void MLNodeLaplacian::buildSurfaceIntegral () { - if (m_surface_integral_built) return; + if (m_surface_integral_built) { return; } BL_PROFILE("MLNodeLaplacian::buildSurfaceIntegral()"); @@ -105,7 +105,7 @@ MLNodeLaplacian::buildSurfaceIntegral () const auto& barea = factory->getBndryArea(); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif diff --git a/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_misc.cpp b/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_misc.cpp index 907b0483400..4abf219e44f 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_misc.cpp +++ b/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_misc.cpp @@ -18,7 +18,7 @@ MLNodeLaplacian::averageDownCoeffs () { BL_PROFILE("MLNodeLaplacian::averageDownCoeffs()"); - if (m_sigma[0][0][0] == nullptr) return; + if (m_sigma[0][0][0] == nullptr) { return; } if (m_coarsening_strategy == CoarseningStrategy::Sigma) { @@ -85,7 +85,7 @@ MLNodeLaplacian::averageDownCoeffs () void MLNodeLaplacian::averageDownCoeffsToCoarseAmrLevel (int flev) { - if (m_sigma[0][0][0] == nullptr) return; + if (m_sigma[0][0][0] == nullptr) { return; } const int mglev = 0; const int idim = 0; // other dimensions are just aliases @@ -101,9 +101,9 @@ MLNodeLaplacian::averageDownCoeffsToCoarseAmrLevel (int flev) void MLNodeLaplacian::averageDownCoeffsSameAmrLevel (int amrlev) { - if (m_sigma[0][0][0] == nullptr) return; + if (m_sigma[0][0][0] == nullptr) { return; } - if (m_coarsening_strategy != CoarseningStrategy::Sigma) return; + if (m_coarsening_strategy != CoarseningStrategy::Sigma) { return; } #if (AMREX_SPACEDIM == 1) const int nsigma = 1; @@ -421,7 +421,7 @@ MLNodeLaplacian::Fsmooth (int amrlev, int mglev, MultiFab& sol, const MultiFab& } Gpu::streamSynchronize(); - if (m_smooth_num_sweeps > 1) nodalSync(amrlev, mglev, sol); + if (m_smooth_num_sweeps > 1) { nodalSync(amrlev, mglev, sol); } } else // cpu #endif @@ -904,11 +904,11 @@ MLNodeLaplacian::compRHS (const Vector& rhs, const Vector& BL_PROFILE("MLNodeLaplacian::compRHS()"); - if (!m_masks_built) buildMasks(); + if (!m_masks_built) { buildMasks(); } #ifdef AMREX_USE_EB - if (!m_integral_built) buildIntegral(); - if (m_build_surface_integral && !m_surface_integral_built) buildSurfaceIntegral(); + if (!m_integral_built) { buildIntegral(); } + if (m_build_surface_integral && !m_surface_integral_built) { buildSurfaceIntegral(); } #endif #if (AMREX_SPACEDIM == 2) @@ -1002,7 +1002,7 @@ MLNodeLaplacian::compRHS (const Vector& rhs, const Vector& #endif MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -1125,7 +1125,7 @@ 
MLNodeLaplacian::compRHS (const Vector& rhs, const Vector& const iMultiFab& fdmsk = *m_dirichlet_mask[ilev+1][0]; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -1239,7 +1239,7 @@ MLNodeLaplacian::compRHS (const Vector& rhs, const Vector& const auto& has_fine_bndry = *m_has_fine_bndry[ilev]; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif diff --git a/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_sten.cpp b/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_sten.cpp index 8a71c6cf760..602f893b00d 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_sten.cpp +++ b/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_sten.cpp @@ -25,7 +25,7 @@ MLNodeLaplacian::buildStencil () m_s0_norm0[amrlev].resize(m_num_mg_levels[amrlev],0.0); } - if (m_coarsening_strategy != CoarseningStrategy::RAP) return; + if (m_coarsening_strategy != CoarseningStrategy::RAP) { return; } const int ncomp_s = (AMREX_SPACEDIM == 2) ? 5 : 9; AMREX_ALWAYS_ASSERT_WITH_MESSAGE(AMREX_SPACEDIM != 1, @@ -64,7 +64,7 @@ MLNodeLaplacian::buildStencil () #endif MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif diff --git a/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_sync.cpp b/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_sync.cpp index d562100d5a3..f8bff06337b 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_sync.cpp +++ b/Src/LinearSolvers/MLMG/AMReX_MLNodeLaplacian_sync.cpp @@ -89,7 +89,7 @@ MLNodeLaplacian::compSyncResidualCoarse (MultiFab& sync_resid, const MultiFab& a bool neumann_doubling = true; // yes even for RAP, because unimposeNeumannBC will be called on rhs MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -359,7 +359,7 @@ MLNodeLaplacian::compSyncResidualFine (MultiFab& sync_resid, const MultiFab& phi #endif MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -710,7 +710,7 @@ MLNodeLaplacian::reflux (int crse_amrlev, const auto& fsigma = m_sigma[crse_amrlev+1][0][0]; MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif diff --git a/Src/LinearSolvers/MLMG/AMReX_MLNodeLinOp.cpp b/Src/LinearSolvers/MLMG/AMReX_MLNodeLinOp.cpp index f6f6dbd80e6..5e2389c1f93 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLNodeLinOp.cpp +++ b/Src/LinearSolvers/MLMG/AMReX_MLNodeLinOp.cpp @@ -257,7 +257,7 @@ void MLNodeLinOp_set_dot_mask (MultiFab& dot_mask, iMultiFab const& omask, Geome void MLNodeLinOp::buildMasks () { - if (m_masks_built) return; + if 
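
Annotation: the guard touched throughout these hunks enables tiling and dynamic OpenMP scheduling only when kernels run on the host. A shape-only sketch of the surrounding loop as it is typically written in AMReX, assuming the AMReX headers and a MultiFab mf in scope:

    MFItInfo mfi_info;
    if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }
    #ifdef AMREX_USE_OMP
    #pragma omp parallel if (Gpu::notInLaunchRegion())
    #endif
    for (MFIter mfi(mf, mfi_info); mfi.isValid(); ++mfi) {
        const Box& bx = mfi.tilebox();
        auto const& arr = mf.array(mfi);
        // ... per-tile work on arr over bx ...
    }
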
(m_masks_built) { return; } BL_PROFILE("MLNodeLinOp::buildMasks()"); @@ -289,7 +289,7 @@ MLNodeLinOp::buildMasks () ccm.BuildMask(ccdomain,period,0,1,2,0); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); } if (m_overset_dirichlet_mask && mglev > 0) { const auto& dmask_fine = *m_dirichlet_mask[amrlev][mglev-1]; diff --git a/Src/LinearSolvers/MLMG/AMReX_MLNodeTensorLaplacian.cpp b/Src/LinearSolvers/MLMG/AMReX_MLNodeTensorLaplacian.cpp index e0a1e383722..130c4eb6e84 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLNodeTensorLaplacian.cpp +++ b/Src/LinearSolvers/MLMG/AMReX_MLNodeTensorLaplacian.cpp @@ -16,7 +16,7 @@ MLNodeTensorLaplacian::MLNodeTensorLaplacian (const Vector& a_geom, void MLNodeTensorLaplacian::setSigma (Array const& a_sigma) noexcept { - for (int i = 0; i < nelems; ++i) m_sigma[i] = a_sigma[i]; + for (int i = 0; i < nelems; ++i) { m_sigma[i] = a_sigma[i]; } } void diff --git a/Src/LinearSolvers/MLMG/AMReX_MLPoisson.H b/Src/LinearSolvers/MLMG/AMReX_MLPoisson.H index 8cf808c073b..5b05b298357 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLPoisson.H +++ b/Src/LinearSolvers/MLMG/AMReX_MLPoisson.H @@ -299,7 +299,7 @@ MLPoissonT::normalize (int amrlev, int mglev, MF& mf) const #if (AMREX_SPACEDIM != 3) BL_PROFILE("MLPoisson::normalize()"); - if (!this->m_has_metric_term) return; + if (!this->m_has_metric_term) { return; } const Real* dxinv = this->m_geom[amrlev][mglev].InvCellSize(); AMREX_D_TERM(const RT dhx = RT(dxinv[0]*dxinv[0]);, @@ -393,7 +393,7 @@ MLPoissonT::Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int redb #endif MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.EnableTiling().SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); } #ifdef AMREX_USE_GPU if (Gpu::inLaunchRegion() && sol.isFusingCandidate() @@ -865,9 +865,9 @@ bool MLPoissonT::supportNSolve () const { bool support = true; - if (this->m_domain_covered[0]) support = false; - if (this->doAgglomeration()) support = false; - if (AMREX_SPACEDIM != 3) support = false; + if (this->m_domain_covered[0]) { support = false; } + if (this->doAgglomeration()) { support = false; } + if (AMREX_SPACEDIM != 3) { support = false; } return support; } diff --git a/Src/LinearSolvers/MLMG/AMReX_MLTensorOp.cpp b/Src/LinearSolvers/MLMG/AMReX_MLTensorOp.cpp index f8a7ff71b97..c0e8805bd24 100644 --- a/Src/LinearSolvers/MLMG/AMReX_MLTensorOp.cpp +++ b/Src/LinearSolvers/MLMG/AMReX_MLTensorOp.cpp @@ -207,7 +207,7 @@ MLTensorOp::apply (int amrlev, int mglev, MultiFab& out, MultiFab& in, BCMode bc MLABecLaplacian::apply(amrlev, mglev, out, in, bc_mode, s_mode, bndry); - if (mglev >= m_kappa[amrlev].size()) return; + if (mglev >= m_kappa[amrlev].size()) { return; } applyBCTensor(amrlev, mglev, in, bc_mode, s_mode, bndry); @@ -353,7 +353,7 @@ MLTensorOp::applyBCTensor (int amrlev, int mglev, MultiFab& vel, // NOLINT(reada const auto dhi = amrex::ubound(domain); MFItInfo mfi_info; - if (Gpu::notInLaunchRegion()) mfi_info.SetDynamic(true); + if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); } #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif diff --git a/Src/LinearSolvers/OpenBC/AMReX_OpenBC.cpp b/Src/LinearSolvers/OpenBC/AMReX_OpenBC.cpp index b7c83a6a2d9..79104cc2c65 100644 --- a/Src/LinearSolvers/OpenBC/AMReX_OpenBC.cpp +++ b/Src/LinearSolvers/OpenBC/AMReX_OpenBC.cpp @@ -331,7 +331,7 @@ Real OpenBCSolver::solve (const Vector& a_sol, 
{AMREX_D_DECL(LinOpBCType::Dirichlet, LinOpBCType::Dirichlet, LinOpBCType::Dirichlet)}); - m_poisson_2->setLevelBC(0, &sol_all[0]); + m_poisson_2->setLevelBC(0, sol_all.data()); for (int ilev = 1; ilev < nlevels; ++ilev) { m_poisson_2->setLevelBC(ilev, nullptr); } @@ -346,7 +346,7 @@ Real OpenBCSolver::solve (const Vector& a_sol, } #endif } else { - m_poisson_2->setLevelBC(0, &sol_all[0]); + m_poisson_2->setLevelBC(0, sol_all.data()); } Real err = m_mlmg_2->solve(GetVecOfPtrs(sol_all), GetVecOfConstPtrs(rhs_all), diff --git a/Src/Particle/AMReX_NeighborList.H b/Src/Particle/AMReX_NeighborList.H index 28afc3bce9d..330bef29834 100644 --- a/Src/Particle/AMReX_NeighborList.H +++ b/Src/Particle/AMReX_NeighborList.H @@ -375,7 +375,7 @@ public: for (auto p = poffset[index]; p < poffset[index+1]; ++p) { const auto& pid = pperm[p]; bool ghost_pid = (pid >= np_real); - if (is_same && (pid == i)) continue; + if (is_same && (pid == i)) { continue; } if (call_check_pair(check_pair, src_ptile_data, dst_ptile_data, i, pid, type, ghost_i, ghost_pid)) { @@ -435,7 +435,7 @@ public: for (auto p = poffset[index]; p < poffset[index+1]; ++p) { const auto& pid = pperm[p]; bool ghost_pid = (pid >= np_real); - if (is_same && (pid == i)) continue; + if (is_same && (pid == i)) { continue; } if (call_check_pair(check_pair, src_ptile_data, dst_ptile_data, i, pid, type, ghost_i, ghost_pid)) { diff --git a/Src/Particle/AMReX_NeighborParticles.H b/Src/Particle/AMReX_NeighborParticles.H index 1e0872a032e..5789f6f206d 100644 --- a/Src/Particle/AMReX_NeighborParticles.H +++ b/Src/Particle/AMReX_NeighborParticles.H @@ -71,8 +71,9 @@ private: os << nim.dst_level << " " << nim.dst_grid << " " << nim.dst_tile << " " << nim.dst_index << nim.src_level << " " << nim.src_grid << " " << nim.src_tile << " " << nim.src_index << nim.thread_num; - if (!os.good()) + if (!os.good()) { amrex::Error("operator<<(ostream&, const NeighborIndexMap& nim) failed"); + } return os; } @@ -93,9 +94,9 @@ private: {} bool operator< (const NeighborCopyTag& other) const { - if (level != other.level) return level < other.level; - if (grid != other.grid) return grid < other.grid; - if (tile != other.tile) return tile < other.tile; + if (level != other.level) { return level < other.level; } + if (grid != other.grid) { return grid < other.grid; } + if (tile != other.tile) { return tile < other.tile; } AMREX_D_TERM( if (periodic_shift[0] != other.periodic_shift[0]) return periodic_shift[0] < other.periodic_shift[0];, @@ -124,8 +125,9 @@ private: friend std::ostream& operator<< (std::ostream& os, const NeighborCopyTag& tag) { os << tag.level << " " << tag.grid << " " << tag.tile << " " << tag.periodic_shift; - if (!os.good()) + if (!os.good()) { amrex::Error("operator<<(ostream&, const NeighborCopyTag&) failed"); + } return os; } }; @@ -141,8 +143,9 @@ private: friend std::ostream& operator<< (std::ostream& os, const InverseCopyTag& tag) { os << tag.src_level << " " << tag.src_grid << " " << tag.src_tile << " " << tag.src_index; - if (!os.good()) + if (!os.good()) { amrex::Error("operator<<(ostream&, const InverseCopyTag&) failed"); + } return os; } @@ -180,8 +183,9 @@ private: friend std::ostream& operator<< (std::ostream& os, const NeighborCommTag& tag) { os << tag.proc_id << " " << tag.level_id << " " << tag.grid_id << " " << tag.tile_id; - if (!os.good()) + if (!os.good()) { amrex::Error("operator<<(ostream&, const NeighborCommTag&) failed"); + } return os; } }; @@ -433,15 +437,15 @@ protected: : grid_id(a_grid_id), box(a_box), 
periodic_shift(a_periodic_shift) {} bool operator<(const NeighborTask& other) const { - if (grid_id != other.grid_id) return grid_id < other.grid_id; - if (box != other.box ) return box < other.box; + if (grid_id != other.grid_id) { return grid_id < other.grid_id; } + if (box != other.box ) { return box < other.box; } AMREX_D_TERM( if (periodic_shift[0] != other.periodic_shift[0]) - return periodic_shift[0] < other.periodic_shift[0];, + { return periodic_shift[0] < other.periodic_shift[0]; }, if (periodic_shift[1] != other.periodic_shift[1]) - return periodic_shift[1] < other.periodic_shift[1];, + { return periodic_shift[1] < other.periodic_shift[1]; }, if (periodic_shift[2] != other.periodic_shift[2]) - return periodic_shift[2] < other.periodic_shift[2]; + { return periodic_shift[2] < other.periodic_shift[2]; } ) return false; } diff --git a/Src/Particle/AMReX_NeighborParticlesCPUImpl.H b/Src/Particle/AMReX_NeighborParticlesCPUImpl.H index 6be73c64a5a..6f7da58f06e 100644 --- a/Src/Particle/AMReX_NeighborParticlesCPUImpl.H +++ b/Src/Particle/AMReX_NeighborParticlesCPUImpl.H @@ -116,8 +116,9 @@ sumNeighborsMPI (std::map >& not_ours, Vector isnds(NProcs, 0); Vector ircvs(NProcs, 0); - for (int i = 0; i < NProcs; ++i) + for (int i = 0; i < NProcs; ++i) { ircvs[i] = 0; + } { // each proc figures out how many bytes it will send, and how @@ -132,7 +133,7 @@ sumNeighborsMPI (std::map >& not_ours, ParallelAllReduce::Max(num_isnds, ParallelContext::CommunicatorSub()); - if (num_isnds == 0) return; + if (num_isnds == 0) { return; } const int num_ircvs = neighbor_procs.size(); Vector stats(num_ircvs); @@ -163,7 +164,7 @@ sumNeighborsMPI (std::map >& not_ours, ParallelContext::CommunicatorSub()); } - if (num_ircvs > 0) ParallelDescriptor::Waitall(rreqs, stats); + if (num_ircvs > 0) { ParallelDescriptor::Waitall(rreqs, stats); } } Vector RcvProc; @@ -303,11 +304,12 @@ NeighborParticleContainer auto& aos = neighbors[tag.level][dst_index].GetArrayOfStructs(); ParticleType& p = aos[tag.dst_index]; for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) { - if (! periodicity.isPeriodic(dir)) continue; - if (tag.periodic_shift[dir] < 0) + if (! periodicity.isPeriodic(dir)) { continue; } + if (tag.periodic_shift[dir] < 0) { p.pos(dir) += static_cast (prob_domain.length(dir)); - else if (tag.periodic_shift[dir] > 0) + } else if (tag.periodic_shift[dir] > 0) { p.pos(dir) -= static_cast (prob_domain.length(dir)); + } } } @@ -326,11 +328,12 @@ NeighborParticleContainer ParticleType p = aos[tag.src_index]; // copy if (periodicity.isAnyPeriodic()) { for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) { - if (! periodicity.isPeriodic(dir)) continue; - if (tag.periodic_shift[dir] < 0) + if (! 
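
Annotation: inside AMREX_D_TERM the fix has to brace each return explicitly, but the comparison itself is an ordinary lexicographic ordering. Outside the macro the same logic could be expressed with std::tie; this is an alternative spelling for illustration, not what the patch does, and Tag is a made-up stand-in for NeighborTask:

    #include <tuple>

    struct Tag { int grid; int box_id; int shift[3]; };

    bool less_than (const Tag& a, const Tag& b)
    {
        return std::tie(a.grid, a.box_id, a.shift[0], a.shift[1], a.shift[2])
             < std::tie(b.grid, b.box_id, b.shift[0], b.shift[1], b.shift[2]);
    }
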
periodicity.isPeriodic(dir)) { continue; } + if (tag.periodic_shift[dir] < 0) { p.pos(dir) += static_cast (prob_domain.length(dir)); - else if (tag.periodic_shift[dir] > 0) + } else if (tag.periodic_shift[dir] > 0) { p.pos(dir) -= static_cast (prob_domain.length(dir)); + } } } @@ -386,8 +389,9 @@ NeighborParticleContainer const int tile = mfi.LocalTileIndex(); PairIndex dst_index(grid, tile); neighbors[lev][dst_index].resize(local_neighbor_sizes[lev][dst_index]); - if ( enableInverse() ) + if ( enableInverse() ) { inverse_tags[lev][dst_index].resize(local_neighbor_sizes[lev][dst_index]); + } } } BL_PROFILE_VAR_STOP(update); @@ -420,7 +424,7 @@ NeighborParticleContainer resizeContainers(this->numLevels()); for (int lev = 0; lev < this->numLevels(); ++lev) { neighbors[lev].clear(); - if ( enableInverse() ) inverse_tags[lev].clear(); + if ( enableInverse() ) { inverse_tags[lev].clear(); } buffer_tag_cache[lev].clear(); for(MFIter mfi = this->MakeMFIter(lev); mfi.isValid(); ++mfi) @@ -450,8 +454,9 @@ getRcvCountsMPI () { // many it will receive Vector snds(NProcs, 0); rcvs.resize(NProcs); - for (int i = 0; i < NProcs; ++i) + for (int i = 0; i < NProcs; ++i) { rcvs[i] = 0; + } num_snds = 0; for (const auto& kv : send_data) { @@ -461,7 +466,7 @@ getRcvCountsMPI () { ParallelAllReduce::Max(num_snds, ParallelContext::CommunicatorSub()); - if (num_snds == 0) return; + if (num_snds == 0) { return; } const int num_rcvs = neighbor_procs.size(); Vector stats(num_rcvs); @@ -491,7 +496,7 @@ getRcvCountsMPI () { ParallelContext::CommunicatorSub()); } - if (num_rcvs > 0) ParallelDescriptor::Waitall(rreqs, stats); + if (num_rcvs > 0) { ParallelDescriptor::Waitall(rreqs, stats); } #endif // AMREX_USE_MPI } @@ -508,8 +513,8 @@ fillNeighborsMPI (bool reuse_rcv_counts) { // each proc figures out how many bytes it will send, and how // many it will receive - if (!reuse_rcv_counts) getRcvCountsMPI(); - if (num_snds == 0) return; + if (!reuse_rcv_counts) { getRcvCountsMPI(); } + if (num_snds == 0) { return; } Vector RcvProc; Vector rOffset; // Offset (in bytes) in the receive buffer @@ -571,7 +576,7 @@ fillNeighborsMPI (bool reuse_rcv_counts) { std::memcpy(&tid, buffer, sizeof(int)); buffer += sizeof(int); std::memcpy(&size, buffer, sizeof(int)); buffer += sizeof(int); - if (size == 0) continue; + if (size == 0) { continue; } np = size / cdata_size; diff --git a/Src/Particle/AMReX_NeighborParticlesGPUImpl.H b/Src/Particle/AMReX_NeighborParticlesGPUImpl.H index 6338d9d55f8..4cc533d901e 100644 --- a/Src/Particle/AMReX_NeighborParticlesGPUImpl.H +++ b/Src/Particle/AMReX_NeighborParticlesGPUImpl.H @@ -21,7 +21,7 @@ namespace detail face_boxes.push_back(lo_face_box); bl.push_back(lo_face_box); for (auto face_box : face_boxes) { for (int j = 0; j < AMREX_SPACEDIM; ++j) { - if (i == j) continue; + if (i == j) { continue; } BoxList edge_boxes; Box hi_edge_box = adjCellHi(face_box, j, ncells); Box lo_edge_box = adjCellLo(face_box, j, ncells); @@ -29,7 +29,7 @@ namespace detail edge_boxes.push_back(lo_edge_box); bl.push_back(lo_edge_box); for (auto edge_box : edge_boxes) { for (int k = 0; k < AMREX_SPACEDIM; ++k) { - if ((j == k) || (i == k)) continue; + if ((j == k) || (i == k)) { continue; } Box hi_corner_box = adjCellHi(edge_box, k, ncells); Box lo_corner_box = adjCellLo(edge_box, k, ncells); bl.push_back(hi_corner_box); @@ -57,7 +57,7 @@ buildNeighborMask () const BoxArray& ba = this->ParticleBoxArray(lev); const DistributionMapping& dmap = this->ParticleDistributionMap(lev); - if (ba.size() == 1 && (! 
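
Annotation: the braced `if (size == 0) { continue; }` above sits in the receive-buffer unpack loop, which reads an int header with memcpy and then skips or consumes the payload. A self-contained sketch of that loop shape (record layout names are illustrative, not the container's actual format):

    #include <cstring>

    void unpack (const char* buffer, const char* end)
    {
        while (buffer < end) {
            int tile = 0, size = 0;
            std::memcpy(&tile, buffer, sizeof(int)); buffer += sizeof(int);
            std::memcpy(&size, buffer, sizeof(int)); buffer += sizeof(int);
            if (size == 0) { continue; }   // the braced guard from the hunk
            // ... consume `size` bytes of particle data ...
            buffer += size;
        }
    }
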
geom.isAnyPeriodic()) ) return; + if (ba.size() == 1 && (! geom.isAnyPeriodic()) ) { return; } if (m_neighbor_mask_ptr == nullptr || ! BoxArray::SameRefs(m_neighbor_mask_ptr->boxArray(), ba) || @@ -82,7 +82,7 @@ buildNeighborMask () { int nbor_grid = isec.first; const Box isec_box = isec.second - pshift; - if ( (grid == nbor_grid) && (pshift == 0)) continue; + if ( (grid == nbor_grid) && (pshift == 0)) { continue; } neighbor_grids.insert(NeighborTask(nbor_grid, isec_box, pshift)); const int global_rank = dmap[nbor_grid]; neighbor_procs.push_back(ParallelContext::global_to_local_rank(global_rank)); @@ -131,7 +131,7 @@ buildNeighborCopyOp (bool use_boundary_neighbor) auto& plev = this->GetParticles(lev); auto& ba = this->ParticleBoxArray(lev); - if (ba.size() == 1 && (! geom.isAnyPeriodic()) ) return; + if (ba.size() == 1 && (! geom.isAnyPeriodic()) ) { return; } for(MFIter mfi = this->MakeMFIter(lev); mfi.isValid(); ++mfi) { @@ -159,7 +159,7 @@ buildNeighborCopyOp (bool use_boundary_neighbor) auto p_counts = counts.dataPtr(); auto p_offsets = offsets.dataPtr(); - ParticleType* p_ptr = &(aos[0]); + ParticleType* p_ptr = aos.data(); auto p_code_array = m_code_array[gid].dataPtr(); auto p_isec_boxes = m_isec_boxes[gid].dataPtr(); const int nisec_box = m_isec_boxes[gid].size(); diff --git a/Src/Particle/AMReX_NeighborParticlesI.H b/Src/Particle/AMReX_NeighborParticlesI.H index 56ce4ff27d8..07819e9f7b4 100644 --- a/Src/Particle/AMReX_NeighborParticlesI.H +++ b/Src/Particle/AMReX_NeighborParticlesI.H @@ -43,10 +43,12 @@ template void NeighborParticleContainer ::initializeCommComps () { - for (int ii = 0; ii < AMREX_SPACEDIM + NStructReal + this->NumRealComps(); ++ii) + for (int ii = 0; ii < AMREX_SPACEDIM + NStructReal + this->NumRealComps(); ++ii) { ghost_real_comp.push_back(1); - for (int ii = 0; ii < 2 + NStructInt + this->NumIntComps(); ++ii) + } + for (int ii = 0; ii < 2 + NStructInt + this->NumIntComps(); ++ii) { ghost_int_comp.push_back(1); + } calcCommSize(); } @@ -81,7 +83,7 @@ NeighborParticleContainer comm_size += sizeof(int); } } - if ( enableInverse() ) comm_size += 4*sizeof(int); + if ( enableInverse() ) { comm_size += 4*sizeof(int); } cdata_size = comm_size; } @@ -150,8 +152,8 @@ NeighborParticleContainer BL_PROFILE("NeighborParticleContainer::BuildMasks"); - if (this->numLevels() == 1) use_mask = true; - else use_mask = false; + if (this->numLevels() == 1) { use_mask = true; } + else { use_mask = false; } resizeContainers(this->numLevels()); @@ -208,8 +210,9 @@ NeighborParticleContainer const int proc = ParallelContext::global_to_local_rank(global_proc); NeighborCommTag comm_tag(proc, level, grid, tile); local_neighbors.push_back(comm_tag); - if (proc != ParallelContext::MyProcSub()) + if (proc != ParallelContext::MyProcSub()) { neighbor_procs.push_back(proc); + } } } } @@ -225,8 +228,9 @@ NeighborParticleContainer GetCommTagsBox(comm_tags, lev, box); for (auto const& tag : comm_tags) { local_neighbors.push_back(tag); - if (tag.proc_id != ParallelContext::MyProcSub()) + if (tag.proc_id != ParallelContext::MyProcSub()) { neighbor_procs.push_back(tag.proc_id); + } } } } @@ -373,7 +377,7 @@ NeighborParticleContainer for (int j = 0; j < static_cast(tags.size()); ++j) { NeighborCopyTag& tag = tags[j]; PairIndex dst_index(tag.grid, tag.tile); - if (tag.grid < 0) continue; + if (tag.grid < 0) { continue; } tag.src_index = i; const int cache_index = cache.size(); @@ -482,14 +486,14 @@ NeighborParticleContainer } for (const auto& kv: remote_map) { - if (kv.first.proc_id == MyProc) 
continue; + if (kv.first.proc_id == MyProc) { continue; } Vector& buffer = send_data[kv.first.proc_id]; buffer.resize(sizeof(int)); - std::memcpy(&buffer[0], &tile_counts[kv.first.proc_id], sizeof(int)); + std::memcpy(buffer.data(), &tile_counts[kv.first.proc_id], sizeof(int)); } for (auto& kv : remote_map) { - if (kv.first.proc_id == MyProc) continue; + if (kv.first.proc_id == MyProc) { continue; } int np = kv.second.size(); int data_size = np * cdata_size; Vector& buffer = send_data[kv.first.proc_id]; @@ -550,7 +554,7 @@ getNeighborTags (Vector& tags, const ParticleType& p, const int lev = 0; const IntVect& iv = this->Index(p, lev); - if (shrink_box.contains(iv)) return; + if (shrink_box.contains(iv)) { return; } const Periodicity& periodicity = this->Geom(lev).periodicity(); const Box& domain = this->Geom(lev).Domain(); @@ -562,7 +566,7 @@ getNeighborTags (Vector& tags, const ParticleType& p, for (int ii = -nGrow[0]; ii < nGrow[0] + 1; ii += nGrow[0]) {, for (int jj = -nGrow[1]; jj < nGrow[1] + 1; jj += nGrow[1]) {, for (int kk = -nGrow[2]; kk < nGrow[2] + 1; kk += nGrow[2]) {) - if (AMREX_D_TERM((ii == 0), && (jj == 0), && (kk == 0))) continue; + if (AMREX_D_TERM((ii == 0), && (jj == 0), && (kk == 0))) { continue; } IntVect shift(AMREX_D_DECL(ii, jj, kk)); IntVect neighbor_cell = iv + shift; @@ -572,15 +576,16 @@ getNeighborTags (Vector& tags, const ParticleType& p, tag.level = mask(neighbor_cell, MaskComps::level); if (periodicity.isAnyPeriodic()) { for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) { - if (! periodicity.isPeriodic(dir)) continue; - if (neighbor_cell[dir] < lo[dir]) + if (! periodicity.isPeriodic(dir)) { continue; } + if (neighbor_cell[dir] < lo[dir]) { tag.periodic_shift[dir] = -1; - else if (neighbor_cell[dir] > hi[dir]) + } else if (neighbor_cell[dir] > hi[dir]) { tag.periodic_shift[dir] = 1; + } } } - if (tag != src_tag) tags.push_back(tag); + if (tag != src_tag) { tags.push_back(tag); } AMREX_D_TERM( }, @@ -610,12 +615,12 @@ getNeighborTags (Vector& tags, const ParticleType& p, { const Box& grid_box = ba[isec.first]; for (IntVect cell = pbox.smallEnd(); cell <= pbox.bigEnd(); pbox.next(cell)) { - if ( !grid_box.contains(cell) ) continue; + if ( !grid_box.contains(cell) ) { continue; } int tile = getTileIndex(cell, grid_box, this->do_tiling, this->tile_size, tbx); auto nbor = NeighborCopyTag(lev, isec.first, tile); nbor.periodic_shift = -pshift; - if (src_tag != nbor) tags.push_back(nbor); + if (src_tag != nbor) { tags.push_back(nbor); } } } } @@ -724,7 +729,7 @@ buildNeighborList (CheckPair&& check_pair, bool /*sort*/) auto& ptile = plev[index]; - if (ptile.numParticles() == 0) continue; + if (ptile.numParticles() == 0) { continue; } Box bx = pti.tilebox(); int ng = computeRefFac(0, lev).max()*m_num_neighbor_cells; @@ -809,7 +814,7 @@ buildNeighborList (CheckPair&& check_pair, OtherPCType& other, const auto& ptile = plev[index]; auto& other_ptile = other.ParticlesAt(lev, pti); - if (ptile.numParticles() == 0) continue; + if (ptile.numParticles() == 0) { continue; } Box bx = pti.tilebox(); int ng = computeRefFac(0, lev).max()*m_num_neighbor_cells; @@ -844,7 +849,7 @@ buildNeighborList (CheckPair&& check_pair, int type_ind, int* ref_ratio, { AMREX_ASSERT(numParticlesOutOfRange(*this, m_num_neighbor_cells) == 0); - if (num_bin_types == 1) AMREX_ASSERT(ref_ratio[0] == 1); + if (num_bin_types == 1) { AMREX_ASSERT(ref_ratio[0] == 1); } BL_PROFILE("NeighborParticleContainer::buildNeighborList"); @@ -880,7 +885,7 @@ buildNeighborList (CheckPair&& check_pair, int type_ind, 
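A few hunks here go beyond brace insertion and replace &container[0] with container.data(), as in the send-buffer memcpy above and the aos pointer in the GPU implementation. A brief sketch of the motivation, using a hypothetical pack_count helper: data() never indexes into the vector, so it is well defined for any size, while &buffer[0] is undefined behaviour if the vector happens to be empty.

    #include <cstring>
    #include <vector>

    // Hypothetical helper illustrating the data() idiom used in the patch.
    void pack_count (std::vector<char>& buffer, int count)
    {
        buffer.resize(sizeof(int));
        // buffer.data() is valid here regardless of how buffer was sized;
        // &buffer[0] would require the vector to be non-empty.
        std::memcpy(buffer.data(), &count, sizeof(int));
    }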
int* ref_ratio, auto index = std::make_pair(gid, tid); auto& ptile = plev[index]; - if (ptile.numParticles() == 0) continue; + if (ptile.numParticles() == 0) { continue; } Box bx = pti.tilebox(); int ng = 1; @@ -1057,10 +1062,10 @@ selectActualNeighbors (CheckPair&& check_pair, int num_cells) for (int ii = amrex::max(ix-num_cells, 0); ii <= amrex::min(ix+num_cells, nx); ++ii) { for (int jj = amrex::max(iy-num_cells, 0); jj <= amrex::min(iy+num_cells, ny); ++jj) { for (int kk = amrex::max(iz-num_cells, 0); kk <= amrex::min(iz+num_cells, nz); ++kk) { - if (isActualNeighbor) break; + if (isActualNeighbor) { break; } int nbr_cell_id = (ii * ny + jj) * nz + kk; for (auto p = poffset[nbr_cell_id]; p < poffset[nbr_cell_id+1]; ++p) { - if (pperm[p] == i) continue; + if (pperm[p] == i) { continue; } if (call_check_pair(check_pair, ptile_data, ptile_data, i, pperm[p])) { IntVect cell_ijk = getParticleCell(pstruct[pperm[p]], plo, dxi, domain); if (!box.contains(cell_ijk)) { @@ -1116,7 +1121,7 @@ resizeContainers (int num_levels) mask_ptr.resize(num_levels); buffer_tag_cache.resize(num_levels); local_neighbor_sizes.resize(num_levels); - if ( enableInverse() ) inverse_tags.resize(num_levels); + if ( enableInverse() ) { inverse_tags.resize(num_levels); } } AMREX_ASSERT((neighbors.size() == m_neighbor_list.size()) && diff --git a/Src/Particle/AMReX_ParGDB.H b/Src/Particle/AMReX_ParGDB.H index 925416170e5..59f236ad1f5 100644 --- a/Src/Particle/AMReX_ParGDB.H +++ b/Src/Particle/AMReX_ParGDB.H @@ -342,8 +342,9 @@ int ParGDB::MaxRefRatio (int /*level*/) const { int max_ref_ratio = 0; - for (int lev = 0; lev < m_nlevels-1; lev++) + for (int lev = 0; lev < m_nlevels-1; lev++) { max_ref_ratio = std::max(max_ref_ratio, m_rr[lev].max()); + } return max_ref_ratio; } diff --git a/Src/Particle/AMReX_Particle.H b/Src/Particle/AMReX_Particle.H index 8bee9af24eb..16004d1231e 100644 --- a/Src/Particle/AMReX_Particle.H +++ b/Src/Particle/AMReX_Particle.H @@ -55,7 +55,7 @@ struct ParticleIDWrapper AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE ParticleIDWrapper& operator= (const ParticleIDWrapper& pidw) noexcept { - return this->operator=(Long(pidw)); // NOLINT(cppcoreguidelines-c-copy-assignment-signature) + return this->operator=(Long(pidw)); // NOLINT } AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE @@ -123,7 +123,7 @@ struct ParticleCPUWrapper AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE ParticleCPUWrapper& operator= (const ParticleCPUWrapper& pcpuw) noexcept { - return this->operator=(int(pcpuw)); // NOLINT(cppcoreguidelines-c-copy-assignment-signature) + return this->operator=(int(pcpuw)); // NOLINT } AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE @@ -441,8 +441,9 @@ Particle::NextID () #endif next = the_next_id++; - if (next > LongParticleIds::LastParticleID) + if (next > LongParticleIds::LastParticleID) { amrex::Abort("Particle::NextID() -- too many particles"); + } return next; } @@ -452,8 +453,9 @@ Long Particle::UnprotectedNextID () { Long next = the_next_id++; - if (next > LongParticleIds::LastParticleID) + if (next > LongParticleIds::LastParticleID) { amrex::Abort("Particle::NextID() -- too many particles"); + } return next; } @@ -471,17 +473,21 @@ operator<< (std::ostream& os, const Particle& p) os << p.id() << ' ' << p.cpu() << ' '; - for (int i = 0; i < AMREX_SPACEDIM; i++) + for (int i = 0; i < AMREX_SPACEDIM; i++) { os << p.pos(i) << ' '; + } - for (int i = 0; i < NReal; i++) + for (int i = 0; i < NReal; i++) { os << p.rdata(i) << ' '; + } - for (int i = 0; i < NInt; i++) + for (int i = 0; i < NInt; i++) { os << p.idata(i) << 
' '; + } - if (!os.good()) + if (!os.good()) { amrex::Error("operator<<(ostream&,Particle&) failed"); + } return os; } @@ -493,14 +499,17 @@ operator<< (std::ostream& os, const Particle& p) os << p.id() << ' ' << p.cpu() << ' '; - for (int i = 0; i < AMREX_SPACEDIM; i++) + for (int i = 0; i < AMREX_SPACEDIM; i++) { os << p.pos(i) << ' '; + } - for (int i = 0; i < NReal; i++) + for (int i = 0; i < NReal; i++) { os << p.rdata(i) << ' '; + } - if (!os.good()) + if (!os.good()) { amrex::Error("operator<<(ostream&,Particle&) failed"); + } return os; } @@ -512,14 +521,17 @@ operator<< (std::ostream& os, const Particle<0, NInt>& p) os << p.id() << ' ' << p.cpu() << ' '; - for (int i = 0; i < AMREX_SPACEDIM; i++) + for (int i = 0; i < AMREX_SPACEDIM; i++) { os << p.pos(i) << ' '; + } - for (int i = 0; i < NInt; i++) + for (int i = 0; i < NInt; i++) { os << p.idata(i) << ' '; + } - if (!os.good()) + if (!os.good()) { amrex::Error("operator<<(ostream&,Particle&) failed"); + } return os; } @@ -531,11 +543,13 @@ operator<< (std::ostream& os, const Particle<0, 0>& p) os << p.id() << ' ' << p.cpu() << ' '; - for (int i = 0; i < AMREX_SPACEDIM; i++) + for (int i = 0; i < AMREX_SPACEDIM; i++) { os << p.pos(i) << ' '; + } - if (!os.good()) + if (!os.good()) { amrex::Error("operator<<(ostream&,Particle&) failed"); + } return os; } diff --git a/Src/Particle/AMReX_ParticleArray.H b/Src/Particle/AMReX_ParticleArray.H index 8b7b4425210..0079d573620 100644 --- a/Src/Particle/AMReX_ParticleArray.H +++ b/Src/Particle/AMReX_ParticleArray.H @@ -83,7 +83,7 @@ struct DataLayoutPolicy, DataLayout::AoS> static constexpr raw_type get_raw_data (container_type& a_container) { - return raw_type(static_cast*>(&a_container[0])); + return raw_type(static_cast*>(a_container.data())); } static constexpr void resize (container_type& a_container, std::size_t a_size) diff --git a/Src/Particle/AMReX_ParticleBufferMap.cpp b/Src/Particle/AMReX_ParticleBufferMap.cpp index 86686ec87b1..a342a1ff86b 100644 --- a/Src/Particle/AMReX_ParticleBufferMap.cpp +++ b/Src/Particle/AMReX_ParticleBufferMap.cpp @@ -57,15 +57,15 @@ void ParticleBufferMap::define (const ParGDBBase* a_gdb) { int pid_a = std::get<2>(a); int pid_b = std::get<2>(b); - if (pid_a != pid_b) return pid_a < pid_b; + if (pid_a != pid_b) { return pid_a < pid_b; } int lev_a = std::get<1>(a); int lev_b = std::get<1>(b); - if (lev_a != lev_b) return lev_a < lev_b; + if (lev_a != lev_b) { return lev_a < lev_b; } int gid_a = std::get<0>(a); int gid_b = std::get<0>(b); - if (gid_a != gid_b) return gid_a < gid_b; + if (gid_a != gid_b) { return gid_a < gid_b; } return false; }); @@ -93,8 +93,9 @@ void ParticleBufferMap::define (const ParGDBBase* a_gdb) m_proc_box_offsets.resize(0); m_proc_box_offsets.push_back(0); - for (auto count : m_proc_box_counts) + for (auto count : m_proc_box_counts) { m_proc_box_offsets.push_back(m_proc_box_offsets.back() + count); + } d_bucket_to_pid.resize(0); d_bucket_to_pid.resize(num_buckets); @@ -113,10 +114,10 @@ void ParticleBufferMap::define (const ParGDBBase* a_gdb) bool ParticleBufferMap::isValid (const ParGDBBase* a_gdb) const { - if (!m_defined) return false; + if (!m_defined) { return false; } int num_levs = a_gdb->finestLevel() + 1; - if (num_levs != m_ba.size()) return false; + if (num_levs != m_ba.size()) { return false; } bool valid = true; for (int lev = 0; lev < num_levs; ++lev) diff --git a/Src/Particle/AMReX_ParticleCommunication.H b/Src/Particle/AMReX_ParticleCommunication.H index 924fc894a89..36ecb747e4b 100644 --- 
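The sort comparator in ParticleBufferMap::define orders its (grid, level, proc) triples by owning process, then level, then grid id. For readers following that logic, an equivalent lexicographic formulation with std::tie is shown below; it is illustrative only, and the patch keeps the explicit three-way comparison.

    #include <tuple>

    using BoxTriple = std::tuple<int,int,int>;   // (grid id, level, owning proc)

    // Equivalent ordering: proc id first, then level, then grid id.
    bool less_by_proc_level_grid (const BoxTriple& a, const BoxTriple& b)
    {
        return std::tie(std::get<2>(a), std::get<1>(a), std::get<0>(a))
             < std::tie(std::get<2>(b), std::get<1>(b), std::get<0>(b));
    }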
a/Src/Particle/AMReX_ParticleCommunication.H +++ b/Src/Particle/AMReX_ParticleCommunication.H @@ -37,8 +37,9 @@ struct RedistributeUnpackPolicy int N = static_cast(sizes.size()); std::map tile_sizes; - for(int i = 0; i < N; ++i) + for(int i = 0; i < N; ++i) { tile_sizes[tiles[i]] = tiles[i]->numParticles(); + } for(int i = 0; i < N; ++i) { @@ -46,8 +47,9 @@ struct RedistributeUnpackPolicy tile_sizes[tiles[i]] += sizes[i]; } - for (auto& kv : tile_sizes) + for (auto& kv : tile_sizes) { kv.first->resize(kv.second); + } } }; @@ -66,7 +68,7 @@ struct ParticleCopyOp [[nodiscard]] int numCopies (int gid, int lev) const { - if (m_boxes.size() <= lev) return 0; + if (m_boxes.size() <= lev) { return 0; } auto mit = m_boxes[lev].find(gid); return mit == m_boxes[lev].end() ? 0 : int(mit->second.size()); } @@ -160,7 +162,7 @@ struct ParticleCopyPlan { int gid = kv.first.first; int num_copies = op.numCopies(gid, lev); - if (num_copies == 0) continue; + if (num_copies == 0) { continue; } m_dst_indices[lev][gid].resize(num_copies); const auto* p_boxes = op.m_boxes[lev].at(gid).dataPtr(); @@ -337,7 +339,7 @@ void packBuffer (const PC& pc, const ParticleCopyOp& op, const ParticleCopyPlan& const auto ptd = src_tile.getConstParticleTileData(); int num_copies = op.numCopies(gid, lev); - if (num_copies == 0) continue; + if (num_copies == 0) { continue; } const auto* p_boxes = op.m_boxes[lev].at(gid).dataPtr(); const auto* p_levels = op.m_levels[lev].at(gid).dataPtr(); @@ -370,11 +372,12 @@ void packBuffer (const PC& pc, const ParticleCopyOp& op, const ParticleCopyPlan& AMREX_SPACEDIM*sizeof(ParticleReal)); for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if (! is_per[idim]) continue; - if (pshift[idim] > 0) + if (! is_per[idim]) { continue; } + if (pshift[idim] > 0) { pos[idim] += phi[idim] - plo[idim]; - else if (pshift[idim] < 0) + } else if (pshift[idim] < 0) { pos[idim] -= phi[idim] - plo[idim]; + } } amrex::Gpu::memcpy(&p_snd_buffer[dst_offset], &pos[0], AMREX_SPACEDIM*sizeof(ParticleReal)); @@ -461,7 +464,7 @@ void communicateParticlesStart (const PC& pc, ParticleCopyPlan& plan, const SndB const int NProcs = ParallelContext::NProcsSub(); const int MyProc = ParallelContext::MyProcSub(); - if (NProcs == 1) return; + if (NProcs == 1) { return; } Vector RcvProc; Vector rOffset; @@ -519,15 +522,15 @@ void communicateParticlesStart (const PC& pc, ParticleCopyPlan& plan, const SndB ParallelDescriptor::Arecv((char*) (rcv_buffer.dataPtr() + offset), Cnt, Who, SeqNum, ParallelContext::CommunicatorSub()).req(); } - if (plan.m_NumSnds == 0) return; + if (plan.m_NumSnds == 0) { return; } // Send. 
for (int i = 0; i < NProcs; ++i) { - if (i == MyProc) continue; + if (i == MyProc) { continue; } const auto Who = i; const auto Cnt = plan.m_snd_counts[i]; - if (Cnt == 0) continue; + if (Cnt == 0) { continue; } auto snd_offset = plan.m_snd_offsets[i]; AMREX_ASSERT(plan.m_snd_counts[i] % ParallelDescriptor::alignof_comm_data(plan.m_snd_num_particles[i]*psize) == 0); @@ -554,7 +557,7 @@ void unpackRemotes (PC& pc, const ParticleCopyPlan& plan, Buffer& rcv_buffer, Un #ifdef AMREX_USE_MPI const int NProcs = ParallelContext::NProcsSub(); - if (NProcs == 1) return; + if (NProcs == 1) { return; } const int MyProc = ParallelContext::MyProcSub(); amrex::ignore_unused(MyProc); diff --git a/Src/Particle/AMReX_ParticleCommunication.cpp b/Src/Particle/AMReX_ParticleCommunication.cpp index 474c8adc40e..6558c30e3fa 100644 --- a/Src/Particle/AMReX_ParticleCommunication.cpp +++ b/Src/Particle/AMReX_ParticleCommunication.cpp @@ -54,7 +54,7 @@ void ParticleCopyPlan::buildMPIStart (const ParticleBufferMap& map, Long psize) const int MyProc = ParallelContext::MyProcSub(); const auto NNeighborProcs = static_cast(m_neighbor_procs.size()); - if (NProcs == 1) return; + if (NProcs == 1) { return; } m_Snds.resize(0); m_Snds.resize(NProcs, 0); @@ -81,9 +81,9 @@ void ParticleCopyPlan::buildMPIStart (const ParticleBufferMap& map, Long psize) int lev = map.bucketToLevel(bucket); AMREX_ASSERT(m_box_counts_h[bucket] <= static_cast(std::numeric_limits::max())); int npart = static_cast(m_box_counts_h[bucket]); - if (npart == 0) continue; + if (npart == 0) { continue; } m_snd_num_particles[i] += npart; - if (i == MyProc) continue; + if (i == MyProc) { continue; } snd_data[i].push_back(npart); snd_data[i].push_back(dst); snd_data[i].push_back(lev); @@ -160,10 +160,10 @@ void ParticleCopyPlan::buildMPIStart (const ParticleBufferMap& map, Long psize) for (auto i : m_neighbor_procs) { - if (i == MyProc) continue; + if (i == MyProc) { continue; } const auto Who = i; const auto Cnt = m_Snds[i]; - if (Cnt == 0) continue; + if (Cnt == 0) { continue; } AMREX_ASSERT(Cnt > 0); AMREX_ASSERT(Who >= 0 && Who < NProcs); @@ -212,7 +212,7 @@ void ParticleCopyPlan::buildMPIFinish (const ParticleBufferMap& map) // NOLINT(r #ifdef AMREX_USE_MPI const int NProcs = ParallelContext::NProcsSub(); - if (NProcs == 1) return; + if (NProcs == 1) { return; } if (m_nrcvs > 0) { @@ -255,8 +255,8 @@ void ParticleCopyPlan::buildMPIFinish (const ParticleBufferMap& map) // NOLINT(r void ParticleCopyPlan::doHandShake (const Vector& Snds, Vector& Rcvs) const // NOLINT(readability-convert-member-functions-to-static) { BL_PROFILE("ParticleCopyPlan::doHandShake"); - if (m_local) doHandShakeLocal(Snds, Rcvs); - else doHandShakeGlobal(Snds, Rcvs); + if (m_local) { doHandShakeLocal(Snds, Rcvs); } + else { doHandShakeGlobal(Snds, Rcvs); } } void ParticleCopyPlan::doHandShakeLocal (const Vector& Snds, Vector& Rcvs) const // NOLINT(readability-convert-member-functions-to-static) @@ -331,7 +331,7 @@ void ParticleCopyPlan::doHandShakeGlobal (const Vector& Snds, Vector Vector snd_connectivity(NProcs, 0); Vector rcv_connectivity(NProcs, 1); - for (int i = 0; i < NProcs; ++i) { if (Snds[i] > 0) snd_connectivity[i] = 1; } + for (int i = 0; i < NProcs; ++i) { if (Snds[i] > 0) { snd_connectivity[i] = 1; } } Long num_rcvs = 0; MPI_Reduce_scatter(snd_connectivity.data(), &num_rcvs, rcv_connectivity.data(), @@ -349,7 +349,7 @@ void ParticleCopyPlan::doHandShakeGlobal (const Vector& Snds, Vector } for (int i = 0; i < NProcs; ++i) { - if (Snds[i] == 0) continue; + if (Snds[i] == 0) 
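doHandShakeGlobal uses MPI_Reduce_scatter over 0/1 connectivity flags so that each rank learns how many peers will send to it without exchanging full count tables. A self-contained sketch of that pattern, assuming an initialized MPI environment (the function and variable names are illustrative):

    #include <mpi.h>
    #include <vector>

    // Each rank marks which destinations it will send to; the reduce-scatter
    // sums those flags column-wise, so rank i receives the number of ranks
    // that flagged it, i.e. its expected number of incoming messages.
    long count_incoming_messages (const std::vector<long>& bytes_to_send, MPI_Comm comm)
    {
        int nprocs = 0;
        MPI_Comm_size(comm, &nprocs);

        std::vector<long> snd_connectivity(nprocs, 0);
        std::vector<int>  recvcounts(nprocs, 1);   // one summed value per rank
        for (int i = 0; i < nprocs; ++i) {
            if (bytes_to_send[i] > 0) { snd_connectivity[i] = 1; }
        }

        long num_senders = 0;
        MPI_Reduce_scatter(snd_connectivity.data(), &num_senders,
                           recvcounts.data(), MPI_LONG, MPI_SUM, comm);
        return num_senders;
    }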
{ continue; } const Long Cnt = 1; MPI_Send( &Snds[i], Cnt, ParallelDescriptor::Mpi_typemap::type(), i, SeqNum, ParallelContext::CommunicatorSub()); diff --git a/Src/Particle/AMReX_ParticleContainerBase.cpp b/Src/Particle/AMReX_ParticleContainerBase.cpp index 7583302c8f5..7b405681e4d 100644 --- a/Src/Particle/AMReX_ParticleContainerBase.cpp +++ b/Src/Particle/AMReX_ParticleContainerBase.cpp @@ -53,7 +53,7 @@ void ParticleContainerBase::resizeData () void ParticleContainerBase::RedefineDummyMF (int lev) { - if (lev > m_dummy_mf.size()-1) m_dummy_mf.resize(lev+1); + if (lev > m_dummy_mf.size()-1) { m_dummy_mf.resize(lev+1); } if (m_dummy_mf[lev] == nullptr || ! BoxArray::SameRefs(m_dummy_mf[lev]->boxArray(), @@ -319,8 +319,9 @@ void ParticleContainerBase::BuildRedistributeMask (int lev, int nghost) const { const int global_rank = this->ParticleDistributionMap(lev)[grid]; const int rank = ParallelContext::global_to_local_rank(global_rank); - if (rank != ParallelContext::MyProcSub()) + if (rank != ParallelContext::MyProcSub()) { neighbor_procs.push_back(rank); + } } } } diff --git a/Src/Particle/AMReX_ParticleContainerI.H b/Src/Particle/AMReX_ParticleContainerI.H index 9712a13a415..377d87e21e8 100644 --- a/Src/Particle/AMReX_ParticleContainerI.H +++ b/Src/Particle/AMReX_ParticleContainerI.H @@ -48,7 +48,7 @@ ParticleContainer_impl :: Initia pp.queryAdd("do_tiling", do_tiling); Vector tilesize(AMREX_SPACEDIM); if (pp.queryarr("tile_size", tilesize, 0, AMREX_SPACEDIM)) { - for (int i=0; i::value, @@ -97,8 +97,9 @@ ParticleContainer_impl AMREX_ASSERT(m_gdb != nullptr); - if (lev_max == -1) + if (lev_max == -1) { lev_max = finestLevel(); + } AMREX_ASSERT(lev_max <= finestLevel()); @@ -176,10 +177,11 @@ ParticleContainer_impl AMREX_ASSERT(m_gdb != nullptr); - if (!Geom(0).isAnyPeriodic()) return false; + if (!Geom(0).isAnyPeriodic()) { return false; } - if (lev_max == -1) + if (lev_max == -1) { lev_max = finestLevel(); + } AMREX_ASSERT(lev_max <= finestLevel()); @@ -411,7 +413,7 @@ ParticleContainer_impl::NumberOf { ParallelDescriptor::GatherLayoutDataToVector(np_per_grid_local, nparticles, ParallelContext::IOProcessorNumberSub()); - ParallelDescriptor::Bcast(&nparticles[0], nparticles.size(), + ParallelDescriptor::Bcast(nparticles.data(), nparticles.size(), ParallelContext::IOProcessorNumberSub()); } @@ -424,7 +426,7 @@ Long ParticleContainer_impl::Num { Long nparticles = 0; - if (level < 0 || level >= int(m_particles.size())) return nparticles; + if (level < 0 || level >= int(m_particles.size())) { return nparticles; } if (only_valid) { ReduceOps reduce_op; @@ -575,7 +577,7 @@ ParticleContainer_impl::Incremen BL_PROFILE("ParticleContainer::Increment"); AMREX_ASSERT(OK()); - if (m_particles.empty()) return; + if (m_particles.empty()) { return; } AMREX_ASSERT(lev >= 0 && lev < int(m_particles.size())); AMREX_ASSERT(numParticlesOutOfRange(*this, 0) == 0); @@ -609,7 +611,7 @@ void ParticleContainer_impl::RemoveParticlesAtLevel (int level) { BL_PROFILE("ParticleContainer::RemoveParticlesAtLevel()"); - if (level >= int(this->m_particles.size())) return; + if (level >= int(this->m_particles.size())) { return; } if (!this->m_particles[level].empty()) { @@ -1038,12 +1040,12 @@ addParticles (const PCType& other, F&& f, bool local) for(MFIter mfi = other.MakeMFIter(lev); mfi.isValid(); ++mfi) { auto index = std::make_pair(mfi.index(), mfi.LocalTileIndex()); - if(plevel_other.find(index) == plevel_other.end()) continue; + if(plevel_other.find(index) == plevel_other.end()) { continue; } auto& ptile = 
DefineAndReturnParticleTile(lev, mfi.index(), mfi.LocalTileIndex()); const auto& ptile_other = plevel_other.at(index); auto np = ptile_other.numParticles(); - if (np == 0) continue; + if (np == 0) { continue; } auto dst_index = ptile.numParticles(); ptile.resize(dst_index + np); @@ -1054,7 +1056,7 @@ addParticles (const PCType& other, F&& f, bool local) } } - if (! local) Redistribute(); + if (! local) { Redistribute(); } } // @@ -1175,7 +1177,7 @@ ParticleContainer_impl { BL_PROFILE("ParticleContainer::SortParticlesByBin()"); - if (bin_size == IntVect::TheZeroVector()) return; + if (bin_size == IntVect::TheZeroVector()) { return; } for (int lev = 0; lev < numLevels(); ++lev) { @@ -1238,7 +1240,7 @@ ParticleContainer_impl { #ifdef AMREX_USE_GPU - if (local) AMREX_ASSERT(numParticlesOutOfRange(*this, lev_min, lev_max, local) == 0); + if (local) { AMREX_ASSERT(numParticlesOutOfRange(*this, lev_min, lev_max, local) == 0); } // sanity check AMREX_ALWAYS_ASSERT(do_tiling == false); @@ -1272,7 +1274,7 @@ ParticleContainer_impl this->defineBufferMap(); - if (! m_particle_locator.isValid(GetParGDB())) m_particle_locator.build(GetParGDB()); + if (! m_particle_locator.isValid(GetParGDB())) { m_particle_locator.build(GetParGDB()); } m_particle_locator.setGeometry(GetParGDB()); auto assign_grid = m_particle_locator.getGridAssignor(); @@ -1423,475 +1425,491 @@ void ParticleContainer_impl ::RedistributeCPU (int lev_min, int lev_max, int nGrow, int local, bool remove_negative) { - BL_PROFILE("ParticleContainer::RedistributeCPU()"); + BL_PROFILE("ParticleContainer::RedistributeCPU()"); - const int MyProc = ParallelContext::MyProcSub(); - auto strttime = amrex::second(); + const int MyProc = ParallelContext::MyProcSub(); + auto strttime = amrex::second(); - if (local > 0) BuildRedistributeMask(0, local); + if (local > 0) { BuildRedistributeMask(0, local); } - // On startup there are cases where Redistribute() could be called - // with a given finestLevel() where that AmrLevel has yet to be defined. - int theEffectiveFinestLevel = m_gdb->finestLevel(); + // On startup there are cases where Redistribute() could be called + // with a given finestLevel() where that AmrLevel has yet to be defined. + int theEffectiveFinestLevel = m_gdb->finestLevel(); - while (!m_gdb->LevelDefined(theEffectiveFinestLevel)) - theEffectiveFinestLevel--; + while (!m_gdb->LevelDefined(theEffectiveFinestLevel)) { + theEffectiveFinestLevel--; + } - if (int(m_particles.size()) < theEffectiveFinestLevel+1) { - if (Verbose()) { - amrex::Print() << "ParticleContainer::Redistribute() resizing containers from " - << m_particles.size() << " to " - << theEffectiveFinestLevel + 1 << '\n'; - } - m_particles.resize(theEffectiveFinestLevel+1); - m_dummy_mf.resize(theEffectiveFinestLevel+1); - } + if (int(m_particles.size()) < theEffectiveFinestLevel+1) { + if (Verbose()) { + amrex::Print() << "ParticleContainer::Redistribute() resizing containers from " + << m_particles.size() << " to " + << theEffectiveFinestLevel + 1 << '\n'; + } + m_particles.resize(theEffectiveFinestLevel+1); + m_dummy_mf.resize(theEffectiveFinestLevel+1); + } - // It is important to do this even if we don't have more levels because we may have changed the - // grids at this level in a regrid. - for (int lev = 0; lev < theEffectiveFinestLevel+1; ++lev) - RedefineDummyMF(lev); + // It is important to do this even if we don't have more levels because we may have changed the + // grids at this level in a regrid. 
+ for (int lev = 0; lev < theEffectiveFinestLevel+1; ++lev) { + RedefineDummyMF(lev); + } - int finest_lev_particles; - if (lev_max == -1) { - lev_max = theEffectiveFinestLevel; - finest_lev_particles = m_particles.size() - 1; - } else { - finest_lev_particles = lev_max; - } - AMREX_ASSERT(lev_max <= finestLevel()); + int finest_lev_particles; + if (lev_max == -1) { + lev_max = theEffectiveFinestLevel; + finest_lev_particles = m_particles.size() - 1; + } else { + finest_lev_particles = lev_max; + } + AMREX_ASSERT(lev_max <= finestLevel()); - // This will hold the valid particles that go to another process - std::map > not_ours; - - int num_threads = OpenMP::get_max_threads(); - - // these are temporary buffers for each thread - std::map > > tmp_remote; - Vector, Vector > > tmp_local; - Vector, Vector > > > soa_local; - tmp_local.resize(theEffectiveFinestLevel+1); - soa_local.resize(theEffectiveFinestLevel+1); - - // we resize these buffers outside the parallel region - for (int lev = lev_min; lev <= lev_max; lev++) { - for (MFIter mfi(*m_dummy_mf[lev], this->do_tiling ? this->tile_size : IntVect::TheZeroVector()); - mfi.isValid(); ++mfi) { - auto index = std::make_pair(mfi.index(), mfi.LocalTileIndex()); - tmp_local[lev][index].resize(num_threads); - soa_local[lev][index].resize(num_threads); - for (int t = 0; t < num_threads; ++t) { - soa_local[lev][index][t].define(m_num_runtime_real, m_num_runtime_int); - } - } - } - if (local) { - for (int i = 0; i < neighbor_procs.size(); ++i) - tmp_remote[neighbor_procs[i]].resize(num_threads); - } else { - for (int i = 0; i < ParallelContext::NProcsSub(); ++i) - tmp_remote[i].resize(num_threads); - } + // This will hold the valid particles that go to another process + std::map > not_ours; - // first pass: for each tile in parallel, in each thread copies the particles that - // need to be moved into it's own, temporary buffer. - for (int lev = lev_min; lev <= finest_lev_particles; lev++) { - auto& pmap = m_particles[lev]; + int num_threads = OpenMP::get_max_threads(); - Vector > grid_tile_ids; - Vector ptile_ptrs; - for (auto& kv : pmap) - { - grid_tile_ids.push_back(kv.first); - ptile_ptrs.push_back(&(kv.second)); - } + // these are temporary buffers for each thread + std::map > > tmp_remote; + Vector, Vector > > tmp_local; + Vector, Vector > > > soa_local; + tmp_local.resize(theEffectiveFinestLevel+1); + soa_local.resize(theEffectiveFinestLevel+1); + + // we resize these buffers outside the parallel region + for (int lev = lev_min; lev <= lev_max; lev++) { + for (MFIter mfi(*m_dummy_mf[lev], this->do_tiling ? this->tile_size : IntVect::TheZeroVector()); + mfi.isValid(); ++mfi) { + auto index = std::make_pair(mfi.index(), mfi.LocalTileIndex()); + tmp_local[lev][index].resize(num_threads); + soa_local[lev][index].resize(num_threads); + for (int t = 0; t < num_threads; ++t) { + soa_local[lev][index][t].define(m_num_runtime_real, m_num_runtime_int); + } + } + } + if (local) { + for (int i = 0; i < neighbor_procs.size(); ++i) { + tmp_remote[neighbor_procs[i]].resize(num_threads); + } + } else { + for (int i = 0; i < ParallelContext::NProcsSub(); ++i) { + tmp_remote[i].resize(num_threads); + } + } + + // first pass: for each tile in parallel, in each thread copies the particles that + // need to be moved into it's own, temporary buffer. 
+ for (int lev = lev_min; lev <= finest_lev_particles; lev++) { + auto& pmap = m_particles[lev]; + + Vector > grid_tile_ids; + Vector ptile_ptrs; + for (auto& kv : pmap) + { + grid_tile_ids.push_back(kv.first); + ptile_ptrs.push_back(&(kv.second)); + } #ifdef AMREX_USE_OMP #pragma omp parallel for #endif - for (int pmap_it = 0; pmap_it < static_cast(ptile_ptrs.size()); ++pmap_it) - { - int thread_num = OpenMP::get_thread_num(); - int grid = grid_tile_ids[pmap_it].first; - int tile = grid_tile_ids[pmap_it].second; - auto& soa = ptile_ptrs[pmap_it]->GetStructOfArrays(); - auto& aos = ptile_ptrs[pmap_it]->GetArrayOfStructs(); - - // AMREX_ASSERT_WITH_MESSAGE((NumRealComps() == 0 && NumIntComps() == 0) - // || aos.size() == soa.size(), - // "The AoS and SoA data on this tile are different sizes - " - // "perhaps particles have not been initialized correctly?"); - unsigned npart = ptile_ptrs[pmap_it]->numParticles(); - ParticleLocData pld; - - if constexpr(!ParticleType::is_soa_particle){ - - if (npart != 0) { - Long last = npart - 1; - Long pindex = 0; - while (pindex <= last) { - ParticleType& p = aos[pindex]; - - if ((remove_negative == false) && (p.id() < 0)) { - ++pindex; - continue; - } - - if (p.id() < 0) - { - aos[pindex] = aos[last]; - for (int comp = 0; comp < NumRealComps(); comp++) - soa.GetRealData(comp)[pindex] = soa.GetRealData(comp)[last]; - for (int comp = 0; comp < NumIntComps(); comp++) - soa.GetIntData(comp)[pindex] = soa.GetIntData(comp)[last]; - correctCellVectors(last, pindex, grid, aos[pindex]); - --last; - continue; - } - - locateParticle(p, pld, lev_min, lev_max, nGrow, local ? grid : -1); - - particlePostLocate(p, pld, lev); - - if (p.id() < 0) - { - aos[pindex] = aos[last]; - for (int comp = 0; comp < NumRealComps(); comp++) - soa.GetRealData(comp)[pindex] = soa.GetRealData(comp)[last]; - for (int comp = 0; comp < NumIntComps(); comp++) - soa.GetIntData(comp)[pindex] = soa.GetIntData(comp)[last]; - correctCellVectors(last, pindex, grid, aos[pindex]); - --last; - continue; - } - - const int who = ParallelContext::global_to_local_rank(ParticleDistributionMap(pld.m_lev)[pld.m_grid]); - if (who == MyProc) { - if (pld.m_lev != lev || pld.m_grid != grid || pld.m_tile != tile) { - // We own it but must shift it to another place. 
- auto index = std::make_pair(pld.m_grid, pld.m_tile); - AMREX_ASSERT(tmp_local[pld.m_lev][index].size() == num_threads); - tmp_local[pld.m_lev][index][thread_num].push_back(p); - for (int comp = 0; comp < NumRealComps(); ++comp) { - RealVector& arr = soa_local[pld.m_lev][index][thread_num].GetRealData(comp); - arr.push_back(soa.GetRealData(comp)[pindex]); + for (int pmap_it = 0; pmap_it < static_cast(ptile_ptrs.size()); ++pmap_it) + { + int thread_num = OpenMP::get_thread_num(); + int grid = grid_tile_ids[pmap_it].first; + int tile = grid_tile_ids[pmap_it].second; + auto& soa = ptile_ptrs[pmap_it]->GetStructOfArrays(); + auto& aos = ptile_ptrs[pmap_it]->GetArrayOfStructs(); + + // AMREX_ASSERT_WITH_MESSAGE((NumRealComps() == 0 && NumIntComps() == 0) + // || aos.size() == soa.size(), + // "The AoS and SoA data on this tile are different sizes - " + // "perhaps particles have not been initialized correctly?"); + unsigned npart = ptile_ptrs[pmap_it]->numParticles(); + ParticleLocData pld; + + if constexpr(!ParticleType::is_soa_particle){ + + if (npart != 0) { + Long last = npart - 1; + Long pindex = 0; + while (pindex <= last) { + ParticleType& p = aos[pindex]; + + if ((remove_negative == false) && (p.id() < 0)) { + ++pindex; + continue; + } + + if (p.id() < 0) + { + aos[pindex] = aos[last]; + for (int comp = 0; comp < NumRealComps(); comp++) { + soa.GetRealData(comp)[pindex] = soa.GetRealData(comp)[last]; + } + for (int comp = 0; comp < NumIntComps(); comp++) { + soa.GetIntData(comp)[pindex] = soa.GetIntData(comp)[last]; } - for (int comp = 0; comp < NumIntComps(); ++comp) { - IntVector& arr = soa_local[pld.m_lev][index][thread_num].GetIntData(comp); - arr.push_back(soa.GetIntData(comp)[pindex]); + correctCellVectors(last, pindex, grid, aos[pindex]); + --last; + continue; + } + + locateParticle(p, pld, lev_min, lev_max, nGrow, local ? grid : -1); + + particlePostLocate(p, pld, lev); + + if (p.id() < 0) + { + aos[pindex] = aos[last]; + for (int comp = 0; comp < NumRealComps(); comp++) { + soa.GetRealData(comp)[pindex] = soa.GetRealData(comp)[last]; + } + for (int comp = 0; comp < NumIntComps(); comp++) { + soa.GetIntData(comp)[pindex] = soa.GetIntData(comp)[last]; + } + correctCellVectors(last, pindex, grid, aos[pindex]); + --last; + continue; + } + + const int who = ParallelContext::global_to_local_rank(ParticleDistributionMap(pld.m_lev)[pld.m_grid]); + if (who == MyProc) { + if (pld.m_lev != lev || pld.m_grid != grid || pld.m_tile != tile) { + // We own it but must shift it to another place. 
+ auto index = std::make_pair(pld.m_grid, pld.m_tile); + AMREX_ASSERT(tmp_local[pld.m_lev][index].size() == num_threads); + tmp_local[pld.m_lev][index][thread_num].push_back(p); + for (int comp = 0; comp < NumRealComps(); ++comp) { + RealVector& arr = soa_local[pld.m_lev][index][thread_num].GetRealData(comp); + arr.push_back(soa.GetRealData(comp)[pindex]); + } + for (int comp = 0; comp < NumIntComps(); ++comp) { + IntVector& arr = soa_local[pld.m_lev][index][thread_num].GetIntData(comp); + arr.push_back(soa.GetIntData(comp)[pindex]); } + p.id() = -p.id(); // Invalidate the particle + } + } + else { + auto& particles_to_send = tmp_remote[who][thread_num]; + auto old_size = particles_to_send.size(); + auto new_size = old_size + superparticle_size; + particles_to_send.resize(new_size); + std::memcpy(&particles_to_send[old_size], &p, particle_size); + char* dst = &particles_to_send[old_size] + particle_size; + int array_comp_start = AMREX_SPACEDIM + NStructReal; + for (int comp = 0; comp < NumRealComps(); comp++) { + if (h_redistribute_real_comp[array_comp_start + comp]) { + std::memcpy(dst, &soa.GetRealData(comp)[pindex], sizeof(ParticleReal)); + dst += sizeof(ParticleReal); + } + } + array_comp_start = 2 + NStructInt; + for (int comp = 0; comp < NumIntComps(); comp++) { + if (h_redistribute_int_comp[array_comp_start + comp]) { + std::memcpy(dst, &soa.GetIntData(comp)[pindex], sizeof(int)); + dst += sizeof(int); + } + } + p.id() = -p.id(); // Invalidate the particle - } - } - else { - auto& particles_to_send = tmp_remote[who][thread_num]; - auto old_size = particles_to_send.size(); - auto new_size = old_size + superparticle_size; - particles_to_send.resize(new_size); - std::memcpy(&particles_to_send[old_size], &p, particle_size); - char* dst = &particles_to_send[old_size] + particle_size; - int array_comp_start = AMREX_SPACEDIM + NStructReal; - for (int comp = 0; comp < NumRealComps(); comp++) { - if (h_redistribute_real_comp[array_comp_start + comp]) { - std::memcpy(dst, &soa.GetRealData(comp)[pindex], sizeof(ParticleReal)); - dst += sizeof(ParticleReal); - } - } - array_comp_start = 2 + NStructInt; - for (int comp = 0; comp < NumIntComps(); comp++) { - if (h_redistribute_int_comp[array_comp_start + comp]) { - std::memcpy(dst, &soa.GetIntData(comp)[pindex], sizeof(int)); - dst += sizeof(int); - } - } - - p.id() = -p.id(); // Invalidate the particle - } + } - if (p.id() < 0) - { - aos[pindex] = aos[last]; - for (int comp = 0; comp < NumRealComps(); comp++) - soa.GetRealData(comp)[pindex] = soa.GetRealData(comp)[last]; - for (int comp = 0; comp < NumIntComps(); comp++) - soa.GetIntData(comp)[pindex] = soa.GetIntData(comp)[last]; - correctCellVectors(last, pindex, grid, aos[pindex]); - --last; - continue; + if (p.id() < 0) + { + aos[pindex] = aos[last]; + for (int comp = 0; comp < NumRealComps(); comp++) { + soa.GetRealData(comp)[pindex] = soa.GetRealData(comp)[last]; + } + for (int comp = 0; comp < NumIntComps(); comp++) { + soa.GetIntData(comp)[pindex] = soa.GetIntData(comp)[last]; + } + correctCellVectors(last, pindex, grid, aos[pindex]); + --last; + continue; + } + + ++pindex; } - ++pindex; + aos().erase(aos().begin() + last + 1, aos().begin() + npart); + for (int comp = 0; comp < NumRealComps(); comp++) { + RealVector& rdata = soa.GetRealData(comp); + rdata.erase(rdata.begin() + last + 1, rdata.begin() + npart); + } + for (int comp = 0; comp < NumIntComps(); comp++) { + IntVector& idata = soa.GetIntData(comp); + idata.erase(idata.begin() + last + 1, idata.begin() + npart); + } } - 
aos().erase(aos().begin() + last + 1, aos().begin() + npart); - for (int comp = 0; comp < NumRealComps(); comp++) { - RealVector& rdata = soa.GetRealData(comp); - rdata.erase(rdata.begin() + last + 1, rdata.begin() + npart); - } - for (int comp = 0; comp < NumIntComps(); comp++) { - IntVector& idata = soa.GetIntData(comp); - idata.erase(idata.begin() + last + 1, idata.begin() + npart); - } - } + } else{ // soa particle - } else{ // soa particle + auto particle_tile = ptile_ptrs[pmap_it]; + if (npart != 0) { + Long last = npart - 1; + Long pindex = 0; + while (pindex <= last) { + auto ptd = particle_tile->getParticleTileData(); + ParticleType p(ptd,pindex); - auto particle_tile = ptile_ptrs[pmap_it]; - if (npart != 0) { - Long last = npart - 1; - Long pindex = 0; - while (pindex <= last) { - auto ptd = particle_tile->getParticleTileData(); - ParticleType p(ptd,pindex); + if ((remove_negative == false) && (p.id() < 0)) { + ++pindex; + continue; + } - if ((remove_negative == false) && (p.id() < 0)) { - ++pindex; - continue; - } + if (p.id() < 0){ + for (int comp = 0; comp < NumRealComps(); comp++) { + soa.GetRealData(comp)[pindex] = soa.GetRealData(comp)[last]; + } + for (int comp = 0; comp < NumIntComps(); comp++) { + soa.GetIntData(comp)[pindex] = soa.GetIntData(comp)[last]; + } + correctCellVectors(last, pindex, grid, aos[pindex]); + --last; + continue; + } - if (p.id() < 0){ - for (int comp = 0; comp < NumRealComps(); comp++) - soa.GetRealData(comp)[pindex] = soa.GetRealData(comp)[last]; - for (int comp = 0; comp < NumIntComps(); comp++) - soa.GetIntData(comp)[pindex] = soa.GetIntData(comp)[last]; - correctCellVectors(last, pindex, grid, aos[pindex]); - --last; - continue; - } + locateParticle(p, pld, lev_min, lev_max, nGrow, local ? grid : -1); - locateParticle(p, pld, lev_min, lev_max, nGrow, local ? grid : -1); + particlePostLocate(p, pld, lev); - particlePostLocate(p, pld, lev); + if (p.id() < 0) { + for (int comp = 0; comp < NumRealComps(); comp++) { + soa.GetRealData(comp)[pindex] = soa.GetRealData(comp)[last]; + } + for (int comp = 0; comp < NumIntComps(); comp++) { + soa.GetIntData(comp)[pindex] = soa.GetIntData(comp)[last]; + } + correctCellVectors(last, pindex, grid, aos[pindex]); + --last; + continue; + } - if (p.id() < 0) { - for (int comp = 0; comp < NumRealComps(); comp++) - soa.GetRealData(comp)[pindex] = soa.GetRealData(comp)[last]; - for (int comp = 0; comp < NumIntComps(); comp++) - soa.GetIntData(comp)[pindex] = soa.GetIntData(comp)[last]; - correctCellVectors(last, pindex, grid, aos[pindex]); - --last; - continue; - } + const int who = ParallelContext::global_to_local_rank(ParticleDistributionMap(pld.m_lev)[pld.m_grid]); + if (who == MyProc) { + if (pld.m_lev != lev || pld.m_grid != grid || pld.m_tile != tile) { + // We own it but must shift it to another place. 
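The re-indented RedistributeCPU loop removes invalidated particles with a swap-with-last idiom: an entry whose id has gone negative is overwritten by the last live entry, the live range shrinks by one, and the tail is erased once after the loop. A generic sketch of the same compaction (not AMReX code):

    #include <cstddef>
    #include <vector>

    void compact_invalid (std::vector<long>& ids)
    {
        std::size_t last = ids.size();
        std::size_t i = 0;
        while (i < last) {
            if (ids[i] < 0) {            // invalidated: overwrite with the last live entry
                ids[i] = ids[last - 1];
                --last;                  // shrink the live range and re-examine slot i
            } else {
                ++i;
            }
        }
        ids.erase(ids.begin() + static_cast<std::ptrdiff_t>(last), ids.end());
    }

Erasing the tail once keeps the pass linear; erasing each invalid element in place would be quadratic.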
+ auto index = std::make_pair(pld.m_grid, pld.m_tile); + AMREX_ASSERT(soa_local[pld.m_lev][index].size() == num_threads); + for (int comp = 0; comp < NumRealComps(); ++comp) { + RealVector& arr = soa_local[pld.m_lev][index][thread_num].GetRealData(comp); + arr.push_back(soa.GetRealData(comp)[pindex]); + } + for (int comp = 0; comp < NumIntComps(); ++comp) { + IntVector& arr = soa_local[pld.m_lev][index][thread_num].GetIntData(comp); + arr.push_back(soa.GetIntData(comp)[pindex]); + } - const int who = ParallelContext::global_to_local_rank(ParticleDistributionMap(pld.m_lev)[pld.m_grid]); - if (who == MyProc) { - if (pld.m_lev != lev || pld.m_grid != grid || pld.m_tile != tile) { - // We own it but must shift it to another place. - auto index = std::make_pair(pld.m_grid, pld.m_tile); - AMREX_ASSERT(soa_local[pld.m_lev][index].size() == num_threads); - for (int comp = 0; comp < NumRealComps(); ++comp) { - RealVector& arr = soa_local[pld.m_lev][index][thread_num].GetRealData(comp); - arr.push_back(soa.GetRealData(comp)[pindex]); - } - for (int comp = 0; comp < NumIntComps(); ++comp) { - IntVector& arr = soa_local[pld.m_lev][index][thread_num].GetIntData(comp); - arr.push_back(soa.GetIntData(comp)[pindex]); - } - - p.id() = -p.id(); // Invalidate the particle - } - } - else { - auto& particles_to_send = tmp_remote[who][thread_num]; - auto old_size = particles_to_send.size(); - auto new_size = old_size + superparticle_size; - particles_to_send.resize(new_size); - - char* dst = &particles_to_send[old_size]; - int array_comp_start = AMREX_SPACEDIM + NStructReal; - for (int comp = 0; comp < NumRealComps(); comp++) { - if (h_redistribute_real_comp[array_comp_start + comp]) { - std::memcpy(dst, &soa.GetRealData(comp)[pindex], sizeof(ParticleReal)); - dst += sizeof(ParticleReal); - } + p.id() = -p.id(); // Invalidate the particle + } } - array_comp_start = 2 + NStructInt; - for (int comp = 0; comp < NumIntComps(); comp++) { - if (h_redistribute_int_comp[array_comp_start + comp]) { - std::memcpy(dst, &soa.GetIntData(comp)[pindex], sizeof(int)); - dst += sizeof(int); - } + else { + auto& particles_to_send = tmp_remote[who][thread_num]; + auto old_size = particles_to_send.size(); + auto new_size = old_size + superparticle_size; + particles_to_send.resize(new_size); + + char* dst = &particles_to_send[old_size]; + int array_comp_start = AMREX_SPACEDIM + NStructReal; + for (int comp = 0; comp < NumRealComps(); comp++) { + if (h_redistribute_real_comp[array_comp_start + comp]) { + std::memcpy(dst, &soa.GetRealData(comp)[pindex], sizeof(ParticleReal)); + dst += sizeof(ParticleReal); + } + } + array_comp_start = 2 + NStructInt; + for (int comp = 0; comp < NumIntComps(); comp++) { + if (h_redistribute_int_comp[array_comp_start + comp]) { + std::memcpy(dst, &soa.GetIntData(comp)[pindex], sizeof(int)); + dst += sizeof(int); + } + } + p.id() = -p.id(); // Invalidate the particle } - p.id() = -p.id(); // Invalidate the particle - } - if (p.id() < 0){ - for (int comp = 0; comp < NumRealComps(); comp++) - soa.GetRealData(comp)[pindex] = soa.GetRealData(comp)[last]; - for (int comp = 0; comp < NumIntComps(); comp++) - soa.GetIntData(comp)[pindex] = soa.GetIntData(comp)[last]; - correctCellVectors(last, pindex, grid, aos[pindex]); - --last; - continue; + if (p.id() < 0){ + for (int comp = 0; comp < NumRealComps(); comp++) { + soa.GetRealData(comp)[pindex] = soa.GetRealData(comp)[last]; + } + for (int comp = 0; comp < NumIntComps(); comp++) { + soa.GetIntData(comp)[pindex] = soa.GetIntData(comp)[last]; + } + 
correctCellVectors(last, pindex, grid, aos[pindex]); + --last; + continue; + } + + ++pindex; } - ++pindex; + for (int comp = 0; comp < NumRealComps(); comp++) { + RealVector& rdata = soa.GetRealData(comp); + rdata.erase(rdata.begin() + last + 1, rdata.begin() + npart); + } + for (int comp = 0; comp < NumIntComps(); comp++) { + IntVector& idata = soa.GetIntData(comp); + idata.erase(idata.begin() + last + 1, idata.begin() + npart); + } } + } + } + } - for (int comp = 0; comp < NumRealComps(); comp++) { - RealVector& rdata = soa.GetRealData(comp); - rdata.erase(rdata.begin() + last + 1, rdata.begin() + npart); - } - for (int comp = 0; comp < NumIntComps(); comp++) { - IntVector& idata = soa.GetIntData(comp); - idata.erase(idata.begin() + last + 1, idata.begin() + npart); - } - } - } - } - } - - for (int lev = lev_min; lev <= lev_max; lev++) { - particle_detail::clearEmptyEntries(m_particles[lev]); - } + for (int lev = lev_min; lev <= lev_max; lev++) { + particle_detail::clearEmptyEntries(m_particles[lev]); + } - // Second pass - for each tile in parallel, collect the particles we are owed from all thread's buffers. - for (int lev = lev_min; lev <= lev_max; lev++) { - typename std::map, Vector >::iterator pmap_it; + // Second pass - for each tile in parallel, collect the particles we are owed from all thread's buffers. + for (int lev = lev_min; lev <= lev_max; lev++) { + typename std::map, Vector >::iterator pmap_it; - if constexpr(!ParticleType::is_soa_particle) { - Vector > grid_tile_ids; - Vector* > pvec_ptrs; + if constexpr(!ParticleType::is_soa_particle) { + Vector > grid_tile_ids; + Vector* > pvec_ptrs; - // we need to create any missing map entries in serial here - for (pmap_it=tmp_local[lev].begin(); pmap_it != tmp_local[lev].end(); pmap_it++) - { - m_particles[lev][pmap_it->first]; - grid_tile_ids.push_back(pmap_it->first); - pvec_ptrs.push_back(&(pmap_it->second)); - } + // we need to create any missing map entries in serial here + for (pmap_it=tmp_local[lev].begin(); pmap_it != tmp_local[lev].end(); pmap_it++) + { + m_particles[lev][pmap_it->first]; + grid_tile_ids.push_back(pmap_it->first); + pvec_ptrs.push_back(&(pmap_it->second)); + } #ifdef AMREX_USE_OMP #pragma omp parallel for #endif - for (int pit = 0; pit < static_cast(pvec_ptrs.size()); ++pit) - { - auto index = grid_tile_ids[pit]; - auto& ptile = DefineAndReturnParticleTile(lev, index.first, index.second); - auto& aos = ptile.GetArrayOfStructs(); - auto& soa = ptile.GetStructOfArrays(); - auto& aos_tmp = *(pvec_ptrs[pit]); - auto& soa_tmp = soa_local[lev][index]; - for (int i = 0; i < num_threads; ++i) { - aos.insert(aos.end(), aos_tmp[i].begin(), aos_tmp[i].end()); - aos_tmp[i].erase(aos_tmp[i].begin(), aos_tmp[i].end()); - for (int comp = 0; comp < NumRealComps(); ++comp) { - RealVector& arr = soa.GetRealData(comp); - RealVector& tmp = soa_tmp[i].GetRealData(comp); - arr.insert(arr.end(), tmp.begin(), tmp.end()); - tmp.erase(tmp.begin(), tmp.end()); - } - for (int comp = 0; comp < NumIntComps(); ++comp) { - IntVector& arr = soa.GetIntData(comp); - IntVector& tmp = soa_tmp[i].GetIntData(comp); - arr.insert(arr.end(), tmp.begin(), tmp.end()); - tmp.erase(tmp.begin(), tmp.end()); - } - } - } - } else { // soa particle - Vector > grid_tile_ids; - - // we need to create any missing map entries in serial here - for (auto soa_map_it=soa_local[lev].begin(); soa_map_it != soa_local[lev].end(); soa_map_it++) - { - m_particles[lev][soa_map_it->first]; - grid_tile_ids.push_back(soa_map_it->first); - } + for (int pit = 0; pit < 
static_cast(pvec_ptrs.size()); ++pit) + { + auto index = grid_tile_ids[pit]; + auto& ptile = DefineAndReturnParticleTile(lev, index.first, index.second); + auto& aos = ptile.GetArrayOfStructs(); + auto& soa = ptile.GetStructOfArrays(); + auto& aos_tmp = *(pvec_ptrs[pit]); + auto& soa_tmp = soa_local[lev][index]; + for (int i = 0; i < num_threads; ++i) { + aos.insert(aos.end(), aos_tmp[i].begin(), aos_tmp[i].end()); + aos_tmp[i].erase(aos_tmp[i].begin(), aos_tmp[i].end()); + for (int comp = 0; comp < NumRealComps(); ++comp) { + RealVector& arr = soa.GetRealData(comp); + RealVector& tmp = soa_tmp[i].GetRealData(comp); + arr.insert(arr.end(), tmp.begin(), tmp.end()); + tmp.erase(tmp.begin(), tmp.end()); + } + for (int comp = 0; comp < NumIntComps(); ++comp) { + IntVector& arr = soa.GetIntData(comp); + IntVector& tmp = soa_tmp[i].GetIntData(comp); + arr.insert(arr.end(), tmp.begin(), tmp.end()); + tmp.erase(tmp.begin(), tmp.end()); + } + } + } + } else { // soa particle + Vector > grid_tile_ids; + + // we need to create any missing map entries in serial here + for (auto soa_map_it=soa_local[lev].begin(); soa_map_it != soa_local[lev].end(); soa_map_it++) + { + m_particles[lev][soa_map_it->first]; + grid_tile_ids.push_back(soa_map_it->first); + } #ifdef AMREX_USE_OMP #pragma omp parallel for #endif - for (int pit = 0; pit < static_cast(grid_tile_ids.size()); ++pit) - { - auto index = grid_tile_ids[pit]; - auto& ptile = DefineAndReturnParticleTile(lev, index.first, index.second); - auto& soa = ptile.GetStructOfArrays(); - auto& soa_tmp = soa_local[lev][index]; - for (int i = 0; i < num_threads; ++i) { - for (int comp = 0; comp < NumRealComps(); ++comp) { - RealVector& arr = soa.GetRealData(comp); - RealVector& tmp = soa_tmp[i].GetRealData(comp); - arr.insert(arr.end(), tmp.begin(), tmp.end()); - tmp.erase(tmp.begin(), tmp.end()); - } - for (int comp = 0; comp < NumIntComps(); ++comp) { - IntVector& arr = soa.GetIntData(comp); - IntVector& tmp = soa_tmp[i].GetIntData(comp); - arr.insert(arr.end(), tmp.begin(), tmp.end()); - tmp.erase(tmp.begin(), tmp.end()); - } - } - } - } - } + for (int pit = 0; pit < static_cast(grid_tile_ids.size()); ++pit) + { + auto index = grid_tile_ids[pit]; + auto& ptile = DefineAndReturnParticleTile(lev, index.first, index.second); + auto& soa = ptile.GetStructOfArrays(); + auto& soa_tmp = soa_local[lev][index]; + for (int i = 0; i < num_threads; ++i) { + for (int comp = 0; comp < NumRealComps(); ++comp) { + RealVector& arr = soa.GetRealData(comp); + RealVector& tmp = soa_tmp[i].GetRealData(comp); + arr.insert(arr.end(), tmp.begin(), tmp.end()); + tmp.erase(tmp.begin(), tmp.end()); + } + for (int comp = 0; comp < NumIntComps(); ++comp) { + IntVector& arr = soa.GetIntData(comp); + IntVector& tmp = soa_tmp[i].GetIntData(comp); + arr.insert(arr.end(), tmp.begin(), tmp.end()); + tmp.erase(tmp.begin(), tmp.end()); + } + } + } + } + } - for (auto& map_it : tmp_remote) { - int who = map_it.first; - not_ours[who]; - } + for (auto& map_it : tmp_remote) { + int who = map_it.first; + not_ours[who]; + } - Vector dest_proc_ids; - Vector >* > pbuff_ptrs; - for (auto& kv : tmp_remote) - { - dest_proc_ids.push_back(kv.first); - pbuff_ptrs.push_back(&(kv.second)); - } + Vector dest_proc_ids; + Vector >* > pbuff_ptrs; + for (auto& kv : tmp_remote) + { + dest_proc_ids.push_back(kv.first); + pbuff_ptrs.push_back(&(kv.second)); + } #ifdef AMREX_USE_OMP #pragma omp parallel for #endif - for (int pmap_it = 0; pmap_it < static_cast(pbuff_ptrs.size()); ++pmap_it) - { - int who = 
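The second pass concatenates the per-thread buffers filled during the first pass into each destination tile, which is why the first pass can push particles without locking. A stripped-down sketch of that merge step, with a plain vector standing in for the particle tile (hypothetical names):

    #include <vector>

    // Append every thread's private buffer to the shared tile data, then
    // clear the buffers so they can be reused on the next redistribution.
    void merge_thread_buffers (std::vector<double>& tile_data,
                               std::vector<std::vector<double>>& per_thread)
    {
        for (auto& buf : per_thread) {
            tile_data.insert(tile_data.end(), buf.begin(), buf.end());
            buf.clear();
        }
    }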
dest_proc_ids[pmap_it]; - Vector >& tmp = *(pbuff_ptrs[pmap_it]); - for (int i = 0; i < num_threads; ++i) { - not_ours[who].insert(not_ours[who].end(), tmp[i].begin(), tmp[i].end()); - tmp[i].erase(tmp[i].begin(), tmp[i].end()); - } - } + for (int pmap_it = 0; pmap_it < static_cast(pbuff_ptrs.size()); ++pmap_it) + { + int who = dest_proc_ids[pmap_it]; + Vector >& tmp = *(pbuff_ptrs[pmap_it]); + for (int i = 0; i < num_threads; ++i) { + not_ours[who].insert(not_ours[who].end(), tmp[i].begin(), tmp[i].end()); + tmp[i].erase(tmp[i].begin(), tmp[i].end()); + } + } - particle_detail::clearEmptyEntries(not_ours); + particle_detail::clearEmptyEntries(not_ours); - if (int(m_particles.size()) > theEffectiveFinestLevel+1) { - // Looks like we lost an AmrLevel on a regrid. - if (m_verbose > 0) { - amrex::Print() << "ParticleContainer::Redistribute() resizing m_particles from " - << m_particles.size() << " to " << theEffectiveFinestLevel+1 << '\n'; - } - AMREX_ASSERT(int(m_particles.size()) >= 2); + if (int(m_particles.size()) > theEffectiveFinestLevel+1) { + // Looks like we lost an AmrLevel on a regrid. + if (m_verbose > 0) { + amrex::Print() << "ParticleContainer::Redistribute() resizing m_particles from " + << m_particles.size() << " to " << theEffectiveFinestLevel+1 << '\n'; + } + AMREX_ASSERT(int(m_particles.size()) >= 2); - m_particles.resize(theEffectiveFinestLevel + 1); - m_dummy_mf.resize(theEffectiveFinestLevel + 1); - } + m_particles.resize(theEffectiveFinestLevel + 1); + m_dummy_mf.resize(theEffectiveFinestLevel + 1); + } - if (ParallelContext::NProcsSub() == 1) { - AMREX_ASSERT(not_ours.empty()); - } - else { - RedistributeMPI(not_ours, lev_min, lev_max, nGrow, local); - } + if (ParallelContext::NProcsSub() == 1) { + AMREX_ASSERT(not_ours.empty()); + } + else { + RedistributeMPI(not_ours, lev_min, lev_max, nGrow, local); + } - AMREX_ASSERT(OK(lev_min, lev_max, nGrow)); + AMREX_ASSERT(OK(lev_min, lev_max, nGrow)); - if (m_verbose > 0) { - auto stoptime = amrex::second() - strttime; + if (m_verbose > 0) { + auto stoptime = amrex::second() - strttime; - ByteSpread(); + ByteSpread(); #ifdef AMREX_LAZY - Lazy::QueueReduction( [=] () mutable { + Lazy::QueueReduction( [=] () mutable { #endif - ParallelReduce::Max(stoptime, ParallelContext::IOProcessorNumberSub(), - ParallelContext::CommunicatorSub()); + ParallelReduce::Max(stoptime, ParallelContext::IOProcessorNumberSub(), + ParallelContext::CommunicatorSub()); - amrex::Print() << "ParticleContainer::Redistribute() time: " << stoptime << "\n\n"; + amrex::Print() << "ParticleContainer::Redistribute() time: " << stoptime << "\n\n"; #ifdef AMREX_LAZY - }); + }); #endif - } + } } template >& not_ours, const int SeqNum = ParallelDescriptor::SeqNum(); - if ((! local) && NumSnds == 0) + if ((! local) && NumSnds == 0) { return; // There's no parallel work to do. 
+ } if (local) { @@ -2227,8 +2246,9 @@ ParticleContainer_impl::OK (int { BL_PROFILE("ParticleContainer::OK()"); - if (lev_max == -1) + if (lev_max == -1) { lev_max = finestLevel(); +} return (numParticlesOutOfRange(*this, lev_min, lev_max, nGrow) == 0); } @@ -2291,7 +2311,7 @@ AssignCellDensitySingleLevel (int rho_index, { BL_PROFILE("ParticleContainer::AssignCellDensitySingleLevel()"); - if (rho_index != 0) amrex::Abort("AssignCellDensitySingleLevel only works if rho_index = 0"); + if (rho_index != 0) { amrex::Abort("AssignCellDensitySingleLevel only works if rho_index = 0"); } MultiFab* mf_pointer; @@ -2312,8 +2332,9 @@ AssignCellDensitySingleLevel (int rho_index, // its effect to an adjacent grid by first putting the value into ghost cells of its // own grid. The mf->SumBoundary call then adds the value from one grid's ghost cell // to another grid's valid region. - if (mf_pointer->nGrow() < 1) + if (mf_pointer->nGrow() < 1) { amrex::Error("Must have at least one ghost cell when in AssignCellDensitySingleLevel"); + } const auto strttime = amrex::second(); @@ -2438,8 +2459,9 @@ InterpolateSingleLevel (MultiFab& mesh_data, int lev) { BL_PROFILE("ParticleContainer::InterpolateSingleLevel()"); - if (mesh_data.nGrow() < 1) + if (mesh_data.nGrow() < 1) { amrex::Error("Must have at least one ghost cell when in InterpolateSingleLevel"); + } const Geometry& gm = Geom(lev); const auto plo = gm.ProbLoArray(); diff --git a/Src/Particle/AMReX_ParticleIO.H b/Src/Particle/AMReX_ParticleIO.H index 4417385d39f..d23fde9d1dd 100644 --- a/Src/Particle/AMReX_ParticleIO.H +++ b/Src/Particle/AMReX_ParticleIO.H @@ -129,10 +129,14 @@ ParticleContainer_impl AMREX_ASSERT( int_comp_names.size() == NStructInt + NumIntComps() ); Vector write_real_comp; - for (int i = 0; i < NStructReal + NumRealComps(); ++i) write_real_comp.push_back(1); + for (int i = 0; i < NStructReal + NumRealComps(); ++i) { + write_real_comp.push_back(1); + } Vector write_int_comp; - for (int i = 0; i < NStructInt + NumIntComps(); ++i) write_int_comp.push_back(1); + for (int i = 0; i < NStructInt + NumIntComps(); ++i) { + write_int_comp.push_back(1); + } WriteBinaryParticleData(dir, name, write_real_comp, write_int_comp, @@ -153,10 +157,14 @@ ParticleContainer_impl AMREX_ASSERT(real_comp_names.size() == NStructReal + NumRealComps()); Vector write_real_comp; - for (int i = 0; i < NStructReal + NumRealComps(); ++i) write_real_comp.push_back(1); + for (int i = 0; i < NStructReal + NumRealComps(); ++i) { + write_real_comp.push_back(1); + } Vector write_int_comp; - for (int i = 0; i < NStructInt + NumIntComps(); ++i) write_int_comp.push_back(1); + for (int i = 0; i < NStructInt + NumIntComps(); ++i) { + write_int_comp.push_back(1); + } Vector int_comp_names; for (int i = 0; i < NStructInt + NumIntComps(); ++i ) @@ -277,10 +285,14 @@ ParticleContainer_impl AMREX_ASSERT( int_comp_names.size() == NStructInt + NArrayInt ); Vector write_real_comp; - for (int i = 0; i < NStructReal + NumRealComps(); ++i) write_real_comp.push_back(1); + for (int i = 0; i < NStructReal + NumRealComps(); ++i) { + write_real_comp.push_back(1); + } Vector write_int_comp; - for (int i = 0; i < NStructInt + NumIntComps(); ++i) write_int_comp.push_back(1); + for (int i = 0; i < NStructInt + NumIntComps(); ++i) { + write_int_comp.push_back(1); + } WriteBinaryParticleData(dir, name, write_real_comp, write_int_comp, @@ -299,10 +311,14 @@ ParticleContainer_impl AMREX_ASSERT(real_comp_names.size() == NStructReal + NumRealComps()); Vector write_real_comp; - for (int i = 0; i < 
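The comment carried along in the AssignCellDensitySingleLevel hunk explains why at least one ghost cell is required: a particle near a tile edge deposits part of its weight outside the valid region, and SumBoundary later adds that ghost contribution into the neighbouring grid. A tiny 1D illustration of the idea (generic code; the linear weighting is chosen only for the example):

    #include <array>
    #include <cstdio>

    int main ()
    {
        constexpr int nvalid = 4;
        std::array<double, nvalid + 2> tile{};   // valid cells plus one ghost cell per side
        double x = 3.75;                         // particle in the last valid cell
        int    i = static_cast<int>(x);
        double w = x - i;
        tile[i + 1] += 1.0 - w;                  // deposit into its own cell (shifted by the lower ghost)
        tile[i + 2] += w;                        // the remainder lands in the upper ghost cell
        std::printf("weight to add into the neighbouring tile: %g\n", tile[nvalid + 1]);
        return 0;
    }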
NStructReal + NumRealComps(); ++i) write_real_comp.push_back(1); + for (int i = 0; i < NStructReal + NumRealComps(); ++i) { + write_real_comp.push_back(1); + } Vector write_int_comp; - for (int i = 0; i < NStructInt + NumIntComps(); ++i) write_int_comp.push_back(1); + for (int i = 0; i < NStructInt + NumIntComps(); ++i) { + write_int_comp.push_back(1); + } Vector int_comp_names; for (int i = 0; i < NStructInt + NumIntComps(); ++i ) @@ -568,7 +584,7 @@ ParticleContainer_impl which[grid] = fnum; where[grid] = VisMF::FileOffset(ofs); - if (count[grid] == 0) continue; + if (count[grid] == 0) { continue; } Vector istuff; Vector rstuff; @@ -611,12 +627,14 @@ ParticleContainer_impl pp.queryAdd("datadigits_read",DATA_Digits_Read); std::string fullname = dir; - if (!fullname.empty() && fullname[fullname.size()-1] != '/') + if (!fullname.empty() && fullname[fullname.size()-1] != '/') { fullname += '/'; + } fullname += file; std::string HdrFileName = fullname; - if (!HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') + if (!HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') { HdrFileName += '/'; + } HdrFileName += "Header"; Vector fileCharPtr; @@ -666,25 +684,30 @@ ParticleContainer_impl int dm; HdrFile >> dm; - if (dm != AMREX_SPACEDIM) + if (dm != AMREX_SPACEDIM) { amrex::Abort("ParticleContainer::Restart(): dm != AMREX_SPACEDIM"); + } int nr; HdrFile >> nr; - if (nr != NStructReal + NumRealComps()) + if (nr != NStructReal + NumRealComps()) { amrex::Abort("ParticleContainer::Restart(): nr != NStructReal + NumRealComps()"); + } std::string comp_name; - for (int i = 0; i < nr; ++i) + for (int i = 0; i < nr; ++i) { HdrFile >> comp_name; + } int ni; HdrFile >> ni; - if (ni != NStructInt + NumIntComps()) + if (ni != NStructInt + NumIntComps()) { amrex::Abort("ParticleContainer::Restart(): ni != NStructInt"); + } - for (int i = 0; i < ni; ++i) + for (int i = 0; i < ni; ++i) { HdrFile >> comp_name; + } bool checkpoint; HdrFile >> checkpoint; @@ -733,7 +756,7 @@ ParticleContainer_impl phdr_name = amrex::Concatenate(phdr_name, lev, 1); phdr_name += "/Particle_H"; - if (! amrex::FileExists(phdr_name)) continue; + if (! amrex::FileExists(phdr_name)) { continue; } Vector phdr_chars; ParallelDescriptor::ReadAndBcastFile(phdr_name, phdr_chars); @@ -747,7 +770,7 @@ ParticleContainer_impl } particle_box_arrays[lev].readFrom(phdr_file); - if (! particle_box_arrays[lev].CellEqual(ParticleBoxArray(lev))) dual_grid = true; + if (! particle_box_arrays[lev].CellEqual(ParticleBoxArray(lev))) { dual_grid = true; } } } else // if no particle box array information exists in the file, we assume a single grid restart { @@ -798,7 +821,7 @@ ParticleContainer_impl const int rank = ParallelDescriptor::MyProc(); const int NReaders = MaxReaders(); - if (rank >= NReaders) return; + if (rank >= NReaders) { return; } const int Navg = ngrids[lev] / NReaders; const int Nleft = ngrids[lev] - Navg * NReaders; @@ -821,13 +844,14 @@ ParticleContainer_impl for(int igrid = 0; igrid < static_cast(grids_to_read.size()); ++igrid) { const int grid = grids_to_read[igrid]; - if (count[grid] <= 0) continue; + if (count[grid] <= 0) { continue; } // The file names in the header file are relative. 
std::string name = fullname; - if (!name.empty() && name[name.size()-1] != '/') + if (!name.empty() && name[name.size()-1] != '/') { name += '/'; + } name += "Level_"; name += amrex::Concatenate("", lev, 1); @@ -839,8 +863,9 @@ ParticleContainer_impl ParticleFile.open(name.c_str(), std::ios::in | std::ios::binary); - if (!ParticleFile.good()) + if (!ParticleFile.good()) { amrex::FileOpenFailed(name); + } ParticleFile.seekg(where[grid], std::ios::beg); @@ -869,8 +894,9 @@ ParticleContainer_impl ParticleFile.close(); - if (!ParticleFile.good()) + if (!ParticleFile.good()) { amrex::Abort("ParticleContainer::Restart(): problem reading particles"); + } } } @@ -1090,11 +1116,12 @@ ParticleContainer_impl Gpu::copy(Gpu::deviceToHost, aos.begin(), aos.begin() + np, host_aos.begin()); for (int k = 0; k < np; ++k) { const ParticleType& p = host_aos[k]; - if (p.id() > 0) + if (p.id() > 0) { // // Only count (and checkpoint) valid particles. // nparticles++; + } } } } @@ -1113,8 +1140,9 @@ ParticleContainer_impl File.open(filename.c_str(), std::ios::out|std::ios::trunc); - if (!File.good()) + if (!File.good()) { amrex::FileOpenFailed(filename); + } File << nparticles << '\n'; File << NStructReal << '\n'; @@ -1126,8 +1154,9 @@ ParticleContainer_impl File.close(); - if (!File.good()) + if (!File.good()) { amrex::Abort("ParticleContainer::WriteAsciiFile(): problem writing file"); + } } ParallelDescriptor::Barrier(); @@ -1151,8 +1180,9 @@ ParticleContainer_impl File.precision(15); - if (!File.good()) + if (!File.good()) { amrex::FileOpenFailed(filename); + } for (int lev = 0; lev < m_particles.size(); lev++) { auto& pmap = m_particles[lev]; @@ -1175,21 +1205,25 @@ ParticleContainer_impl << it->pos(1) << ' ', << it->pos(2) << ' '); - for (int i = 0; i < NStructReal; i++) + for (int i = 0; i < NStructReal; i++) { File << it->rdata(i) << ' '; + } File << it->id() << ' '; File << it->cpu() << ' '; - for (int i = 0; i < NStructInt; i++) + for (int i = 0; i < NStructInt; i++) { File << it->idata(i) << ' '; + } // then the particle attributes. 
- for (int i = 0; i < NumRealComps(); i++) + for (int i = 0; i < NumRealComps(); i++) { File << host_soa.GetRealData(i)[index] << ' '; + } - for (int i = 0; i < NumIntComps(); i++) + for (int i = 0; i < NumIntComps(); i++) { File << host_soa.GetIntData(i)[index] << ' '; + } File << '\n'; } @@ -1201,8 +1235,9 @@ ParticleContainer_impl File.close(); - if (!File.good()) + if (!File.good()) { amrex::Abort("ParticleContainer::WriteAsciiFile(): problem writing file"); + } } diff --git a/Src/Particle/AMReX_ParticleInit.H b/Src/Particle/AMReX_ParticleInit.H index 04e8b75fbc3..83f63b04b85 100644 --- a/Src/Particle/AMReX_ParticleInit.H +++ b/Src/Particle/AMReX_ParticleInit.H @@ -75,15 +75,16 @@ ParticleContainer_impl IntVect lNrep(AMREX_D_DECL(1,1,1)); - if (Nrep != nullptr) + if (Nrep != nullptr) { lNrep = *Nrep; + } Long how_many = 0; Long how_many_read = 0; Gpu::HostVector nparticles; Vector > nreals(0); - if (extradata > NStructReal) nreals.resize(extradata - NStructReal); + if (extradata > NStructReal) { nreals.resize(extradata - NStructReal); } if (MyProc < NReaders) { @@ -108,7 +109,7 @@ ParticleContainer_impl ParticleType p, p_rep; Vector r; - if (extradata > NStructReal) r.resize(extradata - NStructReal); + if (extradata > NStructReal) { r.resize(extradata - NStructReal); } const int Chunk = cnt / NReaders; @@ -183,11 +184,12 @@ ParticleContainer_impl p.cpu() = MyProc; nparticles.push_back(p); - if(nreals.size() > extradata - NStructReal) + if(nreals.size() > extradata - NStructReal) { for (int n = NStructReal; n < extradata; n++) { nreals[n-NStructReal].push_back(r[n-NStructReal]); } + } how_many++; how_many_read++; @@ -242,11 +244,12 @@ ParticleContainer_impl nparticles.push_back(p_rep); - if (nreals.size() > extradata - NStructReal) + if (nreals.size() > extradata - NStructReal) { for (int n = NStructReal; n < extradata; n++) { nreals[n-NStructReal].push_back(r[n-NStructReal]); } + } how_many++; } @@ -333,13 +336,14 @@ ParticleContainer_impl Gpu::copyAsync(Gpu::hostToDevice, src_tile.begin(), src_tile.end(), dst_tile.GetArrayOfStructs().begin() + old_size); - if((host_real_attribs[lev][std::make_pair(grid, tile)]).size() > (long unsigned int) NArrayReal) - for (int i = 0; i < NArrayReal; ++i) { - Gpu::copyAsync(Gpu::hostToDevice, - host_real_attribs[lev][std::make_pair(grid,tile)][i].begin(), - host_real_attribs[lev][std::make_pair(grid,tile)][i].end(), - dst_tile.GetStructOfArrays().GetRealData(i).begin() + old_size); - } + if((host_real_attribs[lev][std::make_pair(grid, tile)]).size() > (long unsigned int) NArrayReal) { + for (int i = 0; i < NArrayReal; ++i) { + Gpu::copyAsync(Gpu::hostToDevice, + host_real_attribs[lev][std::make_pair(grid,tile)][i].begin(), + host_real_attribs[lev][std::make_pair(grid,tile)][i].end(), + dst_tile.GetStructOfArrays().GetRealData(i).begin() + old_size); + } + } } } Gpu::streamSynchronize(); @@ -373,20 +377,22 @@ ParticleContainer_impl Where(p, pld); host_particles[pld.m_lev][std::make_pair(pld.m_grid, pld.m_tile)].push_back(p); - if((host_real_attribs[pld.m_lev][std::make_pair(pld.m_grid, pld.m_tile)]).size() > (long unsigned int) (extradata - NStructReal)) + if((host_real_attribs[pld.m_lev][std::make_pair(pld.m_grid, pld.m_tile)]).size() > (long unsigned int) (extradata - NStructReal)) { for (int n = NStructReal; n < extradata; n++) { Real rdata = nreals[n-NStructReal].back(); host_real_attribs[pld.m_lev][std::make_pair(pld.m_grid, pld.m_tile)][n-NStructReal].push_back(rdata); } + } nparticles.pop_back(); - if (nreals.size() > extradata - NStructReal) + 
if (nreals.size() > extradata - NStructReal) { for (int n = NStructReal; n < extradata; n++) { nreals[n-NStructReal].pop_back(); } + } } } @@ -539,8 +545,9 @@ InitFromBinaryFile (const std::string& file, // // Just set'm. // - for (int i = 0; i < NProcs; i++) + for (int i = 0; i < NProcs; i++) { rprocs[i] = i; + } } else { @@ -604,8 +611,9 @@ InitFromBinaryFile (const std::string& file, ifs.open(file.c_str(), std::ios::in|std::ios::binary); - if (!ifs.good()) + if (!ifs.good()) { amrex::FileOpenFailed(file); + } ifs.read((char*)&NP, sizeof(NP)); ifs.read((char*)&DM, sizeof(DM)); @@ -613,23 +621,27 @@ InitFromBinaryFile (const std::string& file, // // NP MUST be positive! // - if (NP <= 0) + if (NP <= 0) { amrex::Abort("ParticleContainer_impl::InitFromBinaryFile(): NP <= 0"); + } // // DM must equal AMREX_SPACEDIM. // - if (DM != AMREX_SPACEDIM) + if (DM != AMREX_SPACEDIM) { amrex::Abort("ParticleContainer_impl::InitFromBinaryFile(): DM != AMREX_SPACEDIM"); + } // // NX MUST be in [0,N]. // - if (NX < 0 || NX > NStructReal) + if (NX < 0 || NX > NStructReal) { amrex::Abort("ParticleContainer_impl::InitFromBinaryFile(): NX < 0 || NX > N"); + } // // Can't ask for more data than exists in the file! // - if (extradata > NX) + if (extradata > NX) { amrex::Abort("ParticleContainer_impl::InitFromBinaryFile(): extradata > NX"); + } // // Figure out whether we're dealing with floats or doubles. // @@ -656,9 +668,11 @@ InitFromBinaryFile (const std::string& file, // Skip to our place in the file. // int id = 0; - for ( ; id < NReaders; id++) - if (rprocs[id] == MyProc) + for ( ; id < NReaders; id++) { + if (rprocs[id] == MyProc) { break; + } + } AMREX_ASSERT(id >= 0 && id < NReaders); @@ -686,15 +700,16 @@ InitFromBinaryFile (const std::string& file, // Long MyCnt = NP / NReaders; - if (MyProc == rprocs[0]) + if (MyProc == rprocs[0]) { // // Give any remainder to the first reader. 
// MyCnt += NP % NReaders; + } Long how_many_redists = NP / (NPartPerRedist*NReaders), how_many_read = 0; - if (NP % (NPartPerRedist*NReaders)) how_many_redists++; + if (NP % (NPartPerRedist*NReaders)) { how_many_redists++; } Vector fxtra, fignore; Vector dxtra, dignore; @@ -763,17 +778,19 @@ InitFromBinaryFile (const std::string& file, { if (RealSizeInFile == sizeof(float)) { - ifs.read((char*)&fxtra[0], std::streamsize(extradata*sizeof(float))); + ifs.read((char*)fxtra.data(), std::streamsize(extradata*sizeof(float))); - for (int ii = 0; ii < extradata; ii++) + for (int ii = 0; ii < extradata; ii++) { p.rdata(ii) = static_cast(fxtra[ii]); +} } else if (RealSizeInFile == sizeof(double)) { - ifs.read((char*)&dxtra[0], std::streamsize(extradata*sizeof(double))); + ifs.read((char*)dxtra.data(), std::streamsize(extradata*sizeof(double))); - for (int ii = 0; ii < extradata; ii++) + for (int ii = 0; ii < extradata; ii++) { p.rdata(ii) = static_cast(dxtra[ii]); +} } } // @@ -783,11 +800,11 @@ InitFromBinaryFile (const std::string& file, { if (RealSizeInFile == sizeof(float)) { - ifs.read((char*)&fignore[0], std::streamsize((NX-extradata)*sizeof(float))); + ifs.read((char*)fignore.data(), std::streamsize((NX-extradata)*sizeof(float))); } else if (RealSizeInFile == sizeof(double)) { - ifs.read((char*)&dignore[0], std::streamsize((NX-extradata)*sizeof(double))); + ifs.read((char*)dignore.data(), std::streamsize((NX-extradata)*sizeof(double))); } } @@ -923,10 +940,11 @@ InitFromBinaryMetaFile (const std::string& metafile, { std::getline(ifs,file); - if (!ifs.good()) break; + if (!ifs.good()) { break; } - if (m_verbose > 1) + if (m_verbose > 1) { amrex::Print() << "InitFromBinaryMetaFile: processing file: " << file << '\n'; + } InitFromBinaryFile(file, extradata); } @@ -971,7 +989,7 @@ InitRandom (Long icount, // We will enforce that the particles are within the containing_bx. // If containing_bx is not passed in, it defaults to the full domain. - if (!containing_bx.ok()) containing_bx = geom.ProbDomain(); + if (!containing_bx.ok()) { containing_bx = geom.ProbDomain(); } // containing_bx is assumed to lie within the domain. 
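Besides adding braces, the hunks above also replace &fxtra[0]/&dxtra[0] with fxtra.data()/dxtra.data() in the ifs.read calls. A minimal standalone sketch of that pattern (assumed buffer names, not AMReX code) shows why data() is the safer spelling: it is well defined even when the vector is empty, whereas &v[0] on an empty vector is undefined behavior.

#include <fstream>
#include <vector>

// Read n doubles into buf using data() rather than &buf[0].
void read_doubles (std::ifstream& ifs, std::vector<double>& buf, std::size_t n)
{
    buf.resize(n);
    ifs.read(reinterpret_cast<char*>(buf.data()),
             static_cast<std::streamsize>(n * sizeof(double)));
}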
if (!geom.ProbDomain().contains(containing_bx)) @@ -1366,8 +1384,9 @@ ParticleContainer_impl p.pos(1) = static_cast(grid_box.lo(1) + (dist(mt) + double(jcnt)) / double(icount_per_box) * grid_box.length(1)); p.pos(2) = static_cast(grid_box.lo(2) + (dist(mt) + double(kcnt)) / double(icount_per_box) * grid_box.length(2)); - for (int i = 0; i < AMREX_SPACEDIM; i++) + for (int i = 0; i < AMREX_SPACEDIM; i++) { AMREX_ASSERT(p.pos(i) < grid_box.hi(i)); + } // the real struct data for (int i = 0; i < NStructReal; i++) { @@ -1518,8 +1537,9 @@ InitNRandomPerCell (int n_per_cell, const ParticleInitData& pdata) resizeData(); - for (int lev = 0; lev < m_particles.size(); lev++) + for (int lev = 0; lev < m_particles.size(); lev++) { AMREX_ASSERT(m_particles[lev].empty()); + } ParticleLocData pld; ParticleType p; @@ -1555,7 +1575,7 @@ InitNRandomPerCell (int n_per_cell, const ParticleInitData& pdata) while (iter < max_iter) { r = amrex::Random(); p.pos(i) = static_cast(grid_box.lo(i) + (r + Real(cell[i]-beg[i]))*dx[i]); - if (p.pos(i) < grid_box.hi(i)) break; + if (p.pos(i) < grid_box.hi(i)) { break; } iter++; } AMREX_ASSERT(p.pos(i) < grid_box.hi(i)); diff --git a/Src/Particle/AMReX_ParticleLocator.H b/Src/Particle/AMReX_ParticleLocator.H index 35257049b9d..f9eac5d6ee4 100644 --- a/Src/Particle/AMReX_ParticleLocator.H +++ b/Src/Particle/AMReX_ParticleLocator.H @@ -112,7 +112,7 @@ public: m_geom = geom; int num_boxes = static_cast(ba.size()); m_host_boxes.resize(0); - for (int i = 0; i < num_boxes; ++i) m_host_boxes.push_back(ba[i]); + for (int i = 0; i < num_boxes; ++i) { m_host_boxes.push_back(ba[i]); } m_device_boxes.resize(num_boxes); Gpu::copyAsync(Gpu::hostToDevice, m_host_boxes.begin(), m_host_boxes.end(), m_device_boxes.begin()); @@ -178,7 +178,7 @@ public: bool isValid (const BoxArray& ba) const noexcept { - if (m_defined) return BoxArray::SameRefs(m_ba, ba); + if (m_defined) { return BoxArray::SameRefs(m_ba, ba); } return false; } @@ -220,13 +220,13 @@ struct AmrAssignGrid for (int lev = lev_max; lev >= lev_min; --lev) { int grid = m_funcs[lev](p); - if (grid >= 0) return makeTuple(grid, lev); + if (grid >= 0) { return makeTuple(grid, lev); } } for (int lev = lev_min; lev >= lev_min; --lev) { int grid = m_funcs[lev](p, nGrow); - if (grid >= 0) return makeTuple(grid, lev); + if (grid >= 0) { return makeTuple(grid, lev); } } return makeTuple(-1, -1); @@ -301,11 +301,12 @@ public: [[nodiscard]] bool isValid (const Vector& a_ba) const { if ( !m_defined || (m_locators.empty()) || - (m_locators.size() != a_ba.size()) ) return false; + (m_locators.size() != a_ba.size()) ) { return false; } bool all_valid = true; int num_levels = m_locators.size(); - for (int lev = 0; lev < num_levels; ++lev) + for (int lev = 0; lev < num_levels; ++lev) { all_valid = all_valid && m_locators[lev].isValid(a_ba[lev]); + } return all_valid; } @@ -313,8 +314,9 @@ public: { Vector ba; int num_levels = a_gdb->finestLevel()+1; - for (int lev = 0; lev < num_levels; ++lev) + for (int lev = 0; lev < num_levels; ++lev) { ba.push_back(a_gdb->ParticleBoxArray(lev)); + } return this->isValid(ba); } diff --git a/Src/Particle/AMReX_ParticleMPIUtil.cpp b/Src/Particle/AMReX_ParticleMPIUtil.cpp index 64e9737a95a..d9b9f73a29f 100644 --- a/Src/Particle/AMReX_ParticleMPIUtil.cpp +++ b/Src/Particle/AMReX_ParticleMPIUtil.cpp @@ -26,7 +26,7 @@ namespace amrex { Vector& Snds, Vector& Rcvs) { Long NumSnds = CountSnds(not_ours, Snds); - if (NumSnds == 0) return NumSnds; + if (NumSnds == 0) { return NumSnds; } BL_COMM_PROFILE(BLProfiler::Alltoall, 
sizeof(Long), ParallelContext::MyProcSub(), BLProfiler::BeforeCall()); diff --git a/Src/Particle/AMReX_ParticleMesh.H b/Src/Particle/AMReX_ParticleMesh.H index 75d2cdb4838..2ded8df7506 100644 --- a/Src/Particle/AMReX_ParticleMesh.H +++ b/Src/Particle/AMReX_ParticleMesh.H @@ -130,7 +130,7 @@ MeshToParticle (PC& pc, MF const& mf, int lev, F&& f) pc.ParticleDistributionMap(lev), mf.nComp(), mf.nGrowVect()); - if (mf_pointer != &mf) mf_pointer->ParallelCopy(mf,0,0,mf.nComp(),mf.nGrowVect(),mf.nGrowVect()); + if (mf_pointer != &mf) {mf_pointer->ParallelCopy(mf,0,0,mf.nComp(),mf.nGrowVect(),mf.nGrowVect()); } const auto plo = pc.Geom(lev).ProbLoArray(); const auto dxi = pc.Geom(lev).InvCellSizeArray(); @@ -154,7 +154,7 @@ MeshToParticle (PC& pc, MF const& mf, int lev, F&& f) }); } - if (mf_pointer != &mf) delete mf_pointer; + if (mf_pointer != &mf) { delete mf_pointer; } } } diff --git a/Src/Particle/AMReX_ParticleTile.H b/Src/Particle/AMReX_ParticleTile.H index 871acad3533..1663fece598 100644 --- a/Src/Particle/AMReX_ParticleTile.H +++ b/Src/Particle/AMReX_ParticleTile.H @@ -205,18 +205,23 @@ struct ParticleTileData { AMREX_ASSERT(index < m_size); SuperParticleType sp; - for (int i = 0; i < AMREX_SPACEDIM; ++i) + for (int i = 0; i < AMREX_SPACEDIM; ++i) { sp.pos(i) = m_aos[index].pos(i); - for (int i = 0; i < NStructReal; ++i) + } + for (int i = 0; i < NStructReal; ++i) { sp.rdata(i) = m_aos[index].rdata(i); - for (int i = 0; i < NAR; ++i) + } + for (int i = 0; i < NAR; ++i) { sp.rdata(NStructReal+i) = m_rdata[i][index]; + } sp.id() = m_aos[index].id(); sp.cpu() = m_aos[index].cpu(); - for (int i = 0; i < NStructInt; ++i) + for (int i = 0; i < NStructInt; ++i) { sp.idata(i) = m_aos[index].idata(i); - for (int i = 0; i < NAI; ++i) + } + for (int i = 0; i < NAI; ++i) { sp.idata(NStructInt+i) = m_idata[i][index]; + } return sp; } @@ -229,10 +234,12 @@ struct ParticleTileData for (int i = 0; i < AMREX_SPACEDIM; ++i) {sp.pos(i) = m_rdata[i][index];} sp.id() = m_idata[0][index]; sp.cpu() = m_idata[1][index]; - for (int i = 0; i < NAR; ++i) + for (int i = 0; i < NAR; ++i) { sp.rdata(i) = m_rdata[i][index]; - for (int i = 0; i < NAI; ++i) + } + for (int i = 0; i < NAI; ++i) { sp.idata(i) = m_idata[i][index]; + } return sp; } @@ -240,28 +247,35 @@ struct ParticleTileData AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void setSuperParticle (const SuperParticleType& sp, int index) const noexcept { - for (int i = 0; i < AMREX_SPACEDIM; ++i) + for (int i = 0; i < AMREX_SPACEDIM; ++i) { m_aos[index].pos(i) = sp.pos(i); - for (int i = 0; i < NStructReal; ++i) + } + for (int i = 0; i < NStructReal; ++i) { m_aos[index].rdata(i) = sp.rdata(i); - for (int i = 0; i < NAR; ++i) + } + for (int i = 0; i < NAR; ++i) { m_rdata[i][index] = sp.rdata(NStructReal+i); + } m_aos[index].id() = sp.id(); m_aos[index].cpu() = sp.cpu(); - for (int i = 0; i < NStructInt; ++i) + for (int i = 0; i < NStructInt; ++i) { m_aos[index].idata(i) = sp.idata(i); - for (int i = 0; i < NAI; ++i) + } + for (int i = 0; i < NAI; ++i) { m_idata[i][index] = sp.idata(NStructInt+i); + } } template ::type = 0> AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void setSuperParticle (const SuperParticleType& sp, int index) const noexcept { - for (int i = 0; i < NAR; ++i) + for (int i = 0; i < NAR; ++i) { m_rdata[i][index] = sp.rdata(i); - for (int i = 0; i < NAI; ++i) + } + for (int i = 0; i < NAI; ++i) { m_idata[i][index] = sp.idata(i); + } } }; @@ -421,8 +435,9 @@ SoAParticle::NextID () #endif next = the_next_id++; - if (next > IntParticleIds::LastParticleID) + if 
(next > IntParticleIds::LastParticleID) { amrex::Abort("SoAParticle::NextID() -- too many particles"); + } return int(next); } @@ -432,8 +447,9 @@ int SoAParticle::UnprotectedNextID () { int next = the_next_id++; - if (next > IntParticleIds::LastParticleID) + if (next > IntParticleIds::LastParticleID) { amrex::Abort("SoAParticle::NextID() -- too many particles"); + } return next; } @@ -575,20 +591,27 @@ struct ConstParticleTileData { AMREX_ASSERT(index < m_size); SuperParticleType sp; - for (int i = 0; i < AMREX_SPACEDIM; ++i) + for (int i = 0; i < AMREX_SPACEDIM; ++i) { sp.pos(i) = m_aos[index].pos(i); - for (int i = 0; i < NStructReal; ++i) + } + for (int i = 0; i < NStructReal; ++i) { sp.rdata(i) = m_aos[index].rdata(i); - if constexpr(NArrayReal > 0) - for (int i = 0; i < NArrayReal; ++i) + } + if constexpr(NArrayReal > 0) { + for (int i = 0; i < NArrayReal; ++i) { sp.rdata(NStructReal+i) = m_rdata[i][index]; + } + } sp.id() = m_aos[index].id(); sp.cpu() = m_aos[index].cpu(); - for (int i = 0; i < NStructInt; ++i) + for (int i = 0; i < NStructInt; ++i) { sp.idata(i) = m_aos[index].idata(i); - if constexpr(NArrayInt > 0) - for (int i = 0; i < NArrayInt; ++i) + } + if constexpr(NArrayInt > 0) { + for (int i = 0; i < NArrayInt; ++i) { sp.idata(NStructInt+i) = m_idata[i][index]; + } + } return sp; } @@ -601,10 +624,12 @@ struct ConstParticleTileData for (int i = 0; i < AMREX_SPACEDIM; ++i) {sp.pos(i) = m_rdata[i][index];} sp.id() = m_idata[0][index]; sp.cpu() = m_idata[1][index]; - for (int i = 0; i < NAR; ++i) + for (int i = 0; i < NAR; ++i) { sp.rdata(i) = m_rdata[i][index]; - for (int i = 0; i < NAI; ++i) + } + for (int i = 0; i < NAI; ++i) { sp.idata(i) = m_idata[i][index]; + } return sp; } }; @@ -825,24 +850,29 @@ struct ParticleTile if constexpr (!ParticleType::is_soa_particle) { m_aos_tile.resize(np+1); - for (int i = 0; i < AMREX_SPACEDIM; ++i) + for (int i = 0; i < AMREX_SPACEDIM; ++i) { m_aos_tile[np].pos(i) = sp.pos(i); - for (int i = 0; i < NStructReal; ++i) + } + for (int i = 0; i < NStructReal; ++i) { m_aos_tile[np].rdata(i) = sp.rdata(i); + } m_aos_tile[np].id() = sp.id(); m_aos_tile[np].cpu() = sp.cpu(); - for (int i = 0; i < NStructInt; ++i) + for (int i = 0; i < NStructInt; ++i) { m_aos_tile[np].idata(i) = sp.idata(i); + } } m_soa_tile.resize(np+1); auto& arr_rdata = m_soa_tile.GetRealData(); auto& arr_idata = m_soa_tile.GetIntData(); - for (int i = 0; i < NArrayReal; ++i) + for (int i = 0; i < NArrayReal; ++i) { arr_rdata[i][np] = sp.rdata(NStructReal+i); - for (int i = 0; i < NArrayInt; ++i) + } + for (int i = 0; i < NArrayInt; ++i) { arr_idata[i][np] = sp.idata(NStructInt+i); + } } /// @@ -1053,12 +1083,16 @@ struct ParticleTile } else { ptd.m_aos = nullptr; } - if constexpr(NArrayReal > 0) - for (int i = 0; i < NArrayReal; ++i) + if constexpr(NArrayReal > 0) { + for (int i = 0; i < NArrayReal; ++i) { ptd.m_rdata[i] = m_soa_tile.GetRealData(i).dataPtr(); - if constexpr(NArrayInt > 0) - for (int i = 0; i < NArrayInt; ++i) + } + } + if constexpr(NArrayInt > 0) { + for (int i = 0; i < NArrayInt; ++i) { ptd.m_idata[i] = m_soa_tile.GetIntData(i).dataPtr(); + } + } ptd.m_size = size(); ptd.m_num_runtime_real = m_runtime_r_ptrs.size(); ptd.m_num_runtime_int = m_runtime_i_ptrs.size(); @@ -1114,12 +1148,16 @@ struct ParticleTile } else { ptd.m_aos = nullptr; } - if constexpr(NArrayReal > 0) - for (int i = 0; i < NArrayReal; ++i) + if constexpr(NArrayReal > 0) { + for (int i = 0; i < NArrayReal; ++i) { ptd.m_rdata[i] = m_soa_tile.GetRealData(i).dataPtr(); - if 
constexpr(NArrayInt > 0) - for (int i = 0; i < NArrayInt; ++i) + } + } + if constexpr(NArrayInt > 0) { + for (int i = 0; i < NArrayInt; ++i) { ptd.m_idata[i] = m_soa_tile.GetIntData(i).dataPtr(); + } + } ptd.m_size = size(); ptd.m_num_runtime_real = m_runtime_r_cptrs.size(); ptd.m_num_runtime_int = m_runtime_i_cptrs.size(); diff --git a/Src/Particle/AMReX_ParticleTransformation.H b/Src/Particle/AMReX_ParticleTransformation.H index 0858ac6b1a0..a5b7afa2356 100644 --- a/Src/Particle/AMReX_ParticleTransformation.H +++ b/Src/Particle/AMReX_ParticleTransformation.H @@ -36,16 +36,22 @@ void copyParticle (const ParticleTileData& dst, AMREX_ASSERT(dst.m_num_runtime_int == src.m_num_runtime_int ); dst.m_aos[dst_i] = src.m_aos[src_i]; - if constexpr(NAR > 0) - for (int j = 0; j < NAR; ++j) + if constexpr(NAR > 0) { + for (int j = 0; j < NAR; ++j) { dst.m_rdata[j][dst_i] = src.m_rdata[j][src_i]; - for (int j = 0; j < dst.m_num_runtime_real; ++j) + } + } + for (int j = 0; j < dst.m_num_runtime_real; ++j) { dst.m_runtime_rdata[j][dst_i] = src.m_runtime_rdata[j][src_i]; - if constexpr(NAI > 0) - for (int j = 0; j < NAI; ++j) + } + if constexpr(NAI > 0) { + for (int j = 0; j < NAI; ++j) { dst.m_idata[j][dst_i] = src.m_idata[j][src_i]; - for (int j = 0; j < dst.m_num_runtime_int; ++j) + } + } + for (int j = 0; j < dst.m_num_runtime_int; ++j) { dst.m_runtime_idata[j][dst_i] = src.m_runtime_idata[j][src_i]; + } } /** @@ -72,14 +78,18 @@ void copyParticle (const ParticleTileData& dst, AMREX_ASSERT(dst.m_num_runtime_int == src.m_num_runtime_int ); dst.m_aos[dst_i] = src.m_aos[src_i]; - for (int j = 0; j < NAR; ++j) + for (int j = 0; j < NAR; ++j) { dst.m_rdata[j][dst_i] = src.m_rdata[j][src_i]; - for (int j = 0; j < dst.m_num_runtime_real; ++j) + } + for (int j = 0; j < dst.m_num_runtime_real; ++j) { dst.m_runtime_rdata[j][dst_i] = src.m_runtime_rdata[j][src_i]; - for (int j = 0; j < NAI; ++j) + } + for (int j = 0; j < NAI; ++j) { dst.m_idata[j][dst_i] = src.m_idata[j][src_i]; - for (int j = 0; j < dst.m_num_runtime_int; ++j) + } + for (int j = 0; j < dst.m_num_runtime_int; ++j) { dst.m_runtime_idata[j][dst_i] = src.m_runtime_idata[j][src_i]; + } } /** @@ -106,14 +116,18 @@ void swapParticle (const ParticleTileData& dst, AMREX_ASSERT(dst.m_num_runtime_int == src.m_num_runtime_int ); amrex::Swap(src.m_aos[src_i], dst.m_aos[dst_i]); - for (int j = 0; j < NAR; ++j) + for (int j = 0; j < NAR; ++j) { amrex::Swap(dst.m_rdata[j][dst_i], src.m_rdata[j][src_i]); - for (int j = 0; j < dst.m_num_runtime_real; ++j) + } + for (int j = 0; j < dst.m_num_runtime_real; ++j) { amrex::Swap(dst.m_runtime_rdata[j][dst_i], src.m_runtime_rdata[j][src_i]); - for (int j = 0; j < NAI; ++j) + } + for (int j = 0; j < NAI; ++j) { amrex::Swap(dst.m_idata[j][dst_i], src.m_idata[j][src_i]); - for (int j = 0; j < dst.m_num_runtime_int; ++j) + } + for (int j = 0; j < dst.m_num_runtime_int; ++j) { amrex::Swap(dst.m_runtime_idata[j][dst_i], src.m_runtime_idata[j][src_i]); + } } /** @@ -337,7 +351,7 @@ Index filterParticles (DstTile& dst, const SrcTile& src, const Index* mask, AMREX_HOST_DEVICE_FOR_1D( n, i, { - if (mask[i]) copyParticle(dst_data, src_data, src_start+i, dst_start+p_offsets[i]); + if (mask[i]) { copyParticle(dst_data, src_data, src_start+i, dst_start+p_offsets[i]); } }); Gpu::streamSynchronize(); @@ -437,8 +451,10 @@ Index filterAndTransformParticles (DstTile& dst, const SrcTile& src, Index* mask AMREX_HOST_DEVICE_FOR_1D( np, i, { - if (mask[i]) f(dst_data, src_data, src_start+i, - dst_start+p_offsets[src_start+i]); + if 
(mask[i]) { + f(dst_data, src_data, src_start+i, + dst_start+p_offsets[src_start+i]); + } }); Gpu::streamSynchronize(); @@ -526,7 +542,7 @@ Index filterAndTransformParticles (DstTile1& dst1, DstTile2& dst2, AMREX_HOST_DEVICE_FOR_1D( np, i, { - if (mask[i]) f(dst_data1, dst_data2, src_data, i, p_offsets[i], p_offsets[i]); + if (mask[i]) { f(dst_data1, dst_data2, src_data, i, p_offsets[i], p_offsets[i]); } }); Gpu::streamSynchronize(); diff --git a/Src/Particle/AMReX_ParticleUtil.H b/Src/Particle/AMReX_ParticleUtil.H index 0d7f9af776b..9136bea6741 100644 --- a/Src/Particle/AMReX_ParticleUtil.H +++ b/Src/Particle/AMReX_ParticleUtil.H @@ -74,7 +74,7 @@ numParticlesOutOfRange (Iterator const& pti, IntVect nGrow) [=] AMREX_GPU_DEVICE (int i) -> ReduceTuple { auto p = make_particle{}(ptd,i); - if ((p.id() < 0)) return false; + if ((p.id() < 0)) { return false; } IntVect iv( AMREX_D_DECL(int(amrex::Math::floor((p.pos(0)-plo[0])*dxi[0])), int(amrex::Math::floor((p.pos(1)-plo[1])*dxi[1])), @@ -383,7 +383,7 @@ int getParticleGrid (P const& p, amrex::Array4 const& mask, amrex::GpuArray const& dxi, const Box& domain) noexcept { - if (p.id() < 0) return -1; + if (p.id() < 0) { return -1; } IntVect iv = getParticleCell(p, plo, dxi, domain); return mask(iv); } @@ -400,7 +400,7 @@ bool enforcePeriodic (P& p, bool shifted = false; for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if (! is_per[idim]) continue; + if (! is_per[idim]) { continue; } if (p.pos(idim) > rhi[idim]) { while (p.pos(idim) > rhi[idim]) { p.pos(idim) -= static_cast(phi[idim] - plo[idim]); @@ -441,7 +441,7 @@ partitionParticlesByDest (PTile& ptile, const PLocator& ploc, const ParticleBuff int lev_min, int lev_max, int nGrow, bool remove_negative) { const int np = ptile.numParticles(); - if (np == 0) return 0; + if (np == 0) { return 0; } auto getPID = pmap.getPIDFunctor(); diff --git a/Src/Particle/AMReX_ParticleUtil.cpp b/Src/Particle/AMReX_ParticleUtil.cpp index 4dcced59c2c..63d0cbe90db 100644 --- a/Src/Particle/AMReX_ParticleUtil.cpp +++ b/Src/Particle/AMReX_ParticleUtil.cpp @@ -36,8 +36,8 @@ Vector computeNeighborProcs (const ParGDBBase* a_gdb, int ngrow) { Box box = src_box; const IntVect& ref_fac = computeRefFac(a_gdb, src_lev, lev); - if (ref_fac < IntVect::TheZeroVector()) box.coarsen(-1*ref_fac); - else if (ref_fac > IntVect::TheZeroVector()) box.refine(ref_fac); + if (ref_fac < IntVect::TheZeroVector()) { box.coarsen(-1*ref_fac); } + else if (ref_fac > IntVect::TheZeroVector()) { box.refine(ref_fac); } box.grow(computeRefFac(a_gdb, 0, src_lev)*ngrow); const Periodicity& periodicity = a_gdb->Geom(lev).periodicity(); diff --git a/Src/Particle/AMReX_Particle_mod_K.H b/Src/Particle/AMReX_Particle_mod_K.H index fe62bb002a6..2d1f91b4275 100644 --- a/Src/Particle/AMReX_Particle_mod_K.H +++ b/Src/Particle/AMReX_Particle_mod_K.H @@ -123,7 +123,7 @@ void amrex_deposit_particle_dx_cic (P const& p, int nc, amrex::Array4(amrex::Math::floor(hx)); for (int i = lo_x; i <= hi_x; ++i) { - if (i < rho.begin.x || i >= rho.end.x) continue; + if (i < rho.begin.x || i >= rho.end.x) { continue; } amrex::Real wx = amrex::min(hx - static_cast(i), amrex::Real(1.0)) - amrex::max(lx - static_cast(i), amrex::Real(0.0)); amrex::Real weight = wx*factor; amrex::Gpu::Atomic::AddNoRet(&rho(i, 0, 0, 0), static_cast(weight*p.rdata(0))); @@ -132,7 +132,7 @@ void amrex_deposit_particle_dx_cic (P const& p, int nc, amrex::Array4= rho.end.x) continue; + if (i < rho.begin.x || i >= rho.end.x) { continue; } amrex::Real wx = amrex::min(hx - static_cast(i), 
amrex::Real(1.0)) - amrex::max(lx - static_cast(i), amrex::Real(0.0)); amrex::Real weight = wx*factor; amrex::Gpu::Atomic::AddNoRet(&rho(i, 0, 0, comp), static_cast(weight*p.rdata(0)*p.rdata(comp))); @@ -155,10 +155,10 @@ void amrex_deposit_particle_dx_cic (P const& p, int nc, amrex::Array4(amrex::Math::floor(hy)); for (int j = lo_y; j <= hi_y; ++j) { - if (j < rho.begin.y || j >= rho.end.y) continue; + if (j < rho.begin.y || j >= rho.end.y) { continue; } amrex::Real wy = amrex::min(hy - static_cast(j), amrex::Real(1.0)) - amrex::max(ly - static_cast(j), amrex::Real(0.0)); for (int i = lo_x; i <= hi_x; ++i) { - if (i < rho.begin.x || i >= rho.end.x) continue; + if (i < rho.begin.x || i >= rho.end.x) { continue; } amrex::Real wx = amrex::min(hx - static_cast(i), amrex::Real(1.0)) - amrex::max(lx - static_cast(i), amrex::Real(0.0)); amrex::Real weight = wx*wy*factor; amrex::Gpu::Atomic::AddNoRet(&rho(i, j, 0, 0), static_cast(weight*p.rdata(0))); @@ -167,10 +167,10 @@ void amrex_deposit_particle_dx_cic (P const& p, int nc, amrex::Array4= rho.end.y) continue; + if (j < rho.begin.y || j >= rho.end.y) { continue; } amrex::Real wy = amrex::min(hy - static_cast(j), amrex::Real(1.0)) - amrex::max(ly - static_cast(j), amrex::Real(0.0)); for (int i = lo_x; i <= hi_x; ++i) { - if (i < rho.begin.x || i >= rho.end.x) continue; + if (i < rho.begin.x || i >= rho.end.x) { continue; } amrex::Real wx = amrex::min(hx - static_cast(i), amrex::Real(1.0)) - amrex::max(lx - static_cast(i), amrex::Real(0.0)); amrex::Real weight = wx*wy*factor; amrex::Gpu::Atomic::AddNoRet(&rho(i, j, 0, comp), static_cast(weight*p.rdata(0)*p.rdata(comp))); @@ -198,13 +198,13 @@ void amrex_deposit_particle_dx_cic (P const& p, int nc, amrex::Array4(amrex::Math::floor(hz)); for (int k = lo_z; k <= hi_z; ++k) { - if (k < rho.begin.z || k >= rho.end.z) continue; + if (k < rho.begin.z || k >= rho.end.z) { continue; } amrex::Real wz = amrex::min(hz - static_cast(k), amrex::Real(1.0)) - amrex::max(lz - static_cast(k), amrex::Real(0.0)); for (int j = lo_y; j <= hi_y; ++j) { - if (j < rho.begin.y || j >= rho.end.y) continue; + if (j < rho.begin.y || j >= rho.end.y) { continue; } amrex::Real wy = amrex::min(hy - static_cast(j), amrex::Real(1.0)) - amrex::max(ly - static_cast(j), amrex::Real(0.0)); for (int i = lo_x; i <= hi_x; ++i) { - if (i < rho.begin.x || i >= rho.end.x) continue; + if (i < rho.begin.x || i >= rho.end.x) { continue; } amrex::Real wx = amrex::min(hx - static_cast(i), amrex::Real(1.0)) - amrex::max(lx - static_cast(i), amrex::Real(0.0)); amrex::Real weight = wx*wy*wz*factor; amrex::Gpu::Atomic::AddNoRet(&rho(i, j, k, 0), static_cast(weight*p.rdata(0))); @@ -214,13 +214,13 @@ void amrex_deposit_particle_dx_cic (P const& p, int nc, amrex::Array4= rho.end.z) continue; + if (k < rho.begin.z || k >= rho.end.z) { continue; } amrex::Real wz = amrex::min(hz - static_cast(k), amrex::Real(1.0)) - amrex::max(lz - static_cast(k), amrex::Real(0.0)); for (int j = lo_y; j <= hi_y; ++j) { - if (j < rho.begin.y || j >= rho.end.y) continue; + if (j < rho.begin.y || j >= rho.end.y) { continue; } amrex::Real wy = amrex::min(hy - static_cast(j), amrex::Real(1.0)) - amrex::max(ly - static_cast(j), amrex::Real(0.0)); for (int i = lo_x; i <= hi_x; ++i) { - if (i < rho.begin.x || i >= rho.end.x) continue; + if (i < rho.begin.x || i >= rho.end.x) { continue; } amrex::Real wx = amrex::min(hx - static_cast(i), amrex::Real(1.0)) - amrex::max(lx - static_cast(i), amrex::Real(0.0)); amrex::Real weight = wx*wy*wz*factor; 
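The weights computed in amrex_deposit_particle_dx_cic above follow one 1D rule per direction: the weight of cell i is the overlap of the particle extent [lx, hx] with the unit cell [i, i+1], that is min(hx - i, 1) - max(lx - i, 0). A self-contained sketch with made-up numbers (illustration only, not AMReX code) confirms that the per-cell overlaps sum to the extent length hx - lx before the factor normalization:

#include <algorithm>
#include <cassert>
#include <cmath>

int main ()
{
    double lx = 2.3, hx = 3.7;                     // particle extent in cell units
    int lo = static_cast<int>(std::floor(lx));     // first covered cell: 2
    int hi = static_cast<int>(std::floor(hx));     // last covered cell: 3
    double sum = 0.0;
    for (int i = lo; i <= hi; ++i) {
        double w = std::min(hx - i, 1.0) - std::max(lx - i, 0.0); // 0.7 and 0.7
        sum += w;
    }
    assert(std::fabs(sum - (hx - lx)) < 1.e-14);   // overlaps sum to hx - lx
    return 0;
}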
amrex::Gpu::Atomic::AddNoRet(&rho(i, j, k, comp), static_cast(weight*p.rdata(0)*p.rdata(comp))); diff --git a/Src/Particle/AMReX_SparseBins.H b/Src/Particle/AMReX_SparseBins.H index 035828b1da6..6ec80a0a831 100644 --- a/Src/Particle/AMReX_SparseBins.H +++ b/Src/Particle/AMReX_SparseBins.H @@ -39,18 +39,18 @@ struct SparseBinIteratorFactory [[nodiscard]] AMREX_GPU_HOST_DEVICE index_type getIndex (const index_type bin_number) const noexcept { - if (m_num_bins == 1) return 0; + if (m_num_bins == 1) { return 0; } index_type lo = 0; index_type hi = m_num_bins - 1; while (lo < hi) { - if (m_bins_ptr[lo] == bin_number) return lo; - if (m_bins_ptr[hi] == bin_number) return hi; + if (m_bins_ptr[lo] == bin_number) { return lo; } + if (m_bins_ptr[hi] == bin_number) { return hi; } index_type mid = (lo + hi) / 2; index_type mid_value = m_bins_ptr[mid]; - if (mid_value == bin_number) return mid; + if (mid_value == bin_number) { return mid; } mid_value < bin_number ? lo = mid+1 : hi = mid; } diff --git a/Src/Particle/AMReX_StructOfArrays.H b/Src/Particle/AMReX_StructOfArrays.H index 00097e445b7..0ef3b8ae869 100644 --- a/Src/Particle/AMReX_StructOfArrays.H +++ b/Src/Particle/AMReX_StructOfArrays.H @@ -42,10 +42,16 @@ struct StructOfArrays { */ [[nodiscard]] RealVector& GetRealData (const int index) { AMREX_ASSERT(index >= 0 && index < NReal + static_cast(m_runtime_rdata.size())); - if (index >= 0 && index < NReal) return m_rdata[index]; - else { + if constexpr (NReal == 0) { AMREX_ASSERT(m_defined); - return m_runtime_rdata[index - NReal]; + return m_runtime_rdata[index]; + } else { + if (index < NReal) { + return m_rdata[index]; + } else { + AMREX_ASSERT(m_defined); + return m_runtime_rdata[index - NReal]; + } } } @@ -55,10 +61,16 @@ struct StructOfArrays { */ [[nodiscard]] const RealVector& GetRealData (const int index) const { AMREX_ASSERT(index >= 0 && index < NReal + static_cast(m_runtime_rdata.size())); - if (index >= 0 && index < NReal) return m_rdata[index]; - else { + if constexpr (NReal == 0) { AMREX_ASSERT(m_defined); - return m_runtime_rdata[index - NReal]; + return m_runtime_rdata[index]; + } else { + if (index < NReal) { + return m_rdata[index]; + } else { + AMREX_ASSERT(m_defined); + return m_runtime_rdata[index - NReal]; + } } } @@ -68,10 +80,16 @@ struct StructOfArrays { */ [[nodiscard]] IntVector& GetIntData (const int index) { AMREX_ASSERT(index >= 0 && index < NInt + static_cast(m_runtime_idata.size())); - if (index >= 0 && index < NInt) return m_idata[index]; - else { + if constexpr (NInt == 0) { AMREX_ASSERT(m_defined); - return m_runtime_idata[index - NInt]; + return m_runtime_idata[index]; + } else { + if (index < NInt) { + return m_idata[index]; + } else { + AMREX_ASSERT(m_defined); + return m_runtime_idata[index - NInt]; + } } } @@ -82,10 +100,16 @@ struct StructOfArrays { */ [[nodiscard]] const IntVector& GetIntData (const int index) const { AMREX_ASSERT(index >= 0 && index < NInt + static_cast(m_runtime_idata.size())); - if (index >= 0 && index < NInt) return m_idata[index]; - else { + if constexpr (NInt == 0) { AMREX_ASSERT(m_defined); - return m_runtime_idata[index - NInt]; + return m_runtime_idata[index]; + } else { + if (index < NInt) { + return m_idata[index]; + } else { + AMREX_ASSERT(m_defined); + return m_runtime_idata[index - NInt]; + } } } @@ -95,16 +119,19 @@ struct StructOfArrays { */ [[nodiscard]] std::size_t size () const { - if (NReal > 0) + if constexpr (NReal > 0) { return m_rdata[0].size(); - else if (NInt > 0) + } else if constexpr (NInt > 0) { return 
m_idata[0].size(); - else if (!m_runtime_rdata.empty()) - return m_runtime_rdata[0].size(); - else if (!m_runtime_idata.empty()) - return m_runtime_idata[0].size(); - else - return 0; + } else { + if (!m_runtime_rdata.empty()) { + return m_runtime_rdata[0].size(); + } else if (!m_runtime_idata.empty()) { + return m_runtime_idata[0].size(); + } else { + return 0; + } + } } /** @@ -148,10 +175,14 @@ struct StructOfArrays { void resize (size_t count) { - for (int i = 0; i < NReal; ++i) m_rdata[i].resize(count); - for (int i = 0; i < NInt; ++i) m_idata[i].resize(count); - for (int i = 0; i < int(m_runtime_rdata.size()); ++i) m_runtime_rdata[i].resize(count); - for (int i = 0; i < int(m_runtime_idata.size()); ++i) m_runtime_idata[i].resize(count); + if constexpr (NReal > 0) { + for (int i = 0; i < NReal; ++i) { m_rdata[i].resize(count); } + } + if constexpr (NInt > 0) { + for (int i = 0; i < NInt; ++i) { m_idata[i].resize(count); } + } + for (int i = 0; i < int(m_runtime_rdata.size()); ++i) { m_runtime_rdata[i].resize(count); } + for (int i = 0; i < int(m_runtime_idata.size()); ++i) { m_runtime_idata[i].resize(count); } } [[nodiscard]] GpuArray realarray () diff --git a/Src/Particle/AMReX_TracerParticles.cpp b/Src/Particle/AMReX_TracerParticles.cpp index 5d290be43f2..08fbf08dfae 100644 --- a/Src/Particle/AMReX_TracerParticles.cpp +++ b/Src/Particle/AMReX_TracerParticles.cpp @@ -73,7 +73,7 @@ TracerParticleContainer::AdvectWithUmac (MultiFab* umac, int lev, Real dt) [=] AMREX_GPU_DEVICE (int i) { ParticleType& p = p_pbox[i]; - if (p.id() <= 0) return; + if (p.id() <= 0) { return; } ParticleReal v[AMREX_SPACEDIM]; mac_interpolate(p, plo, dxi, umacarr, v); if (ipass == 0) @@ -151,7 +151,7 @@ TracerParticleContainer::AdvectWithUcc (const MultiFab& Ucc, int lev, Real dt) [=] AMREX_GPU_DEVICE (int i) { ParticleType& p = p_pbox[i]; - if (p.id() <= 0) return; + if (p.id() <= 0) { return; } ParticleReal v[AMREX_SPACEDIM]; cic_interpolate(p, plo, dxi, uccarr, v); @@ -258,8 +258,9 @@ TracerParticleContainer::Timestamp (const std::string& basename, TimeStampFile.seekp(0, std::ios::end); - if (!TimeStampFile.good()) + if (!TimeStampFile.good()) { amrex::FileOpenFailed(FileName); + } const auto M = static_cast(indices.size()); const BoxArray& ba = mf.boxArray(); @@ -299,11 +300,11 @@ TracerParticleContainer::Timestamp (const std::string& basename, { const ParticleType& p = pbox[k]; - if (p.id() <= 0) continue; + if (p.id() <= 0) { continue; } const IntVect& iv = Index(p,lev); - if (!bx.contains(iv) && !ba.contains(iv)) continue; + if (!bx.contains(iv) && !ba.contains(iv)) { continue; } TimeStampFile << p.id() << ' ' << p.cpu() << ' '; @@ -321,7 +322,7 @@ TracerParticleContainer::Timestamp (const std::string& basename, if (M > 0) { - cic_interpolate(p, plo, dxi, *uccarr_ptr, &vals[0], M); + cic_interpolate(p, plo, dxi, *uccarr_ptr, vals.data(), M); for (int i = 0; i < M; i++) { @@ -341,8 +342,9 @@ TracerParticleContainer::Timestamp (const std::string& basename, const int wakeUpPID = (MyProc + nOutFiles); const int tag = (MyProc % nOutFiles); - if (wakeUpPID < NProcs) + if (wakeUpPID < NProcs) { ParallelDescriptor::Send(&iBuff, 1, wakeUpPID, tag); + } } if (mySet == (iSet + 1)) { @@ -369,6 +371,6 @@ TracerParticleContainer::Timestamp (const std::string& basename, #ifdef AMREX_LAZY }); #endif - } + } } } diff --git a/Src/Particle/AMReX_WriteBinaryParticleData.H b/Src/Particle/AMReX_WriteBinaryParticleData.H index 40203e17c12..8a67ac62d45 100644 --- a/Src/Particle/AMReX_WriteBinaryParticleData.H +++ 
b/Src/Particle/AMReX_WriteBinaryParticleData.H @@ -129,7 +129,7 @@ countFlags (const Vector,Container>>& const auto& pflags = particle_io_flags[lev].at(kv.first); for (int k = 0; k < kv.second.numParticles(); ++k) { - if (pflags[k]) nparticles++; + if (pflags[k]) { nparticles++; } } } } @@ -143,7 +143,7 @@ countFlags (const Container& pflags) int nparticles = 0; for (std::size_t k = 0; k < pflags.size(); ++k) { - if (pflags[k]) nparticles++; + if (pflags[k]) { nparticles++; } } return nparticles; } @@ -175,15 +175,17 @@ packIOData (Vector& idata, Vector& rdata, const PC& pc, int l const Vector& tiles, int np, bool is_checkpoint) { int num_output_int = 0; - for (int i = 0; i < pc.NumIntComps() + PC::NStructInt; ++i) - if (write_int_comp[i]) ++num_output_int; + for (int i = 0; i < pc.NumIntComps() + PC::NStructInt; ++i) { + if (write_int_comp[i]) { ++num_output_int; } + } const Long iChunkSize = 2 + num_output_int; idata.resize(np*iChunkSize); int num_output_real = 0; - for (int i = 0; i < pc.NumRealComps() + PC::NStructReal; ++i) - if (write_real_comp[i]) ++num_output_real; + for (int i = 0; i < pc.NumRealComps() + PC::NStructReal; ++i) { + if (write_real_comp[i]) { ++num_output_real; } + } const Long rChunkSize = AMREX_SPACEDIM + num_output_real; rdata.resize(np*rChunkSize); @@ -279,20 +281,22 @@ packIOData (Vector& idata, Vector& rdata, const PC& pc, int l const Vector& tiles, int np, bool is_checkpoint) { int num_output_int = 0; - for (int i = 0; i < pc.NumIntComps() + PC::NStructInt; ++i) - if (write_int_comp[i]) ++num_output_int; + for (int i = 0; i < pc.NumIntComps() + PC::NStructInt; ++i) { + if (write_int_comp[i]) { ++num_output_int; } + } const Long iChunkSize = 2 + num_output_int; idata.resize(np*iChunkSize); int num_output_real = 0; - for (int i = 0; i < pc.NumRealComps() + PC::NStructReal; ++i) - if (write_real_comp[i]) ++num_output_real; + for (int i = 0; i < pc.NumRealComps() + PC::NStructReal; ++i) { + if (write_real_comp[i]) { ++num_output_real; } + } const Long rChunkSize = AMREX_SPACEDIM + num_output_real; rdata.resize(np*rChunkSize); - int* iptr = idata.dataPtr(); + int* iptr = idata.dataPtr(); ParticleReal* rptr = rdata.dataPtr(); for (unsigned i = 0; i < tiles.size(); i++) { const auto& ptile = pc.ParticlesAt(lev, grid, tiles[i]); @@ -313,7 +317,7 @@ packIOData (Vector& idata, Vector& rdata, const PC& pc, int l iptr += 2; // Real: positions - for (int j = 0; j < AMREX_SPACEDIM; j++) rptr[j] = p.pos(j); + for (int j = 0; j < AMREX_SPACEDIM; j++) { rptr[j] = p.pos(j); } rptr += AMREX_SPACEDIM; // extra AoS Int components @@ -340,7 +344,7 @@ packIOData (Vector& idata, Vector& rdata, const PC& pc, int l iptr += 1; // Real: position - for (int j = 0; j < AMREX_SPACEDIM; j++) rptr[j] = soa.GetRealData(j)[pindex]; + for (int j = 0; j < AMREX_SPACEDIM; j++) { rptr[j] = soa.GetRealData(j)[pindex]; } rptr += AMREX_SPACEDIM; } @@ -392,7 +396,7 @@ void WriteBinaryParticleDataSync (PC const& pc, AMREX_ALWAYS_ASSERT( int_comp_names.size() == pc.NumIntComps() + NStructInt); std::string pdir = dir; - if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') pdir += '/'; + if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') { pdir += '/'; } pdir += name; if ( ! pc.GetLevelDirectoriesCreated()) { @@ -444,15 +448,16 @@ void WriteBinaryParticleDataSync (PC const& pc, { std::string HdrFileName = pdir; - if ( ! HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') + if ( ! 
HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') { HdrFileName += '/'; + } HdrFileName += "Header"; pc.HdrFileNamePrePost = HdrFileName; HdrFile.open(HdrFileName.c_str(), std::ios::out|std::ios::trunc); - if ( ! HdrFile.good()) amrex::FileOpenFailed(HdrFileName); + if ( ! HdrFile.good()) { amrex::FileOpenFailed(HdrFileName); } // // First thing written is our version string. @@ -470,12 +475,14 @@ void WriteBinaryParticleDataSync (PC const& pc, } int num_output_real = 0; - for (int i = 0; i < pc.NumRealComps() + NStructReal; ++i) - if (write_real_comp[i]) ++num_output_real; + for (int i = 0; i < pc.NumRealComps() + NStructReal; ++i) { + if (write_real_comp[i]) { ++num_output_real; } + } int num_output_int = 0; - for (int i = 0; i < pc.NumIntComps() + NStructInt; ++i) - if (write_int_comp[i]) ++num_output_int; + for (int i = 0; i < pc.NumIntComps() + NStructInt; ++i) { + if (write_int_comp[i]) { ++num_output_int; } + } // AMREX_SPACEDIM and N for sanity checking. HdrFile << AMREX_SPACEDIM << '\n'; @@ -484,15 +491,17 @@ void WriteBinaryParticleDataSync (PC const& pc, HdrFile << num_output_real << '\n'; // Real component names - for (int i = 0; i < NStructReal + pc.NumRealComps(); ++i ) - if (write_real_comp[i]) HdrFile << real_comp_names[i] << '\n'; + for (int i = 0; i < NStructReal + pc.NumRealComps(); ++i ) { + if (write_real_comp[i]) { HdrFile << real_comp_names[i] << '\n'; } + } // The number of extra int parameters HdrFile << num_output_int << '\n'; // int component names - for (int i = 0; i < NStructInt + pc.NumIntComps(); ++i ) - if (write_int_comp[i]) HdrFile << int_comp_names[i] << '\n'; + for (int i = 0; i < NStructInt + pc.NumIntComps(); ++i ) { + if (write_int_comp[i]) { HdrFile << int_comp_names[i] << '\n'; } + } bool is_checkpoint_legacy = true; // legacy HdrFile << is_checkpoint_legacy << '\n'; @@ -507,8 +516,9 @@ void WriteBinaryParticleDataSync (PC const& pc, HdrFile << pc.finestLevel() << '\n'; // Then the number of grids at each level. - for (int lev = 0; lev <= pc.finestLevel(); lev++) + for (int lev = 0; lev <= pc.finestLevel(); lev++) { HdrFile << pc.ParticleBoxArray(lev).size() << '\n'; + } } // We want to write the data out in parallel. @@ -517,7 +527,7 @@ void WriteBinaryParticleDataSync (PC const& pc, ParmParse pp("particles"); pp.queryAdd("particles_nfiles",nOutFiles); - if(nOutFiles == -1) nOutFiles = NProcs; + if(nOutFiles == -1) { nOutFiles = NProcs; } nOutFiles = std::max(1, std::min(nOutFiles,NProcs)); pc.nOutFilesPrePost = nOutFiles; @@ -538,15 +548,17 @@ void WriteBinaryParticleDataSync (PC const& pc, if (gotsome) { - if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') LevelDir += '/'; + if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') { LevelDir += '/'; } LevelDir = amrex::Concatenate(LevelDir.append("Level_"), lev, 1); if ( ! pc.GetLevelDirectoriesCreated()) { - if (ParallelDescriptor::IOProcessor()) - if ( ! amrex::UtilCreateDirectory(LevelDir, 0755)) + if (ParallelDescriptor::IOProcessor()) { + if ( ! amrex::UtilCreateDirectory(LevelDir, 0755)) { amrex::CreateDirectoryFailed(LevelDir); + } + } ParallelDescriptor::Barrier(); } } @@ -715,7 +727,7 @@ void WriteBinaryParticleDataAsync (PC const& pc, } std::string pdir = dir; - if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') pdir += '/'; + if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') { pdir += '/'; } pdir += name; if (MyProc == IOProcNumber) @@ -735,7 +747,7 @@ void WriteBinaryParticleDataAsync (PC const& pc, if (gotsome) { - if ( ! 
LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') LevelDir += '/'; + if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') { LevelDir += '/'; } LevelDir = amrex::Concatenate(LevelDir.append("Level_"), lev, 1); @@ -810,11 +822,13 @@ void WriteBinaryParticleDataAsync (PC const& pc, new_ptile.define(runtime_real_comps, runtime_int_comps); - for (auto comp(0); comp < runtime_real_comps; ++comp) - new_ptile.push_back_real(NArrayReal+comp, np, 0.); + for (auto comp(0); comp < runtime_real_comps; ++comp) { + new_ptile.push_back_real(NArrayReal+comp, np, 0.); + } - for (auto comp(0); comp < runtime_int_comps; ++comp) - new_ptile.push_back_int(NArrayInt+comp, np, 0); + for (auto comp(0); comp < runtime_int_comps; ++comp) { + new_ptile.push_back_int(NArrayInt+comp, np, 0); + } amrex::filterParticles(new_ptile, ptile, KeepValidFilter()); } @@ -845,14 +859,15 @@ void WriteBinaryParticleDataAsync (PC const& pc, std::string HdrFileName = pdir; std::ofstream HdrFile; - if ( ! HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') + if ( ! HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') { HdrFileName += '/'; + } HdrFileName += "Header"; HdrFile.open(HdrFileName.c_str(), std::ios::out|std::ios::trunc); - if ( ! HdrFile.good()) amrex::FileOpenFailed(HdrFileName); + if ( ! HdrFile.good()) { amrex::FileOpenFailed(HdrFileName); } std::string version_string = is_checkpoint ? PC::CheckpointVersion() : PC::PlotfileVersion(); if (sizeof(typename PC::ParticleType::RealType) == 4) @@ -865,12 +880,14 @@ void WriteBinaryParticleDataAsync (PC const& pc, } int num_output_real = 0; - for (int i = 0; i < nrc + NStructReal; ++i) - if (write_real_comp[i]) ++num_output_real; + for (int i = 0; i < nrc + NStructReal; ++i) { + if (write_real_comp[i]) { ++num_output_real; } + } int num_output_int = 0; - for (int i = 0; i < nic + NStructInt; ++i) - if (write_int_comp[i]) ++num_output_int; + for (int i = 0; i < nic + NStructInt; ++i) { + if (write_int_comp[i]) { ++num_output_int; } + } // AMREX_SPACEDIM and N for sanity checking. HdrFile << AMREX_SPACEDIM << '\n'; @@ -879,15 +896,17 @@ void WriteBinaryParticleDataAsync (PC const& pc, HdrFile << num_output_real << '\n'; // Real component names - for (int i = 0; i < NStructReal + nrc; ++i ) - if (write_real_comp[i]) HdrFile << real_comp_names[i] << '\n'; + for (int i = 0; i < NStructReal + nrc; ++i ) { + if (write_real_comp[i]) { HdrFile << real_comp_names[i] << '\n'; } + } // The number of extra int parameters HdrFile << num_output_int << '\n'; // int component names - for (int i = 0; i < NStructInt + nic; ++i ) - if (write_int_comp[i]) HdrFile << int_comp_names[i] << '\n'; + for (int i = 0; i < NStructInt + nic; ++i ) { + if (write_int_comp[i]) { HdrFile << int_comp_names[i] << '\n'; } + } bool is_checkpoint_legacy = true; // legacy HdrFile << is_checkpoint_legacy << '\n'; @@ -902,8 +921,9 @@ void WriteBinaryParticleDataAsync (PC const& pc, HdrFile << finest_level << '\n'; // Then the number of grids at each level. - for (int lev = 0; lev <= finest_level; lev++) + for (int lev = 0; lev <= finest_level; lev++) { HdrFile << dms[lev].size() << '\n'; + } for (int lev = 0; lev <= finest_level; lev++) { @@ -942,7 +962,7 @@ void WriteBinaryParticleDataAsync (PC const& pc, } std::string LevelDir = pdir; - if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') LevelDir += '/'; + if ( ! 
LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') { LevelDir += '/'; } LevelDir = amrex::Concatenate(LevelDir.append("Level_"), lev, 1); std::string filePrefix(LevelDir); filePrefix += '/'; @@ -956,14 +976,15 @@ void WriteBinaryParticleDataAsync (PC const& pc, for (int k = 0; k < bas[lev].size(); ++k) { int rank = dms[lev][k]; - if (rank != MyProc) continue; + if (rank != MyProc) { continue; } const int grid = k; - if (np_per_grid_local[lev][grid] == 0) continue; + if (np_per_grid_local[lev][grid] == 0) { continue; } // First write out the integer data in binary. int num_output_int = 0; - for (int i = 0; i < nic + NStructInt; ++i) - if (write_int_comp[i]) ++num_output_int; + for (int i = 0; i < nic + NStructInt; ++i) { + if (write_int_comp[i]) { ++num_output_int; } + } const Long iChunkSize = 2 + num_output_int; Vector istuff(np_per_grid_local[lev][grid]*iChunkSize); @@ -978,7 +999,7 @@ void WriteBinaryParticleDataAsync (PC const& pc, const auto& soa = pbox.GetStructOfArrays(); const auto& p = make_particle{}(ptd, pindex); - if (p.id() <= 0) continue; + if (p.id() <= 0) { continue; } // note: for pure SoA particle layouts, we do write the id, cpu and positions as a struct // for backwards compatibility with readers @@ -1024,8 +1045,9 @@ void WriteBinaryParticleDataAsync (PC const& pc, // Write the Real data in binary. int num_output_real = 0; - for (int i = 0; i < nrc + NStructReal; ++i) - if (write_real_comp[i]) ++num_output_real; + for (int i = 0; i < nrc + NStructReal; ++i) { + if (write_real_comp[i]) { ++num_output_real; } + } const Long rChunkSize = AMREX_SPACEDIM + num_output_real; Vector rstuff(np_per_grid_local[lev][grid]*rChunkSize); @@ -1041,12 +1063,12 @@ void WriteBinaryParticleDataAsync (PC const& pc, const auto& soa = pbox.GetStructOfArrays(); const auto& p = make_particle{}(ptd, pindex); - if (p.id() <= 0) continue; + if (p.id() <= 0) { continue; } if constexpr(!PC::ParticleType::is_soa_particle) { // Real: position - for (int j = 0; j < AMREX_SPACEDIM; j++) rptr[j] = p.pos(j); + for (int j = 0; j < AMREX_SPACEDIM; j++) { rptr[j] = p.pos(j); } rptr += AMREX_SPACEDIM; // extra AoS real @@ -1061,7 +1083,7 @@ void WriteBinaryParticleDataAsync (PC const& pc, } else { // Real: position - for (int j = 0; j < AMREX_SPACEDIM; j++) rptr[j] = soa.GetRealData(j)[pindex]; + for (int j = 0; j < AMREX_SPACEDIM; j++) { rptr[j] = soa.GetRealData(j)[pindex]; } rptr += AMREX_SPACEDIM; } diff --git a/Tests/Amr/Advection_AmrCore/Source/AmrCoreAdv.cpp b/Tests/Amr/Advection_AmrCore/Source/AmrCoreAdv.cpp index ea60a0a4077..48a5d96e37d 100644 --- a/Tests/Amr/Advection_AmrCore/Source/AmrCoreAdv.cpp +++ b/Tests/Amr/Advection_AmrCore/Source/AmrCoreAdv.cpp @@ -117,10 +117,11 @@ AmrCoreAdv::Evolve () int lev = 0; int iteration = 1; - if (do_subcycle) + if (do_subcycle) { timeStepWithSubcycling(lev, cur_time, iteration); - else + } else { timeStepNoSubcycling(cur_time, iteration); + } cur_time += dt[0]; @@ -152,7 +153,7 @@ AmrCoreAdv::Evolve () } #endif - if (cur_time >= stop_time - 1.e-6*dt[0]) break; + if (cur_time >= stop_time - 1.e-6*dt[0]) { break; } } if (plot_int > 0 && istep[0] > last_plot_file_step) { @@ -332,7 +333,7 @@ AmrCoreAdv::ErrorEst (int lev, TagBoxArray& tags, Real /*time*/, int /*ngrow*/) } } - if (lev >= phierr.size()) return; + if (lev >= phierr.size()) { return; } // const int clearval = TagBox::CLEAR; const int tagval = TagBox::SET; @@ -722,8 +723,9 @@ AmrCoreAdv::timeStepNoSubcycling (Real time, int iteration) fp.reset(); // Because the data have changed. 
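The Header-writing hunks above repeatedly count and filter particle components through the write_real_comp / write_int_comp flag vectors before anything is emitted. A minimal sketch of that selection pattern (illustrative names, not AMReX code):

#include <vector>

// Count how many components are selected by a 0/1 flag vector, as done for
// num_output_real and num_output_int before writing the particle Header.
int count_selected (const std::vector<int>& write_comp)
{
    int n = 0;
    for (int flag : write_comp) {
        if (flag) { ++n; }
    }
    return n;
}

// e.g. write_real_comp = {1, 1, 0, 1} selects 3 of the 4 real components.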
} - for (int lev = 0; lev <= finest_level; lev++) + for (int lev = 0; lev <= finest_level; lev++) { ++istep[lev]; + } if (Verbose()) { @@ -745,7 +747,7 @@ AmrCoreAdv::ComputeDt () { dt_tmp[lev] = EstTimeStep(lev, t_new[lev]); } - ParallelDescriptor::ReduceRealMin(&dt_tmp[0], int(dt_tmp.size())); + ParallelDescriptor::ReduceRealMin(dt_tmp.data(), int(dt_tmp.size())); constexpr Real change_max = 1.1; Real dt_0 = dt_tmp[0]; diff --git a/Tests/Amr/Advection_AmrCore/Source/DefineVelocity.cpp b/Tests/Amr/Advection_AmrCore/Source/DefineVelocity.cpp index 4dc1076dec8..4591a85375f 100644 --- a/Tests/Amr/Advection_AmrCore/Source/DefineVelocity.cpp +++ b/Tests/Amr/Advection_AmrCore/Source/DefineVelocity.cpp @@ -8,8 +8,9 @@ using namespace amrex; void AmrCoreAdv::DefineVelocityAllLevels (Real time) { - for (int lev = 0; lev <= finest_level; ++lev) + for (int lev = 0; lev <= finest_level; ++lev) { DefineVelocityAtLevel(lev,time); + } } void diff --git a/Tests/Amr/Advection_AmrCore/Source/Tagging.H b/Tests/Amr/Advection_AmrCore/Source/Tagging.H index ace85b04ed2..6f05edd2260 100644 --- a/Tests/Amr/Advection_AmrCore/Source/Tagging.H +++ b/Tests/Amr/Advection_AmrCore/Source/Tagging.H @@ -11,8 +11,9 @@ state_error (int i, int j, int k, amrex::Array4 const& state, amrex::Real phierr, char tagval) { - if (state(i,j,k) > phierr) + if (state(i,j,k) > phierr) { tag(i,j,k) = tagval; + } } #endif diff --git a/Tests/Amr/Advection_AmrLevel/Source/AmrLevelAdv.cpp b/Tests/Amr/Advection_AmrLevel/Source/AmrLevelAdv.cpp index ecf7aec9f73..f9d13cec1d7 100644 --- a/Tests/Amr/Advection_AmrLevel/Source/AmrLevelAdv.cpp +++ b/Tests/Amr/Advection_AmrLevel/Source/AmrLevelAdv.cpp @@ -368,8 +368,9 @@ AmrLevelAdv::advance (Real time, #ifndef AMREX_USE_GPU if (do_reflux) { - for (int i = 0; i < BL_SPACEDIM ; i++) + for (int i = 0; i < BL_SPACEDIM ; i++) { fluxes[i][mfi].copy(*flux[i],mfi.nodaltilebox(i)); + } } #endif } @@ -378,12 +379,14 @@ AmrLevelAdv::advance (Real time, if (do_reflux) { if (current) { - for (int i = 0; i < BL_SPACEDIM ; i++) + for (int i = 0; i < BL_SPACEDIM ; i++) { current->FineAdd(fluxes[i],i,0,0,NUM_STATE,1.); + } } if (fine) { - for (int i = 0; i < BL_SPACEDIM ; i++) + for (int i = 0; i < BL_SPACEDIM ; i++) { fine->CrseInit(fluxes[i],i,0,0,NUM_STATE,-1.); + } } } @@ -477,8 +480,9 @@ AmrLevelAdv::computeInitialDt (int finest_level, // // Grids have been constructed, compute dt for all levels. // - if (level > 0) + if (level > 0) { return; + } Real dt_0 = 1.0e+100; int n_factor = 1; @@ -495,8 +499,9 @@ AmrLevelAdv::computeInitialDt (int finest_level, const Real eps = 0.001*dt_0; Real cur_time = state[Phi_Type].curTime(); if (stop_time >= 0.0) { - if ((cur_time + dt_0) > (stop_time - eps)) + if ((cur_time + dt_0) > (stop_time - eps)) { dt_0 = stop_time - cur_time; + } } n_factor = 1; @@ -524,8 +529,9 @@ AmrLevelAdv::computeNewDt (int finest_level, // We are at the end of a coarse grid timecycle. // Compute the timesteps for the next iteration. 
// - if (level > 0) + if (level > 0) { return; + } for (int i = 0; i <= finest_level; i++) { @@ -572,8 +578,9 @@ AmrLevelAdv::computeNewDt (int finest_level, const Real eps = 0.001*dt_0; Real cur_time = state[Phi_Type].curTime(); if (stop_time >= 0.0) { - if ((cur_time + dt_0) > (stop_time - eps)) + if ((cur_time + dt_0) > (stop_time - eps)) { dt_0 = stop_time - cur_time; + } } n_factor = 1; @@ -660,15 +667,17 @@ AmrLevelAdv::post_restart() void AmrLevelAdv::post_init (Real /*stop_time*/) { - if (level > 0) + if (level > 0) { return; + } // // Average data down from finer levels // so that conserved data is consistent between levels. // int finest_level = parent->finestLevel(); - for (int k = finest_level-1; k>= 0; k--) + for (int k = finest_level-1; k>= 0; k--) { getLevel(k).avgDown(); + } } /** @@ -737,7 +746,7 @@ AmrLevelAdv::read_params () { static bool done = false; - if (done) return; + if (done) { return; } done = true; @@ -793,14 +802,14 @@ AmrLevelAdv::reflux () void AmrLevelAdv::avgDown () { - if (level == parent->finestLevel()) return; + if (level == parent->finestLevel()) { return; } avgDown(Phi_Type); } void AmrLevelAdv::avgDown (int state_indx) { - if (level == parent->finestLevel()) return; + if (level == parent->finestLevel()) { return; } AmrLevelAdv& fine_lev = getLevel(level+1); MultiFab& S_fine = fine_lev.get_new_data(state_indx); diff --git a/Tests/DivFreePatch/main.cpp b/Tests/DivFreePatch/main.cpp index 2eb6fb8a4a4..77973484a8d 100644 --- a/Tests/DivFreePatch/main.cpp +++ b/Tests/DivFreePatch/main.cpp @@ -463,7 +463,7 @@ void main_main () } // Make sure coarse & fine are properly setup for the interpolation stencil. - amrex::average_down_faces( {AMREX_D_DECL(&f_mf_faces[0], &f_mf_faces[1], &f_mf_faces[2])}, coarse_faces, ratio, 0); + amrex::average_down_faces( {AMREX_D_DECL(f_mf_faces.data(), f_mf_faces.data()+1, f_mf_faces.data()+2)}, coarse_faces, ratio, 0); Vector > fine_v; Vector > coarse_v; diff --git a/Tests/EB/CNS/Source/CNS.cpp b/Tests/EB/CNS/Source/CNS.cpp index 3ab01b68dd0..c2416c54339 100644 --- a/Tests/EB/CNS/Source/CNS.cpp +++ b/Tests/EB/CNS/Source/CNS.cpp @@ -142,8 +142,9 @@ CNS::computeInitialDt (int finest_level, const Real eps = 0.001*dt_0; Real cur_time = state[State_Type].curTime(); if (stop_time >= 0.0) { - if ((cur_time + dt_0) > (stop_time - eps)) + if ((cur_time + dt_0) > (stop_time - eps)) { dt_0 = stop_time - cur_time; + } } n_factor = 1; @@ -286,7 +287,7 @@ CNS::printTotal () const void CNS::post_init (Real) { - if (level > 0) return; + if (level > 0) { return; } for (int k = parent->finestLevel()-1; k >= 0; --k) { getLevel(k).avgDown(); } @@ -382,7 +383,7 @@ CNS::read_params () Vector tilesize(AMREX_SPACEDIM); if (pp.queryarr("hydro_tile_size", tilesize, 0, AMREX_SPACEDIM)) { - for (int i=0; ifinestLevel()) return; + if (level == parent->finestLevel()) { return; } auto& fine_lev = getLevel(level+1); diff --git a/Tests/EB/CNS/Source/CNS_advance.cpp b/Tests/EB/CNS/Source/CNS_advance.cpp index d38d05632ef..213383cf21e 100644 --- a/Tests/EB/CNS/Source/CNS_advance.cpp +++ b/Tests/EB/CNS/Source/CNS_advance.cpp @@ -117,11 +117,11 @@ CNS::compute_dSdt (const MultiFab& S, MultiFab& dSdt, Real dt, dx, &dt,&level); if (fr_as_crse) { - fr_as_crse->CrseAdd(mfi,{&flux[0],&flux[1],&flux[2]},dx,dt,RunOn::Cpu); + fr_as_crse->CrseAdd(mfi,{flux.data(),flux.data()+1,flux.data()+2},dx,dt,RunOn::Cpu); } if (fr_as_fine) { - fr_as_fine->FineAdd(mfi,{&flux[0],&flux[1],&flux[2]},dx,dt,RunOn::Cpu); + 
fr_as_fine->FineAdd(mfi,{flux.data(),flux.data()+1,flux.data()+2},dx,dt,RunOn::Cpu); } } else @@ -159,7 +159,7 @@ CNS::compute_dSdt (const MultiFab& S, MultiFab& dSdt, Real dt, dx, &dt,&level); if (fr_as_crse) { - fr_as_crse->CrseAdd(mfi, {&flux[0],&flux[1],&flux[2]}, dx,dt, + fr_as_crse->CrseAdd(mfi, {flux.data(),flux.data()+1,flux.data()+2}, dx,dt, (*volfrac)[mfi], {&((*areafrac[0])[mfi]), &((*areafrac[1])[mfi]), @@ -168,7 +168,7 @@ CNS::compute_dSdt (const MultiFab& S, MultiFab& dSdt, Real dt, } if (fr_as_fine) { - fr_as_fine->FineAdd(mfi, {&flux[0],&flux[1],&flux[2]}, dx,dt, + fr_as_fine->FineAdd(mfi, {flux.data(),flux.data()+1,flux.data()+2}, dx,dt, (*volfrac)[mfi], {&((*areafrac[0])[mfi]), &((*areafrac[1])[mfi]), diff --git a/Tests/EB_CNS/Source/CNS.cpp b/Tests/EB_CNS/Source/CNS.cpp index 5bb3c4e2532..40882a4edb0 100644 --- a/Tests/EB_CNS/Source/CNS.cpp +++ b/Tests/EB_CNS/Source/CNS.cpp @@ -293,7 +293,7 @@ CNS::printTotal () const void CNS::post_init (Real) { - if (level > 0) return; + if (level > 0) { return; } for (int k = parent->finestLevel()-1; k >= 0; --k) { getLevel(k).avgDown(); } @@ -462,7 +462,7 @@ CNS::avgDown () { BL_PROFILE("CNS::avgDown()"); - if (level == parent->finestLevel()) return; + if (level == parent->finestLevel()) { return; } auto& fine_lev = getLevel(level+1); diff --git a/Tests/GPU/CNS/Source/CNS.cpp b/Tests/GPU/CNS/Source/CNS.cpp index 1a073c68c8a..25823b9b76d 100644 --- a/Tests/GPU/CNS/Source/CNS.cpp +++ b/Tests/GPU/CNS/Source/CNS.cpp @@ -284,7 +284,7 @@ CNS::printTotal () const void CNS::post_init (Real) { - if (level > 0) return; + if (level > 0) { return; } for (int k = parent->finestLevel()-1; k >= 0; --k) { getLevel(k).avgDown(); } @@ -401,7 +401,7 @@ CNS::avgDown () { BL_PROFILE("CNS::avgDown()"); - if (level == parent->finestLevel()) return; + if (level == parent->finestLevel()) { return; } auto& fine_lev = getLevel(level+1); diff --git a/Tests/LinearSolvers/CellEB/MyTest.cpp b/Tests/LinearSolvers/CellEB/MyTest.cpp index 473c72ed758..f7e83e1b848 100644 --- a/Tests/LinearSolvers/CellEB/MyTest.cpp +++ b/Tests/LinearSolvers/CellEB/MyTest.cpp @@ -78,8 +78,11 @@ MyTest::solve () mlmg.setBottomTolerance(bottom_reltol); mlmg.setVerbose(verbose); mlmg.setBottomVerbose(bottom_verbose); - if (use_hypre) mlmg.setBottomSolver(MLMG::BottomSolver::hypre); - if (use_petsc) mlmg.setBottomSolver(MLMG::BottomSolver::petsc); + if (use_hypre) { + mlmg.setBottomSolver(MLMG::BottomSolver::hypre); + } else if (use_petsc) { + mlmg.setBottomSolver(MLMG::BottomSolver::petsc); + } const Real tol_rel = reltol; const Real tol_abs = 0.0; mlmg.solve(amrex::GetVecOfPtrs(phi), amrex::GetVecOfConstPtrs(rhs), tol_rel, tol_abs); diff --git a/Tests/LinearSolvers/NodalPoisson/main.cpp b/Tests/LinearSolvers/NodalPoisson/main.cpp index db78e65acd1..3e0d102a401 100644 --- a/Tests/LinearSolvers/NodalPoisson/main.cpp +++ b/Tests/LinearSolvers/NodalPoisson/main.cpp @@ -13,8 +13,9 @@ int main (int argc, char* argv[]) mytest.solve(); mytest.compute_norms(); } - if (mytest.getDoPlots()) + if (mytest.getDoPlots()) { mytest.writePlotfile(); + } } amrex::Finalize(); diff --git a/Tests/LinearSolvers/Nodal_Projection_EB/main.cpp b/Tests/LinearSolvers/Nodal_Projection_EB/main.cpp index b1aa6c001de..3a01aac3bbc 100644 --- a/Tests/LinearSolvers/Nodal_Projection_EB/main.cpp +++ b/Tests/LinearSolvers/Nodal_Projection_EB/main.cpp @@ -57,12 +57,14 @@ int main (int argc, char* argv[]) } #ifndef AMREX_USE_HYPRE - if (use_hypre == 1) - amrex::Abort("Cant use hypre if we dont build with USE_HYPRE=TRUE"); + if 
(use_hypre == 1) { + amrex::Abort("Cant use hypre if we dont build with USE_HYPRE=TRUE"); + } #endif - if (n_cell%8 != 0) - amrex::Abort("n_cell must be a multiple of 8"); + if (n_cell%8 != 0) { + amrex::Abort("n_cell must be a multiple of 8"); + } int n_cell_y = n_cell; int n_cell_x = 2*n_cell; diff --git a/Tests/MultiBlock/IndexType/main.cpp b/Tests/MultiBlock/IndexType/main.cpp index 84f939dd5e0..1a35bc1e5b4 100644 --- a/Tests/MultiBlock/IndexType/main.cpp +++ b/Tests/MultiBlock/IndexType/main.cpp @@ -53,7 +53,7 @@ bool ParallelCopyWithItselfIsCorrect(amrex::iMultiFab& mf, const amrex::Box& dom int fails = 0; for (amrex::MFIter mfi(mf); mfi.isValid(); ++mfi) { const amrex::Box section = dest_box & mfi.tilebox(); - if (section.isEmpty()) continue; + if (section.isEmpty()) { continue; } auto array = mf.const_array(mfi); amrex::LoopOnCpu(section, [&](int i, int j, int k) { @@ -115,7 +115,7 @@ bool ParallelCopyFaceToFace(amrex::iMultiFab& dest, const amrex::Box& domain_des const int ny = domain_src.length(1); for (amrex::MFIter mfi(dest); mfi.isValid(); ++mfi) { const amrex::Box section = dest_box & mfi.tilebox(); - if (section.isEmpty()) continue; + if (section.isEmpty()) { continue; } auto darray = dest.const_array(mfi); amrex::LoopOnCpu(section, [&](int i, int j, int k) { diff --git a/Tests/Parser2/fn.cpp b/Tests/Parser2/fn.cpp index 18e2b00d694..46ce898b320 100644 --- a/Tests/Parser2/fn.cpp +++ b/Tests/Parser2/fn.cpp @@ -1,8 +1,8 @@ #include #include +// This is intentional. Cannot have std:: in amrex::Parser expressions. using std::sin; -using std::cos; using std::atan2; using std::pow; diff --git a/Tests/Particles/AssignDensity/main.cpp b/Tests/Particles/AssignDensity/main.cpp index f0c1b1a3b19..fb4a0fb970b 100644 --- a/Tests/Particles/AssignDensity/main.cpp +++ b/Tests/Particles/AssignDensity/main.cpp @@ -56,8 +56,9 @@ void test_assign_density(TestParams& parms) myPC.SetVerbose(false); int num_particles = parms.nppc * parms.nx * parms.ny * parms.nz; - if (ParallelDescriptor::IOProcessor()) + if (ParallelDescriptor::IOProcessor()) { std::cout << "Total number of particles : " << num_particles << '\n' << '\n'; + } bool serialize = true; int iseed = 451; @@ -93,8 +94,9 @@ int main(int argc, char* argv[]) pp.get("nz", parms.nz); pp.get("max_grid_size", parms.max_grid_size); pp.get("nppc", parms.nppc); - if (parms.nppc < 1 && ParallelDescriptor::IOProcessor()) + if (parms.nppc < 1 && ParallelDescriptor::IOProcessor()) { amrex::Abort("Must specify at least one particle per cell"); + } parms.verbose = false; pp.query("verbose", parms.verbose); diff --git a/Tests/Particles/AssignMultiLevelDensity/main.cpp b/Tests/Particles/AssignMultiLevelDensity/main.cpp index f2f11d035ae..0ffa37dd695 100644 --- a/Tests/Particles/AssignMultiLevelDensity/main.cpp +++ b/Tests/Particles/AssignMultiLevelDensity/main.cpp @@ -42,8 +42,9 @@ void test_assign_density(TestParams& parms) // Define the refinement ratio Vector rr(nlevs-1); - for (int lev = 1; lev < nlevs; lev++) + for (int lev = 1; lev < nlevs; lev++) { rr[lev-1] = 2; + } // This sets the boundary conditions to be doubly or triply periodic int is_per[] = {AMREX_D_DECL(1,1,1)}; @@ -147,8 +148,9 @@ int main(int argc, char* argv[]) pp.get("max_grid_size", parms.max_grid_size); pp.get("nlevs", parms.nlevs); pp.get("nppc", parms.nppc); - if (parms.nppc < 1 && ParallelDescriptor::IOProcessor()) + if (parms.nppc < 1 && ParallelDescriptor::IOProcessor()) { amrex::Abort("Must specify at least one particle per cell"); + } parms.verbose = false; 
pp.query("verbose", parms.verbose); diff --git a/Tests/Particles/AsyncIO/main.cpp b/Tests/Particles/AsyncIO/main.cpp index 1dd88e9671e..1ea036113d8 100644 --- a/Tests/Particles/AsyncIO/main.cpp +++ b/Tests/Particles/AsyncIO/main.cpp @@ -101,18 +101,35 @@ class MyParticleContainer p.pos(2) = static_cast (plo[2] + (iv[2] + r[2])*dx[2]); #endif - for (int i = 0; i < NSR; ++i) p.rdata(i) = ParticleReal(p.id()); - for (int i = 0; i < NSI; ++i) p.idata(i) = p.id(); + if constexpr (NSR > 0) { + for (int i = 0; i < NSR; ++i) { + p.rdata(i) = ParticleReal(p.id()); + } + } + if constexpr (NSI > 0) { + for (int i = 0; i < NSI; ++i) { + p.idata(i) = p.id(); + } + } host_particles.push_back(p); - for (int i = 0; i < NAR; ++i) - host_real[i].push_back(ParticleReal(p.id())); - for (int i = 0; i < NAI; ++i) - host_int[i].push_back(int(p.id())); - for (int i = 0; i < NumRuntimeRealComps(); ++i) + + if constexpr (NAR > 0) { + for (int i = 0; i < NAR; ++i) { + host_real[i].push_back(ParticleReal(p.id())); + } + } + if constexpr (NAI > 0) { + for (int i = 0; i < NAI; ++i) { + host_int[i].push_back(int(p.id())); + } + } + for (int i = 0; i < NumRuntimeRealComps(); ++i) { host_runtime_real[i].push_back(ParticleReal(p.id())); - for (int i = 0; i < NumRuntimeIntComps(); ++i) + } + for (int i = 0; i < NumRuntimeIntComps(); ++i) { host_runtime_int[i].push_back(int(p.id())); + } } } @@ -188,8 +205,9 @@ void test_async_io(TestParams& parms) // Define the refinement ratio Vector rr(nlevs-1); - for (int lev = 1; lev < nlevs; lev++) + for (int lev = 1; lev < nlevs; lev++) { rr[lev-1] = IntVect(AMREX_D_DECL(2, 2, 2)); + } // This sets the boundary conditions to be doubly or triply periodic int is_per[] = {AMREX_D_DECL(1,1,1)}; @@ -290,8 +308,9 @@ int main(int argc, char* argv[]) pp.get("max_grid_size", parms.max_grid_size); pp.get("nlevs", parms.nlevs); pp.get("nppc", parms.nppc); - if (parms.nppc < 1 && ParallelDescriptor::IOProcessor()) + if (parms.nppc < 1 && ParallelDescriptor::IOProcessor()) { amrex::Abort("Must specify at least one particle per cell"); + } parms.verbose = false; pp.query("verbose", parms.verbose); diff --git a/Tests/Particles/CheckpointRestart/main.cpp b/Tests/Particles/CheckpointRestart/main.cpp index 1c743280cc9..9d15324d88c 100644 --- a/Tests/Particles/CheckpointRestart/main.cpp +++ b/Tests/Particles/CheckpointRestart/main.cpp @@ -122,12 +122,14 @@ void test () amrex::Print() << " done \n"; Vector particle_realnames; - for (int i = 0; i < NStructReal + NArrayReal; ++i) + for (int i = 0; i < NStructReal + NArrayReal; ++i) { particle_realnames.push_back("particle_real_component_" + std::to_string(i)); + } Vector particle_intnames; - for (int i = 0; i < NStructInt + NArrayInt; ++i) + for (int i = 0; i < NStructInt + NArrayInt; ++i) { particle_intnames.push_back("particle_int_component_" + std::to_string(i)); + } for (int ts = 0; ts < nparticlefile; ts++) { std::snprintf(fname, sizeof fname, "%splt%05d", directory.c_str(), ts); @@ -194,8 +196,9 @@ void set_grids_nested (Vector& domains, domains[0].setBig(domain_hi); ref_ratio.resize(nlevs-1); - for (int lev = 1; lev < nlevs; lev++) + for (int lev = 1; lev < nlevs; lev++) { ref_ratio[lev-1] = IntVect(AMREX_D_DECL(2, 2, 2)); + } grids.resize(nlevs); grids[0].define(domains[0]); diff --git a/Tests/Particles/CheckpointRestartSOA/main.cpp b/Tests/Particles/CheckpointRestartSOA/main.cpp index 7fa6475b4cc..e8629560196 100644 --- a/Tests/Particles/CheckpointRestartSOA/main.cpp +++ b/Tests/Particles/CheckpointRestartSOA/main.cpp @@ -120,12 +120,14 @@ void 
test () amrex::Print() << " done \n"; Vector particle_realnames; - for (int i = 0; i < NReal; ++i) + for (int i = 0; i < NReal; ++i) { particle_realnames.push_back("particle_real_component_" + std::to_string(i)); + } Vector particle_intnames; - for (int i = 0; i < NInt; ++i) + for (int i = 0; i < NInt; ++i) { particle_intnames.push_back("particle_int_component_" + std::to_string(i)); + } for (int ts = 0; ts < nparticlefile; ts++) { std::snprintf(fname, sizeof fname, "%splt%05d", directory.c_str(), ts); @@ -217,8 +219,9 @@ void set_grids_nested (Vector& domains, domains[0].setBig(domain_hi); ref_ratio.resize(nlevs-1); - for (int lev = 1; lev < nlevs; lev++) + for (int lev = 1; lev < nlevs; lev++) { ref_ratio[lev-1] = IntVect(AMREX_D_DECL(2, 2, 2)); + } grids.resize(nlevs); grids[0].define(domains[0]); diff --git a/Tests/Particles/DenseBins/main.cpp b/Tests/Particles/DenseBins/main.cpp index dd3019d5951..cc40e2fcb77 100644 --- a/Tests/Particles/DenseBins/main.cpp +++ b/Tests/Particles/DenseBins/main.cpp @@ -47,7 +47,7 @@ void checkAnswer (const amrex::DenseBins& bins) for (int i = 0; i < bins.numBins(); ++i) { auto start = offsets[i ]; auto stop = offsets[i+1]; - if (start == stop) continue; + if (start == stop) { continue; } for (auto j = start+1; j < stop; ++j) { AMREX_ALWAYS_ASSERT(bins_ptr[perm[start]] == bins_ptr[perm[j]]); diff --git a/Tests/Particles/GhostsAndVirtuals/main.cpp b/Tests/Particles/GhostsAndVirtuals/main.cpp index 6ed294b11dc..02384eb43ea 100644 --- a/Tests/Particles/GhostsAndVirtuals/main.cpp +++ b/Tests/Particles/GhostsAndVirtuals/main.cpp @@ -43,13 +43,15 @@ void test_ghosts_and_virtuals (TestParams& parms) // Define the refinement ratio Vector rr(nlevs); - for (int lev = 1; lev < nlevs; lev++) + for (int lev = 1; lev < nlevs; lev++) { rr.at(lev-1) = 2; + } // This sets the boundary conditions to be doubly or triply periodic std::array is_per; - for (int i = 0; i < BL_SPACEDIM; i++) + for (int i = 0; i < BL_SPACEDIM; i++) { is_per.at(i) = 1; + } // This defines a Geometry object which is useful for writing the plotfiles Vector geom(nlevs); @@ -135,13 +137,15 @@ void test_ghosts_and_virtuals_ascii (TestParams& parms) // Define the refinement ratio Vector rr(nlevs); rr[0] = 2; - for (int lev = 1; lev < nlevs; lev++) + for (int lev = 1; lev < nlevs; lev++) { rr.at(lev) = 2; + } // This sets the boundary conditions to be doubly or triply periodic std::array is_per; - for (int i = 0; i < BL_SPACEDIM; i++) + for (int i = 0; i < BL_SPACEDIM; i++) { is_per[i] = 1; + } // This defines a Geometry object which is useful for writing the plotfiles Vector geom(nlevs); @@ -162,8 +166,9 @@ void test_ghosts_and_virtuals_ascii (TestParams& parms) #define STRIP while( is.get() != '\n' ) {} std::ifstream is(regrid_grids_file.c_str(),std::ios::in); - if (!is.good()) + if (!is.good()) { amrex::FileOpenFailed(regrid_grids_file); + } int in_finest,ngrid; @@ -373,13 +378,15 @@ void test_ghosts_and_virtuals_randomperbox (TestParams& parms) // Define the refinement ratio Vector rr(nlevs); - for (int lev = 1; lev < nlevs; lev++) + for (int lev = 1; lev < nlevs; lev++) { rr.at(lev-1) = 2; + } // This sets the boundary conditions to be doubly or triply periodic std::array is_per; - for (int i = 0; i < BL_SPACEDIM; i++) + for (int i = 0; i < BL_SPACEDIM; i++) { is_per.at(i) = 1; + } // This defines a Geometry object which is useful for writing the plotfiles Vector geom(nlevs); @@ -566,13 +573,15 @@ void test_ghosts_and_virtuals_onepercell (TestParams& parms) // Define the refinement ratio Vector 
rr(nlevs); - for (int lev = 1; lev < nlevs; lev++) + for (int lev = 1; lev < nlevs; lev++) { rr.at(lev-1) = 2; + } // This sets the boundary conditions to be doubly or triply periodic std::array is_per; - for (int i = 0; i < BL_SPACEDIM; i++) + for (int i = 0; i < BL_SPACEDIM; i++) { is_per.at(i) = 1; + } // This defines a Geometry object which is useful for writing the plotfiles Vector geom(nlevs); @@ -740,8 +749,9 @@ int main(int argc, char* argv[]) pp.get("max_grid_size", parms.max_grid_size); pp.get("nlevs", parms.nlevs); pp.get("nppc", parms.nppc); - if (parms.nppc < 1 && ParallelDescriptor::IOProcessor()) + if (parms.nppc < 1 && ParallelDescriptor::IOProcessor()) { amrex::Abort("Must specify at least one particle per cell"); + } parms.verbose = false; pp.query("verbose", parms.verbose); diff --git a/Tests/Particles/InitRandom/main.cpp b/Tests/Particles/InitRandom/main.cpp index 14eb0c715c1..43a7b838710 100644 --- a/Tests/Particles/InitRandom/main.cpp +++ b/Tests/Particles/InitRandom/main.cpp @@ -159,8 +159,9 @@ void set_grids_nested (Vector& domains, domains[0].setBig(domain_hi); ref_ratio.resize(nlevs-1); - for (int lev = 1; lev < nlevs; lev++) + for (int lev = 1; lev < nlevs; lev++) { ref_ratio[lev-1] = IntVect(AMREX_D_DECL(2, 2, 2)); + } grids.resize(nlevs); grids[0].define(domains[0]); diff --git a/Tests/Particles/Intersection/main.cpp b/Tests/Particles/Intersection/main.cpp index 5a8e87711f2..48ccb3c0bcd 100644 --- a/Tests/Particles/Intersection/main.cpp +++ b/Tests/Particles/Intersection/main.cpp @@ -32,8 +32,9 @@ void testIntersection() params.is_periodic)}; Vector rr(params.nlevs-1); - for (int lev = 1; lev < params.nlevs; lev++) + for (int lev = 1; lev < params.nlevs; lev++) { rr[lev-1] = IntVect(AMREX_D_DECL(2,2,2)); + } RealBox real_box; for (int n = 0; n < AMREX_SPACEDIM; n++) @@ -77,7 +78,7 @@ void testIntersection() const Box& box = ba[lev][i]; Gpu::HostVector host_cells; - for (IntVect iv = box.smallEnd(); iv <= box.bigEnd(); box.next(iv)) host_cells.push_back(iv); + for (IntVect iv = box.smallEnd(); iv <= box.bigEnd(); box.next(iv)) { host_cells.push_back(iv); } //host_cells.push_back(box.smallEnd()); auto const num_cells = int(host_cells.size()); diff --git a/Tests/Particles/NeighborParticles/MDParticleContainer.cpp b/Tests/Particles/NeighborParticles/MDParticleContainer.cpp index bcbf6760b0e..c4d4d800847 100644 --- a/Tests/Particles/NeighborParticles/MDParticleContainer.cpp +++ b/Tests/Particles/NeighborParticles/MDParticleContainer.cpp @@ -100,10 +100,12 @@ InitParticles(const IntVect& a_num_particles_per_cell, p.idata(0) = mfi.index(); host_particles.push_back(p); - for (int i = 0; i < NumRealComps(); ++i) + for (int i = 0; i < NumRealComps(); ++i) { host_real[i].push_back(ParticleReal(mfi.index())); - for (int i = 0; i < NumIntComps(); ++i) + } + for (int i = 0; i < NumIntComps(); ++i) { host_int[i].push_back(mfi.index()); + } } } @@ -212,7 +214,7 @@ void MDParticleContainer::moveParticles(amrex::ParticleReal dx) auto& ptile = plev[std::make_pair(gid, tid)]; auto& aos = ptile.GetArrayOfStructs(); - ParticleType* pstruct = &(aos[0]); + ParticleType* pstruct = aos.data(); const size_t np = aos.numParticles(); @@ -251,7 +253,7 @@ void MDParticleContainer::checkNeighborParticles() { int gid = mfi.index(); - if (gid != 0) continue; + if (gid != 0) { continue; } int tid = mfi.LocalTileIndex(); auto index = std::make_pair(gid, tid); @@ -320,7 +322,7 @@ void MDParticleContainer::checkNeighborList() for (int j = 0; j < np_total; j++) { // Don't be your own neighbor. 
- if ( i == j ) continue; + if ( i == j ) { continue; } ParticleType& p2 = h_pstruct[j]; AMREX_D_TERM(Real dx = p1.pos(0) - p2.pos(0);, diff --git a/Tests/Particles/NeighborParticles/main.cpp b/Tests/Particles/NeighborParticles/main.cpp index 7853c568ce8..92e2a0268ef 100644 --- a/Tests/Particles/NeighborParticles/main.cpp +++ b/Tests/Particles/NeighborParticles/main.cpp @@ -85,41 +85,49 @@ void testNeighborParticles () IntVect nppc(params.num_ppc); - if (ParallelDescriptor::MyProc() == dm[0]) + if (ParallelDescriptor::MyProc() == dm[0]) { amrex::PrintToFile("neighbor_test") << "About to initialize particles \n"; + } pc.InitParticles(nppc, 1.0, 0.0); - if (ParallelDescriptor::MyProc() == dm[0]) + if (ParallelDescriptor::MyProc() == dm[0]) { amrex::PrintToFile("neighbor_test") << "Check neighbors after init ... \n"; + } pc.checkNeighborParticles(); pc.fillNeighbors(); - if (ParallelDescriptor::MyProc() == dm[0]) + if (ParallelDescriptor::MyProc() == dm[0]) { amrex::PrintToFile("neighbor_test") << "Check neighbors after fill ... \n"; + } pc.checkNeighborParticles(); pc.updateNeighbors(); - if (ParallelDescriptor::MyProc() == dm[0]) + if (ParallelDescriptor::MyProc() == dm[0]) { amrex::PrintToFile("neighbor_test") << "Check neighbors after update ... \n"; + } pc.checkNeighborParticles(); - if (ParallelDescriptor::MyProc() == dm[0]) + if (ParallelDescriptor::MyProc() == dm[0]) { amrex::PrintToFile("neighbor_test") << "Now resetting the particle test_id values \n"; + } pc.reset_test_id(); - if (ParallelDescriptor::MyProc() == dm[0]) + if (ParallelDescriptor::MyProc() == dm[0]) { amrex::PrintToFile("neighbor_test") << "Check neighbors after reset ... \n"; + } pc.checkNeighborParticles(); - if (ParallelDescriptor::MyProc() == dm[0]) + if (ParallelDescriptor::MyProc() == dm[0]) { amrex::PrintToFile("neighbor_test") << "Now updateNeighbors again ... \n"; + } pc.updateNeighbors(); - if (ParallelDescriptor::MyProc() == dm[0]) + if (ParallelDescriptor::MyProc() == dm[0]) { amrex::PrintToFile("neighbor_test") << "Check neighbors after update ... 
\n"; + } pc.checkNeighborParticles(); ParallelDescriptor::Barrier(); diff --git a/Tests/Particles/ParallelContext/main.cpp b/Tests/Particles/ParallelContext/main.cpp index 5deb22245ca..3208b1f3597 100644 --- a/Tests/Particles/ParallelContext/main.cpp +++ b/Tests/Particles/ParallelContext/main.cpp @@ -106,18 +106,22 @@ class TestParticleContainer p.pos(2) = static_cast (plo[2] + (iv[2] + r[2])*dx[2]); #endif - for (int i = 0; i < NSR; ++i) p.rdata(i) = ParticleReal(p.id()); - for (int i = 0; i < NSI; ++i) p.idata(i) = int(p.id()); + for (int i = 0; i < NSR; ++i) { p.rdata(i) = ParticleReal(p.id()); } + for (int i = 0; i < NSI; ++i) { p.idata(i) = int(p.id()); } host_particles.push_back(p); - for (int i = 0; i < NAR; ++i) + for (int i = 0; i < NAR; ++i) { host_real[i].push_back(ParticleReal(p.id())); - for (int i = 0; i < NAI; ++i) + } + for (int i = 0; i < NAI; ++i) { host_int[i].push_back(int(p.id())); - for (int i = 0; i < NumRuntimeRealComps(); ++i) + } + for (int i = 0; i < NumRuntimeRealComps(); ++i) { host_runtime_real[i].push_back(ParticleReal(p.id())); - for (int i = 0; i < NumRuntimeIntComps(); ++i) + } + for (int i = 0; i < NumRuntimeIntComps(); ++i) { host_runtime_int[i].push_back(int(p.id())); + } } } @@ -184,7 +188,7 @@ class TestParticleContainer int tid = mfi.LocalTileIndex(); auto& ptile = plev[std::make_pair(gid, tid)]; auto& aos = ptile.GetArrayOfStructs(); - ParticleType* pstruct = &(aos[0]); + ParticleType* pstruct = aos.data(); const size_t np = aos.numParticles(); if (do_random == 0) @@ -328,7 +332,7 @@ void testParallelContext () int myproc = ParallelContext::MyProcSub(); int task_me = myproc / (amrex::max(rank_n, 2) / 2); - if (task_me > 1) task_me = 1; + if (task_me > 1) { task_me = 1; } #ifdef BL_USE_MPI MPI_Comm new_comm; @@ -394,11 +398,13 @@ void testParallelContext () { pc.moveParticles(params.move_dir, params.do_random); pc.RedistributeLocal(); - if (params.sort) pc.SortParticlesByCell(); + if (params.sort) { pc.SortParticlesByCell(); } pc.checkAnswer(); } - if (geom.isAllPeriodic()) AMREX_ALWAYS_ASSERT(np_old == pc.TotalNumberOfParticles()); + if (geom.isAllPeriodic()) { + AMREX_ALWAYS_ASSERT(np_old == pc.TotalNumberOfParticles()); + } } if (task_me == 1) @@ -417,11 +423,13 @@ void testParallelContext () { pc.moveParticles(params.move_dir, params.do_random); pc.RedistributeLocal(); - if (params.sort) pc.SortParticlesByCell(); + if (params.sort) { pc.SortParticlesByCell(); } pc.checkAnswer(); } - if (geom.isAllPeriodic()) AMREX_ALWAYS_ASSERT(np_old == pc.TotalNumberOfParticles()); + if (geom.isAllPeriodic()) { + AMREX_ALWAYS_ASSERT(np_old == pc.TotalNumberOfParticles()); + } } } diff --git a/Tests/Particles/ParticleIterator/main.cpp b/Tests/Particles/ParticleIterator/main.cpp index a195a905008..aa54681d426 100644 --- a/Tests/Particles/ParticleIterator/main.cpp +++ b/Tests/Particles/ParticleIterator/main.cpp @@ -31,8 +31,9 @@ int main(int argc, char* argv[]) const Box domain(domain_lo, domain_hi); Vector rr(nlevs-1); - for (int lev = 1; lev < nlevs; lev++) + for (int lev = 1; lev < nlevs; lev++) { rr[lev-1] = 2; + } int is_per[] = {AMREX_D_DECL(1,1,1)}; @@ -42,12 +43,14 @@ int main(int argc, char* argv[]) Vector ba(nlevs); ba[0].define(domain); - for (int lev = 0; lev < nlevs; lev++) + for (int lev = 0; lev < nlevs; lev++) { ba[lev].maxSize(max_grid_size); + } Vector dmap(nlevs); - for (int lev = 0; lev < nlevs; lev++) + for (int lev = 0; lev < nlevs; lev++) { dmap[lev].define(ba[lev]); + } using MyParticleContainer = ParticleContainer<1+BL_SPACEDIM>; 
MyParticleContainer MyPC(geom, dmap, ba, rr); diff --git a/Tests/Particles/ParticleMesh/main.cpp b/Tests/Particles/ParticleMesh/main.cpp index cc116593b86..df398a52756 100644 --- a/Tests/Particles/ParticleMesh/main.cpp +++ b/Tests/Particles/ParticleMesh/main.cpp @@ -55,8 +55,9 @@ void testParticleMesh (TestParams& parms) myPC.SetVerbose(false); int num_particles = parms.nppc * parms.nx * parms.ny * parms.nz; - if (ParallelDescriptor::IOProcessor()) + if (ParallelDescriptor::IOProcessor()) { std::cout << "Total number of particles : " << num_particles << '\n' << '\n'; + } bool serialize = true; int iseed = 451; @@ -165,8 +166,9 @@ int main(int argc, char* argv[]) pp.get("nz", parms.nz); pp.get("max_grid_size", parms.max_grid_size); pp.get("nppc", parms.nppc); - if (parms.nppc < 1 && ParallelDescriptor::IOProcessor()) + if (parms.nppc < 1 && ParallelDescriptor::IOProcessor()) { amrex::Abort("Must specify at least one particle per cell"); + } parms.verbose = false; pp.query("verbose", parms.verbose); diff --git a/Tests/Particles/ParticleMeshMultiLevel/main.cpp b/Tests/Particles/ParticleMeshMultiLevel/main.cpp index 53b11252cae..1cd6f45e1c5 100644 --- a/Tests/Particles/ParticleMeshMultiLevel/main.cpp +++ b/Tests/Particles/ParticleMeshMultiLevel/main.cpp @@ -25,8 +25,9 @@ struct TestParams { void testParticleMesh (TestParams& parms) { Vector rr(parms.nlevs-1); - for (int lev = 1; lev < parms.nlevs; lev++) + for (int lev = 1; lev < parms.nlevs; lev++) { rr[lev-1] = IntVect(AMREX_D_DECL(2,2,2)); + } RealBox real_box; for (int n = 0; n < BL_SPACEDIM; n++) { @@ -161,8 +162,9 @@ int main(int argc, char* argv[]) pp.get("max_grid_size", parms.max_grid_size); pp.get("nppc", parms.nppc); pp.get("nlevs", parms.nlevs); - if (parms.nppc < 1 && ParallelDescriptor::IOProcessor()) + if (parms.nppc < 1 && ParallelDescriptor::IOProcessor()) { amrex::Abort("Must specify at least one particle per cell"); + } parms.verbose = false; pp.query("verbose", parms.verbose); diff --git a/Tests/Particles/ParticleReduce/main.cpp b/Tests/Particles/ParticleReduce/main.cpp index 5e461084273..e7b912902c6 100644 --- a/Tests/Particles/ParticleReduce/main.cpp +++ b/Tests/Particles/ParticleReduce/main.cpp @@ -82,14 +82,16 @@ class TestParticleContainer p.pos(1) = y;, p.pos(2) = z;) - for (int i = 0; i < NSR; ++i) p.rdata(i) = ParticleReal(i); - for (int i = 0; i < NSI; ++i) p.idata(i) = i; + for (int i = 0; i < NSR; ++i) { p.rdata(i) = ParticleReal(i); } + for (int i = 0; i < NSI; ++i) { p.idata(i) = i; } host_particles.push_back(p); - for (int i = 0; i < NAR; ++i) + for (int i = 0; i < NAR; ++i) { host_real[i].push_back(ParticleReal(i)); - for (int i = 0; i < NAI; ++i) + } + for (int i = 0; i < NAI; ++i) { host_int[i].push_back(i); + } } } @@ -176,8 +178,9 @@ void testReduce () IntVect nppc(params.num_ppc); - if (ParallelDescriptor::MyProc() == dm[0]) + if (ParallelDescriptor::MyProc() == dm[0]) { amrex::Print() << "About to initialize particles \n"; + } pc.InitParticles(nppc); diff --git a/Tests/Particles/ParticleTransformations/main.cpp b/Tests/Particles/ParticleTransformations/main.cpp index a55111f4b78..f88b68444b8 100644 --- a/Tests/Particles/ParticleTransformations/main.cpp +++ b/Tests/Particles/ParticleTransformations/main.cpp @@ -83,14 +83,25 @@ class TestParticleContainer p.pos(1) = y;, p.pos(2) = z;) - for (int i = 0; i < NSR; ++i) p.rdata(i) = ParticleReal(i); - for (int i = 0; i < NSI; ++i) p.idata(i) = i; + if constexpr (NSR > 0) { + for (int i = 0; i < NSR; ++i) { p.rdata(i) = ParticleReal(i); } + } + if constexpr 
(NSI > 0) { + for (int i = 0; i < NSI; ++i) { p.idata(i) = i; } + } host_particles.push_back(p); - for (int i = 0; i < NAR; ++i) - host_real[i].push_back(ParticleReal(i)); - for (int i = 0; i < NAI; ++i) - host_int[i].push_back(i); + + if constexpr (NAR > 0) { + for (int i = 0; i < NAR; ++i) { + host_real[i].push_back(ParticleReal(i)); + } + } + if constexpr (NAI > 0) { + for (int i = 0; i < NAI; ++i) { + host_int[i].push_back(i); + } + } } } @@ -139,14 +150,18 @@ struct Transformer int src_i, int dst_i) const noexcept { dst.m_aos[dst_i] = src.m_aos[src_i]; - for (int j = 0; j < DstData::NAR; ++j) + for (int j = 0; j < DstData::NAR; ++j) { dst.m_rdata[j][dst_i] = src.m_rdata[j][src_i]; - for (int j = 0; j < dst.m_num_runtime_real; ++j) + } + for (int j = 0; j < dst.m_num_runtime_real; ++j) { dst.m_runtime_rdata[j][dst_i] = src.m_runtime_rdata[j][src_i]; - for (int j = 0; j < DstData::NAI; ++j) + } + for (int j = 0; j < DstData::NAI; ++j) { dst.m_idata[j][dst_i] = m_factor*src.m_idata[j][src_i]; - for (int j = 0; j < dst.m_num_runtime_int; ++j) + } + for (int j = 0; j < dst.m_num_runtime_int; ++j) { dst.m_runtime_idata[j][dst_i] = src.m_runtime_idata[j][src_i]; + } } }; @@ -170,24 +185,32 @@ struct TwoWayTransformer int src_i, int dst1_i, int dst2_i) const noexcept { dst1.m_aos[dst1_i] = src.m_aos[src_i]; - for (int j = 0; j < DstData::NAR; ++j) + for (int j = 0; j < DstData::NAR; ++j) { dst1.m_rdata[j][dst1_i] = src.m_rdata[j][src_i]; - for (int j = 0; j < dst1.m_num_runtime_real; ++j) + } + for (int j = 0; j < dst1.m_num_runtime_real; ++j) { dst1.m_runtime_rdata[j][dst1_i] = src.m_runtime_rdata[j][src_i]; - for (int j = 0; j < DstData::NAI; ++j) + } + for (int j = 0; j < DstData::NAI; ++j) { dst1.m_idata[j][dst1_i] = m_factor1*src.m_idata[j][src_i]; - for (int j = 0; j < dst1.m_num_runtime_int; ++j) + } + for (int j = 0; j < dst1.m_num_runtime_int; ++j) { dst1.m_runtime_idata[j][dst1_i] = src.m_runtime_idata[j][src_i]; + } dst2.m_aos[dst2_i] = src.m_aos[src_i]; - for (int j = 0; j < DstData::NAR; ++j) + for (int j = 0; j < DstData::NAR; ++j) { dst2.m_rdata[j][dst2_i] = src.m_rdata[j][src_i]; - for (int j = 0; j < dst2.m_num_runtime_real; ++j) + } + for (int j = 0; j < dst2.m_num_runtime_real; ++j) { dst2.m_runtime_rdata[j][dst2_i] = src.m_runtime_rdata[j][src_i]; - for (int j = 0; j < DstData::NAI; ++j) + } + for (int j = 0; j < DstData::NAI; ++j) { dst2.m_idata[j][dst2_i] = m_factor2*src.m_idata[j][src_i]; - for (int j = 0; j < dst2.m_num_runtime_int; ++j) + } + for (int j = 0; j < dst2.m_num_runtime_int; ++j) { dst2.m_runtime_idata[j][dst2_i] = src.m_runtime_idata[j][src_i]; + } } }; @@ -545,8 +568,9 @@ void testTransformations () IntVect nppc(params.num_ppc); - if (ParallelDescriptor::MyProc() == dm[0]) + if (ParallelDescriptor::MyProc() == dm[0]) { amrex::Print() << "About to initialize particles \n"; + } pc.InitParticles(nppc); diff --git a/Tests/Particles/Redistribute/main.cpp b/Tests/Particles/Redistribute/main.cpp index 83ed2cd390c..139c63dd0e1 100644 --- a/Tests/Particles/Redistribute/main.cpp +++ b/Tests/Particles/Redistribute/main.cpp @@ -118,18 +118,22 @@ class TestParticleContainer p.pos(2) = static_cast (plo[2] + (iv[2] + r[2])*dx[2]); #endif - for (int i = 0; i < NSR; ++i) p.rdata(i) = ParticleReal(p.id()); - for (int i = 0; i < NSI; ++i) p.idata(i) = int(p.id()); + for (int i = 0; i < NSR; ++i) { p.rdata(i) = ParticleReal(p.id()); } + for (int i = 0; i < NSI; ++i) { p.idata(i) = int(p.id()); } host_particles.push_back(p); - for (int i = 0; i < NAR; ++i) + for (int i = 0; i 
< NAR; ++i) { host_real[i].push_back(ParticleReal(p.id())); - for (int i = 0; i < NAI; ++i) + } + for (int i = 0; i < NAI; ++i) { host_int[i].push_back(int(p.id())); - for (int i = 0; i < NumRuntimeRealComps(); ++i) + } + for (int i = 0; i < NumRuntimeRealComps(); ++i) { host_runtime_real[i].push_back(ParticleReal(p.id())); - for (int i = 0; i < NumRuntimeIntComps(); ++i) + } + for (int i = 0; i < NumRuntimeIntComps(); ++i) { host_runtime_int[i].push_back(int(p.id())); + } } } @@ -196,7 +200,7 @@ class TestParticleContainer int tid = mfi.LocalTileIndex(); auto& ptile = plev[std::make_pair(gid, tid)]; auto& aos = ptile.GetArrayOfStructs(); - ParticleType* pstruct = &(aos[0]); + ParticleType* pstruct = aos.data(); const size_t np = aos.numParticles(); if (do_random == 0) @@ -246,7 +250,7 @@ class TestParticleContainer int tid = mfi.LocalTileIndex(); auto& ptile = plev[std::make_pair(gid, tid)]; auto& aos = ptile.GetArrayOfStructs(); - ParticleType* pstruct = &(aos[0]); + ParticleType* pstruct = aos.data(); const size_t np = aos.numParticles(); amrex::ParallelFor( np, [=] AMREX_GPU_DEVICE (int i) noexcept { @@ -370,8 +374,9 @@ void testRedistribute () params.is_periodic)}; Vector rr(params.nlevs-1); - for (int lev = 1; lev < params.nlevs; lev++) + for (int lev = 1; lev < params.nlevs; lev++) { rr[lev-1] = IntVect(AMREX_D_DECL(2,2,2)); + } RealBox real_box; for (int n = 0; n < BL_SPACEDIM; n++) @@ -416,7 +421,7 @@ void testRedistribute () auto np_old = pc.TotalNumberOfParticles(); - if (params.sort) pc.SortParticlesByCell(); + if (params.sort) { pc.SortParticlesByCell(); } for (int i = 0; i < params.nsteps; ++i) { @@ -429,7 +434,7 @@ void testRedistribute () pc.negateEven(); } pc.RedistributeLocal(); - if (params.sort) pc.SortParticlesByCell(); + if (params.sort) { pc.SortParticlesByCell(); } pc.checkAnswer(); } @@ -441,7 +446,7 @@ void testRedistribute () { DistributionMapping new_dm; Vector pmap; - for (int i = 0; i < ba[lev].size(); ++i) pmap.push_back(i % NProcs); + for (int i = 0; i < ba[lev].size(); ++i) { pmap.push_back(i % NProcs); } new_dm.define(pmap); pc.SetParticleDistributionMap(lev, new_dm); } @@ -461,7 +466,7 @@ void testRedistribute () { DistributionMapping new_dm; Vector pmap; - for (int i = 0; i < ba[lev].size(); ++i) pmap.push_back((i+1) % NProcs); + for (int i = 0; i < ba[lev].size(); ++i) { pmap.push_back((i+1) % NProcs); } new_dm.define(pmap); pc.SetParticleDistributionMap(lev, new_dm); } @@ -491,7 +496,9 @@ void testRedistribute () } } - if (geom[0].isAllPeriodic()) AMREX_ALWAYS_ASSERT(np_old == pc.TotalNumberOfParticles()); + if (geom[0].isAllPeriodic()) { + AMREX_ALWAYS_ASSERT(np_old == pc.TotalNumberOfParticles()); + } // the way this test is set up, if we make it here we pass amrex::Print() << "pass \n"; diff --git a/Tests/Particles/RedistributeSOA/main.cpp b/Tests/Particles/RedistributeSOA/main.cpp index 0bf11f6f11c..eba02904fb7 100644 --- a/Tests/Particles/RedistributeSOA/main.cpp +++ b/Tests/Particles/RedistributeSOA/main.cpp @@ -116,14 +116,18 @@ class TestParticleContainer host_real[2].push_back(static_cast (plo[2] + (iv[2] + r[2])*dx[2])); #endif - for (int i = AMREX_SPACEDIM; i < NR; ++i) + for (int i = AMREX_SPACEDIM; i < NR; ++i) { host_real[i].push_back(static_cast(id)); - for (int i = 2; i < NI; ++i) + } + for (int i = 2; i < NI; ++i) { host_int[i].push_back(static_cast(id)); - for (int i = 0; i < NumRuntimeRealComps(); ++i) + } + for (int i = 0; i < NumRuntimeRealComps(); ++i) { host_runtime_real[i].push_back(static_cast(id)); - for (int i = 0; i < 
NumRuntimeIntComps(); ++i) + } + for (int i = 0; i < NumRuntimeIntComps(); ++i) { host_runtime_int[i].push_back(static_cast(id)); + } } } @@ -345,12 +349,14 @@ void testRedistribute () get_test_params(params, "redistribute"); int is_per[BL_SPACEDIM]; - for (int & d : is_per) + for (int & d : is_per) { d = params.is_periodic; + } Vector rr(params.nlevs-1); - for (int lev = 1; lev < params.nlevs; lev++) + for (int lev = 1; lev < params.nlevs; lev++) { rr[lev-1] = IntVect(AMREX_D_DECL(2,2,2)); + } RealBox real_box; for (int n = 0; n < BL_SPACEDIM; n++) @@ -396,7 +402,7 @@ void testRedistribute () auto np_old = pc.TotalNumberOfParticles(); - if (params.sort) pc.SortParticlesByCell(); + if (params.sort) { pc.SortParticlesByCell(); } for (int i = 0; i < params.nsteps; ++i) { @@ -410,7 +416,7 @@ void testRedistribute () pc.negateEven(); } pc.RedistributeLocal(); - if (params.sort) pc.SortParticlesByCell(); + if (params.sort) { pc.SortParticlesByCell(); } pc.checkAnswer(); } @@ -422,7 +428,7 @@ void testRedistribute () { DistributionMapping new_dm; Vector pmap; - for (int i = 0; i < ba[lev].size(); ++i) pmap.push_back(i % NProcs); + for (int i = 0; i < ba[lev].size(); ++i) { pmap.push_back(i % NProcs); } new_dm.define(pmap); pc.SetParticleDistributionMap(lev, new_dm); } @@ -442,7 +448,7 @@ void testRedistribute () { DistributionMapping new_dm; Vector pmap; - for (int i = 0; i < ba[lev].size(); ++i) pmap.push_back((i+1) % NProcs); + for (int i = 0; i < ba[lev].size(); ++i) { pmap.push_back((i+1) % NProcs); } new_dm.define(pmap); pc.SetParticleDistributionMap(lev, new_dm); } diff --git a/Tests/Particles/SOAParticle/GNUmakefile b/Tests/Particles/SOAParticle/GNUmakefile new file mode 100644 index 00000000000..9f49d3ec029 --- /dev/null +++ b/Tests/Particles/SOAParticle/GNUmakefile @@ -0,0 +1,22 @@ +AMREX_HOME = ../../../ + +DEBUG = FALSE + +DIM = 3 + +COMP = gcc + +USE_MPI = TRUE +USE_OMP = FALSE +USE_CUDA = FALSE + +#TINY_PROFILE = TRUE +USE_PARTICLES = TRUE + +include $(AMREX_HOME)/Tools/GNUMake/Make.defs + +include ./Make.package +include $(AMREX_HOME)/Src/Base/Make.package +include $(AMREX_HOME)/Src/Particle/Make.package + +include $(AMREX_HOME)/Tools/GNUMake/Make.rules diff --git a/Tests/Particles/SOAParticle/Make.package b/Tests/Particles/SOAParticle/Make.package new file mode 100644 index 00000000000..6b4b865e8fc --- /dev/null +++ b/Tests/Particles/SOAParticle/Make.package @@ -0,0 +1 @@ +CEXE_sources += main.cpp diff --git a/Tests/Particles/SOAParticle/main.cpp b/Tests/Particles/SOAParticle/main.cpp index 56a621daf40..d800f9ddffb 100644 --- a/Tests/Particles/SOAParticle/main.cpp +++ b/Tests/Particles/SOAParticle/main.cpp @@ -15,8 +15,9 @@ template class Allocator=DefaultAllocator> void addParticles () { int is_per[AMREX_SPACEDIM]; - for (int & d : is_per) + for (int & d : is_per) { d = 1; + } RealBox real_box; for (int n = 0; n < AMREX_SPACEDIM; n++) @@ -49,8 +50,9 @@ void addParticles () for (int i = 0; i < add_num_particles; ++i) { - for (int d = 0; d < AMREX_SPACEDIM; d++) + for (int d = 0; d < AMREX_SPACEDIM; d++) { ptile1.pos(i, d) = 12.0; + } ptile1.getParticleTileData().rdata(AMREX_SPACEDIM)[i] = 1.2; // w ptile1.push_back_int(0, ParticleType::NextID()); @@ -184,7 +186,3 @@ int main(int argc, char* argv[]) } amrex::Finalize(); } - - - - diff --git a/Tests/Particles/SparseBins/main.cpp b/Tests/Particles/SparseBins/main.cpp index 7acd1d0767e..7a028072f5c 100644 --- a/Tests/Particles/SparseBins/main.cpp +++ b/Tests/Particles/SparseBins/main.cpp @@ -32,8 +32,9 @@ void testIntersection() 
params.is_periodic)}; Vector rr(params.nlevs-1); - for (int lev = 1; lev < params.nlevs; lev++) + for (int lev = 1; lev < params.nlevs; lev++) { rr[lev-1] = IntVect(AMREX_D_DECL(2,2,2)); + } RealBox real_box; for (int n = 0; n < AMREX_SPACEDIM; n++) diff --git a/Tools/Plotfile/fcompare.cpp b/Tools/Plotfile/fcompare.cpp index 7dc5ed361ad..6a7b95daf5a 100644 --- a/Tools/Plotfile/fcompare.cpp +++ b/Tools/Plotfile/fcompare.cpp @@ -366,7 +366,7 @@ int main_main() if (! all_variables_found) { amrex::Print() << " WARNING: not all variables present in both files\n"; - if (abort_if_not_all_found) return EXIT_FAILURE; + if (abort_if_not_all_found) { return EXIT_FAILURE; } } if (any_nans) { diff --git a/Tools/Plotfile/fextract.cpp b/Tools/Plotfile/fextract.cpp index eb1b50a2192..9da5ede5176 100644 --- a/Tools/Plotfile/fextract.cpp +++ b/Tools/Plotfile/fextract.cpp @@ -200,7 +200,7 @@ void main_main() const IntVect ivloc{AMREX_D_DECL(iloc,jloc,kloc)}; - if (fine_level < 0) fine_level = pf.finestLevel(); + if (fine_level < 0) { fine_level = pf.finestLevel(); } // sanity check on valid selected levels if (fine_level > pf.finestLevel() || coarse_level < 0 || coarse_level > fine_level) { amrex::Abort("Invalid level selection"); @@ -373,7 +373,7 @@ void main_main() for (int i = 0; i < posidx.size(); ++i) { ofs << std::setw(25) << std::right << std::setprecision(precision) << posidx[i].first; for (int j = 0; j < var_names.size(); ++j) { - if (std::abs(data[j][posidx[i].second])< tolerance ) data[j][posidx[i].second] = 0.; + if (std::abs(data[j][posidx[i].second])< tolerance ) { data[j][posidx[i].second] = 0.; } ofs << std::setw(25) << std::right << std::setprecision(precision) << data[j][posidx[i].second]; } ofs << "\n"; diff --git a/Tools/Plotfile/fnan.cpp b/Tools/Plotfile/fnan.cpp index 50f9b60c120..ba8f7276790 100644 --- a/Tools/Plotfile/fnan.cpp +++ b/Tools/Plotfile/fnan.cpp @@ -42,7 +42,7 @@ int main_main() int num_nans = 0; for (int b : has_nan) { - if (b) ++num_nans; + if (b) { ++num_nans; } } if (num_nans == 0) { amrex::Print() << " " << std::setw(nwidth+1) << std::left << varname << ": clean" << "\n"; diff --git a/Tools/Plotfile/fsnapshot.cpp b/Tools/Plotfile/fsnapshot.cpp index d35cef2a718..e68f8a33b6d 100644 --- a/Tools/Plotfile/fsnapshot.cpp +++ b/Tools/Plotfile/fsnapshot.cpp @@ -97,8 +97,8 @@ void main_main() // make sure we have valid options set if (do_log) { - if (ldef_mx && def_mx < 0.) amrex::Abort("ERROR: log plot specified with negative maximum"); - if (ldef_mn && def_mn < 0.) amrex::Abort("ERROR: log plot specified with negative minimum"); + if (ldef_mx && def_mx < 0.) { amrex::Abort("ERROR: log plot specified with negative maximum"); } + if (ldef_mn && def_mn < 0.) { amrex::Abort("ERROR: log plot specified with negative minimum"); } } // get the palette @@ -291,7 +291,7 @@ void main_main() int jj = (idir == 2) ? height - 1 - j : j; // flip the data in second image direction int kk = (idir == 2) ? 
k : height - 1 - k; Real rd = realarr(i,jj,kk); - if (do_log) rd = std::log10(rd); + if (do_log) { rd = std::log10(rd); } int id = std::max(0,std::min(255,static_cast((rd-gmn)*fac))); auto c = static_cast(id); constexpr auto cmn = static_cast(1); // avoid zero diff --git a/Tools/Postprocessing/C_Src/particle_compare.cpp b/Tools/Postprocessing/C_Src/particle_compare.cpp index 84ceeed914a..9967625cca6 100644 --- a/Tools/Postprocessing/C_Src/particle_compare.cpp +++ b/Tools/Postprocessing/C_Src/particle_compare.cpp @@ -302,8 +302,8 @@ int sort_particles_ascending(const void *p, const void *q) std::memcpy(&cpu1, iptr1, sizeof(int)); std::memcpy(&cpu2, iptr2, sizeof(int)); - if (cpu1 != cpu2) return (cpu1 - cpu2); - if (id1 != id2 ) return (id1 - id2 ); + if (cpu1 != cpu2) { return (cpu1 - cpu2); } + if (id1 != id2 ) { return (id1 - id2 ); } return 0; } @@ -312,7 +312,7 @@ void compare_particle_chunk(const ParticleHeader& header1, std::vector& norms, int level, int file_num, int np, int offset) { - if (np == 0) return; + if (np == 0) { return; } std::string read_file1 = getDataFileName(header1.par_file_name, level, file_num); std::string read_file2 = getDataFileName(header2.par_file_name, level, file_num);