diff --git a/Source/Advection/Advection.H b/Source/Advection/Advection.H index 1007f1f04..edaa8f8cd 100644 --- a/Source/Advection/Advection.H +++ b/Source/Advection/Advection.H @@ -4,36 +4,32 @@ #include #include #include -#include +#include #include #include #include /** Compute advection tendency for density and potential temperature */ -void AdvectionSrcForRhoAndTheta (const amrex::Box& bx, const amrex::Box& valid_bx, - const amrex::Array4& src, - const amrex::Array4& rho_u, // These are being used - const amrex::Array4& rho_v, // to define the fluxes - const amrex::Array4& omega, - amrex::Real fac, - const amrex::Array4< amrex::Real>& avg_xmom, // These are being defined - const amrex::Array4< amrex::Real>& avg_ymom, // from the rho fluxes - const amrex::Array4< amrex::Real>& avg_zmom, - const amrex::Array4& cell_prim, - const amrex::Array4& z_nd, - const amrex::Array4& detJ, - const amrex::GpuArray& cellSize, - const amrex::Array4& mf_m, - const amrex::Array4& mf_u, - const amrex::Array4& mf_v, - const AdvType horiz_adv_type, const AdvType vert_adv_type, - const bool use_terrain); +void AdvectionSrcForRho (const amrex::Box& bx, const amrex::Box& valid_bx, + const amrex::Array4& src, + const amrex::Array4& rho_u, // These are being used + const amrex::Array4& rho_v, // to define the fluxes + const amrex::Array4& omega, + const amrex::Array4< amrex::Real>& avg_xmom, // These are being defined + const amrex::Array4< amrex::Real>& avg_ymom, // from the rho fluxes + const amrex::Array4< amrex::Real>& avg_zmom, + const amrex::Array4& z_nd, + const amrex::Array4& detJ, + const amrex::GpuArray& cellSize, + const amrex::Array4& mf_m, + const amrex::Array4& mf_u, + const amrex::Array4& mf_v, + const bool use_terrain, + const amrex::GpuArray, AMREX_SPACEDIM>& flx_arr); /** Compute advection tendency for all scalars other than density and potential temperature */ -void AdvectionSrcForScalars (int level, int finest_level, - const amrex::MFIter& mfi, - const amrex::Box& bx, +void AdvectionSrcForScalars (const amrex::Box& bx, const int icomp, const int ncomp, const amrex::Array4& avg_xmom, const amrex::Array4& avg_ymom, @@ -41,11 +37,11 @@ void AdvectionSrcForScalars (int level, int finest_level, const amrex::Array4& cell_prim, const amrex::Array4& src, const amrex::Array4& detJ, - const amrex::Real dt, const amrex::GpuArray& cellSize, const amrex::Array4& mf_m, const AdvType horiz_adv_type, const AdvType vert_adv_type, - const bool use_terrain, const bool is_two_way_coupling); + const bool use_terrain, + const amrex::GpuArray, AMREX_SPACEDIM>& flx_arr); /** Compute advection tendencies for all components of momentum */ void AdvectionSrcForMom (const amrex::Box& bxx, const amrex::Box& bxy, const amrex::Box& bxz, diff --git a/Source/Advection/AdvectionSrcForScalars.H b/Source/Advection/AdvectionSrcForScalars.H index 44e5865a3..5b99c5cbf 100644 --- a/Source/Advection/AdvectionSrcForScalars.H +++ b/Source/Advection/AdvectionSrcForScalars.H @@ -6,13 +6,13 @@ */ template void -AdvectionSrcForScalarsWrapper_N(const amrex::Box& bx, - const int& ncomp, const int& icomp, - const amrex::GpuArray, AMREX_SPACEDIM> flx_arr, - const amrex::Array4& cell_prim, - const amrex::Array4& avg_xmom, - const amrex::Array4& avg_ymom, - const amrex::Array4& avg_zmom) +AdvectionSrcForScalarsWrapper(const amrex::Box& bx, + const int& ncomp, const int& icomp, + const amrex::GpuArray, AMREX_SPACEDIM> flx_arr, + const amrex::Array4& cell_prim, + const amrex::Array4& avg_xmom, + const amrex::Array4& avg_ymom, + const 
amrex::Array4& avg_zmom) { // Instantiate structs for vert/horiz interp InterpType_H interp_prim_h(cell_prim); @@ -30,7 +30,7 @@ AdvectionSrcForScalarsWrapper_N(const amrex::Box& bx, amrex::Real interpx(0.); interp_prim_h.InterpolateInX(i,j,k,prim_index,interpx,avg_xmom(i ,j ,k )); - (flx_arr[0])(i,j,k,n) = avg_xmom(i,j,k) * interpx; + (flx_arr[0])(i,j,k,cons_index) = avg_xmom(i,j,k) * interpx; }); amrex::ParallelFor(ybx, ncomp,[=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { @@ -40,7 +40,7 @@ AdvectionSrcForScalarsWrapper_N(const amrex::Box& bx, amrex::Real interpy(0.); interp_prim_h.InterpolateInY(i,j,k,prim_index,interpy,avg_ymom(i ,j ,k )); - (flx_arr[1])(i,j,k,n) = avg_ymom(i,j,k) * interpy; + (flx_arr[1])(i,j,k,cons_index) = avg_ymom(i,j,k) * interpy; }); amrex::ParallelFor(zbx, ncomp,[=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { @@ -51,7 +51,7 @@ AdvectionSrcForScalarsWrapper_N(const amrex::Box& bx, interp_prim_v.InterpolateInZ(i,j,k,prim_index,interpz,avg_zmom(i ,j ,k )); - (flx_arr[2])(i,j,k,n) = avg_zmom(i,j,k) * interpz; + (flx_arr[2])(i,j,k,cons_index) = avg_zmom(i,j,k) * interpz; }); } @@ -60,7 +60,7 @@ AdvectionSrcForScalarsWrapper_N(const amrex::Box& bx, */ template void -AdvectionSrcForScalarsVert_N(const amrex::Box& bx, +AdvectionSrcForScalarsVert(const amrex::Box& bx, const int& ncomp, const int& icomp, const amrex::GpuArray, AMREX_SPACEDIM> flx_arr, const amrex::Array4& cell_prim, @@ -71,27 +71,27 @@ AdvectionSrcForScalarsVert_N(const amrex::Box& bx, { switch(vert_adv_type) { case AdvType::Centered_2nd: - AdvectionSrcForScalarsWrapper_N(bx, ncomp, icomp, + AdvectionSrcForScalarsWrapper(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom); break; case AdvType::Upwind_3rd: - AdvectionSrcForScalarsWrapper_N(bx, ncomp, icomp, + AdvectionSrcForScalarsWrapper(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom); break; case AdvType::Centered_4th: - AdvectionSrcForScalarsWrapper_N(bx, ncomp, icomp, + AdvectionSrcForScalarsWrapper(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom); break; case AdvType::Upwind_5th: - AdvectionSrcForScalarsWrapper_N(bx, ncomp, icomp, + AdvectionSrcForScalarsWrapper(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom); break; case AdvType::Centered_6th: - AdvectionSrcForScalarsWrapper_N(bx, ncomp, icomp, + AdvectionSrcForScalarsWrapper(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom); break; diff --git a/Source/Advection/AdvectionSrcForState.cpp b/Source/Advection/AdvectionSrcForState.cpp index 4be573815..719f21561 100644 --- a/Source/Advection/AdvectionSrcForState.cpp +++ b/Source/Advection/AdvectionSrcForState.cpp @@ -2,9 +2,6 @@ #include #include #include -#include -#include -#include using namespace amrex; @@ -20,11 +17,9 @@ using namespace amrex; * @param[in] rho_u x-component of momentum * @param[in] rho_v y-component of momentum * @param[in] Omega component of momentum normal to the z-coordinate surface - * @param[in] fac weighting factor for use in defining time-averaged momentum * @param[out] avg_xmom x-component of time-averaged momentum defined in this routine * @param[out] avg_ymom y-component of time-averaged momentum defined in this routine * @param[out] avg_zmom z-component of time-averaged momentum defined in this routine - * @param[in] cell_prim primtive form of scalar variales, here only potential temperature theta * @param[in] z_nd height coordinate at nodes * @param[in] detJ Jacobian of the metric transformation (= 1 if 
use_terrain is false) * @param[in] cellSizeInv inverse of the mesh spacing @@ -37,23 +32,22 @@ using namespace amrex; */ void -AdvectionSrcForRhoAndTheta (const Box& bx, const Box& valid_bx, - const Array4& advectionSrc, - const Array4& rho_u, - const Array4& rho_v, - const Array4& Omega, Real fac, - const Array4< Real>& avg_xmom, - const Array4< Real>& avg_ymom, - const Array4< Real>& avg_zmom, - const Array4& cell_prim, - const Array4& z_nd, const Array4& detJ, - const GpuArray& cellSizeInv, - const Array4& mf_m, - const Array4& mf_u, - const Array4& mf_v, - const AdvType horiz_adv_type, - const AdvType vert_adv_type, - const bool use_terrain) +AdvectionSrcForRho (const Box& bx, const Box& valid_bx, + const Array4& advectionSrc, + const Array4& rho_u, + const Array4& rho_v, + const Array4& Omega, + const Array4< Real>& avg_xmom, + const Array4< Real>& avg_ymom, + const Array4< Real>& avg_zmom, + const Array4& z_nd, + const Array4& detJ, + const GpuArray& cellSizeInv, + const Array4& mf_m, + const Array4& mf_u, + const Array4& mf_v, + const bool use_terrain, + const GpuArray, AMREX_SPACEDIM>& flx_arr) { BL_PROFILE_VAR("AdvectionSrcForRhoAndTheta", AdvectionSrcForRhoAndTheta); auto dxInv = cellSizeInv[0], dyInv = cellSizeInv[1], dzInv = cellSizeInv[2]; @@ -61,182 +55,53 @@ AdvectionSrcForRhoAndTheta (const Box& bx, const Box& valid_bx, // We note that valid_bx is the actual grid, while bx may be a tile within that grid const auto& vbx_hi = ubound(valid_bx); - if (!use_terrain) { - // Inline with 2nd order for efficiency - if (horiz_adv_type == AdvType::Centered_2nd && vert_adv_type == AdvType::Centered_2nd) - { - ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept - { - Real xflux_lo = rho_u(i ,j,k) / mf_u(i ,j ,0); - Real xflux_hi = rho_u(i+1,j,k) / mf_u(i+1,j ,0); - Real yflux_lo = rho_v(i,j ,k) / mf_v(i ,j ,0); - Real yflux_hi = rho_v(i,j+1,k) / mf_v(i ,j+1,0); - Real zflux_lo = Omega(i,j,k ); - Real zflux_hi = Omega(i,j,k+1); - - avg_xmom(i ,j,k) += fac*xflux_lo; - if (i == vbx_hi.x) - avg_xmom(i+1,j,k) += fac*xflux_hi; - avg_ymom(i,j ,k) += fac*yflux_lo; - if (j == vbx_hi.y) - avg_ymom(i,j+1,k) += fac*yflux_hi; - avg_zmom(i,j,k ) += fac*zflux_lo; - if (k == vbx_hi.z) - avg_zmom(i,j,k+1) += fac*zflux_hi; - - Real mfsq = mf_m(i,j,0) * mf_m(i,j,0); - - advectionSrc(i,j,k,0) = -( - ( xflux_hi - xflux_lo ) * dxInv * mfsq + - ( yflux_hi - yflux_lo ) * dyInv * mfsq + - ( zflux_hi - zflux_lo ) * dzInv ); + const Box xbx = surroundingNodes(bx,0); + const Box ybx = surroundingNodes(bx,1); + const Box zbx = surroundingNodes(bx,2); - const int prim_index = 0; - advectionSrc(i,j,k,1) = - 0.5 * ( - ( xflux_hi * (cell_prim(i+1,j,k,prim_index) + cell_prim(i,j,k,prim_index)) - - xflux_lo * (cell_prim(i-1,j,k,prim_index) + cell_prim(i,j,k,prim_index)) ) * dxInv * mfsq + - ( yflux_hi * (cell_prim(i,j+1,k,prim_index) + cell_prim(i,j,k,prim_index)) - - yflux_lo * (cell_prim(i,j-1,k,prim_index) + cell_prim(i,j,k,prim_index)) ) * dyInv * mfsq + - ( zflux_hi * (cell_prim(i,j,k+1,prim_index) + cell_prim(i,j,k,prim_index)) - - zflux_lo * (cell_prim(i,j,k-1,prim_index) + cell_prim(i,j,k,prim_index)) ) * dzInv); - }); - // Template higher order methods - } else { - switch(horiz_adv_type) { - case AdvType::Centered_2nd: - AdvectionSrcForRhoThetaVert_N(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, Omega, - avg_xmom, avg_ymom, avg_zmom, - cellSizeInv, mf_m, mf_u, mf_v, - vert_adv_type); - break; - case AdvType::Upwind_3rd: - AdvectionSrcForRhoThetaVert_N(bx, vbx_hi, fac, advectionSrc, - 
cell_prim, rho_u, rho_v, Omega, - avg_xmom, avg_ymom, avg_zmom, - cellSizeInv, mf_m, mf_u, mf_v, - vert_adv_type); - break; - case AdvType::Centered_4th: - AdvectionSrcForRhoThetaVert_N(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, Omega, - avg_xmom, avg_ymom, avg_zmom, - cellSizeInv, mf_m, mf_u, mf_v, - vert_adv_type); - break; - case AdvType::Upwind_5th: - AdvectionSrcForRhoThetaVert_N(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, Omega, - avg_xmom, avg_ymom, avg_zmom, - cellSizeInv, mf_m, mf_u, mf_v, - vert_adv_type); - break; - case AdvType::Centered_6th: - AdvectionSrcForRhoThetaVert_N(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, Omega, - avg_xmom, avg_ymom, avg_zmom, - cellSizeInv, mf_m, mf_u, mf_v, - vert_adv_type); - break; - default: - AMREX_ASSERT_WITH_MESSAGE(false, "Unknown advection scheme!"); - } - } + ParallelFor(xbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept + { + (flx_arr[0])(i,j,k,0) = rho_u(i,j,k) / mf_u(i,j,0); + avg_xmom(i,j,k) = (flx_arr[0])(i,j,k,0); + }); + ParallelFor(ybx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept + { + (flx_arr[1])(i,j,k,0) = rho_v(i,j,k) / mf_v(i,j,0); + avg_ymom(i,j,k) = (flx_arr[1])(i,j,k,0); + }); + ParallelFor(zbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept + { + Real mfsq = mf_m(i,j,0) * mf_m(i,j,0); + (flx_arr[2])(i,j,k,0) = Omega(i,j,k) / mfsq; + avg_zmom(i,j,k ) = (flx_arr[2])(i,j,k,0); + }); - } else { - // Inline with 2nd order for efficiency - if (horiz_adv_type == AdvType::Centered_2nd && vert_adv_type == AdvType::Centered_2nd) + if (use_terrain) { + ParallelFor(xbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept { - amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept - { - Real invdetJ = 1./ detJ(i,j,k); - - Real xflux_lo = rho_u(i ,j,k) / mf_u(i ,j ,0); - Real xflux_hi = rho_u(i+1,j,k) / mf_u(i+1,j ,0); - Real yflux_lo = rho_v(i,j ,k) / mf_v(i ,j ,0); - Real yflux_hi = rho_v(i,j+1,k) / mf_v(i ,j+1,0); - Real zflux_lo = Omega(i,j,k ); - Real zflux_hi = Omega(i,j,k+1); - - Real met_h_zeta_xlo = Compute_h_zeta_AtIface(i ,j ,k,cellSizeInv,z_nd); - xflux_lo *= met_h_zeta_xlo; - Real met_h_zeta_xhi = Compute_h_zeta_AtIface(i+1,j ,k,cellSizeInv,z_nd); - xflux_hi *= met_h_zeta_xhi; - - Real met_h_zeta_ylo = Compute_h_zeta_AtJface(i ,j ,k,cellSizeInv,z_nd); - yflux_lo *= met_h_zeta_ylo; - Real met_h_zeta_yhi = Compute_h_zeta_AtJface(i ,j+1,k,cellSizeInv,z_nd); - yflux_hi *= met_h_zeta_yhi; - - avg_xmom(i ,j,k) += fac*xflux_lo; - if (i == vbx_hi.x) - avg_xmom(i+1,j,k) += fac*xflux_hi; - avg_ymom(i,j ,k) += fac*yflux_lo; - if (j == vbx_hi.y) - avg_ymom(i,j+1,k) += fac*yflux_hi; - avg_zmom(i,j,k ) += fac*zflux_lo; - if (k == vbx_hi.z) - avg_zmom(i,j,k+1) += fac*zflux_hi; + Real h_zeta = Compute_h_zeta_AtIface(i,j,k,cellSizeInv,z_nd); + (flx_arr[0])(i,j,k,0) *= h_zeta; + avg_xmom(i,j,k) *= h_zeta; + }); + ParallelFor(ybx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept + { + Real h_zeta = Compute_h_zeta_AtJface(i,j,k,cellSizeInv,z_nd); + (flx_arr[1])(i,j,k,0) *= h_zeta; + avg_ymom(i,j,k) *= h_zeta; + }); + } - Real mfsq = mf_m(i,j,0) * mf_m(i,j,0); + ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept + { + Real invdetJ = (use_terrain) ? 1. 
/ detJ(i,j,k) : 1.; - advectionSrc(i,j,k,0) = - invdetJ * ( - ( xflux_hi - xflux_lo ) * dxInv * mfsq + - ( yflux_hi - yflux_lo ) * dyInv * mfsq + - ( zflux_hi - zflux_lo ) * dzInv ); + Real mfsq = mf_m(i,j,0) * mf_m(i,j,0); - const int prim_index = 0; - advectionSrc(i,j,k,1) = - invdetJ * 0.5 * ( - ( xflux_hi * (cell_prim(i,j,k,prim_index) + cell_prim(i+1,j,k,prim_index)) - - xflux_lo * (cell_prim(i,j,k,prim_index) + cell_prim(i-1,j,k,prim_index)) ) * dxInv * mfsq + - ( yflux_hi * (cell_prim(i,j,k,prim_index) + cell_prim(i,j+1,k,prim_index)) - - yflux_lo * (cell_prim(i,j,k,prim_index) + cell_prim(i,j-1,k,prim_index)) ) * dyInv * mfsq + - ( zflux_hi * (cell_prim(i,j,k,prim_index) + cell_prim(i,j,k+1,prim_index)) - - zflux_lo * (cell_prim(i,j,k,prim_index) + cell_prim(i,j,k-1,prim_index)) ) * dzInv); - }); - // Template higher order methods (horizontal first) - } else { - switch(horiz_adv_type) { - case AdvType::Centered_2nd: - AdvectionSrcForRhoThetaVert_T(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, Omega, - avg_xmom, avg_ymom, avg_zmom, - z_nd, detJ, cellSizeInv, mf_m, - mf_u, mf_v, vert_adv_type); - break; - case AdvType::Upwind_3rd: - AdvectionSrcForRhoThetaVert_T(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, Omega, - avg_xmom, avg_ymom, avg_zmom, - z_nd, detJ, cellSizeInv, mf_m, - mf_u, mf_v, vert_adv_type); - break; - case AdvType::Centered_4th: - AdvectionSrcForRhoThetaVert_T(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, Omega, - avg_xmom, avg_ymom, avg_zmom, - z_nd, detJ, cellSizeInv, mf_m, - mf_u, mf_v, vert_adv_type); - break; - case AdvType::Upwind_5th: - AdvectionSrcForRhoThetaVert_T(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, Omega, - avg_xmom, avg_ymom, avg_zmom, - z_nd, detJ, cellSizeInv, mf_m, - mf_u, mf_v, vert_adv_type); - break; - case AdvType::Centered_6th: - AdvectionSrcForRhoThetaVert_T(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, Omega, - avg_xmom, avg_ymom, avg_zmom, - z_nd, detJ, cellSizeInv, mf_m, - mf_u, mf_v, vert_adv_type); - break; - default: - AMREX_ASSERT_WITH_MESSAGE(false, "Unknown advection scheme!"); - } - } - } + advectionSrc(i,j,k,0) = - invdetJ * mfsq * ( + ( (flx_arr[0])(i+1,j,k,0) - (flx_arr[0])(i ,j,k,0) ) * dxInv + + ( (flx_arr[1])(i,j+1,k,0) - (flx_arr[1])(i,j ,k,0) ) * dyInv + + ( (flx_arr[2])(i,j,k+1,0) - (flx_arr[2])(i,j,k ,0) ) * dzInv ); + }); } /** @@ -262,35 +127,23 @@ AdvectionSrcForRhoAndTheta (const Box& bx, const Box& valid_bx, */ void -AdvectionSrcForScalars (int level, int finest_level, - const MFIter& mfi, - const Box& bx, const int icomp, const int ncomp, - const Array4& avg_xmom, const Array4& avg_ymom, +AdvectionSrcForScalars (const Box& bx, const int icomp, const int ncomp, + const Array4& avg_xmom, + const Array4& avg_ymom, const Array4& avg_zmom, const Array4& cell_prim, const Array4& advectionSrc, const Array4& detJ, - const Real dt, const GpuArray& cellSizeInv, const Array4& mf_m, const AdvType horiz_adv_type, const AdvType vert_adv_type, const bool use_terrain, - const bool two_way_coupling) + const GpuArray, AMREX_SPACEDIM>& flx_arr) { BL_PROFILE_VAR("AdvectionSrcForScalars", AdvectionSrcForScalars); auto dxInv = cellSizeInv[0], dyInv = cellSizeInv[1], dzInv = cellSizeInv[2]; - amrex::GpuArray flux; - for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) { - const Box& efbx = amrex::surroundingNodes(bx, dir); - flux[dir].resize(efbx, ncomp, amrex::The_Async_Arena()); - flux[dir].setVal(0.); - } - - const GpuArray, AMREX_SPACEDIM> - 
flx_arr{{AMREX_D_DECL(flux[0].array(), flux[1].array(), flux[2].array())}}; - const Box xbx = surroundingNodes(bx,0); const Box ybx = surroundingNodes(bx,1); const Box zbx = surroundingNodes(bx,2); @@ -300,66 +153,69 @@ AdvectionSrcForScalars (int level, int finest_level, // because that was done when they were constructed in AdvectionSrcForRhoAndTheta if (horiz_adv_type == AdvType::Centered_2nd && vert_adv_type == AdvType::Centered_2nd) { - amrex::ParallelFor(xbx, ncomp,[=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + ParallelFor(xbx, ncomp,[=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { const int cons_index = icomp + n; const int prim_index = cons_index - 1; - (flx_arr[0])(i,j,k,n) = 0.5 * avg_xmom(i,j,k) * (cell_prim(i,j,k,prim_index) + cell_prim(i-1,j,k,prim_index)); + const Real prim_on_face = 0.5 * (cell_prim(i,j,k,prim_index) + cell_prim(i-1,j,k,prim_index)); + (flx_arr[0])(i,j,k,cons_index) = avg_xmom(i,j,k) * prim_on_face; }); - amrex::ParallelFor(ybx, ncomp,[=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + ParallelFor(ybx, ncomp,[=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { const int cons_index = icomp + n; const int prim_index = cons_index - 1; - (flx_arr[1])(i,j,k,n) = 0.5 * avg_ymom(i,j,k) * (cell_prim(i,j,k,prim_index) + cell_prim(i,j-1,k,prim_index)); + const Real prim_on_face = 0.5 * (cell_prim(i,j,k,prim_index) + cell_prim(i,j-1,k,prim_index)); + (flx_arr[1])(i,j,k,cons_index) = avg_ymom(i,j,k) * prim_on_face; }); - amrex::ParallelFor(zbx, ncomp,[=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + ParallelFor(zbx, ncomp,[=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept { const int cons_index = icomp + n; const int prim_index = cons_index - 1; - (flx_arr[2])(i,j,k,n) = 0.5 * avg_zmom(i,j,k) * (cell_prim(i,j,k,prim_index) + cell_prim(i,j,k-1,prim_index)); + const Real prim_on_face = 0.5 * (cell_prim(i,j,k,prim_index) + cell_prim(i,j,k-1,prim_index)); + (flx_arr[2])(i,j,k,cons_index) = avg_zmom(i,j,k) * prim_on_face; }); // Template higher order methods (horizontal first) } else { switch(horiz_adv_type) { case AdvType::Centered_2nd: - AdvectionSrcForScalarsVert_N(bx, ncomp, icomp, flx_arr, cell_prim, + AdvectionSrcForScalarsVert(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom, vert_adv_type); break; case AdvType::Upwind_3rd: - AdvectionSrcForScalarsVert_N(bx, ncomp, icomp, flx_arr, cell_prim, + AdvectionSrcForScalarsVert(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom, vert_adv_type); break; case AdvType::Centered_4th: - AdvectionSrcForScalarsVert_N(bx, ncomp, icomp, flx_arr, cell_prim, + AdvectionSrcForScalarsVert(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom, vert_adv_type); break; case AdvType::Upwind_5th: - AdvectionSrcForScalarsVert_N(bx, ncomp, icomp, flx_arr, cell_prim, + AdvectionSrcForScalarsVert(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom, vert_adv_type); break; case AdvType::Centered_6th: - AdvectionSrcForScalarsVert_N(bx, ncomp, icomp, flx_arr, cell_prim, + AdvectionSrcForScalarsVert(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom, vert_adv_type); break; case AdvType::Weno_3: - AdvectionSrcForScalarsWrapper_N(bx, ncomp, icomp, flx_arr, cell_prim, + AdvectionSrcForScalarsWrapper(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom); break; case AdvType::Weno_5: - AdvectionSrcForScalarsWrapper_N(bx, ncomp, icomp, flx_arr, cell_prim, + AdvectionSrcForScalarsWrapper(bx, ncomp, icomp, 
flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom); break; case AdvType::Weno_3Z: - AdvectionSrcForScalarsWrapper_N(bx, ncomp, icomp, flx_arr, cell_prim, + AdvectionSrcForScalarsWrapper(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom); break; case AdvType::Weno_3MZQ: - AdvectionSrcForScalarsWrapper_N(bx, ncomp, icomp, flx_arr, cell_prim, + AdvectionSrcForScalarsWrapper(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom); break; case AdvType::Weno_5Z: - AdvectionSrcForScalarsWrapper_N(bx, ncomp, icomp, flx_arr, cell_prim, + AdvectionSrcForScalarsWrapper(bx, ncomp, icomp, flx_arr, cell_prim, avg_xmom, avg_ymom, avg_zmom); break; default: @@ -374,29 +230,9 @@ AdvectionSrcForScalars (int level, int finest_level, Real mfsq = mf_m(i,j,0) * mf_m(i,j,0); const int cons_index = icomp + n; - advectionSrc(i,j,k,cons_index) = - invdetJ * ( - ( (flx_arr[0])(i+1,j,k,n) - (flx_arr[0])(i ,j,k,n) ) * dxInv * mfsq + - ( (flx_arr[1])(i,j+1,k,n) - (flx_arr[1])(i,j ,k,n) ) * dyInv * mfsq + - ( (flx_arr[2])(i,j,k+1,n) - (flx_arr[2])(i,j,k ,n) ) * dzInv ); + advectionSrc(i,j,k,cons_index) = - invdetJ * mfsq * ( + ( (flx_arr[0])(i+1,j,k,cons_index) - (flx_arr[0])(i ,j,k,cons_index) ) * dxInv + + ( (flx_arr[1])(i,j+1,k,cons_index) - (flx_arr[1])(i,j ,k,cons_index) ) * dyInv + + ( (flx_arr[2])(i,j,k+1,cons_index) - (flx_arr[2])(i,j,k ,cons_index) ) * dzInv ); }); - -#if 0 - Real fac_for_reflux = 1.0; - - std::array dxD = {{dx, dy, dz}}; - const Real* dxDp = &(dxD[0]); - - if (two_way_coupling) { - if (level < finest_level) { - getAdvFluxReg(level + 1).CrseAdd(mfi, - {{AMREX_D_DECL(&(flux[0]), &(flux[1]), &(flux[2]))}}, - dxDp, fac_for_reflux*dt, amrex::RunOn::Device); - } - if (level > 0) { - getAdvFluxReg(level).FineAdd(mfi, - {{AMREX_D_DECL(&(flux[0]), &(flux[1]), &(flux[2]))}}, - dxDp, fac_for_reflux*dt, amrex::RunOn::Device); - } - } // two-way coupling -#endif } diff --git a/Source/Advection/AdvectionSrcForState_N.H b/Source/Advection/AdvectionSrcForState_N.H deleted file mode 100644 index 3758df06f..000000000 --- a/Source/Advection/AdvectionSrcForState_N.H +++ /dev/null @@ -1,134 +0,0 @@ -#include -#include - -/** - * Wrapper function for computing the advective tendency w/ spatial order > 2. 
- */ -template -void -AdvectionSrcForRhoThetaWrapper_N(const amrex::Box& bx, - const amrex::Dim3& vbx_hi, - const amrex::Real fac, - const amrex::Array4< amrex::Real>& advectionSrc, - const amrex::Array4& cell_prim, - const amrex::Array4& rho_u, - const amrex::Array4& rho_v, - const amrex::Array4& rho_w, - const amrex::Array4< amrex::Real>& avg_xmom, - const amrex::Array4< amrex::Real>& avg_ymom, - const amrex::Array4< amrex::Real>& avg_zmom, - const amrex::GpuArray& cellSizeInv, - const amrex::Array4& mf_m, - const amrex::Array4& mf_u, - const amrex::Array4& mf_v) -{ - // Instantiate struct - InterpType_H interp_prim_h(cell_prim); - InterpType_V interp_prim_v(cell_prim); - - auto dxInv = cellSizeInv[0], dyInv = cellSizeInv[1], dzInv = cellSizeInv[2]; - amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept - { - amrex::Real xflux_lo = rho_u(i ,j,k) / mf_u(i ,j ,0); - amrex::Real xflux_hi = rho_u(i+1,j,k) / mf_u(i+1,j ,0); - amrex::Real yflux_lo = rho_v(i,j ,k) / mf_v(i ,j ,0); - amrex::Real yflux_hi = rho_v(i,j+1,k) / mf_v(i ,j+1,0); - amrex::Real zflux_lo = rho_w(i,j,k ); - amrex::Real zflux_hi = rho_w(i,j,k+1); - - avg_xmom(i ,j,k) += fac*xflux_lo; - if (i == vbx_hi.x) avg_xmom(i+1,j,k) += fac*xflux_hi; - - avg_ymom(i,j ,k) += fac*yflux_lo; - if (j == vbx_hi.y) avg_ymom(i,j+1,k) += fac*yflux_hi; - - avg_zmom(i,j,k ) += fac*zflux_lo; - if (k == vbx_hi.z) avg_zmom(i,j,k+1) += fac*zflux_hi; - - amrex::Real mf = mf_m(i,j,0); - amrex::Real mfsq = mf*mf; - - advectionSrc(i,j,k,0) = -( - ( xflux_hi - xflux_lo ) * dxInv * mfsq + - ( yflux_hi - yflux_lo ) * dyInv * mfsq + - ( zflux_hi - zflux_lo ) * dzInv); - - const int prim_index = 0; - amrex::Real interpx_hi(0.), interpx_lo(0.); - amrex::Real interpy_hi(0.), interpy_lo(0.); - amrex::Real interpz_hi(0.), interpz_lo(0.); - - interp_prim_h.InterpolateInX(i+1,j,k,prim_index,interpx_hi,rho_u(i+1,j ,k )); - interp_prim_h.InterpolateInX(i ,j,k,prim_index,interpx_lo,rho_u(i ,j ,k )); - - interp_prim_h.InterpolateInY(i,j+1,k,prim_index,interpy_hi,rho_v(i ,j+1,k )); - interp_prim_h.InterpolateInY(i,j ,k,prim_index,interpy_lo,rho_v(i ,j ,k )); - - interp_prim_v.InterpolateInZ(i,j,k+1,prim_index,interpz_hi,rho_w(i ,j ,k+1)); - interp_prim_v.InterpolateInZ(i,j,k ,prim_index,interpz_lo,rho_w(i ,j ,k )); - - advectionSrc(i,j,k,1) = -( - ( xflux_hi * interpx_hi - xflux_lo * interpx_lo ) * dxInv * mfsq + - ( yflux_hi * interpy_hi - yflux_lo * interpy_lo ) * dyInv * mfsq + - ( zflux_hi * interpz_hi - zflux_lo * interpz_lo ) * dzInv); - }); -} - -/** - * Wrapper function for templating the vertical advective tendency w/ spatial order > 2. 
- */ -template -void -AdvectionSrcForRhoThetaVert_N(const amrex::Box& bx, - const amrex::Dim3& vbx_hi, - const amrex::Real fac, - const amrex::Array4< amrex::Real>& advectionSrc, - const amrex::Array4& cell_prim, - const amrex::Array4& rho_u, - const amrex::Array4& rho_v, - const amrex::Array4& rho_w, - const amrex::Array4< amrex::Real>& avg_xmom, - const amrex::Array4< amrex::Real>& avg_ymom, - const amrex::Array4< amrex::Real>& avg_zmom, - const amrex::GpuArray& cellSizeInv, - const amrex::Array4& mf_m, - const amrex::Array4& mf_u, - const amrex::Array4& mf_v, - const AdvType vert_adv_type) -{ - switch(vert_adv_type) { - case AdvType::Centered_2nd: - AdvectionSrcForRhoThetaWrapper_N(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, rho_w, - avg_xmom, avg_ymom, avg_zmom, - cellSizeInv, mf_m, mf_u, mf_v); - break; - case AdvType::Upwind_3rd: - AdvectionSrcForRhoThetaWrapper_N(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, rho_w, - avg_xmom, avg_ymom, avg_zmom, - cellSizeInv, mf_m, mf_u, mf_v); - break; - case AdvType::Centered_4th: - AdvectionSrcForRhoThetaWrapper_N(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, rho_w, - avg_xmom, avg_ymom, avg_zmom, - cellSizeInv, mf_m, mf_u, mf_v); - break; - case AdvType::Upwind_5th: - AdvectionSrcForRhoThetaWrapper_N(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, rho_w, - avg_xmom, avg_ymom, avg_zmom, - cellSizeInv, mf_m, mf_u, mf_v); - break; - case AdvType::Centered_6th: - AdvectionSrcForRhoThetaWrapper_N(bx, vbx_hi, fac, advectionSrc, - cell_prim, rho_u, rho_v, rho_w, - avg_xmom, avg_ymom, avg_zmom, - cellSizeInv, mf_m, mf_u, mf_v); - break; - default: - AMREX_ASSERT_WITH_MESSAGE(false, "Unknown advection scheme!"); - } -} - diff --git a/Source/Advection/AdvectionSrcForState_T.H b/Source/Advection/AdvectionSrcForState_T.H deleted file mode 100644 index 79ee4e0a9..000000000 --- a/Source/Advection/AdvectionSrcForState_T.H +++ /dev/null @@ -1,149 +0,0 @@ -#include -#include -#include - -/** - * Wrapper function for computing the advective tendency w/ spatial order > 2. 
- */ -template -void -AdvectionSrcForRhoThetaWrapper_T(const amrex::Box& bx, - const amrex::Dim3& vbx_hi, - const amrex::Real fac, - const amrex::Array4< amrex::Real>& advectionSrc, - const amrex::Array4& cell_prim, - const amrex::Array4& rho_u, - const amrex::Array4& rho_v, - const amrex::Array4& Omega, - const amrex::Array4< amrex::Real>& avg_xmom, - const amrex::Array4< amrex::Real>& avg_ymom, - const amrex::Array4< amrex::Real>& avg_zmom, - const amrex::Array4& z_nd, - const amrex::Array4& detJ, - const amrex::GpuArray& cellSizeInv, - const amrex::Array4& mf_m, - const amrex::Array4& mf_u, - const amrex::Array4& mf_v) -{ - // Instantiate struct - InterpType_H interp_prim_h(cell_prim); - InterpType_V interp_prim_v(cell_prim); - - auto dxInv = cellSizeInv[0], dyInv = cellSizeInv[1], dzInv = cellSizeInv[2]; - amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept - { - amrex::Real invdetJ = 1./ detJ(i,j,k); - - amrex::Real xflux_lo = rho_u(i ,j,k) / mf_u(i ,j ,0); - amrex::Real xflux_hi = rho_u(i+1,j,k) / mf_u(i+1,j ,0); - amrex::Real yflux_lo = rho_v(i,j ,k) / mf_v(i ,j ,0); - amrex::Real yflux_hi = rho_v(i,j+1,k) / mf_v(i ,j+1,0); - amrex::Real zflux_lo = Omega(i,j,k ); - amrex::Real zflux_hi = Omega(i,j,k+1); - - amrex::Real met_h_zeta_xlo = Compute_h_zeta_AtIface(i ,j ,k,cellSizeInv,z_nd); - xflux_lo *= met_h_zeta_xlo; - amrex::Real met_h_zeta_xhi = Compute_h_zeta_AtIface(i+1,j ,k,cellSizeInv,z_nd); - xflux_hi *= met_h_zeta_xhi; - - amrex::Real met_h_zeta_ylo = Compute_h_zeta_AtJface(i ,j ,k,cellSizeInv,z_nd); - yflux_lo *= met_h_zeta_ylo; - amrex::Real met_h_zeta_yhi = Compute_h_zeta_AtJface(i ,j+1,k,cellSizeInv,z_nd); - yflux_hi *= met_h_zeta_yhi; - - avg_xmom(i ,j,k) += fac*xflux_lo; - if (i == vbx_hi.x) - avg_xmom(i+1,j,k) += fac*xflux_hi; - avg_ymom(i,j ,k) += fac*yflux_lo; - if (j == vbx_hi.y) - avg_ymom(i,j+1,k) += fac*yflux_hi; - avg_zmom(i,j,k ) += fac*zflux_lo; - if (k == vbx_hi.z) - avg_zmom(i,j,k+1) += fac*zflux_hi; - - amrex::Real mfsq = mf_m(i,j,0) * mf_m(i,j,0); - - advectionSrc(i,j,k,0) = - invdetJ * ( - ( xflux_hi - xflux_lo ) * dxInv * mfsq + - ( yflux_hi - yflux_lo ) * dyInv * mfsq + - ( zflux_hi - zflux_lo ) * dzInv ); - - const int prim_index = 0; - amrex::Real interpx_hi(0.), interpx_lo(0.); - amrex::Real interpy_hi(0.), interpy_lo(0.); - amrex::Real interpz_hi(0.), interpz_lo(0.); - - interp_prim_h.InterpolateInX(i+1,j,k,prim_index,interpx_hi,rho_u(i+1,j ,k )); - interp_prim_h.InterpolateInX(i ,j,k,prim_index,interpx_lo,rho_u(i ,j ,k )); - - interp_prim_h.InterpolateInY(i,j+1,k,prim_index,interpy_hi,rho_v(i ,j+1,k )); - interp_prim_h.InterpolateInY(i,j ,k,prim_index,interpy_lo,rho_v(i ,j ,k )); - - interp_prim_v.InterpolateInZ(i,j,k+1,prim_index,interpz_hi,Omega(i ,j ,k+1)); - interp_prim_v.InterpolateInZ(i,j,k ,prim_index,interpz_lo,Omega(i ,j ,k )); - - advectionSrc(i,j,k,1) = - invdetJ * ( - ( xflux_hi * interpx_hi - xflux_lo * interpx_lo ) * dxInv * mfsq + - ( yflux_hi * interpy_hi - yflux_lo * interpy_lo ) * dyInv * mfsq + - ( zflux_hi * interpz_hi - zflux_lo * interpz_lo ) * dzInv); - }); -} - -/** - * Wrapper function for computing the advective tendency w/ spatial order > 2. 
- */
-template
-void
-AdvectionSrcForRhoThetaVert_T(const amrex::Box& bx,
-                              const amrex::Dim3& vbx_hi,
-                              const amrex::Real fac,
-                              const amrex::Array4< amrex::Real>& advectionSrc,
-                              const amrex::Array4& cell_prim,
-                              const amrex::Array4& rho_u,
-                              const amrex::Array4& rho_v,
-                              const amrex::Array4& Omega,
-                              const amrex::Array4< amrex::Real>& avg_xmom,
-                              const amrex::Array4< amrex::Real>& avg_ymom,
-                              const amrex::Array4< amrex::Real>& avg_zmom,
-                              const amrex::Array4& z_nd,
-                              const amrex::Array4& detJ,
-                              const amrex::GpuArray& cellSizeInv,
-                              const amrex::Array4& mf_m,
-                              const amrex::Array4& mf_u,
-                              const amrex::Array4& mf_v,
-                              const AdvType vert_adv_type)
-{
-    if (vert_adv_type == AdvType::Centered_2nd) {
-        AdvectionSrcForRhoThetaWrapper_T(bx, vbx_hi, fac, advectionSrc,
-                                         cell_prim, rho_u, rho_v, Omega,
-                                         avg_xmom, avg_ymom, avg_zmom,
-                                         z_nd, detJ, cellSizeInv, mf_m,
-                                         mf_u, mf_v);
-    } else if (vert_adv_type == AdvType::Upwind_3rd) {
-        AdvectionSrcForRhoThetaWrapper_T(bx, vbx_hi, fac, advectionSrc,
-                                         cell_prim, rho_u, rho_v, Omega,
-                                         avg_xmom, avg_ymom, avg_zmom,
-                                         z_nd, detJ, cellSizeInv, mf_m,
-                                         mf_u, mf_v);
-    } else if (vert_adv_type == AdvType::Centered_4th) {
-        AdvectionSrcForRhoThetaWrapper_T(bx, vbx_hi, fac, advectionSrc,
-                                         cell_prim, rho_u, rho_v, Omega,
-                                         avg_xmom, avg_ymom, avg_zmom,
-                                         z_nd, detJ, cellSizeInv, mf_m,
-                                         mf_u, mf_v);
-    } else if (vert_adv_type == AdvType::Upwind_5th) {
-        AdvectionSrcForRhoThetaWrapper_T(bx, vbx_hi, fac, advectionSrc,
-                                         cell_prim, rho_u, rho_v, Omega,
-                                         avg_xmom, avg_ymom, avg_zmom,
-                                         z_nd, detJ, cellSizeInv, mf_m,
-                                         mf_u, mf_v);
-    } else if (vert_adv_type == AdvType::Centered_6th) {
-        AdvectionSrcForRhoThetaWrapper_T(bx, vbx_hi, fac, advectionSrc,
-                                         cell_prim, rho_u, rho_v, Omega,
-                                         avg_xmom, avg_ymom, avg_zmom,
-                                         z_nd, detJ, cellSizeInv, mf_m,
-                                         mf_u, mf_v);
-    } else {
-        AMREX_ASSERT_WITH_MESSAGE(false, "Unknown advection scheme!");
-    }
-}
diff --git a/Source/Advection/Make.package b/Source/Advection/Make.package
index d2bcefa7f..0a7ce393f 100644
--- a/Source/Advection/Make.package
+++ b/Source/Advection/Make.package
@@ -4,7 +4,5 @@ CEXE_sources += AdvectionSrcForState.cpp
 CEXE_headers += Advection.H
 CEXE_headers += AdvectionSrcForMom_N.H
 CEXE_headers += AdvectionSrcForMom_T.H
-CEXE_headers += AdvectionSrcForState_N.H
-CEXE_headers += AdvectionSrcForState_T.H
 CEXE_headers += AdvectionSrcForScalars.H
 CEXE_headers += Interpolation.H
diff --git a/Source/DataStructs/DataStruct.H b/Source/DataStructs/DataStruct.H
index abf87a276..4a70f4d2c 100644
--- a/Source/DataStructs/DataStruct.H
+++ b/Source/DataStructs/DataStruct.H
@@ -21,7 +21,7 @@ enum struct ABLDriverType {
 };
 
 enum struct CouplingType {
-    OneWay, TwoWay
+    OneWay, TwoWay, Mixed
 };
 
 enum struct TerrainType {
@@ -194,8 +194,12 @@ struct SolverChoice {
         pp.query("coupling_type",coupling_type_string);
         if (coupling_type_string == "TwoWay") {
             coupling_type = CouplingType::TwoWay;
-        } else {
+        } else if (coupling_type_string == "OneWay") {
             coupling_type = CouplingType::OneWay;
+        } else if (coupling_type_string == "Mixed") {
+            coupling_type = CouplingType::Mixed;
+        } else {
+            amrex::Abort("Don't know this coupling_type");
+        }
         }
@@ -211,8 +215,10 @@ struct SolverChoice {
         if (coupling_type == CouplingType::TwoWay) {
             amrex::Print() << "Using two-way coupling " << std::endl;
-        } else {
+        } else if (coupling_type == CouplingType::OneWay) {
             amrex::Print() << "Using one-way coupling " << std::endl;
+        } else if (coupling_type == CouplingType::Mixed) {
+            amrex::Print() << "Using mixed coupling " << std::endl;
         }
 
         if (terrain_type == TerrainType::Static) {
diff --git a/Source/Diffusion/Diffusion.H b/Source/Diffusion/Diffusion.H
index 7356a5499..57cb501db 100644
--- a/Source/Diffusion/Diffusion.H
+++ b/Source/Diffusion/Diffusion.H
@@ -4,7 +4,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/Source/ERF.H b/Source/ERF.H
index 0f8adfc1f..455eb8a0a 100644
--- a/Source/ERF.H
+++ b/Source/ERF.H
@@ -173,7 +173,9 @@ public:
 
     // Perform the volume-weighted sum
     amrex::Real
-    volWgtSumMF (int lev, const amrex::MultiFab& mf, int comp, bool local, bool finemask);
+    volWgtSumMF (int lev, const amrex::MultiFab& mf, int comp,
+                 const amrex::MultiFab& mapfac,
+                 bool local, bool finemask);
 
     // Decide if it is time to take an action
     static bool is_it_time_for_action (int nstep, amrex::Real time, amrex::Real dt,
@@ -328,6 +330,9 @@ public:
     void init_from_metgrid (int lev);
 #endif // ERF_USE_NETCDF
 
+    // more flexible version of AverageDown() that lets you average down across multiple levels
+    void AverageDownTo (int crse_lev, int scomp, int ncomp); // NOLINT
+
 private:
 
     ///////////////////////////
@@ -338,7 +343,7 @@ private:
     void ReadParameters ();
 
     // set covered coarse cells to be the average of overlying fine cells
-    void AverageDown ();
+    void AverageDown (int scomp, int ncomp);
 
     void update_arrays (int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm);
@@ -360,9 +365,6 @@ private:
     void initialize_integrator (int lev, amrex::MultiFab& cons_mf, amrex::MultiFab& vel_mf);
 
-    // more flexible version of AverageDown() that lets you average down across multiple levels
-    void AverageDownTo (int crse_lev); // NOLINT
-
    // Compute a vector of new MultiFabs by copying from valid region and filling ghost cells
    // - works for single level and 2-level cases (fill fine grid ghost by interpolating from coarse)
    void FillPatch (int lev, amrex::Real time, const amrex::Vector& mf, bool fillset=true);
@@ -842,9 +844,9 @@ private:
     }
 
     AMREX_FORCE_INLINE
-    amrex::YAFluxRegister& getAdvFluxReg (int lev)
+    amrex::YAFluxRegister* getAdvFluxReg (int lev)
     {
-        return *advflux_reg[lev];
+        return advflux_reg[lev];
     }
 
     AMREX_FORCE_INLINE
diff --git a/Source/ERF.cpp b/Source/ERF.cpp
index 9dff3a68b..95bc5b29d 100644
--- a/Source/ERF.cpp
+++ b/Source/ERF.cpp
@@ -316,17 +316,63 @@ ERF::post_timestep (int nstep, Real time, Real dt_lev0)
 {
     BL_PROFILE("ERF::post_timestep()");
 
-    if (solverChoice.coupling_type == CouplingType::TwoWay)
+    if (solverChoice.coupling_type == CouplingType::TwoWay ||
+        solverChoice.coupling_type == CouplingType::Mixed)
     {
+        // If we are doing mixed rather than two-way coupling, we only reflux the "slow" variables (not rho and rhotheta)
+        int src_comp_reflux = (solverChoice.coupling_type == CouplingType::TwoWay) ?
0 : 2; + int num_comp_reflux = NVAR - src_comp_reflux; + bool use_terrain = solverChoice.use_terrain; for (int lev = finest_level-1; lev >= 0; lev--) { + // The quantity that is conserved is not (rho S), but rather (rho S / m^2) where + // m is the map scale factor at cell centers + // Here we pre-divide (rho S) by m^2 before refluxing + for (MFIter mfi(vars_new[lev][Vars::cons], TilingIfNotGPU()); mfi.isValid(); ++mfi) { + const Box& bx = mfi.tilebox(); + const Array4< Real> cons_arr = vars_new[lev][Vars::cons].array(mfi); + const Array4 mapfac_arr = mapfac_m[lev]->const_array(mfi); + if (use_terrain) { + const Array4 detJ_arr = detJ_cc[lev]->const_array(mfi); + ParallelFor(bx, num_comp_reflux, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + { + cons_arr(i,j,k,src_comp_reflux+n) *= detJ_arr(i,j,k) / (mapfac_arr(i,j,0)*mapfac_arr(i,j,0)); + }); + } else { + ParallelFor(bx, num_comp_reflux, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + { + cons_arr(i,j,k,src_comp_reflux+n) /= (mapfac_arr(i,j,0)*mapfac_arr(i,j,0)); + }); + } + } // mfi + // This call refluxes from the lev/lev+1 interface onto lev - getAdvFluxReg(lev+1).Reflux(vars_new[lev][Vars::cons], 0, 0, NVAR); + getAdvFluxReg(lev+1)->Reflux(vars_new[lev][Vars::cons], + src_comp_reflux, src_comp_reflux, num_comp_reflux); + + // Here we multiply (rho S) by m^2 after refluxing + for (MFIter mfi(vars_new[lev][Vars::cons], TilingIfNotGPU()); mfi.isValid(); ++mfi) { + const Box& bx = mfi.tilebox(); + const Array4< Real> cons_arr = vars_new[lev][Vars::cons].array(mfi); + const Array4 mapfac_arr = mapfac_m[lev]->const_array(mfi); + if (use_terrain) { + const Array4 detJ_arr = detJ_cc[lev]->const_array(mfi); + ParallelFor(bx, num_comp_reflux, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + { + cons_arr(i,j,k,src_comp_reflux+n) *= (mapfac_arr(i,j,0)*mapfac_arr(i,j,0)) / detJ_arr(i,j,k); + }); + } else { + ParallelFor(bx, num_comp_reflux, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + { + cons_arr(i,j,k,src_comp_reflux+n) *= (mapfac_arr(i,j,0)*mapfac_arr(i,j,0)); + }); + } + } // mfi // We need to do this before anything else because refluxing changes the // values of coarse cells underneath fine grids with the assumption they'll // be over-written by averaging down - AverageDownTo(lev); + AverageDownTo(lev, src_comp_reflux, num_comp_reflux); } } @@ -486,8 +532,11 @@ ERF::InitData () } - if (solverChoice.coupling_type == CouplingType::TwoWay) { - AverageDown(); + if (solverChoice.coupling_type == CouplingType::TwoWay || + solverChoice.coupling_type == CouplingType::Mixed) { + int src_comp_reflux = (solverChoice.coupling_type == CouplingType::TwoWay) ? 
0 : 2; + int num_comp_reflux = NVAR - src_comp_reflux; + AverageDown(src_comp_reflux, num_comp_reflux); } #ifdef ERF_USE_PARTICLES @@ -526,7 +575,8 @@ ERF::InitData () } // Initialize flux registers (whether we start from scratch or restart) - if (solverChoice.coupling_type == CouplingType::TwoWay) { + if (solverChoice.coupling_type == CouplingType::TwoWay || + solverChoice.coupling_type == CouplingType::Mixed) { advflux_reg[0] = nullptr; for (int lev = 1; lev <= finest_level; lev++) { @@ -640,11 +690,12 @@ ERF::InitData () // Fill ghost cells/faces for (int lev = 0; lev <= finest_level; ++lev) { - // NOTE: we must set up the FillPatcher object before calling - // FillPatch at a fine level - if (solverChoice.coupling_type== CouplingType::OneWay && cf_width>0 && lev>0) { - Construct_ERFFillPatchers(lev); - Register_ERFFillPatchers(lev); + // + // NOTE: we must set up the FillPatcher object before calling FillPatch at a fine level + // + if (solverChoice.coupling_type != CouplingType::TwoWay && cf_width>0 && lev>0) { + Construct_ERFFillPatchers(lev); + Register_ERFFillPatchers(lev); } auto& lev_new = vars_new[lev]; @@ -1079,8 +1130,11 @@ ERF::MakeHorizontalAverages () int lev = 0; // First, average down all levels (if doing two-way coupling) - if (solverChoice.coupling_type == CouplingType::TwoWay) { - AverageDown(); + if (solverChoice.coupling_type == CouplingType::TwoWay || + solverChoice.coupling_type == CouplingType::Mixed) { + int src_comp_reflux = (solverChoice.coupling_type == CouplingType::TwoWay) ? 0 : 2; + int num_comp_reflux = NVAR - src_comp_reflux; + AverageDown(src_comp_reflux, num_comp_reflux); } MultiFab mf(grids[lev], dmap[lev], 5, 0); @@ -1209,34 +1263,91 @@ ERF::MakeDiagnosticAverage (Vector& h_havg, MultiFab& S, int n) // Set covered coarse cells to be the average of overlying fine cells for all levels void -ERF::AverageDown () +ERF::AverageDown (int scomp, int ncomp) { - AMREX_ALWAYS_ASSERT(solverChoice.coupling_type == CouplingType::TwoWay); + AMREX_ALWAYS_ASSERT(solverChoice.coupling_type == CouplingType::TwoWay || + solverChoice.coupling_type == CouplingType::Mixed); for (int lev = finest_level-1; lev >= 0; --lev) { - AverageDownTo(lev); + AverageDownTo(lev, scomp, ncomp); } } // Set covered coarse cells to be the average of overlying fine cells at level crse_lev void -ERF::AverageDownTo (int crse_lev) // NOLINT +ERF::AverageDownTo (int crse_lev, int scomp, int ncomp) // NOLINT { - AMREX_ALWAYS_ASSERT(solverChoice.coupling_type == CouplingType::TwoWay); + AMREX_ALWAYS_ASSERT(solverChoice.coupling_type == CouplingType::TwoWay || + solverChoice.coupling_type == CouplingType::Mixed); + for (int var_idx = 0; var_idx < Vars::NumTypes; ++var_idx) { const BoxArray& ba(vars_new[crse_lev][var_idx].boxArray()); if (ba[0].type() == IntVect::TheZeroVector()) - amrex::average_down(vars_new[crse_lev+1][var_idx], vars_new[crse_lev][var_idx], - 0, vars_new[crse_lev][var_idx].nComp(), refRatio(crse_lev)); - else // We assume the arrays are face-centered if not cell-centered - amrex::average_down_faces(vars_new[crse_lev+1][var_idx], vars_new[crse_lev][var_idx], - refRatio(crse_lev),geom[crse_lev]); + { + // The quantity that is conserved is not (rho S), but rather (rho S / m^2) where + // m is the map scale factor at cell centers + // Here we pre-divide (rho S) by m^2 before average down + for (int lev = crse_lev; lev <= crse_lev+1; lev++) { + for (MFIter mfi(vars_new[lev][Vars::cons], TilingIfNotGPU()); mfi.isValid(); ++mfi) { + const Box& bx = mfi.tilebox(); + const Array4< Real> 
cons_arr = vars_new[lev][Vars::cons].array(mfi); + const Array4 mapfac_arr = mapfac_m[lev]->const_array(mfi); + if (solverChoice.use_terrain) { + const Array4 detJ_arr = detJ_cc[lev]->const_array(mfi); + ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + { + cons_arr(i,j,k,scomp+n) *= detJ_arr(i,j,k) / (mapfac_arr(i,j,0)*mapfac_arr(i,j,0)); + }); + } else { + ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + { + cons_arr(i,j,k,scomp+n) /= (mapfac_arr(i,j,0)*mapfac_arr(i,j,0)); + }); + } + } // mfi + } // lev + + amrex::average_down(vars_new[crse_lev+1][var_idx], + vars_new[crse_lev ][var_idx], + scomp, ncomp, refRatio(crse_lev)); + + // Here we multiply (rho S) by m^2 after average down + for (int lev = crse_lev; lev <= crse_lev+1; lev++) { + for (MFIter mfi(vars_new[lev][Vars::cons], TilingIfNotGPU()); mfi.isValid(); ++mfi) { + const Box& bx = mfi.tilebox(); + const Array4< Real> cons_arr = vars_new[lev][Vars::cons].array(mfi); + const Array4 mapfac_arr = mapfac_m[lev]->const_array(mfi); + if (solverChoice.use_terrain) { + const Array4 detJ_arr = detJ_cc[lev]->const_array(mfi); + ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + { + cons_arr(i,j,k,scomp+n) *= (mapfac_arr(i,j,0)*mapfac_arr(i,j,0)) / detJ_arr(i,j,k); + }); + } else { + ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + { + cons_arr(i,j,k,scomp+n) *= (mapfac_arr(i,j,0)*mapfac_arr(i,j,0)); + }); + } + } // mfi + } // lev + + } else { // We assume the arrays are face-centered if not cell-centered, and + // only average down the momenta if using two-way coupling + if (solverChoice.coupling_type == CouplingType::TwoWay) { + amrex::average_down_faces(vars_new[crse_lev+1][var_idx], vars_new[crse_lev][var_idx], + refRatio(crse_lev),geom[crse_lev]); + } + } } } void ERF::Construct_ERFFillPatchers (int lev) { + AMREX_ALWAYS_ASSERT(solverChoice.coupling_type == CouplingType::OneWay || + solverChoice.coupling_type == CouplingType::Mixed); + auto& fine_new = vars_new[lev]; auto& crse_new = vars_new[lev-1]; auto& ba_fine = fine_new[Vars::cons].boxArray(); @@ -1244,10 +1355,12 @@ ERF::Construct_ERFFillPatchers (int lev) auto& dm_fine = fine_new[Vars::cons].DistributionMap(); auto& dm_crse = crse_new[Vars::cons].DistributionMap(); - // NOTE: crse-fine set/relaxation only done on Rho/RhoTheta + // NOTE: if "Mixed", then crse-fine set/relaxation only done on Rho/RhoTheta + int ncomp = (solverChoice.coupling_type == CouplingType::OneWay) ? 
NVAR : 2; + FPr_c.emplace_back(ba_fine, dm_fine, geom[lev] , ba_crse, dm_crse, geom[lev-1], - -cf_width, -cf_set_width, 2, &cell_cons_interp); + -cf_width, -cf_set_width, ncomp, &cell_cons_interp); FPr_u.emplace_back(convert(ba_fine, IntVect(1,0,0)), dm_fine, geom[lev] , convert(ba_crse, IntVect(1,0,0)), dm_crse, geom[lev-1], -cf_width, -cf_set_width, 1, &face_linear_interp); @@ -1262,6 +1375,9 @@ ERF::Construct_ERFFillPatchers (int lev) void ERF::Define_ERFFillPatchers (int lev) { + AMREX_ALWAYS_ASSERT(solverChoice.coupling_type == CouplingType::OneWay || + solverChoice.coupling_type == CouplingType::Mixed); + auto& fine_new = vars_new[lev]; auto& crse_new = vars_new[lev-1]; auto& ba_fine = fine_new[Vars::cons].boxArray(); @@ -1269,10 +1385,12 @@ ERF::Define_ERFFillPatchers (int lev) auto& dm_fine = fine_new[Vars::cons].DistributionMap(); auto& dm_crse = crse_new[Vars::cons].DistributionMap(); - // NOTE: crse-fine set/relaxation only done on Rho/RhoTheta + // NOTE: if "Mixed", then crse-fine set/relaxation only done on Rho/RhoTheta + int ncomp = (solverChoice.coupling_type == CouplingType::OneWay) ? NVAR : 2; + FPr_c[lev-1].Define(ba_fine, dm_fine, geom[lev] , ba_crse, dm_crse, geom[lev-1], - -cf_width, -cf_set_width, 2, &cell_cons_interp); + -cf_width, -cf_set_width, ncomp, &cell_cons_interp); FPr_u[lev-1].Define(convert(ba_fine, IntVect(1,0,0)), dm_fine, geom[lev] , convert(ba_crse, IntVect(1,0,0)), dm_crse, geom[lev-1], -cf_width, -cf_set_width, 1, &face_linear_interp); diff --git a/Source/IO/ERF_WriteScalarProfiles.cpp b/Source/IO/ERF_WriteScalarProfiles.cpp index 78521d620..4e0561090 100644 --- a/Source/IO/ERF_WriteScalarProfiles.cpp +++ b/Source/IO/ERF_WriteScalarProfiles.cpp @@ -21,12 +21,20 @@ ERF::sum_integrated_quantities(Real time) int datwidth = 14; int datprecision = 6; - amrex::Real scalar = 0.0; - amrex::Real mass = 0.0; + // Multilevel sums + Real mass_ml = 0.0; + Real rhth_ml = 0.0; + Real scal_ml = 0.0; + + // Level 0 sums + Real mass_sl = volWgtSumMF(0,vars_new[0][Vars::cons], Rho_comp,*mapfac_m[0],false,false); + Real rhth_sl = volWgtSumMF(0,vars_new[0][Vars::cons], RhoTheta_comp,*mapfac_m[0],false,false); + Real scal_sl = volWgtSumMF(0,vars_new[0][Vars::cons],RhoScalar_comp,*mapfac_m[0],false,false); for (int lev = 0; lev <= finest_level; lev++) { - mass += volWgtSumMF(lev,vars_new[lev][Vars::cons],Rho_comp,true,true); - scalar += volWgtSumMF(lev,vars_new[lev][Vars::cons],RhoScalar_comp,true,true); + mass_ml += volWgtSumMF(lev,vars_new[lev][Vars::cons], Rho_comp,*mapfac_m[lev],false,true); + rhth_ml += volWgtSumMF(lev,vars_new[lev][Vars::cons], RhoTheta_comp,*mapfac_m[lev],false,true); + scal_ml += volWgtSumMF(lev,vars_new[lev][Vars::cons],RhoScalar_comp,*mapfac_m[lev],false,true); } if (verbose > 0) { @@ -53,8 +61,8 @@ ERF::sum_integrated_quantities(Real time) h_avg_olen[0] = 0.; } - const int nfoo = 2; - amrex::Real foo[nfoo] = {mass,scalar}; + const int nfoo = 6; + amrex::Real foo[nfoo] = {mass_sl,rhth_sl,scal_sl,mass_ml,rhth_ml,scal_ml}; #ifdef AMREX_LAZY Lazy::QueueReduction([=]() mutable { #endif @@ -63,12 +71,23 @@ ERF::sum_integrated_quantities(Real time) if (amrex::ParallelDescriptor::IOProcessor()) { int i = 0; - mass = foo[i++]; - scalar = foo[i++]; + mass_sl = foo[i++]; + rhth_sl = foo[i++]; + scal_sl = foo[i++]; + mass_ml = foo[i++]; + rhth_ml = foo[i++]; + scal_ml = foo[i++]; amrex::Print() << '\n'; - amrex::Print() << "TIME= " << time << " MASS = " << mass << '\n'; - amrex::Print() << "TIME= " << time << " SCALAR = " << scalar << '\n'; + if 
(finest_level == 0) { + amrex::Print() << "TIME= " << time << " MASS = " << mass_sl << '\n'; + amrex::Print() << "TIME= " << time << " RHO THETA = " << rhth_sl << '\n'; + amrex::Print() << "TIME= " << time << " RHO SCALAR = " << scal_sl << '\n'; + } else { + amrex::Print() << "TIME= " << time << " MASS SL/ML = " << mass_sl << " " << mass_ml << '\n'; + amrex::Print() << "TIME= " << time << " RHO THETA SL/ML = " << rhth_sl << " " << rhth_ml << '\n'; + amrex::Print() << "TIME= " << time << " RHO SCALAR SL/ML = " << scal_sl << " " << scal_ml << '\n'; + } // The first data log only holds scalars if (NumDataLogs() > 0) @@ -256,29 +275,42 @@ ERF::sample_lines(int lev, Real time, IntVect cell, MultiFab& mf) */ amrex::Real ERF::volWgtSumMF(int lev, - const amrex::MultiFab& mf, int comp, bool local, bool finemask) + const MultiFab& mf, int comp, + const MultiFab& mapfac, bool local, bool finemask) { BL_PROFILE("ERF::volWgtSumMF()"); - amrex::Real sum = 0.0; - amrex::MultiFab tmp(grids[lev], dmap[lev], 1, 0); - amrex::MultiFab::Copy(tmp, mf, comp, 0, 1, 0); + Real sum = 0.0; + MultiFab tmp(grids[lev], dmap[lev], 1, 0); + MultiFab::Copy(tmp, mf, comp, 0, 1, 0); + + // The quantity that is conserved is not (rho S), but rather (rho S / m^2) where + // m is the map scale factor at cell centers + for (MFIter mfi(tmp, TilingIfNotGPU()); mfi.isValid(); ++mfi) { + const Box& bx = mfi.tilebox(); + const Array4< Real> tmp_arr = tmp.array(mfi); + const Array4 mapfac_arr = mapfac.const_array(mfi); + ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept + { + tmp_arr(i,j,k) /= (mapfac_arr(i,j,0)*mapfac_arr(i,j,0)); + }); + } // mfi if (lev < finest_level && finemask) { const amrex::MultiFab& mask = build_fine_mask(lev+1); - amrex::MultiFab::Multiply(tmp, mask, 0, 0, 1, 0); + MultiFab::Multiply(tmp, mask, 0, 0, 1, 0); } - amrex::MultiFab volume(grids[lev], dmap[lev], 1, 0); + MultiFab volume(grids[lev], dmap[lev], 1, 0); auto const& dx = geom[lev].CellSizeArray(); Real cell_vol = dx[0]*dx[1]*dx[2]; volume.setVal(cell_vol); if (solverChoice.use_terrain) amrex::MultiFab::Multiply(volume, *detJ_cc[lev], 0, 0, 1, 0); - sum = amrex::MultiFab::Dot(tmp, 0, volume, 0, 1, 0, local); + sum = MultiFab::Dot(tmp, 0, volume, 0, 1, 0, local); if (!local) - amrex::ParallelDescriptor::ReduceRealSum(sum); + ParallelDescriptor::ReduceRealSum(sum); return sum; } diff --git a/Source/Initialization/InputSoundingData.H b/Source/Initialization/InputSoundingData.H index c7d471e3b..0496fb2cb 100644 --- a/Source/Initialization/InputSoundingData.H +++ b/Source/Initialization/InputSoundingData.H @@ -157,7 +157,7 @@ public: // integrate from surface to domain top amrex::Real qvf, dz; -#if 0 +#if 0 // Printing // In this absence of moisture, this moist profile will match the // following dry profile amrex::Print() << "z p_m rho_m theta U V" << std::endl; @@ -182,7 +182,7 @@ public: rhom_integ[k] = 1 / ( R_d/p_0 * theta_inp_sound[k]*qvf * std::pow(pm_integ[k]/p_0, -iGamma)); } -#if 0 +#if 0 // Printing amrex::Print() << z_inp_sound[k] << " " << pm_integ[k] << " " << rhom_integ[k] diff --git a/Source/TimeIntegration/ERF_TimeStep.cpp b/Source/TimeIntegration/ERF_TimeStep.cpp index 5244dcefb..05bb36dbd 100644 --- a/Source/TimeIntegration/ERF_TimeStep.cpp +++ b/Source/TimeIntegration/ERF_TimeStep.cpp @@ -34,9 +34,10 @@ ERF::timeStep (int lev, Real time, int iteration) int old_finest = finest_level; regrid(lev, time); - // NOTE: Def & Reg index lev backwards (so we add 1 here) + // NOTE: Define & Register index lev backwards (so we add 
1 here) // Redefine & register the ERFFillpatcher objects - if (solverChoice.coupling_type == CouplingType::OneWay && cf_width>0) { + if (solverChoice.coupling_type != CouplingType::TwoWay && cf_width > 0) + { Define_ERFFillPatchers(lev+1); Register_ERFFillPatchers(lev+1); if (lev < max_level-1) { @@ -84,11 +85,15 @@ ERF::timeStep (int lev, Real time, int iteration) // recursive call for next-finer level for (int i = 1; i <= nsubsteps[lev+1]; ++i) { - timeStep(lev+1, time+(i-1)*dt[lev+1], i); + Real strt_time_for_fine = time + (i-1)*dt[lev+1]; + timeStep(lev+1, strt_time_for_fine, i); } - if (solverChoice.coupling_type == CouplingType::TwoWay) { - AverageDownTo(lev); // average lev+1 down to lev + if (solverChoice.coupling_type == CouplingType::TwoWay || + solverChoice.coupling_type == CouplingType::Mixed) { + int scomp = (solverChoice.coupling_type == CouplingType::TwoWay) ? 0 : 2; + int ncomp = NVAR - scomp; + AverageDownTo(lev, scomp, ncomp); // average lev+1 down to lev } } } diff --git a/Source/TimeIntegration/ERF_advance_dycore.cpp b/Source/TimeIntegration/ERF_advance_dycore.cpp index 25a080309..a56ae7083 100644 --- a/Source/TimeIntegration/ERF_advance_dycore.cpp +++ b/Source/TimeIntegration/ERF_advance_dycore.cpp @@ -314,7 +314,7 @@ void ERF::advance_dycore(int level, mri_integrator.advance(state_old, state_new, old_time, dt_advance); // Register coarse data for coarse-fine fill - if (level0) { + if (level0) { FPr_c[level].RegisterCoarseData({&cons_old, &cons_new}, {old_time, old_time + dt_advance}); FPr_u[level].RegisterCoarseData({&xvel_old, &xvel_new}, {old_time, old_time + dt_advance}); FPr_v[level].RegisterCoarseData({&yvel_old, &yvel_new}, {old_time, old_time + dt_advance}); diff --git a/Source/TimeIntegration/ERF_slow_rhs_inc.cpp b/Source/TimeIntegration/ERF_slow_rhs_inc.cpp index 9fbb9d07d..8d922fd3f 100644 --- a/Source/TimeIntegration/ERF_slow_rhs_inc.cpp +++ b/Source/TimeIntegration/ERF_slow_rhs_inc.cpp @@ -457,7 +457,7 @@ void erf_slow_rhs_inc (int /*level*/, int nrk, } // l_use_diff // HACK FOR PRINTING - S_rhs[IntVar::cons].setVal(0.); + // S_rhs[IntVar::cons].setVal(0.); // ************************************************************************* // Define updates and fluxes in the current RK stage @@ -582,15 +582,20 @@ void erf_slow_rhs_inc (int /*level*/, int nrk, } // ************************************************************************** - // Define updates in the RHS of continuity, temperature, and scalar equations + // Define updates in the RHS of continuity and potential temperature equations // ************************************************************************** - Real fac = 1.0; - AdvectionSrcForRhoAndTheta(bx, valid_bx, cell_rhs, // these are being used to build the fluxes - rho_u, rho_v, omega_arr, fac, - avg_xmom, avg_ymom, avg_zmom, // these are being defined from the rho fluxes - cell_prim, z_nd, detJ_arr, - dxInv, mf_m, mf_u, mf_v, - horiz_adv_type, vert_adv_type, l_use_terrain); + AdvectionSrcForRho(bx, valid_bx, cell_rhs, + rho_u, rho_v, omega_arr, // these are being used to build the fluxes + avg_xmom, avg_ymom, avg_zmom, // these are being defined from the fluxes + cell_prim, z_nd, detJ_arr, + dxInv, mf_m, mf_u, mf_v, + l_horiz_adv_type, l_vert_adv_type, l_use_terrain, flx_arr); + + int icomp = RhoTheta_comp; int ncomp = 1; + AdvectionSrcForScalars(bx, icomp, ncomp, + avg_xmom, avg_ymom, avg_zmom, + cell_prim, cell_rhs, detJ_arr, dxInv, mf_m, + l_horiz_adv_type, l_vert_adv_type, l_use_terrain, flx_arr); if (l_use_diff) { Array4 diffflux_x = 
diff --git a/Source/TimeIntegration/ERF_slow_rhs_post.cpp b/Source/TimeIntegration/ERF_slow_rhs_post.cpp
index 51c10dd7b..ff1b961ae 100644
--- a/Source/TimeIntegration/ERF_slow_rhs_post.cpp
+++ b/Source/TimeIntegration/ERF_slow_rhs_post.cpp
@@ -17,6 +17,8 @@ using namespace amrex;
  * Function for computing the slow RHS for the evolution equations for the scalars other than density or potential temperature
  *
  * @param[in] level level of resolution
+ * @param[in] finest_level finest level of resolution
+ * @param[in] nrk which RK stage
  * @param[in] dt slow time step
  * @param[out] S_rhs RHS computed here
  * @param[in] S_old solution at start of time step
@@ -42,6 +44,8 @@ using namespace amrex;
  * @param[in] mapfac_m map factor at cell centers
  * @param[in] mapfac_u map factor at x-faces
  * @param[in] mapfac_v map factor at y-faces
+ * @param[inout] fr_as_crse YAFluxRegister at level l at level l / l+1 interface
+ * @param[inout] fr_as_fine YAFluxRegister at level l at level l-1 / l interface
  */

 void erf_slow_rhs_post (int level, int finest_level,
@@ -69,9 +73,9 @@ void erf_slow_rhs_post (int level, int finest_level,
                         std::unique_ptr& detJ_new,
                         std::unique_ptr& mapfac_m,
                         std::unique_ptr& mapfac_u,
-                        std::unique_ptr& mapfac_v
+                        std::unique_ptr& mapfac_v,
 #if defined(ERF_USE_NETCDF) && (defined(ERF_USE_MOISTURE) || defined(ERF_USE_WARM_NO_PRECIP))
-                        ,const bool& moist_zero,
+                        const bool& moist_zero,
                         const Real& bdy_time_interval,
                         const Real& start_bdy_time,
                         const Real& new_stage_time,
@@ -80,8 +84,10 @@ void erf_slow_rhs_post (int level, int finest_level,
                         Vector>& bdy_data_xlo,
                         Vector>& bdy_data_xhi,
                         Vector>& bdy_data_ylo,
-                        Vector>& bdy_data_yhi
+                        Vector>& bdy_data_yhi,
 #endif
+                        YAFluxRegister* fr_as_crse,
+                        YAFluxRegister* fr_as_fine
                         )
 {
     BL_PROFILE_REGION("erf_slow_rhs_post()");
@@ -94,7 +100,7 @@ void erf_slow_rhs_post (int level, int finest_level,
     if (most) t_mean_mf = most->get_mac_avg(0,2);

     const bool l_use_terrain    = solverChoice.use_terrain;
-    const bool l_two_way_coupling = (solverChoice.coupling_type == CouplingType::TwoWay);
+    const bool l_reflux = (solverChoice.coupling_type != CouplingType::OneWay);
     const bool l_moving_terrain = (solverChoice.terrain_type == TerrainType::Moving);
     if (l_moving_terrain) AMREX_ALWAYS_ASSERT(l_use_terrain);
@@ -114,9 +120,11 @@ void erf_slow_rhs_post (int level, int finest_level,
     const Box& domain = geom.Domain();

     const GpuArray dxInv = geom.InvCellSizeArray();
+    const Real* dx = geom.CellSize();

     // *************************************************************************
     // Set gravity as a vector
+    // *************************************************************************
     const Array grav{0.0, 0.0, -solverChoice.gravity};
     const GpuArray grav_gpu{grav[0], grav[1], grav[2]};
@@ -159,10 +167,26 @@ void erf_slow_rhs_post (int level, int finest_level,
 #ifdef _OPENMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for ( MFIter mfi(S_data[IntVar::cons],TilingIfNotGPU()); mfi.isValid(); ++mfi) {
+    {
+      std::array flux;
+
+      for ( MFIter mfi(S_data[IntVar::cons],TilingIfNotGPU()); mfi.isValid(); ++mfi) {
         const Box& tbx = mfi.tilebox();

+        // *************************************************************************
+        // Define flux arrays for use in advection
+        // *************************************************************************
+        for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) {
+            flux[dir].resize(amrex::surroundingNodes(tbx,dir),NVAR);
+            flux[dir].setVal(0.);
+        }
+        const GpuArray, AMREX_SPACEDIM>
+            flx_arr{{AMREX_D_DECL(flux[0].array(), flux[1].array(), flux[2].array())}};
+
+        // *************************************************************************
+        // Define Array4's
+        // *************************************************************************
         const Array4< Real> & old_cons   = S_old[IntVar::cons].array(mfi);
         const Array4< Real> & cell_rhs   = S_rhs[IntVar::cons].array(mfi);
@@ -251,27 +275,24 @@ void erf_slow_rhs_post (int level, int finest_level,
         if (l_use_deardorff) {
             start_comp = RhoKE_comp;
             num_comp = 1;
-            AdvectionSrcForScalars(level, finest_level, mfi, tbx, start_comp, num_comp, avg_xmom, avg_ymom, avg_zmom,
-                                   cur_prim, cell_rhs, detJ_arr, dt, dxInv, mf_m,
-                                   horiz_adv_type, vert_adv_type,
-                                   l_use_terrain, l_two_way_coupling);
+            AdvectionSrcForScalars(tbx, start_comp, num_comp, avg_xmom, avg_ymom, avg_zmom,
+                                   cur_prim, cell_rhs, detJ_arr, dxInv, mf_m,
+                                   horiz_adv_type, vert_adv_type, l_use_terrain, flx_arr);
         }
         if (l_use_QKE) {
             start_comp = RhoQKE_comp;
             num_comp = 1;
-            AdvectionSrcForScalars(level, finest_level, mfi, tbx, start_comp, num_comp, avg_xmom, avg_ymom, avg_zmom,
-                                   cur_prim, cell_rhs, detJ_arr, dt, dxInv, mf_m,
-                                   horiz_adv_type, vert_adv_type,
-                                   l_use_terrain, l_two_way_coupling);
+            AdvectionSrcForScalars(tbx, start_comp, num_comp, avg_xmom, avg_ymom, avg_zmom,
+                                   cur_prim, cell_rhs, detJ_arr, dxInv, mf_m,
+                                   horiz_adv_type, vert_adv_type, l_use_terrain, flx_arr);
         }

         // This is simply an advected scalar for convenience
         start_comp = RhoScalar_comp;
         num_comp = 1;
-        AdvectionSrcForScalars(level, finest_level, mfi, tbx, start_comp, num_comp, avg_xmom, avg_ymom, avg_zmom,
-                               cur_prim, cell_rhs, detJ_arr, dt, dxInv, mf_m,
-                               horiz_adv_type, vert_adv_type,
-                               l_use_terrain, l_two_way_coupling);
+        AdvectionSrcForScalars(tbx, start_comp, num_comp, avg_xmom, avg_ymom, avg_zmom,
+                               cur_prim, cell_rhs, detJ_arr, dxInv, mf_m,
+                               horiz_adv_type, vert_adv_type, l_use_terrain, flx_arr);

 #ifdef ERF_USE_MOISTURE
         start_comp = RhoQt_comp;
@@ -284,10 +305,9 @@ void erf_slow_rhs_post (int level, int finest_level,
             moist_horiz_adv_type = EfficientAdvType(nrk,ac.moistscal_horiz_adv_type);
             moist_vert_adv_type  = EfficientAdvType(nrk,ac.moistscal_vert_adv_type);
         }
-        AdvectionSrcForScalars(level, finest_level, mfi, tbx, start_comp, num_comp, avg_xmom, avg_ymom, avg_zmom,
-                               cur_prim, cell_rhs, detJ_arr, dt, dxInv, mf_m,
-                               moist_horiz_adv_type, moist_vert_adv_type,
-                               l_use_terrain, l_two_way_coupling);
+        AdvectionSrcForScalars(tbx, start_comp, num_comp, avg_xmom, avg_ymom, avg_zmom,
+                               cur_prim, cell_rhs, detJ_arr, dxInv, mf_m,
+                               moist_horiz_adv_type, moist_vert_adv_type, l_use_terrain, flx_arr);

 #elif defined(ERF_USE_WARM_NO_PRECIP)
         start_comp = RhoQv_comp;
@@ -301,10 +321,9 @@ void erf_slow_rhs_post (int level, int finest_level,
             moist_horiz_adv_type = EfficientAdvType(nrk,solverChoice.moistscal_horiz_adv_type);
             moist_vert_adv_type  = EfficientAdvType(nrk,solverChoice.moistscal_vert_adv_type);
         }
-        AdvectionSrcForScalars(level, finest_level, mfi, tbx, start_comp, num_comp, avg_xmom, avg_ymom, avg_zmom,
-                               cur_prim, cell_rhs, detJ_arr, dt, dxInv, mf_m,
-                               moist_horiz_adv_type, moist_vert_adv_type,
-                               l_use_terrain, l_two_way_coupling);
+        AdvectionSrcForScalars(tbx, start_comp, num_comp, avg_xmom, avg_ymom, avg_zmom,
+                               cur_prim, cell_rhs, detJ_arr, dxInv, mf_m,
+                               moist_horiz_adv_type, moist_vert_adv_type, l_use_terrain, flx_arr);
 #endif

         if (l_use_diff) {
@@ -443,7 +462,7 @@ void erf_slow_rhs_post (int level, int finest_level,
                 Real temp_val = detJ_arr(i,j,k) * old_cons(i,j,k,n) + dt * detJ_arr(i,j,k) * cell_rhs(i,j,k,n);
                 cur_cons(i,j,k,n) = temp_val / detJ_new_arr(i,j,k);
                 cur_cons(i,j,k,n) = amrex::max(cur_cons(i,j,k,n), 1e-12);
-#if 0
+#if 0 // Printing
                 if (cur_cons(i,j,k,n) < Real(0.)) {
                     amrex::AllPrint() << "MAKING NEGATIVE QKE " << IntVect(i,j,k) << " NEW / OLD "
                                       << cur_cons(i,j,k,n) << " " << old_cons(i,j,k,n) << std::endl;
@@ -485,7 +504,7 @@ void erf_slow_rhs_post (int level, int finest_level,
                 cell_rhs(i,j,k,n) += src_arr(i,j,k,n);
                 cur_cons(i,j,k,n) = old_cons(i,j,k,n) + dt * cell_rhs(i,j,k,n);
                 cur_cons(i,j,k,n) = amrex::max(cur_cons(i,j,k,n), 1e-12);
-#if 0
+#if 0 // Printing
                 if (cur_cons(i,j,k,n) < Real(0.)) {
                     amrex::AllPrint() << "MAKING NEGATIVE QKE " << IntVect(i,j,k) << " NEW / OLD "
                                       << cur_cons(i,j,k,n) << " " << old_cons(i,j,k,n) << std::endl;
@@ -524,5 +543,25 @@ void erf_slow_rhs_post (int level, int finest_level,
                 new_zmom(i,j,k) = cur_zmom(i,j,k);
             });
         } // end profile
-    } // mfi
+
+        {
+        BL_PROFILE("rhs_post_10");
+        // We only add to the flux registers in the final RK step
+        if (l_reflux && nrk == 2) {
+            int strt_comp_reflux = RhoTheta_comp + 1;
+            int  num_comp_reflux = NVAR - strt_comp_reflux;
+            if (level < finest_level) {
+                fr_as_crse->CrseAdd(mfi,
+                    {{AMREX_D_DECL(&(flux[0]), &(flux[1]), &(flux[2]))}},
+                    dx, dt, strt_comp_reflux, strt_comp_reflux, num_comp_reflux, amrex::RunOn::Device);
+            }
+            if (level > 0) {
+                fr_as_fine->FineAdd(mfi,
+                    {{AMREX_D_DECL(&(flux[0]), &(flux[1]), &(flux[2]))}},
+                    dx, dt, strt_comp_reflux, strt_comp_reflux, num_comp_reflux, amrex::RunOn::Device);
+            }
+        } // two-way coupling
+        } // end profile
+      } // mfi
+    } // OMP
 }
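The block just added at the end of the tile loop is where erf_slow_rhs_post hands its advective fluxes to the flux registers: only on the last RK stage (nrk == 2), and only for the components this routine advects, RhoTheta_comp+1 through NVAR-1; rho and rho*theta are covered by the matching block in erf_slow_rhs_pre.cpp below (strt_comp_reflux = 0, num_comp_reflux = 2). A condensed sketch of that hand-off; how the accumulated mismatch is later applied to the coarse level (for example via YAFluxRegister::Reflux during averaging down) is an assumption, not shown in these hunks:

    // Inside the MFIter loop, after the advective updates for this tile are complete.
    if (l_reflux && nrk == 2) {
        int scomp = RhoTheta_comp + 1;   // first component refluxed by the _post routine
        int ncomp = NVAR - scomp;        // everything after rho*theta
        std::array<amrex::FArrayBox const*, AMREX_SPACEDIM> fp
            {{AMREX_D_DECL(&flux[0], &flux[1], &flux[2])}};
        if (level < finest_level) {      // this level is the coarse side of an interface
            fr_as_crse->CrseAdd(mfi, fp, dx, dt, scomp, scomp, ncomp, amrex::RunOn::Device);
        }
        if (level > 0) {                 // this level is the fine side of an interface
            fr_as_fine->FineAdd(mfi, fp, dx, dt, scomp, scomp, ncomp, amrex::RunOn::Device);
        }
    }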
diff --git a/Source/TimeIntegration/ERF_slow_rhs_pre.cpp b/Source/TimeIntegration/ERF_slow_rhs_pre.cpp
index 50b509895..2adcc4db9 100644
--- a/Source/TimeIntegration/ERF_slow_rhs_pre.cpp
+++ b/Source/TimeIntegration/ERF_slow_rhs_pre.cpp
@@ -21,6 +21,7 @@ using namespace amrex;
  * Function for computing the slow RHS for the evolution equations for the density, potential temperature and momentum.
  *
  * @param[in] level level of resolution
+ * @param[in] finest_level finest level of resolution
  * @param[in] nrk which RK stage
  * @param[in] dt slow time step
  * @param[out] S_rhs RHS computed here
@@ -60,6 +61,8 @@ using namespace amrex;
  * @param[in] mapfac_m map factor at cell centers
  * @param[in] mapfac_u map factor at x-faces
  * @param[in] mapfac_v map factor at y-faces
+ * @param[inout] fr_as_crse YAFluxRegister at level l at level l / l+1 interface
+ * @param[inout] fr_as_fine YAFluxRegister at level l at level l-1 / l interface
  * @param[in] dptr_rayleigh_tau strength of Rayleigh damping
  * @param[in] dptr_rayleigh_ubar reference value for x-velocity used to define Rayleigh damping
  * @param[in] dptr_rayleigh_vbar reference value for y-velocity used to define Rayleigh damping
@@ -67,7 +70,7 @@ using namespace amrex;
  * @param[in] dptr_rayleigh_thetabar reference value for potential temperature used to define Rayleigh damping
  */

-void erf_slow_rhs_pre (int level,
+void erf_slow_rhs_pre (int level, int finest_level,
                        int nrk,
                        amrex::Real dt,
                        Vector& S_rhs,
@@ -100,6 +103,8 @@ void erf_slow_rhs_pre (int level,
                        std::unique_ptr& mapfac_m,
                        std::unique_ptr& mapfac_u,
                        std::unique_ptr& mapfac_v,
+                       YAFluxRegister* fr_as_crse,
+                       YAFluxRegister* fr_as_fine,
                        const amrex::Real* dptr_rayleigh_tau, const amrex::Real* dptr_rayleigh_ubar,
                        const amrex::Real* dptr_rayleigh_vbar, const amrex::Real* dptr_rayleigh_wbar,
                        const amrex::Real* dptr_rayleigh_thetabar)
@@ -122,6 +127,8 @@ void erf_slow_rhs_pre (int level,
     const bool l_moving_terrain = (solverChoice.terrain_type == TerrainType::Moving);
     if (l_moving_terrain) AMREX_ALWAYS_ASSERT (l_use_terrain);

+    const bool l_reflux = (solverChoice.coupling_type == CouplingType::TwoWay);
+
     const bool l_use_ndiff = solverChoice.use_NumDiff;
     const bool l_use_diff  = ( (dc.molec_diff_type != MolecDiffType::None) ||
                                (tc.les_type != LESType::None) ||
@@ -138,6 +145,9 @@ void erf_slow_rhs_pre (int level,
     const int domhi_z = domain.bigEnd()[2];

     const GpuArray dxInv = geom.InvCellSizeArray();
+    const Real* dx = geom.CellSize();
+
+    std::array flux;

     // *************************************************************************
     // Combine external forcing terms
@@ -522,6 +532,16 @@ void erf_slow_rhs_pre (int level,
         // Base state
         const Array4& p0_arr = p0->const_array(mfi);

+        // *************************************************************************
+        // Define flux arrays for use in advection
+        // *************************************************************************
+        for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) {
+            flux[dir].resize(amrex::surroundingNodes(tbx,dir),2);
+            flux[dir].setVal(0.);
+        }
+        const GpuArray, AMREX_SPACEDIM>
+            flx_arr{{AMREX_D_DECL(flux[0].array(), flux[1].array(), flux[2].array())}};
+
         //-----------------------------------------
         // Perturbational pressure field
         //-----------------------------------------
@@ -628,15 +648,19 @@ void erf_slow_rhs_pre (int level,
         }

         // **************************************************************************
-        // Define updates in the RHS of continuity, temperature, and scalar equations
+        // Define updates in the RHS of continuity and potential temperature equations
         // **************************************************************************
-        Real fac = 1.0;
-        AdvectionSrcForRhoAndTheta(bx, valid_bx, cell_rhs,       // these are being used to build the fluxes
-                                   rho_u, rho_v, omega_arr, fac,
-                                   avg_xmom, avg_ymom, avg_zmom, // these are being defined from the rho fluxes
-                                   cell_prim, z_nd, detJ_arr,
-                                   dxInv, mf_m, mf_u, mf_v,
-                                   l_horiz_adv_type, l_vert_adv_type, l_use_terrain);
+        AdvectionSrcForRho(bx, valid_bx, cell_rhs,
+                           rho_u, rho_v, omega_arr,      // these are being used to build the fluxes
+                           avg_xmom, avg_ymom, avg_zmom, // these are being defined from the fluxes
+                           z_nd, detJ_arr, dxInv, mf_m, mf_u, mf_v,
+                           l_use_terrain, flx_arr);
+
+        int icomp = RhoTheta_comp; int ncomp = 1;
+        AdvectionSrcForScalars(bx, icomp, ncomp,
+                               avg_xmom, avg_ymom, avg_zmom,
+                               cell_prim, cell_rhs, detJ_arr, dxInv, mf_m,
+                               l_horiz_adv_type, l_vert_adv_type, l_use_terrain, flx_arr);

         if (l_use_diff) {
             Array4 diffflux_x = dflux_x->array(mfi);
@@ -1059,8 +1083,27 @@ void erf_slow_rhs_pre (int level,
                 }
             });
             } // no terrain
-            ApplySpongeZoneBCs(solverChoice.spongeChoice, geom, tbx, tby, tbz, rho_u_rhs, rho_v_rhs, rho_w_rhs, rho_u, rho_v,
-                               rho_w, bx, cell_rhs, cell_data);
+            ApplySpongeZoneBCs(solverChoice.spongeChoice, geom, tbx, tby, tbz, rho_u_rhs, rho_v_rhs, rho_w_rhs, rho_u, rho_v,
+                               rho_w, bx, cell_rhs, cell_data);
         } // end profile
+
+        {
+        BL_PROFILE("slow_rhs_pre_fluxreg");
+        // We only add to the flux registers in the final RK step
+        if (l_reflux && nrk == 2) {
+            int strt_comp_reflux = 0;
+            int  num_comp_reflux = 2;
+            if (level < finest_level) {
+                fr_as_crse->CrseAdd(mfi,
+                    {{AMREX_D_DECL(&(flux[0]), &(flux[1]), &(flux[2]))}},
+                    dx, dt, strt_comp_reflux, strt_comp_reflux, num_comp_reflux, amrex::RunOn::Device);
+            }
+            if (level > 0) {
+                fr_as_fine->FineAdd(mfi,
+                    {{AMREX_D_DECL(&(flux[0]), &(flux[1]), &(flux[2]))}},
+                    dx, dt, strt_comp_reflux, strt_comp_reflux, num_comp_reflux, amrex::RunOn::Device);
+            }
+        } // two-way coupling
+        } // end profile
     } // mfi
 }
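Note the asymmetry between the two guards: erf_slow_rhs_pre.cpp enables refluxing only for TwoWay coupling, while erf_slow_rhs_post.cpp (and the register setup in TI_slow_rhs_fun.H below) also enables it for Mixed coupling. Restated from the hunks, with `coupling` standing in for solverChoice.coupling_type:

    // Who refluxes what in this patch; `coupling` = solverChoice.coupling_type.
    const bool reflux_pre  = (coupling == CouplingType::TwoWay);   // rho and rho*theta only
    const bool reflux_post = (coupling != CouplingType::OneWay);   // TwoWay or Mixed
    const int  pre_scomp   = 0;                                    // components 0..1
    const int  pre_ncomp   = 2;
    const int  post_scomp  = RhoTheta_comp + 1;                    // remaining scalars
    const int  post_ncomp  = NVAR - post_scomp;

So Mixed coupling feeds back only the scalar components, which lines up with the AverageDownTo(lev, 2, NVAR-2) choice for Mixed coupling in ERF::timeStep at the top of this diff.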
diff --git a/Source/TimeIntegration/TI_headers.H b/Source/TimeIntegration/TI_headers.H
index 4c226be3b..f3591df28 100644
--- a/Source/TimeIntegration/TI_headers.H
+++ b/Source/TimeIntegration/TI_headers.H
@@ -3,6 +3,7 @@

 #include
 #include
+#include
 #include
 #include "DataStruct.H"
 #include "IndexDefines.H"
@@ -12,7 +13,7 @@
  * Function for computing the slow RHS for the evolution equations for the density, potential temperature and momentum.
  *
  */
-void erf_slow_rhs_pre (int level, int nrk,
+void erf_slow_rhs_pre (int level, int finest_level, int nrk,
                        amrex::Real dt,
                        amrex::Vector& S_rhs,
                        amrex::Vector& S_data,
@@ -52,6 +53,8 @@ void erf_slow_rhs_pre (int level, int nrk,
                        std::unique_ptr& mapfac_m,
                        std::unique_ptr& mapfac_u,
                        std::unique_ptr& mapfac_v,
+                       amrex::YAFluxRegister* fr_as_crse,
+                       amrex::YAFluxRegister* fr_as_fine,
                        const amrex::Real* dptr_rayleigh_tau,
                        const amrex::Real* dptr_rayleigh_ubar,
                        const amrex::Real* dptr_rayleigh_vbar,
@@ -87,9 +90,9 @@ void erf_slow_rhs_post (int level, int finest_level, int nrk,
                         std::unique_ptr& dJ_new,
                         std::unique_ptr& mapfac_m,
                         std::unique_ptr& mapfac_u,
-                        std::unique_ptr& mapfac_v
+                        std::unique_ptr& mapfac_v,
 #if defined(ERF_USE_NETCDF) && (defined(ERF_USE_MOISTURE) || defined(ERF_USE_WARM_NO_PRECIP))
-                        ,const bool& moist_zero,
+                        const bool& moist_zero,
                         const amrex::Real& bdy_time_interval,
                         const amrex::Real& start_bdy_time,
                         const amrex::Real& new_stage_time,
@@ -98,8 +101,10 @@ void erf_slow_rhs_post (int level, int finest_level, int nrk,
                         amrex::Vector>& bdy_data_xlo,
                         amrex::Vector>& bdy_data_xhi,
                         amrex::Vector>& bdy_data_ylo,
-                        amrex::Vector>& bdy_data_yhi
+                        amrex::Vector>& bdy_data_yhi,
 #endif
+                        amrex::YAFluxRegister* fr_as_crse,
+                        amrex::YAFluxRegister* fr_as_fine
                         );

 /**
diff --git a/Source/TimeIntegration/TI_slow_rhs_fun.H b/Source/TimeIntegration/TI_slow_rhs_fun.H
index 9be9eb6fc..249587df1 100644
--- a/Source/TimeIntegration/TI_slow_rhs_fun.H
+++ b/Source/TimeIntegration/TI_slow_rhs_fun.H
@@ -17,6 +17,21 @@
         Real slow_dt = new_stage_time - old_step_time;

+        // *************************************************************************
+        // Set up flux registers if using two_way coupling
+        // *************************************************************************
+        YAFluxRegister* fr_as_crse = nullptr;
+        YAFluxRegister* fr_as_fine = nullptr;
+        if (solverChoice.coupling_type == CouplingType::TwoWay) {
+            if (level < finest_level) {
+                fr_as_crse = getAdvFluxReg(level+1);
+                fr_as_crse->reset();
+            }
+            if (level > 0) {
+                fr_as_fine = getAdvFluxReg(level);
+            }
+        }
+
         // Moving terrain
         if ( solverChoice.use_terrain && (solverChoice.terrain_type == TerrainType::Moving) ) {
@@ -80,7 +95,7 @@
 #endif
                          fine_geom, solverChoice, r0_new);

-            erf_slow_rhs_pre(level, nrk, slow_dt, S_rhs, S_data, S_prim, S_scratch,
+            erf_slow_rhs_pre(level, finest_level, nrk, slow_dt, S_rhs, S_data, S_prim, S_scratch,
                              xvel_new, yvel_new, zvel_new,
 #if defined(ERF_USE_MOISTURE)
                              qmoist[level],
@@ -91,6 +106,7 @@
                              fine_geom, solverChoice, m_most, domain_bcs_type_d, domain_bcs_type,
                              z_phys_nd_src[level], detJ_cc_src[level], p0_new,
                              mapfac_m[level], mapfac_u[level], mapfac_v[level],
+                             fr_as_crse, fr_as_fine,
                              dptr_rayleigh_tau, dptr_rayleigh_ubar,
                              dptr_rayleigh_vbar, dptr_rayleigh_wbar,
                              dptr_rayleigh_thetabar);
@@ -176,7 +192,7 @@
 #endif
                          fine_geom, solverChoice, r0);

-            erf_slow_rhs_pre(level, nrk, slow_dt, S_rhs, S_data, S_prim, S_scratch,
+            erf_slow_rhs_pre(level, finest_level, nrk, slow_dt, S_rhs, S_data, S_prim, S_scratch,
                              xvel_new, yvel_new, zvel_new,
 #if defined(ERF_USE_MOISTURE)
                              qmoist[level],
@@ -187,6 +203,7 @@
                              fine_geom, solverChoice, m_most, domain_bcs_type_d, domain_bcs_type,
                              z_phys_nd[level], detJ_cc[level], p0,
                              mapfac_m[level], mapfac_u[level], mapfac_v[level],
+                             fr_as_crse, fr_as_fine,
                              dptr_rayleigh_tau, dptr_rayleigh_ubar,
                              dptr_rayleigh_vbar, dptr_rayleigh_wbar,
                              dptr_rayleigh_thetabar);
@@ -211,7 +228,7 @@
 #endif

             // Compute RHS for fine interior ghost
-            if (level>0 && solverChoice.coupling_type == CouplingType::OneWay && cf_width>0) {
+            if (level>0 && solverChoice.coupling_type != CouplingType::TwoWay && cf_width>0) {
                 fine_compute_interior_ghost_rhs(new_stage_time, slow_dt, cf_width, cf_set_width, fine_geom,
                                                 &FPr_c[level-1], &FPr_u[level-1], &FPr_v[level-1], &FPr_w[level-1],
@@ -275,6 +292,27 @@
         if (init_type=="metgrid" && level==0 && metgrid_bdy_set_width > 0) moist_zero = true;
 #endif

+        // *************************************************************************
+        // Set up flux registers if using two_way coupling
+        // *************************************************************************
+        YAFluxRegister* fr_as_crse = nullptr;
+        YAFluxRegister* fr_as_fine = nullptr;
+        if (solverChoice.coupling_type == CouplingType::TwoWay ||
+            solverChoice.coupling_type == CouplingType::Mixed)
+        {
+            if (level < finest_level) {
+                fr_as_crse = getAdvFluxReg(level+1);
+                // For Mixed we haven't reset this in erf_slow_rhs_pre but we need to here.
+                if (solverChoice.coupling_type == CouplingType::Mixed)
+                {
+                    fr_as_crse->reset();
+                }
+            }
+            if (level > 0) {
+                fr_as_fine = getAdvFluxReg(level);
+            }
+        }
+
         // Moving terrain
         if ( solverChoice.use_terrain && (solverChoice.terrain_type == TerrainType::Moving) ) {
             erf_slow_rhs_post(level, finest_level, nrk, slow_dt,
@@ -284,12 +322,13 @@
                               Hfx3, Diss,
                               fine_geom, solverChoice, m_most, domain_bcs_type_d,
                               z_phys_nd_src[level], detJ_cc[level], detJ_cc_new[level],
-                              mapfac_m[level], mapfac_u[level], mapfac_v[level]
+                              mapfac_m[level], mapfac_u[level], mapfac_v[level],
 #if defined(ERF_USE_NETCDF) && (defined(ERF_USE_MOISTURE) || defined(ERF_USE_WARM_NO_PRECIP))
-                              ,moist_zero, bdy_time_interval, start_bdy_time, new_stage_time,
+                              moist_zero, bdy_time_interval, start_bdy_time, new_stage_time,
                               wrfbdy_width-1, wrfbdy_set_width,
-                              bdy_data_xlo, bdy_data_xhi, bdy_data_ylo, bdy_data_yhi
+                              bdy_data_xlo, bdy_data_xhi, bdy_data_ylo, bdy_data_yhi,
 #endif
+                              fr_as_crse, fr_as_fine
                               );
         } else {
             erf_slow_rhs_post(level, finest_level, nrk, slow_dt,
@@ -299,12 +338,13 @@
                               Hfx3, Diss,
                               fine_geom, solverChoice, m_most, domain_bcs_type_d,
                               z_phys_nd[level], detJ_cc[level], detJ_cc[level],
-                              mapfac_m[level], mapfac_u[level], mapfac_v[level]
+                              mapfac_m[level], mapfac_u[level], mapfac_v[level],
 #if defined(ERF_USE_NETCDF) && (defined(ERF_USE_MOISTURE) || defined(ERF_USE_WARM_NO_PRECIP))
-                              ,moist_zero, bdy_time_interval, start_bdy_time, new_stage_time,
+                              moist_zero, bdy_time_interval, start_bdy_time, new_stage_time,
                               wrfbdy_width-1, wrfbdy_set_width,
-                              bdy_data_xlo, bdy_data_xhi, bdy_data_ylo, bdy_data_yhi
+                              bdy_data_xlo, bdy_data_xhi, bdy_data_ylo, bdy_data_yhi,
 #endif
+                              fr_as_crse, fr_as_fine
                               );
         }
     }; // end slow_rhs_fun_post
@@ -367,7 +407,7 @@
 #endif

             // Compute RHS for fine interior ghost
-            if (level>0 && coupling_type=="OneWay" && cf_width>0) {
+            if (level>0 && solverChoice.coupling_type != CouplingType::TwoWay && cf_width>0) {
                 fine_compute_interior_ghost_rhs(new_stage_time, slow_dt, cf_width, cf_set_width,
                                                 &FPr_c[level-1], &FPr_u[level-1], &FPr_v[level-1], &FPr_w[level-1],
diff --git a/Source/Utils/Utils.H b/Source/Utils/Utils.H
index e154a4946..fb4b34037 100644
--- a/Source/Utils/Utils.H
+++ b/Source/Utils/Utils.H
@@ -4,7 +4,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
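For reference, the register plumbing that TI_slow_rhs_fun.H adds ahead of the slow RHS calls, condensed into one place. This is a restatement of the hunks above, not new behavior; solverChoice, level, finest_level and getAdvFluxReg come from the surrounding ERF context:

    // Per level, before calling the slow RHS routines (TwoWay case shown; the post-stage
    // lambda widens the test to Mixed and performs the reset itself in that mode).
    amrex::YAFluxRegister* fr_as_crse = nullptr;
    amrex::YAFluxRegister* fr_as_fine = nullptr;
    if (solverChoice.coupling_type == CouplingType::TwoWay) {
        if (level < finest_level) {
            fr_as_crse = getAdvFluxReg(level+1);   // coarse side of the level / level+1 interface
            fr_as_crse->reset();                   // cleared once per slow step
        }
        if (level > 0) {
            fr_as_fine = getAdvFluxReg(level);     // fine side of the level-1 / level interface
        }
    }

The pre-stage lambda does this only for TwoWay coupling and owns the reset; for Mixed coupling the post-stage lambda both fetches and resets the coarse register, since the pre stage never touches it in that mode.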