From f4ac4965563bb68d4e8682fe38a3068fb2ebbadc Mon Sep 17 00:00:00 2001
From: hnil
Date: Fri, 30 Jun 2023 12:52:53 +0200
Subject: [PATCH] -- added possibility for double-sided flux

-- restructured flores
---
 .../discretization/common/tpfalinearizer.hh | 130 +++++++++++++++---
 1 file changed, 108 insertions(+), 22 deletions(-)

diff --git a/opm/models/discretization/common/tpfalinearizer.hh b/opm/models/discretization/common/tpfalinearizer.hh
index 438c87672..19a15d16a 100644
--- a/opm/models/discretization/common/tpfalinearizer.hh
+++ b/opm/models/discretization/common/tpfalinearizer.hh
@@ -58,6 +58,11 @@ namespace Opm::Properties {
     using type = bool;
     static constexpr type value = false;
 };
+template<class TypeTag, class MyTypeTag>
+struct FluxDoubleSided {
+    using type = bool;
+    static constexpr type value = false;
+};
 }

 namespace Opm {
@@ -86,6 +91,7 @@ class TpfaLinearizer
     using Scalar = GetPropType<TypeTag, Properties::Scalar>;
     using Evaluation = GetPropType<TypeTag, Properties::Evaluation>;
+    using ThreadManager = GetPropType<TypeTag, Properties::ThreadManager>;
     using SolutionVector = GetPropType<TypeTag, Properties::SolutionVector>;
     using GlobalEqVector = GetPropType<TypeTag, Properties::GlobalEqVector>;
     using SparseMatrixAdapter = GetPropType<TypeTag, Properties::SparseMatrixAdapter>;
@@ -100,6 +106,7 @@ class TpfaLinearizer
     using Vector = GlobalEqVector;

+    bool FluxDoubleSided = getPropValue<TypeTag, Properties::FluxDoubleSided>();
     enum { numEq = getPropValue<TypeTag, Properties::NumEq>() };
     enum { historySize = getPropValue<TypeTag, Properties::TimeDiscHistorySize>() };
     enum { dimWorld = GridView::dimensionworld };
@@ -469,6 +476,7 @@ private:
             diagMatAddress_[globI] = jacobian_->blockAddress(globI, globI);
             for (auto& nbInfo : nbInfos) {
                 nbInfo.matBlockAddress = jacobian_->blockAddress(nbInfo.neighbor, globI);
+                nbInfo.matBlockAddress2 = jacobian_->blockAddress(globI, nbInfo.neighbor);
             }
         }
@@ -476,6 +484,9 @@ private:
         fullDomain_.cells.resize(numCells);
         std::iota(fullDomain_.cells.begin(), fullDomain_.cells.end(), 0);
     }
+    void createOmpDomains() {
+        // TODO: omp_domains_ needs to be determined here
+    }

     // reset the global linear system of equations.
     void resetSystem_()
@@ -593,7 +604,8 @@ private:
         const bool& enableFlores = simulator_().problem().eclWriter()->eclOutputModule().hasFlores();
         const unsigned int numCells = domain.cells.size();
         const bool on_full_domain = (numCells == model_().numTotalDof());
-
+        bool single_thread = ThreadManager::maxThreads() < 2;
+        bool fluxdoublesided = FluxDoubleSided && single_thread;
 #ifdef _OPENMP
 #pragma omp parallel for
 #endif
@@ -610,30 +622,20 @@ private:
             // Flux term.
            {
                OPM_TIMEBLOCK_LOCAL(fluxCalculationForEachCell);
-                short loc = 0;
                for (const auto& nbInfo : nbInfos) {
                    OPM_TIMEBLOCK_LOCAL(fluxCalculationForEachFace);
                    unsigned globJ = nbInfo.neighbor;
-                    assert(globJ != globI);
-                    res = 0.0;
-                    bMat = 0.0;
-                    adres = 0.0;
-                    darcyFlux = 0.0;
+                    assert(globJ != globI);
+                    if (!fluxdoublesided || globJ < globI) {
+                    //res = 0.0;
+                    //bMat = 0.0;
+                    //adres = 0.0;
+                    //darcyFlux = 0.0;
                    const IntensiveQuantities& intQuantsEx = model_().intensiveQuantities(globJ, /*timeIdx*/ 0);
                    LocalResidual::computeFlux(
                        adres, darcyFlux, problem_(), globI, globJ, intQuantsIn, intQuantsEx,
                        nbInfo.trans, nbInfo.faceArea, nbInfo.faceDirection);
                    adres *= nbInfo.faceArea;
-                    if (enableFlows) {
-                        for (unsigned phaseIdx = 0; phaseIdx < numEq; ++ phaseIdx) {
-                            flowsInfo_[globI][loc].flow[phaseIdx] = adres[phaseIdx].value();
-                        }
-                    }
-                    if (enableFlores) {
-                        for (unsigned phaseIdx = 0; phaseIdx < numEq; ++ phaseIdx) {
-                            floresInfo_[globI][loc].flow[phaseIdx] = darcyFlux[phaseIdx].value();
-                        }
-                    }
                    setResAndJacobi(res, bMat, adres);
                    residual_[globI] += res;
                    //SparseAdapter syntax: jacobian_->addToBlock(globI, globI, bMat);
@@ -641,7 +643,25 @@ private:
                    bMat *= -1.0;
                    //SparseAdapter syntax: jacobian_->addToBlock(globJ, globI, bMat);
                    *nbInfo.matBlockAddress += bMat;
-                    ++loc;
+                    //++loc;
+                    if (fluxdoublesided) {
+                        //res = 0.0;
+                        //bMat = 0.0;
+                        //adres = 0.0;
+                        //darcyFlux = 0.0;
+                        LocalResidual::computeFlux(
+                            adres, darcyFlux, problem_(), globJ, globI, intQuantsEx, intQuantsIn,
+                            nbInfo.trans, nbInfo.faceArea, nbInfo.faceDirection);
+                        adres *= nbInfo.faceArea;
+                        setResAndJacobi(res, bMat, adres);
+                        residual_[globJ] += res;
+                        //SparseAdapter syntax: jacobian_->addToBlock(globJ, globJ, bMat);
+                        *diagMatAddress_[globJ] += bMat;
+                        bMat *= -1.0;
+                        //SparseAdapter syntax: jacobian_->addToBlock(globI, globJ, bMat);
+                        *nbInfo.matBlockAddress2 += bMat;
+                    }
+                    }
                }
            }
@@ -677,7 +697,7 @@ private:
                    }
                } else {
                    Dune::FieldVector<Scalar, numEq> tmp;
-                    IntensiveQuantities intQuantOld = model_().intensiveQuantities(globI, 1);
+                    const IntensiveQuantities& intQuantOld = model_().intensiveQuantities(globI, 1);
                    LocalResidual::computeStorage(tmp, intQuantOld);
                    model_().updateCachedStorage(globI, /*timeIdx=*/1, tmp);
                }
@@ -686,7 +706,7 @@ private:
            } else {
                OPM_TIMEBLOCK_LOCAL(computeStorage0);
                Dune::FieldVector<Scalar, numEq> tmp;
-                IntensiveQuantities intQuantOld = model_().intensiveQuantities(globI, 1);
+                const IntensiveQuantities& intQuantOld = model_().intensiveQuantities(globI, 1);
                LocalResidual::computeStorage(tmp, intQuantOld);
                // assume volume do not change
                res -= tmp;
@@ -703,9 +723,9 @@ private:
            bMat = 0.0;
            adres = 0.0;
            if (separateSparseSourceTerms_) {
-                LocalResidual::computeSourceDense(adres, problem_(), globI, 0);
+                LocalResidual::computeSourceDense(adres, problem_(), globI, 0);
            } else {
-                LocalResidual::computeSource(adres, problem_(), globI, 0);
+                LocalResidual::computeSource(adres, problem_(), globI, 0);
            }
            adres *= -volume;
            setResAndJacobi(res, bMat, adres);
@@ -738,6 +758,70 @@ private:
        }
    }

+    template <class SubDomainType>
+    void addFlo_(const SubDomainType& domain)
+    {
+        OPM_TIMEBLOCK(flores_);
+
+        // We do not call resetSystem_() here, since that will set
+        // the full system to zero, not just our part.
+        // Instead, that must be called before starting the linearization.
+
+        const bool& enableFlows = simulator_().problem().eclWriter()->eclOutputModule().hasFlows();
+        const bool& enableFlores = simulator_().problem().eclWriter()->eclOutputModule().hasFlores();
+        if (!(enableFlows || enableFlores)) {
+            return;
+        }
+        const unsigned int numCells = domain.cells.size();
+        const bool on_full_domain = (numCells == model_().numTotalDof());
+        bool single_thread = ThreadManager::maxThreads() < 2;
+        bool fluxdoublesided = FluxDoubleSided && single_thread; // &&
+#ifdef _OPENMP
+#pragma omp parallel for
+#endif
+        for (unsigned ii = 0; ii < numCells; ++ii) {
+            OPM_TIMEBLOCK_LOCAL(linearizationForEachCell);
+            const unsigned globI = domain.cells[ii];
+            const auto& nbInfos = neighborInfo_[globI];
+            VectorBlock res(0.0);
+            MatrixBlock bMat(0.0);
+            ADVectorBlock adres(0.0);
+            ADVectorBlock darcyFlux(0.0);
+            const IntensiveQuantities& intQuantsIn = model_().intensiveQuantities(globI, /*timeIdx*/ 0);
+
+            // Flux term.
+            {
+                OPM_TIMEBLOCK_LOCAL(fluxCalculationForEachCell);
+                short loc = 0;
+                for (const auto& nbInfo : nbInfos) {
+                    OPM_TIMEBLOCK_LOCAL(fluxCalculationForEachFace);
+                    unsigned globJ = nbInfo.neighbor;
+                    res = 0.0;
+                    bMat = 0.0;
+                    adres = 0.0;
+                    darcyFlux = 0.0;
+                    const IntensiveQuantities& intQuantsEx = model_().intensiveQuantities(globJ, /*timeIdx*/ 0);
+                    LocalResidual::computeFlux(
+                        adres, darcyFlux, problem_(), globI, globJ, intQuantsIn, intQuantsEx,
+                        nbInfo.trans, nbInfo.faceArea, nbInfo.faceDirection);
+                    adres *= nbInfo.faceArea;
+                    if (enableFlows) {
+                        for (unsigned phaseIdx = 0; phaseIdx < numEq; ++ phaseIdx) {
+                            flowsInfo_[globI][loc].flow[phaseIdx] = adres[phaseIdx].value();
+                        }
+                    }
+                    if (enableFlores) {
+                        for (unsigned phaseIdx = 0; phaseIdx < numEq; ++ phaseIdx) {
+                            floresInfo_[globI][loc].flow[phaseIdx] = darcyFlux[phaseIdx].value();
+                        }
+                    }
+                    ++loc;
+                }
+            }
+        } // end of loop for cell globI.
+    }
+
+
     void updateStoredTransmissibilities()
     {
         if (neighborInfo_.empty()) {
@@ -777,6 +861,7 @@ private:
         double faceArea;
         FaceDir::DirEnum faceDirection;
         MatrixBlock* matBlockAddress;
+        MatrixBlock* matBlockAddress2;
     };
     SparseTable<NeighborInfo> neighborInfo_;
     std::vector<MatrixBlock*> diagMatAddress_;
@@ -814,6 +899,7 @@ private:
         std::vector<bool> interior;
     };
     FullDomain fullDomain_;
+    std::vector<std::vector<int>> openmp_domains_;
 };

 } // namespace Opm
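
Note on intended usage (not part of the patch itself): the new FluxDoubleSided property defaults to false, so behaviour is unchanged unless a simulator opts in. Below is a minimal sketch of such an opt-in, assuming the standard opm-models property system; the type tag TTag::MySimulator is a hypothetical placeholder for a real simulator type tag.

    #include <opm/models/discretization/common/tpfalinearizer.hh>

    namespace Opm::Properties {

    // Hypothetical specialization for a simulator type tag. Enabling the
    // property switches the TPFA linearizer to the double-sided flux
    // assembly added by this patch. It only takes effect when running
    // single-threaded, because the assembly also writes to the residual
    // of the neighbouring cell (residual_[globJ]) and would race under
    // the OpenMP loop over cells.
    template<class TypeTag>
    struct FluxDoubleSided<TypeTag, TTag::MySimulator> {
        using type = bool;
        static constexpr type value = true;
    };

    } // namespace Opm::Properties

When enabled (and ThreadManager::maxThreads() < 2), the flux loop visits each interior face only once (globJ < globI) and adds the contributions for both adjacent cells from a single computeFlux call: residual_[globI] and residual_[globJ], plus the two off-diagonal Jacobian blocks via matBlockAddress and the new matBlockAddress2. The per-face flows/flores output, which previously lived in the same loop, is handled separately by the new addFlo_() routine.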