BlackoilModelNldd.hpp
1/*
2 Copyright 2013, 2015 SINTEF ICT, Applied Mathematics.
3 Copyright 2014, 2015 Dr. Blatt - HPC-Simulation-Software & Services
4 Copyright 2014, 2015 Statoil ASA.
5 Copyright 2015 NTNU
6 Copyright 2015, 2016, 2017 IRIS AS
7
8 This file is part of the Open Porous Media project (OPM).
9
10 OPM is free software: you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation, either version 3 of the License, or
13 (at your option) any later version.
14
15 OPM is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with OPM. If not, see <http://www.gnu.org/licenses/>.
22*/
23
24#ifndef OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED
25#define OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED
26
27#include <dune/common/timer.hh>
28
29#include <opm/grid/common/SubGridPart.hpp>
30
31#include <opm/simulators/aquifers/AquiferGridUtils.hpp>
32
33#include <opm/simulators/flow/countGlobalCells.hpp>
34#include <opm/simulators/flow/NlddReporting.hpp>
35#include <opm/simulators/flow/NonlinearSolver.hpp>
36#include <opm/simulators/flow/partitionCells.hpp>
37#include <opm/simulators/flow/priVarsPacking.hpp>
38#include <opm/simulators/flow/SubDomain.hpp>
39
40#include <opm/simulators/linalg/extractMatrix.hpp>
41
42#if COMPILE_GPU_BRIDGE
43#include <opm/simulators/linalg/ISTLSolverGpuBridge.hpp>
44#else
45#include <opm/simulators/linalg/ISTLSolver.hpp>
46#endif
47
48#include <opm/simulators/timestepping/ConvergenceReport.hpp>
49#include <opm/simulators/timestepping/SimulatorReport.hpp>
50#include <opm/simulators/timestepping/SimulatorTimerInterface.hpp>
51
52#include <opm/simulators/utils/ComponentName.hpp>
53
54#include <opm/simulators/wells/BlackoilWellModelNldd.hpp>
55
56#include <fmt/format.h>
57
58#include <algorithm>
59#include <array>
60#include <cassert>
61#include <cmath>
62#include <cstddef>
63#include <filesystem>
64#include <memory>
65#include <set>
66#include <sstream>
67#include <string>
68#include <type_traits>
69#include <utility>
70#include <vector>
71
72namespace Opm {
73
74template<class TypeTag> class BlackoilModel;
75
77template <class TypeTag>
78class BlackoilModelNldd
79{
80public:
81 using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;
82 using FluidSystem = GetPropType<TypeTag, Properties::FluidSystem>;
83 using Grid = GetPropType<TypeTag, Properties::Grid>;
84 using Indices = GetPropType<TypeTag, Properties::Indices>;
85 using ModelParameters = BlackoilModelParameters<GetPropType<TypeTag, Properties::Scalar>>;
86 using Scalar = GetPropType<TypeTag, Properties::Scalar>;
87 using SolutionVector = GetPropType<TypeTag, Properties::SolutionVector>;
88
89 using BVector = typename BlackoilModel<TypeTag>::BVector;
90 using Domain = SubDomain<Grid>;
91 using ISTLSolverType = ISTLSolver<TypeTag>;
92 using Mat = typename BlackoilModel<TypeTag>::Mat;
93
94 static constexpr int numEq = Indices::numEq;
95
96
97 //! \brief The constructor sets up the subdomains.
98 //!
99 //! \param model the BlackoilModel this NLDD solver co-operates with
100 explicit BlackoilModelNldd(BlackoilModel<TypeTag>& model)
101 : model_(model)
102 , wellModel_(model.wellModel())
103 , rank_(model_.simulator().vanguard().grid().comm().rank())
104 {
105 // Create partitions.
106 const auto& [partition_vector_initial, num_domains_initial] = this->partitionCells();
107
108 int num_domains = num_domains_initial;
109 auto partition_vector = partition_vector_initial;
110
111 // Fix-up for an extreme case: interior cells that do not have any on-rank
112 // neighbours. Move all such cells into a single domain on this rank,
113 // and mark the domain for skipping. For what it's worth, we've seen this
114 // case occur in practice when testing on field cases.
115 bool isolated_cells = false;
116 for (auto& domainId : partition_vector) {
117 if (domainId < 0) {
118 domainId = num_domains;
119 isolated_cells = true;
120 }
121 }
122 if (isolated_cells) {
123 num_domains++;
124 }
125
126 // Set nldd handler in main well model
127 model.wellModel().setNlddAdapter(&wellModel_);
128
129 // Scan through partitioning to get correct size for each.
130 std::vector<int> sizes(num_domains, 0);
131 for (const auto& p : partition_vector) {
132 ++sizes[p];
133 }
134
135 // Set up correctly sized vectors of entity seeds and of indices for each partition.
136 using EntitySeed = typename Grid::template Codim<0>::EntitySeed;
137 std::vector<std::vector<EntitySeed>> seeds(num_domains);
138 std::vector<std::vector<int>> partitions(num_domains);
139 for (int domain = 0; domain < num_domains; ++domain) {
140 seeds[domain].resize(sizes[domain]);
141 partitions[domain].resize(sizes[domain]);
142 }
143
144 // Iterate through grid once, setting the seeds of all partitions.
145 // Note: owned cells only!
146 const auto& grid = model_.simulator().vanguard().grid();
147
148 std::vector<int> count(num_domains, 0);
149 const auto& gridView = grid.leafGridView();
150 const auto beg = gridView.template begin<0, Dune::Interior_Partition>();
151 const auto end = gridView.template end<0, Dune::Interior_Partition>();
152 int cell = 0;
153 for (auto it = beg; it != end; ++it, ++cell) {
154 const int p = partition_vector[cell];
155 seeds[p][count[p]] = it->seed();
156 partitions[p][count[p]] = cell;
157 ++count[p];
158 }
159 assert(count == sizes);
160
161 // Create the domains.
162 for (int index = 0; index < num_domains; ++index) {
163 std::vector<bool> interior(partition_vector.size(), false);
164 for (int ix : partitions[index]) {
165 interior[ix] = true;
166 }
167
168 Dune::SubGridPart<Grid> view{grid, std::move(seeds[index])};
169
170 // Mark the last domain for skipping if it contains isolated cells
171 const bool skip = isolated_cells && (index == num_domains - 1);
172 this->domains_.emplace_back(index,
173 std::move(partitions[index]),
174 std::move(interior),
175 std::move(view),
176 skip);
177 }
178
179 // Set up container for the local system matrices.
180 domain_matrices_.resize(num_domains);
181
182 // Set up container for the local linear solvers.
183 for (int index = 0; index < num_domains; ++index) {
184 // TODO: The ISTLSolver constructor will make
185 // parallel structures appropriate for the full grid
186 // only. This must be addressed before going parallel.
187 const auto& eclState = model_.simulator().vanguard().eclState();
188 FlowLinearSolverParameters loc_param;
189 loc_param.is_nldd_local_solver_ = true;
190 loc_param.init(eclState.getSimulationConfig().useCPR());
191 // Override solver type with umfpack if small domain.
192 if (domains_[index].cells.size() < 200) {
193 loc_param.linsolver_ = "umfpack";
194 }
195 loc_param.linear_solver_print_json_definition_ = false;
196 const bool force_serial = true;
197 domain_linsolvers_.emplace_back(model_.simulator(), loc_param, force_serial);
198 domain_linsolvers_.back().setDomainIndex(index);
199 }
200
201 assert(int(domains_.size()) == num_domains);
202
203 domain_reports_accumulated_.resize(num_domains);
204
205 // Print domain distribution summary
206
207 printDomainDistributionSummary(partition_vector,
208 domains_,
209 local_reports_accumulated_,
210 domain_reports_accumulated_,
211 grid,
212 wellModel_.numLocalWellsEnd());
213 }
214
215 //! \brief Called before starting a time step.
216 void prepareStep()
217 {
218 // Setup domain->well mapping.
219 wellModel_.setupDomains(domains_);
220 }
221
222 //! \brief Do one non-linear NLDD iteration.
223 template <class NonlinearSolverType>
224 SimulatorReportSingle nonlinearIterationNldd(const int iteration,
225 const SimulatorTimerInterface& timer,
226 NonlinearSolverType& nonlinear_solver)
227 {
228 // ----------- Set up reports and timer -----------
229 SimulatorReportSingle report;
230
231 model_.initialLinearization(report, iteration, nonlinear_solver.minIter(), nonlinear_solver.maxIter(), timer);
232
233 if (report.converged) {
234 return report;
235 }
236
237 // ----------- If not converged, do an NLDD iteration -----------
238 Dune::Timer localSolveTimer;
239 Dune::Timer detailTimer;
240 localSolveTimer.start();
241 detailTimer.start();
242 auto& solution = model_.simulator().model().solution(0);
243 auto initial_solution = solution;
244 auto locally_solved = initial_solution;
245
246 // ----------- Decide on an ordering for the domains -----------
247 const auto domain_order = this->getSubdomainOrder();
248 local_reports_accumulated_.success.pre_post_time += detailTimer.stop();
249
250 // ----------- Solve each domain separately -----------
251 DeferredLogger logger;
252 std::vector<SimulatorReportSingle> domain_reports(domains_.size());
253 for (const int domain_index : domain_order) {
254 const auto& domain = domains_[domain_index];
255 SimulatorReportSingle local_report;
256 detailTimer.reset();
257 detailTimer.start();
258 if (domain.skip) {
259 local_report.converged = true;
260 domain_reports[domain.index] = local_report;
261 continue;
262 }
263 try {
264 switch (model_.param().local_solve_approach_) {
265 case DomainSolveApproach::Jacobi:
266 solveDomainJacobi(solution, locally_solved, local_report, logger,
267 iteration, timer, domain);
268 break;
269 default:
270 case DomainSolveApproach::GaussSeidel:
271 solveDomainGaussSeidel(solution, locally_solved, local_report, logger,
272 iteration, timer, domain);
273 break;
274 }
275 }
276 catch (...) {
277 // Something went wrong during local solves.
278 local_report.converged = false;
279 }
280 // This should have updated the global matrix to be
281 // dR_i/du_j evaluated at new local solutions for
282 // i == j, at old solution for i != j.
283 if (!local_report.converged) {
284 // TODO: more proper treatment, including in parallel.
285 logger.debug(fmt::format("Convergence failure in domain {} on rank {}.", domain.index, rank_));
286 }
287 local_report.solver_time += detailTimer.stop();
288 domain_reports[domain.index] = local_report;
289 }
290 detailTimer.reset();
291 detailTimer.start();
292 // Communicate and log all messages.
293 auto global_logger = gatherDeferredLogger(logger, model_.simulator().vanguard().grid().comm());
294 global_logger.logMessages();
295
296 // Accumulate local solve data.
297 // Putting the counts in a single array to avoid multiple
298 // comm.sum() calls. Keeping the named vars for readability.
299 std::array<int, 4> counts{ 0, 0, 0, static_cast<int>(domain_reports.size()) };
300 int& num_converged = counts[0];
301 int& num_converged_already = counts[1];
302 int& num_local_newtons = counts[2];
303 int& num_domains = counts[3];
304 {
305 auto step_newtons = 0;
306 const auto dr_size = domain_reports.size();
307 for (auto i = 0*dr_size; i < dr_size; ++i) {
308 const auto& dr = domain_reports[i];
309 if (dr.converged) {
310 ++num_converged;
311 if (dr.total_newton_iterations == 0) {
312 ++num_converged_already;
313 }
314 }
315 step_newtons += dr.total_newton_iterations;
316 // Accumulate local reports per domain
317 domain_reports_accumulated_[i] += dr;
318 // Accumulate local reports per rank
319 local_reports_accumulated_ += dr;
320 }
321 num_local_newtons = step_newtons;
322 }
323
324 if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
325 solution = locally_solved;
326 model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0);
327 }
328
329#if HAVE_MPI
330 // Communicate solutions:
331 // With multiple processes, this process' overlap (i.e. not
332 // owned) cells' solution values have been modified by local
333 // solves in the owning processes, and remain unchanged
334 // here. We must therefore receive the updated solution on the
335 // overlap cells and update their intensive quantities before
336 // we move on.
337 const auto& comm = model_.simulator().vanguard().grid().comm();
338 if (comm.size() > 1) {
339 const auto* ccomm = model_.simulator().model().newtonMethod().linearSolver().comm();
340
341 // Copy numerical values from primary vars.
342 ccomm->copyOwnerToAll(solution, solution);
343
344 // Copy flags from primary vars.
345 const std::size_t num = solution.size();
346 Dune::BlockVector<std::size_t> allmeanings(num);
347 for (std::size_t ii = 0; ii < num; ++ii) {
348 allmeanings[ii] = PVUtil::pack(solution[ii]);
349 }
350 ccomm->copyOwnerToAll(allmeanings, allmeanings);
351 for (std::size_t ii = 0; ii < num; ++ii) {
352 PVUtil::unPack(solution[ii], allmeanings[ii]);
353 }
354
355 // Update intensive quantities for our overlap values.
356 model_.simulator().model().invalidateAndUpdateIntensiveQuantitiesOverlap(/*timeIdx=*/0);
357
358 // Make total counts of domains converged.
359 comm.sum(counts.data(), counts.size());
360 }
361#endif // HAVE_MPI
362
363 const bool is_iorank = this->rank_ == 0;
364 if (is_iorank) {
365 OpmLog::debug(fmt::format("Local solves finished. Converged for {}/{} domains. {} domains did no work. {} total local Newton iterations.\n",
366 num_converged, num_domains, num_converged_already, num_local_newtons));
367 }
368 const double total_local_solve_time = localSolveTimer.stop();
369 report.local_solve_time += total_local_solve_time;
370 local_reports_accumulated_.success.total_time += total_local_solve_time;
371 local_reports_accumulated_.success.pre_post_time += detailTimer.stop();
372
373 // Finish with a Newton step.
374 // Note that the "iteration + 100" is a simple way to avoid entering
375 // "if (iteration == 0)" and similar blocks, and also makes it a little
376 // easier to spot the iteration residuals in the DBG file. A more sophisticated
377 // approach can be done later.
378 auto rep = model_.nonlinearIterationNewton(iteration + 100, timer, nonlinear_solver);
379 report += rep;
380 if (rep.converged) {
381 report.converged = true;
382 }
383 return report;
384 }
385
386 //! \brief Return the statistics of local solves accumulated for this rank.
387 const SimulatorReport& localAccumulatedReports() const
388 {
389 return local_reports_accumulated_;
390 }
391
392 //! \brief Return the statistics of local solves accumulated for each domain on this rank.
393 const std::vector<SimulatorReport>& domainAccumulatedReports() const
394 {
395 // Update the number of wells for each domain that has been added to the well model at this point.
396 // This is a mutable operation that updates the well counts in the domain reports.
397 const auto dr_size = domain_reports_accumulated_.size();
398 // Reset well counts before updating
399 for (auto i = 0*dr_size; i < dr_size; ++i) {
400 domain_reports_accumulated_[i].success.num_wells = 0;
401 }
402 // Update the number of wells for each domain
403 for (const auto& [wname, domain] : wellModel_.well_domain()) {
404 domain_reports_accumulated_[domain].success.num_wells++;
405 }
406 return domain_reports_accumulated_;
407 }
408
409 //! \brief Write the partition vector to a file in ResInsight compatible format
410 //! for inspection, and a partition file for each rank.
411 void writePartitions(const std::filesystem::path& odir) const
412 {
413 const auto& grid = this->model_.simulator().vanguard().grid();
414 const auto& elementMapper = this->model_.simulator().model().elementMapper();
415 const auto& cartMapper = this->model_.simulator().vanguard().cartesianIndexMapper();
416
417 ::Opm::writePartitions(
418 odir,
419 domains_,
420 grid,
421 elementMapper,
422 cartMapper);
423 }
424
425 //! \brief Write the number of nonlinear iterations per cell to a file in ResInsight compatible format.
426 void writeNonlinearIterationsPerCell(const std::filesystem::path& odir) const
427 {
428 const auto& grid = this->model_.simulator().vanguard().grid();
429 const auto& elementMapper = this->model_.simulator().model().elementMapper();
430 const auto& cartMapper = this->model_.simulator().vanguard().cartesianIndexMapper();
431
432 ::Opm::writeNonlinearIterationsPerCell(
433 odir,
434 domains_,
435 domain_reports_accumulated_,
436 grid,
437 elementMapper,
438 cartMapper);
439 }
440
441private:
442 //! \brief Solve the nonlinear equations on a single subdomain.
443 ConvergenceReport
444 solveDomain(const Domain& domain,
445 const SimulatorTimerInterface& timer,
446 SimulatorReportSingle& local_report,
447 DeferredLogger& logger,
448 [[maybe_unused]] const int global_iteration,
449 const bool initial_assembly_required)
450 {
451 auto& modelSimulator = model_.simulator();
452 Dune::Timer detailTimer;
453
454 modelSimulator.model().newtonMethod().setIterationIndex(0);
455
456 // When called, if assembly has already been performed
457 // with the initial values, we only need to check
458 // for local convergence. Otherwise, we must do a local
459 // assembly.
460 int iter = 0;
461 if (initial_assembly_required) {
462 detailTimer.start();
463 modelSimulator.model().newtonMethod().setIterationIndex(iter);
464 // TODO: we should have a beginIterationLocal() function;
465 // only handling the well model for now.
466 wellModel_.assemble(modelSimulator.model().newtonMethod().numIterations(),
467 modelSimulator.timeStepSize(),
468 domain);
469 const double tt0 = detailTimer.stop();
470 local_report.assemble_time += tt0;
471 local_report.assemble_time_well += tt0;
472 detailTimer.reset();
473 detailTimer.start();
474 // Assemble reservoir locally.
475 this->assembleReservoirDomain(domain);
476 local_report.assemble_time += detailTimer.stop();
477 local_report.total_linearizations += 1;
478 }
479 detailTimer.reset();
480 detailTimer.start();
481 std::vector<Scalar> resnorms;
482 auto convreport = this->getDomainConvergence(domain, timer, 0, logger, resnorms);
483 local_report.update_time += detailTimer.stop();
484 if (convreport.converged()) {
485 // TODO: set more info, timing etc.
486 local_report.converged = true;
487 return convreport;
488 }
489
490 // We have already assembled for the first iteration,
491 // but not done the Schur complement for the wells yet.
492 detailTimer.reset();
493 detailTimer.start();
494 model_.wellModel().linearizeDomain(domain,
495 modelSimulator.model().linearizer().jacobian(),
496 modelSimulator.model().linearizer().residual());
497 const double tt1 = detailTimer.stop();
498 local_report.assemble_time += tt1;
499 local_report.assemble_time_well += tt1;
500
501 // Local Newton loop.
502 const int max_iter = model_.param().max_local_solve_iterations_;
503 const auto& grid = modelSimulator.vanguard().grid();
504 double damping_factor = 1.0;
505 std::vector<std::vector<Scalar>> convergence_history;
506 convergence_history.reserve(20);
507 convergence_history.push_back(resnorms);
508 do {
509 // Solve local linear system.
510 // Note that x has full size; we expect it to be nonzero only for in-domain cells.
511 const int nc = grid.size(0);
512 BVector x(nc);
513 detailTimer.reset();
514 detailTimer.start();
515 this->solveJacobianSystemDomain(domain, x);
516 model_.wellModel().postSolveDomain(x, domain);
517 if (damping_factor != 1.0) {
518 x *= damping_factor;
519 }
520 local_report.linear_solve_time += detailTimer.stop();
521 local_report.linear_solve_setup_time += model_.linearSolveSetupTime();
522 local_report.total_linear_iterations = model_.linearIterationsLastSolve();
523
524 // Update local solution. TODO: x is still full size; should we optimize it?
525 detailTimer.reset();
526 detailTimer.start();
527 this->updateDomainSolution(domain, x);
528 local_report.update_time += detailTimer.stop();
529
530 // Assemble well and reservoir.
531 detailTimer.reset();
532 detailTimer.start();
533 ++iter;
534 modelSimulator.model().newtonMethod().setIterationIndex(iter);
535 // TODO: we should have a beginIterationLocal() function;
536 // only handling the well model for now.
537 // Assemble reservoir locally.
538 wellModel_.assemble(modelSimulator.model().newtonMethod().numIterations(),
539 modelSimulator.timeStepSize(),
540 domain);
541 const double tt3 = detailTimer.stop();
542 local_report.assemble_time += tt3;
543 local_report.assemble_time_well += tt3;
544 detailTimer.reset();
545 detailTimer.start();
546 this->assembleReservoirDomain(domain);
547 local_report.assemble_time += detailTimer.stop();
548
549 // Check for local convergence.
550 detailTimer.reset();
551 detailTimer.start();
552 resnorms.clear();
553 convreport = this->getDomainConvergence(domain, timer, iter, logger, resnorms);
554 convergence_history.push_back(resnorms);
555 local_report.update_time += detailTimer.stop();
556
557 // apply the Schur complement of the well model to the
558 // reservoir linearized equations
559 detailTimer.reset();
560 detailTimer.start();
561 model_.wellModel().linearizeDomain(domain,
562 modelSimulator.model().linearizer().jacobian(),
563 modelSimulator.model().linearizer().residual());
564 const double tt2 = detailTimer.stop();
565 local_report.assemble_time += tt2;
566 local_report.assemble_time_well += tt2;
567
568 // Check if we should dampen. Only do so if wells are converged.
569 if (!convreport.converged() && !convreport.wellFailed()) {
570 bool oscillate = false;
571 bool stagnate = false;
572 const int numPhases = convergence_history.front().size();
573 detail::detectOscillations(convergence_history, iter, numPhases,
574 Scalar{0.2}, 1, oscillate, stagnate);
575 if (oscillate) {
576 damping_factor *= 0.85;
577 logger.debug(fmt::format("| Damping factor is now {}", damping_factor));
578 }
579 }
580 } while (!convreport.converged() && iter <= max_iter);
581
582 modelSimulator.problem().endIteration();
583
584 local_report.converged = convreport.converged();
585 local_report.total_newton_iterations = iter;
586 local_report.total_linearizations += iter;
587 // TODO: set more info, timing etc.
588 return convreport;
589 }
590
591 //! \brief Assemble the mass balance (reservoir) equations for a single domain.
592 void assembleReservoirDomain(const Domain& domain)
593 {
594 OPM_TIMEBLOCK(assembleReservoirDomain);
595 // -------- Mass balance equations --------
596 model_.simulator().model().linearizer().linearizeDomain(domain);
597 }
598
599 //! \brief Solve the linearized system for a single domain.
600 void solveJacobianSystemDomain(const Domain& domain, BVector& global_x)
601 {
602 const auto& modelSimulator = model_.simulator();
603
604 Dune::Timer perfTimer;
605 perfTimer.start();
606
607 const Mat& main_matrix = modelSimulator.model().linearizer().jacobian().istlMatrix();
608 if (domain_matrices_[domain.index]) {
609 Details::copySubMatrix(main_matrix, domain.cells, *domain_matrices_[domain.index]);
610 } else {
611 domain_matrices_[domain.index] = std::make_unique<Mat>(Details::extractMatrix(main_matrix, domain.cells));
612 }
613 auto& jac = *domain_matrices_[domain.index];
614 auto res = Details::extractVector(modelSimulator.model().linearizer().residual(),
615 domain.cells);
616 auto x = res;
617
618 // set initial guess
619 global_x = 0.0;
620 x = 0.0;
621
622 auto& linsolver = domain_linsolvers_[domain.index];
623
624 linsolver.prepare(jac, res);
625 model_.linearSolveSetupTime() = perfTimer.stop();
626 linsolver.setResidual(res);
627 linsolver.solve(x);
628
629 Details::setGlobal(x, domain.cells, global_x);
630 }
631
632 //! \brief Apply a Newton update to the solution variables of a single domain.
633 void updateDomainSolution(const Domain& domain, const BVector& dx)
634 {
635 OPM_TIMEBLOCK(updateDomainSolution);
636 auto& simulator = model_.simulator();
637 auto& newtonMethod = simulator.model().newtonMethod();
638 SolutionVector& solution = simulator.model().solution(/*timeIdx=*/0);
639
640 newtonMethod.update_(/*nextSolution=*/solution,
641 /*curSolution=*/solution,
642 /*update=*/dx,
643 /*resid=*/dx,
644 domain.cells); // the update routines of the black
645 // oil model do not care about the
646 // residual
647
648 // if the solution is updated, the intensive quantities need to be recalculated
649 simulator.model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
650 }
651
652 //! \brief Get reservoir convergence quantities for the cells of a single domain.
653 std::pair<Scalar, Scalar> localDomainConvergenceData(const Domain& domain,
654 std::vector<Scalar>& R_sum,
655 std::vector<Scalar>& maxCoeff,
656 std::vector<Scalar>& B_avg,
657 std::vector<int>& maxCoeffCell)
658 {
659 const auto& modelSimulator = model_.simulator();
660
661 Scalar pvSumLocal = 0.0;
662 Scalar numAquiferPvSumLocal = 0.0;
663 const auto& model = modelSimulator.model();
664 const auto& problem = modelSimulator.problem();
665
666 const auto& modelResid = modelSimulator.model().linearizer().residual();
667
668 ElementContext elemCtx(modelSimulator);
669 const auto& gridView = domain.view;
670 const auto& elemEndIt = gridView.template end</*codim=*/0>();
671 IsNumericalAquiferCell isNumericalAquiferCell(gridView.grid());
672
673 for (auto elemIt = gridView.template begin</*codim=*/0>();
674 elemIt != elemEndIt;
675 ++elemIt)
676 {
677 if (elemIt->partitionType() != Dune::InteriorEntity) {
678 continue;
679 }
680 const auto& elem = *elemIt;
681 elemCtx.updatePrimaryStencil(elem);
682 elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
683
684 const unsigned cell_idx = elemCtx.globalSpaceIndex(/*spaceIdx=*/0, /*timeIdx=*/0);
685 const auto& intQuants = elemCtx.intensiveQuantities(/*spaceIdx=*/0, /*timeIdx=*/0);
686 const auto& fs = intQuants.fluidState();
687
688 const auto pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
689 model.dofTotalVolume(cell_idx);
690 pvSumLocal += pvValue;
691
692 if (isNumericalAquiferCell(elem))
693 {
694 numAquiferPvSumLocal += pvValue;
695 }
696
697 model_.getMaxCoeff(cell_idx, intQuants, fs, modelResid, pvValue,
698 B_avg, R_sum, maxCoeff, maxCoeffCell);
699 }
700
701 // Compute the local average in terms of the number of cells in the domain.
702 const int bSize = B_avg.size();
703 for (int i = 0; i < bSize; ++i)
704 {
705 B_avg[i] /= Scalar(domain.cells.size());
706 }
707
708 return {pvSumLocal, numAquiferPvSumLocal};
709 }
710
711 ConvergenceReport getDomainReservoirConvergence(const double reportTime,
712 const double dt,
713 const int iteration,
714 const Domain& domain,
715 DeferredLogger& logger,
716 std::vector<Scalar>& B_avg,
717 std::vector<Scalar>& residual_norms)
718 {
719 using Vector = std::vector<Scalar>;
720
721 const int numComp = numEq;
722 Vector R_sum(numComp, 0.0 );
723 Vector maxCoeff(numComp, std::numeric_limits<Scalar>::lowest() );
724 std::vector<int> maxCoeffCell(numComp, -1);
725 const auto [ pvSum, numAquiferPvSum]
726 = this->localDomainConvergenceData(domain, R_sum, maxCoeff, B_avg, maxCoeffCell);
727
728 auto cnvErrorPvFraction = computeCnvErrorPvLocal(domain, B_avg, dt);
729 cnvErrorPvFraction /= (pvSum - numAquiferPvSum);
730
731 // Default value of relaxed_max_pv_fraction_ is 0.03, and of min_strict_cnv_iter_ is 0.
732 // For each iteration, we need to determine whether to use the relaxed CNV tolerance.
733 // To disable the relaxed CNV tolerance altogether, set relaxed_max_pv_fraction_ to 0.
734 const bool use_relaxed_cnv = cnvErrorPvFraction < model_.param().relaxed_max_pv_fraction_ &&
735 iteration >= model_.param().min_strict_cnv_iter_;
736 // Tighter bound for local convergence should increase the
737 // likelihood of: local convergence => global convergence
738 const Scalar tol_cnv = model_.param().local_tolerance_scaling_cnv_
739 * (use_relaxed_cnv ? model_.param().tolerance_cnv_relaxed_
740 : model_.param().tolerance_cnv_);
741
742 const bool use_relaxed_mb = iteration >= model_.param().min_strict_mb_iter_;
743 const Scalar tol_mb = model_.param().local_tolerance_scaling_mb_
744 * (use_relaxed_mb ? model_.param().tolerance_mb_relaxed_ : model_.param().tolerance_mb_);
745
746 // Finish computation
747 std::vector<Scalar> CNV(numComp);
748 std::vector<Scalar> mass_balance_residual(numComp);
749 for (int compIdx = 0; compIdx < numComp; ++compIdx)
750 {
751 mass_balance_residual[compIdx] = std::abs(B_avg[compIdx] * R_sum[compIdx]) * dt / pvSum;
752 CNV[compIdx] = B_avg[compIdx] * dt * maxCoeff[compIdx];
753 residual_norms.push_back(CNV[compIdx]);
754 }
755
756 // Create convergence report.
757 ConvergenceReport report{reportTime};
758 using CR = ConvergenceReport;
759 for (int compIdx = 0; compIdx < numComp; ++compIdx) {
760 Scalar res[2] = { mass_balance_residual[compIdx], CNV[compIdx] };
761 CR::ReservoirFailure::Type types[2] = { CR::ReservoirFailure::Type::MassBalance,
762 CR::ReservoirFailure::Type::Cnv };
763 Scalar tol[2] = { tol_mb, tol_cnv };
764 for (int ii : {0, 1}) {
765 if (std::isnan(res[ii])) {
766 report.setReservoirFailed({types[ii], CR::Severity::NotANumber, compIdx});
767 logger.debug("NaN residual for " + model_.compNames().name(compIdx) + " equation.");
768 } else if (res[ii] > model_.param().max_residual_allowed_) {
769 report.setReservoirFailed({types[ii], CR::Severity::TooLarge, compIdx});
770 logger.debug("Too large residual for " + model_.compNames().name(compIdx) + " equation.");
771 } else if (res[ii] < 0.0) {
772 report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
773 logger.debug("Negative residual for " + model_.compNames().name(compIdx) + " equation.");
774 } else if (res[ii] > tol[ii]) {
775 report.setReservoirFailed({types[ii], CR::Severity::Normal, compIdx});
776 }
777
778 report.setReservoirConvergenceMetric(types[ii], compIdx, res[ii], tol[ii]);
779 }
780 }
781
782 // Output of residuals. If converged at initial state, log nothing.
783 const bool converged_at_initial_state = (report.converged() && iteration == 0);
784 if (!converged_at_initial_state) {
785 if (iteration == 0) {
786 // Log header.
787 std::string msg = fmt::format("Domain {} on rank {}, size {}, containing cell {}\n| Iter",
788 domain.index, this->rank_, domain.cells.size(), domain.cells[0]);
789 for (int compIdx = 0; compIdx < numComp; ++compIdx) {
790 msg += " MB(";
791 msg += model_.compNames().name(compIdx)[0];
792 msg += ") ";
793 }
794 for (int compIdx = 0; compIdx < numComp; ++compIdx) {
795 msg += " CNV(";
796 msg += model_.compNames().name(compIdx)[0];
797 msg += ") ";
798 }
799 logger.debug(msg);
800 }
801 // Log convergence data.
802 std::ostringstream ss;
803 ss << "| ";
804 const std::streamsize oprec = ss.precision(3);
805 const std::ios::fmtflags oflags = ss.setf(std::ios::scientific);
806 ss << std::setw(4) << iteration;
807 for (int compIdx = 0; compIdx < numComp; ++compIdx) {
808 ss << std::setw(11) << mass_balance_residual[compIdx];
809 }
810 for (int compIdx = 0; compIdx < numComp; ++compIdx) {
811 ss << std::setw(11) << CNV[compIdx];
812 }
813 ss.precision(oprec);
814 ss.flags(oflags);
815 logger.debug(ss.str());
816 }
817
818 return report;
819 }
820
821 ConvergenceReport getDomainConvergence(const Domain& domain,
822 const SimulatorTimerInterface& timer,
823 const int iteration,
824 DeferredLogger& logger,
825 std::vector<Scalar>& residual_norms)
826 {
827 OPM_TIMEBLOCK(getDomainConvergence);
828 std::vector<Scalar> B_avg(numEq, 0.0);
829 auto report = this->getDomainReservoirConvergence(timer.simulationTimeElapsed(),
830 timer.currentStepLength(),
831 iteration,
832 domain,
833 logger,
834 B_avg,
835 residual_norms);
836 report += wellModel_.getWellConvergence(domain, B_avg, logger);
837 return report;
838 }
839
840 //! \brief Decide the order in which to process the subdomains.
841 std::vector<int> getSubdomainOrder()
842 {
843 const auto& modelSimulator = model_.simulator();
844 const auto& solution = modelSimulator.model().solution(0);
845
846 std::vector<int> domain_order(domains_.size());
847 std::iota(domain_order.begin(), domain_order.end(), 0);
848
849 if (model_.param().local_solve_approach_ == DomainSolveApproach::Jacobi) {
850 // Do nothing, 0..n-1 order is fine.
851 return domain_order;
852 } else if (model_.param().local_solve_approach_ == DomainSolveApproach::GaussSeidel) {
853 // Calculate the measure used to order the domains.
854 std::vector<Scalar> measure_per_domain(domains_.size());
855 switch (model_.param().local_domains_ordering_) {
856 case DomainOrderingMeasure::AveragePressure: {
857 // Use average pressures to order domains.
858 for (const auto& domain : domains_) {
859 Scalar press_sum = 0.0;
860 for (const int c : domain.cells) {
861 press_sum += solution[c][Indices::pressureSwitchIdx];
862 }
863 const Scalar avgpress = press_sum / domain.cells.size();
864 measure_per_domain[domain.index] = avgpress;
865 }
866 break;
867 }
868 case DomainOrderingMeasure::MaxPressure: {
869 // Use max pressures to order domains.
870 for (const auto& domain : domains_) {
871 Scalar maxpress = 0.0;
872 for (const int c : domain.cells) {
873 maxpress = std::max(maxpress, solution[c][Indices::pressureSwitchIdx]);
874 }
875 measure_per_domain[domain.index] = maxpress;
876 }
877 break;
878 }
879 case DomainOrderingMeasure::Residual: {
880 // Use maximum residual to order domains.
881 const auto& residual = modelSimulator.model().linearizer().residual();
882 const int num_vars = residual[0].size();
883 for (const auto& domain : domains_) {
884 Scalar maxres = 0.0;
885 for (const int c : domain.cells) {
886 for (int ii = 0; ii < num_vars; ++ii) {
887 maxres = std::max(maxres, std::fabs(residual[c][ii]));
888 }
889 }
890 measure_per_domain[domain.index] = maxres;
891 }
892 break;
893 }
894 } // end of switch (model_.param().local_domains_ordering_)
895
896 // Sort by largest measure, keeping index order if equal.
897 const auto& m = measure_per_domain;
898 std::stable_sort(domain_order.begin(), domain_order.end(),
899 [&m](const int i1, const int i2){ return m[i1] > m[i2]; });
900 return domain_order;
901 } else {
902 throw std::logic_error("Domain solve approach must be Jacobi or Gauss-Seidel");
903 }
904 }
905
906 template<class GlobalEqVector>
907 void solveDomainJacobi(GlobalEqVector& solution,
908 GlobalEqVector& locally_solved,
909 SimulatorReportSingle& local_report,
910 DeferredLogger& logger,
911 const int iteration,
912 const SimulatorTimerInterface& timer,
913 const Domain& domain)
914 {
915 auto initial_local_well_primary_vars = wellModel_.getPrimaryVarsDomain(domain.index);
916 auto initial_local_solution = Details::extractVector(solution, domain.cells);
917 auto convrep = solveDomain(domain, timer, local_report, logger, iteration, false);
918 if (local_report.converged) {
919 auto local_solution = Details::extractVector(solution, domain.cells);
920 Details::setGlobal(local_solution, domain.cells, locally_solved);
921 Details::setGlobal(initial_local_solution, domain.cells, solution);
922 model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
923 } else {
924 wellModel_.setPrimaryVarsDomain(domain.index, initial_local_well_primary_vars);
925 Details::setGlobal(initial_local_solution, domain.cells, solution);
926 model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
927 }
928 }
929
930 template<class GlobalEqVector>
931 void solveDomainGaussSeidel(GlobalEqVector& solution,
932 GlobalEqVector& locally_solved,
933 SimulatorReportSingle& local_report,
934 DeferredLogger& logger,
935 const int iteration,
936 const SimulatorTimerInterface& timer,
937 const Domain& domain)
938 {
939 auto initial_local_well_primary_vars = wellModel_.getPrimaryVarsDomain(domain.index);
940 auto initial_local_solution = Details::extractVector(solution, domain.cells);
941 auto convrep = solveDomain(domain, timer, local_report, logger, iteration, true);
942 if (!local_report.converged) {
943 // We look at the detailed convergence report to evaluate
944 // if we should accept the unconverged solution.
945 // We do not accept a solution if the wells are unconverged.
946 if (!convrep.wellFailed()) {
947 // Calculate the sums of the mb and cnv failures.
948 Scalar mb_sum = 0.0;
949 Scalar cnv_sum = 0.0;
950 for (const auto& rc : convrep.reservoirConvergence()) {
951 if (rc.type() == ConvergenceReport::ReservoirFailure::Type::MassBalance) {
952 mb_sum += rc.value();
953 } else if (rc.type() == ConvergenceReport::ReservoirFailure::Type::Cnv) {
954 cnv_sum += rc.value();
955 }
956 }
957 // If not too high, we overrule the convergence failure.
958 const Scalar acceptable_local_mb_sum = 1e-3;
959 const Scalar acceptable_local_cnv_sum = 1.0;
960 if (mb_sum < acceptable_local_mb_sum && cnv_sum < acceptable_local_cnv_sum) {
961 local_report.converged = true;
962 local_report.accepted_unconverged_domains += 1;
963 logger.debug(fmt::format("Accepting solution in unconverged domain {} on rank {}.", domain.index, rank_));
964 logger.debug(fmt::format("Value of mb_sum: {} cnv_sum: {}", mb_sum, cnv_sum));
965 } else {
966 logger.debug("Unconverged local solution.");
967 }
968 } else {
969 logger.debug("Unconverged local solution with well convergence failures:");
970 for (const auto& wf : convrep.wellFailures()) {
971 logger.debug(to_string(wf));
972 }
973 }
974 }
975 if (local_report.converged) {
976 local_report.converged_domains += 1;
977 auto local_solution = Details::extractVector(solution, domain.cells);
978 Details::setGlobal(local_solution, domain.cells, locally_solved);
979 } else {
980 local_report.unconverged_domains += 1;
981 wellModel_.setPrimaryVarsDomain(domain.index, initial_local_well_primary_vars);
982 Details::setGlobal(initial_local_solution, domain.cells, solution);
983 model_.simulator().model().invalidateAndUpdateIntensiveQuantities(/*timeIdx=*/0, domain);
984 }
985 }
986
987 Scalar computeCnvErrorPvLocal(const Domain& domain,
988 const std::vector<Scalar>& B_avg, double dt) const
989 {
990 Scalar errorPV{};
991 const auto& simulator = model_.simulator();
992 const auto& model = simulator.model();
993 const auto& problem = simulator.problem();
994 const auto& residual = simulator.model().linearizer().residual();
995
996 for (const int cell_idx : domain.cells) {
997 const Scalar pvValue = problem.referencePorosity(cell_idx, /*timeIdx=*/0) *
998 model.dofTotalVolume(cell_idx);
999 const auto& cellResidual = residual[cell_idx];
1000 bool cnvViolated = false;
1001
1002 for (unsigned eqIdx = 0; eqIdx < cellResidual.size(); ++eqIdx) {
1003 using std::fabs;
1004 Scalar CNV = cellResidual[eqIdx] * dt * B_avg[eqIdx] / pvValue;
1005 cnvViolated = cnvViolated || (fabs(CNV) > model_.param().tolerance_cnv_);
1006 }
1007
1008 if (cnvViolated) {
1009 errorPV += pvValue;
1010 }
1011 }
1012 return errorPV;
1013 }
1014
1015 decltype(auto) partitionCells() const
1016 {
1017 const auto& grid = this->model_.simulator().vanguard().grid();
1018
1019 using GridView = std::remove_cv_t<std::remove_reference_t<
1020 decltype(grid.leafGridView())>>;
1021
1022 using Element = std::remove_cv_t<std::remove_reference_t<
1023 typename GridView::template Codim<0>::Entity>>;
1024
1025 const auto& param = this->model_.param();
1026
1027 auto zoltan_ctrl = ZoltanPartitioningControl<Element>{};
1028
1029 zoltan_ctrl.domain_imbalance = param.local_domains_partition_imbalance_;
1030
1031 zoltan_ctrl.index =
1032 [elementMapper = &this->model_.simulator().model().elementMapper()]
1033 (const Element& element)
1034 {
1035 return elementMapper->index(element);
1036 };
1037
1038 zoltan_ctrl.local_to_global =
1039 [cartMapper = &this->model_.simulator().vanguard().cartesianIndexMapper()]
1040 (const int elemIdx)
1041 {
1042 return cartMapper->cartesianIndex(elemIdx);
1043 };
1044
1045 // Forming the list of wells is expensive, so do this only if needed.
1046 const auto need_wells = param.local_domains_partition_method_ == "zoltan";
1047
1048 const auto wells = need_wells
1049 ? this->model_.simulator().vanguard().schedule().getWellsatEnd()
1050 : std::vector<Well>{};
1051
1052 const auto possibleFutureConnections = need_wells
1053 ? this->model_.simulator().vanguard().schedule().getPossibleFutureConnections()
1054 : std::unordered_map<std::string, std::set<int>> {};
1055
1056 // If the number-of-domains parameter is defaulted, choose a reasonable default.
1057 constexpr int default_cells_per_domain = 1000;
1058 const int num_domains = (param.num_local_domains_ > 0)
1059 ? param.num_local_domains_
1060 : detail::countGlobalCells(grid) / default_cells_per_domain;
1061 return ::Opm::partitionCells(param.local_domains_partition_method_,
1062 num_domains, grid.leafGridView(), wells,
1063 possibleFutureConnections, zoltan_ctrl,
1064 param.local_domains_partition_well_neighbor_levels_);
1065 }
1066
1067 BlackoilModel<TypeTag>& model_;
1068 BlackoilWellModelNldd<TypeTag> wellModel_;
1069 std::vector<Domain> domains_;
1070 std::vector<std::unique_ptr<Mat>> domain_matrices_;
1071 std::vector<ISTLSolverType> domain_linsolvers_;
1072 SimulatorReport local_reports_accumulated_;
1073 // mutable because we need to update the number of wells for each domain in domainAccumulatedReports()
1074 mutable std::vector<SimulatorReport> domain_reports_accumulated_;
1075 int rank_ = 0;
1076};
1077
1078} // namespace Opm
1079
1080#endif // OPM_BLACKOILMODEL_NLDD_HEADER_INCLUDED
Member documentation (line numbers refer to the listing above):

BlackoilModelNldd — A NLDD implementation for three-phase black oil.
Definition BlackoilModelNldd.hpp:79

BlackoilModelNldd(BlackoilModel<TypeTag>& model)
The constructor sets up the subdomains.
Definition BlackoilModelNldd.hpp:100

void prepareStep()
Called before starting a time step.
Definition BlackoilModelNldd.hpp:216

SimulatorReportSingle nonlinearIterationNldd(const int iteration, const SimulatorTimerInterface& timer, NonlinearSolverType& nonlinear_solver)
Do one non-linear NLDD iteration.
Definition BlackoilModelNldd.hpp:224

const SimulatorReport& localAccumulatedReports() const
Return the statistics of local solves accumulated for this rank.
Definition BlackoilModelNldd.hpp:387

const std::vector<SimulatorReport>& domainAccumulatedReports() const
Return the statistics of local solves accumulated for each domain on this rank.
Definition BlackoilModelNldd.hpp:393

void writePartitions(const std::filesystem::path& odir) const
Write the partition vector to a file in ResInsight compatible format for inspection, and a partition file for each rank.
Definition BlackoilModelNldd.hpp:411

void writeNonlinearIterationsPerCell(const std::filesystem::path& odir) const
Write the number of nonlinear iterations per cell to a file in ResInsight compatible format.
Definition BlackoilModelNldd.hpp:426
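For orientation, here is a minimal usage sketch of this interface. It is illustrative only: MyTypeTag, model, timer, nonlinear_solver and output_dir are placeholders assumed to be provided by the surrounding simulator code; they are not defined in this header.

// Hypothetical driver fragment; assumes a fully initialized
// BlackoilModel<MyTypeTag> and a nonlinear solver as used above.
BlackoilModelNldd<MyTypeTag> nldd(model);

// Once per time step, before the nonlinear iterations,
// set up the domain->well mapping:
nldd.prepareStep();

// Inside the nonlinear solver loop, each iteration does the
// local domain solves followed by a global Newton step:
SimulatorReportSingle report;
int iteration = 0;
do {
    report = nldd.nonlinearIterationNldd(iteration, timer, nonlinear_solver);
    ++iteration;
} while (!report.converged && iteration < nonlinear_solver.maxIter());

// Afterwards, rank-local statistics and diagnostics are available:
const SimulatorReport& stats = nldd.localAccumulatedReports();
nldd.writePartitions(output_dir);

Because each call to nonlinearIterationNldd() finishes with a global Newton step after the local solves, a converged report implies convergence of the full system, not just of the individual subdomains.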