darcy_flow_mh.cc
1 /*!
2  *
3  * Copyright (C) 2007 Technical University of Liberec. All rights reserved.
4  *
5  * Please make the following reference to Flow123d on your project site if you use the program for any purpose,
6  * especially for academic research:
7  * Flow123d, Research Centre: Advanced Remedial Technologies, Technical University of Liberec, Czech Republic
8  *
9  * This program is free software; you can redistribute it and/or modify it under the terms
10  * of the GNU General Public License version 3 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
13  * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14  * See the GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License along with this program; if not,
17  * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18  *
19  *
20  * $Id$
21  * $Revision$
22  * $LastChangedBy$
23  * $LastChangedDate$
24  *
25  *
26  * @file
27  * @ingroup flow
28  * @brief Setup and solve linear system of mixed-hybrid discretization of the linear
29  * porous media flow with possible preferential flow in fractures and channels.
30  *
31  */
32 
33 #include "petscmat.h"
34 #include "petscviewer.h"
35 #include "petscerror.h"
36 #include <armadillo>
37 #include <boost/foreach.hpp>
38 
39 
40 #include "system/system.hh"
41 
42 #include "mesh/mesh.h"
43 #include "mesh/intersection.hh"
44 #include "mesh/partitioning.hh"
45 #include "la/distribution.hh"
46 #include "la/linsys.hh"
47 #include "la/linsys_PETSC.hh"
48 #include "la/linsys_BDDC.hh"
49 #include "la/schur.hh"
50 #include "la/sparse_graph.hh"
52 
53 #include "system/file_path.hh"
54 #include "flow/mh_fe_values.hh"
55 #include "flow/darcy_flow_mh.hh"
56 
58 
59 #include "fem/mapping_p1.hh"
60 #include "fem/fe_p.hh"
61 #include "fem/fe_values.hh"
63 
64 #include <limits>
65 #include <set>
66 #include <vector>
67 #include <iostream>
68 #include <iterator>
69 #include <algorithm>
70 
71 #include "tools/time_governor.hh"
73 #include "fields/field.hh"
74 #include "fields/field_values.hh"
75 #include "system/sys_profiler.hh"
76 
78 
79 
80 
81 
82 namespace it = Input::Type;
83 
84 it::Selection DarcyFlowMH::EqData::mh_mortar_selection
85  = it::Selection("MH_MortarMethod")
86  .add_value(NoMortar, "None", "No mortar method is applied.")
87  .add_value(MortarP0, "P0", "Mortar space: P0 on elements of lower dimension.")
88  .add_value(MortarP1, "P1", "Mortar space: P1 on intersections, using non-conforming pressures.");
89 
90 
91 it::Selection DarcyFlowMH::EqData::bc_type_selection =
92  it::Selection("DarcyFlow_BC_Type")
93  .add_value(none, "none", "Homogeneous Neumann boundary condition. Zero flux")
94  .add_value(dirichlet, "dirichlet",
95  "Dirichlet boundary condition. "
96  "Specify the pressure head through the 'bc_pressure' field "
97  "or the piezometric head through the 'bc_piezo_head' field.")
98  .add_value(neumann, "neumann", "Neumann boundary condition. Prescribe water outflow by the 'bc_flux' field.")
99  .add_value(robin, "robin", "Robin boundary condition. Water outflow equal to $\\sigma (h - h^R)$. "
100  "Specify the transition coefficient by 'bc_sigma' and the reference pressure head or pieaozmetric head "
101  "through 'bc_pressure' and 'bc_piezo_head' respectively.");
102  //.add_value(total_flux, "total_flux");
103 
104 //new input type with FIELDS
105 it::AbstractRecord DarcyFlowMH::input_type =
106  it::AbstractRecord("DarcyFlowMH", "Mixed-Hybrid solver for saturated Darcy flow.")
107  .declare_key("n_schurs", it::Integer(0,2), it::Default("2"),
108  "Number of Schur complements to perform when solving MH sytem.")
110  "Linear solver for MH problem.")
112  "Parameters of output form MH module.")
113  .declare_key("mortar_method", mh_mortar_selection, it::Default("None"),
114  "Method for coupling Darcy flow between dimensions." )
116  "Settings for computing mass balance.");
117 /*
118  .declare_key("gravity", it::String(), it::Default("0 0 -1 0"),
119  "Four-component vector contains potential gradient (positions 0, 1 and 2) and potential constant term (position 3).");
120 */
121 
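// Illustrative input fragment for the keys declared above (a sketch only; the
// enclosing key, the solver TYPE and all values are hypothetical):
//
//   flow = {
//     TYPE = "Steady_MH",
//     n_schurs = 2,
//     mortar_method = "None",
//     solver = { TYPE = "Petsc" },
//     input_fields = [ ... ]
//   }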
122 it::Record DarcyFlowMH_Steady::input_type
123  = it::Record("Steady_MH", "Mixed-Hybrid solver for STEADY saturated Darcy flow.")
124  .derive_from(DarcyFlowMH::input_type)
125  .declare_key("input_fields", it::Array(
126  DarcyFlowMH_Steady::EqData().make_field_descriptor_type("DarcyFlowMH")
127  .declare_key("bc_piezo_head", FieldAlgorithmBase< 3, FieldValue<3>::Scalar >::get_input_type(), "Boundary condition for pressure as piezometric head." )
128  .declare_key("init_piezo_head", FieldAlgorithmBase< 3, FieldValue<3>::Scalar >::get_input_type(), "Initial condition for pressure as piezometric head." )
129  .declare_key(OldBcdInput::flow_old_bcd_file_key(), it::FileName::input(), "File with mesh dependent boundary conditions (obsolete).")
130  ), it::Default::obligatory(), "" );
131 
132 it::Record DarcyFlowMH_Unsteady::input_type
133  = it::Record("Unsteady_MH", "Mixed-Hybrid solver for unsteady saturated Darcy flow.")
134  .derive_from(DarcyFlowMH::input_type)
136  "Time governor setting for the unsteady Darcy flow model.")
137  .copy_keys(DarcyFlowMH_Steady::input_type);
138 
139 
140 it::Record DarcyFlowLMH_Unsteady::input_type
141  = it::Record("Unsteady_LMH", "Lumped Mixed-Hybrid solver for unsteady saturated Darcy flow.")
142  .derive_from(DarcyFlowMH::input_type)
144  "Time governor setting for the unsteady Darcy flow model.")
145  .copy_keys(DarcyFlowMH_Steady::input_type);
146 
147 
148 
149 
150 DarcyFlowMH::EqData::EqData()
151 {
152  ADD_FIELD(anisotropy, "Anisotropy of the conductivity tensor.", "1.0" );
154 
155  ADD_FIELD(cross_section, "Complement dimension parameter (cross section for 1D, thickness for 2D).", "1.0" );
156  cross_section.units( UnitSI().m(3).md() );
157 
158  ADD_FIELD(conductivity, "Isotropic conductivity scalar.", "1.0" );
159  conductivity.units( UnitSI().m().s(-1) );
160 
161  ADD_FIELD(sigma, "Transition coefficient between dimensions.", "1.0");
163 
164  ADD_FIELD(water_source_density, "Water source density.", "0.0");
165  water_source_density.units( UnitSI().s(-1) );
166 
167  ADD_FIELD(bc_type,"Boundary condition type, possible values:", "\"none\"" );
169  bc_type.add_factory( OldBcdInput::instance()->flow_type_factory );
171 
172  ADD_FIELD(bc_pressure,"Dirichlet BC condition value for pressure.");
174  bc_pressure.units( UnitSI().m() );
175 
176  ADD_FIELD(bc_flux,"Flux in Neumann or Robin boundary condition.");
178  bc_flux.add_factory( OldBcdInput::instance()->flow_flux_factory );
179  bc_flux.units( UnitSI().m(4).s(-1).md() );
180 
181  ADD_FIELD(bc_robin_sigma,"Conductivity coefficient in Robin boundary condition.");
183  bc_robin_sigma.add_factory( OldBcdInput::instance()->flow_sigma_factory );
184  bc_robin_sigma.units( UnitSI().m(3).s(-1).md() );
185 
186  //these are for unsteady
187  ADD_FIELD(init_pressure, "Initial condition as pressure", "0.0" );
188  init_pressure.units( UnitSI().m() );
189 
190  ADD_FIELD(storativity,"Storativity.", "1.0" );
191  storativity.units( UnitSI().m(-1) );
192 
193  time_term_fields = this->subset({"storativity"});
194  main_matrix_fields = this->subset({"anisotropy", "conductivity", "cross_section", "sigma", "bc_type", "bc_robin_sigma"});
195  rhs_fields = this->subset({"water_source_density", "bc_pressure", "bc_flux"});
196 
197 }
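// Illustrative items of the "input_fields" array built from the field descriptor
// above (a sketch only; region names and values are hypothetical):
//
//   input_fields = [
//     { region = "rock",      conductivity = 1e-6, cross_section = 1.0 },
//     { region = ".boundary", bc_type = "dirichlet", bc_pressure = 0.0 }
//   ]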
198 
199 
200 
201 
202 //=============================================================================
203 // CREATE AND FILL GLOBAL MH MATRIX OF THE WATER MODEL
204 // - do it in parallel:
205 // - initial distribution of elements, edges
206 //
207 /*! @brief CREATE AND FILL GLOBAL MH MATRIX OF THE WATER MODEL
208  *
209  * Parameters (Solver, NSchurs): number of performed Schur
210  * complements (0, 1, or 2) for the water flow MH system.
211  *
212  */
213 //=============================================================================
214 DarcyFlowMH_Steady::DarcyFlowMH_Steady(Mesh &mesh_in, const Input::Record in_rec, bool make_tg )
215 : DarcyFlowMH(mesh_in, in_rec)
216 
217 {
218  using namespace Input;
219  START_TIMER("Darcy constructor");
220 
221  this->eq_data_ = &data_;
222 
223  //connecting data fields with mesh
224  START_TIMER("data init");
225  data_.set_mesh(mesh_in);
226  data_.set_input_list( in_rec.val<Input::Array>("input_fields") );
227 
228 
229 
230 
231  END_TIMER("data init");
232 
233 
234  size = mesh_->n_elements() + mesh_->n_sides() + mesh_->n_edges();
235  n_schur_compls = in_rec.val<int>("n_schurs");
236  //data_.gravity_ = arma::vec4( in_rec.val<std::string>("gravity") );
237  data_.gravity_ = arma::vec4(" 0 0 -1 0");
238  data_.bc_pressure.add_factory( OldBcdInput::instance()->flow_pressure_factory );
239  data_.bc_pressure.add_factory(
240  std::make_shared<FieldAddPotential<3, FieldValue<3>::Scalar>::FieldFactory>
241  (data_.gravity_, "bc_piezo_head") );
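    // Note: with gravity_ = (0, 0, -1, 0) the factory above converts a prescribed
    // piezometric head H into the pressure head h = H + g.x = H - z.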
242 
243  solution = NULL;
244  schur0 = NULL;
245 
246 
247  mortar_method_= in_rec.val<MortarMethod>("mortar_method");
248  if (mortar_method_ != NoMortar) {
250  }
251 
252  mh_dh.reinit(mesh_);
253 
254  prepare_parallel(in_rec.val<AbstractRecord>("solver"));
255 
256  //side_ds->view( std::cout );
257  //el_ds->view( std::cout );
258  //edge_ds->view( std::cout );
259  //rows_ds->view( std::cout );
260 
261 
262  // initialization of balance object
263  Input::Iterator<Input::Record> it = in_rec.find<Input::Record>("balance");
264  if (it->val<bool>("balance_on"))
265  {
266  balance_ = boost::make_shared<Balance>("water", mesh_, el_ds, el_4_loc, *it);
267  //if (time_ != nullptr && time_->is_steady())
268  water_balance_idx_ = balance_->add_quantity("water_volume");
269  balance_->allocate(rows_ds->lsize(), 1);
270  }
271 
272 
273  if (make_tg) {
274  // steady time governor
275  time_ = new TimeGovernor();
278  data_.set_time(time_->step());
279 
281  output_object = new DarcyFlowMHOutput(this, in_rec.val<Input::Record>("output"));
282  }
283 
284 
285 
286 }
287 
288 
289 
290 //=============================================================================
291 // COMPOSE and SOLVE WATER MH System possibly through Schur complements
292 //=============================================================================
293 void DarcyFlowMH_Steady::update_solution() {
294  START_TIMER("Solving MH system");
295 
296 
297  if (time_->is_end()) return;
298 
299  if (! time_->is_steady()) time_->next_time();
300 
301 
302 
303  assembly_linear_system();
304  int convergedReason = schur0->solve();
305 
306  xprintf(MsgLog, "Linear solver ended with reason: %d \n", convergedReason );
307  ASSERT( convergedReason >= 0, "Linear solver failed to converge. Convergence reason %d \n", convergedReason );
308 
309  this -> postprocess();
310 
311  solution_changed_for_scatter = true;
312 
313  output_data();
314 
315  if (time_->is_steady()) time_->next_time();
316 }
317 
318 void DarcyFlowMH_Steady::postprocess()
319 {
320  START_TIMER("postprocess");
321  //ElementFullIter ele = ELEMENT_FULL_ITER(mesh_, NULL);
322 
323  // modify side fluxes in parallel
324  // for every local edge take the time term on the diagonal and add it to the corresponding flux
325  /*
326  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
327  ele = mesh_->element(el_4_loc[i_loc]);
328  FOR_ELEMENT_SIDES(ele,i) {
329  side_rows[i] = side_row_4_id[ mh_dh.side_dof( ele->side(i) ) ];
330  values[i] = -1.0 * ele->measure() *
331  data.cross_section.value(ele->centre(), ele->element_accessor()) *
332  data.water_source_density.value(ele->centre(), ele->element_accessor()) /
333  ele->n_sides();
334  }
335  VecSetValues(schur0->get_solution(), ele->n_sides(), side_rows, values, ADD_VALUES);
336  }
337  VecAssemblyBegin(schur0->get_solution());
338  VecAssemblyEnd(schur0->get_solution());
339  */
340 }
341 
342 
343 void DarcyFlowMH_Steady::output_data() {
344  time_->view("DARCY"); //time governor information output
345  this->output_object->output();
346 
347  if (balance_ != nullptr)
348  {
349  if (balance_->cumulative() && time_->tlevel() > 0)
350  {
351  balance_->calculate_cumulative_sources(water_balance_idx_, schur0->get_solution(), time_->dt());
352  balance_->calculate_cumulative_fluxes(water_balance_idx_, schur0->get_solution(), time_->dt());
353  }
354 
356  {
357  balance_->calculate_mass(water_balance_idx_, schur0->get_solution());
358  balance_->calculate_source(water_balance_idx_, schur0->get_solution());
359  balance_->calculate_flux(water_balance_idx_, schur0->get_solution());
360  balance_->output(time_->t());
361  }
362  }
363 }
364 
365 double DarcyFlowMH_Steady::solution_precision() const
366 {
367  return schur0->get_solution_precision();
368 }
369 
370 
371 void DarcyFlowMH_Steady::get_solution_vector(double * &vec, unsigned int &vec_size)
372 {
373  // TODO: make class for vectors (wrapper for PETSC or other) derived from LazyDependency
374  // and use its mechanism to manage dependency between vectors
375  if (solution_changed_for_scatter) {
376 
377  // scatter solution to all procs
378  VecScatterBegin(par_to_all, schur0->get_solution(), sol_vec, INSERT_VALUES, SCATTER_FORWARD);
379  VecScatterEnd( par_to_all, schur0->get_solution(), sol_vec, INSERT_VALUES, SCATTER_FORWARD);
380  solution_changed_for_scatter = false;
381  }
382 
383  vec = solution;
384  vec_size = this->size;
385  ASSERT(vec != NULL, "Requested solution is not allocated!\n");
386 }
387 
388 void DarcyFlowMH_Steady::get_parallel_solution_vector(Vec &vec)
389 {
390  vec=schur0->get_solution();
391  ASSERT(vec != NULL, "Requested solution is not allocated!\n");
392 }
393 
394 
395 
396 // ===========================================================================================
397 //
398 // MATRIX ASSEMBLY - we use abstract assembly routine, where LS Mat/Vec SetValues
399 // are in fact pointers to allocating or filling functions - this is governed by the LinSys routines
400 //
401 // =======================================================================================
402 
403 
404 // ******************************************
405 // ABSTRACT ASSEMBLY OF MH matrix
406 // TODO: the matrix should be assembled separately for each dimension (volume, fractures, fracture intersections);
407 // connections should be assembled by a loop over connections, and the connections should be parallelized
408 // according to the distribution of the lower-dimensional elements;
409 // to do this we first need to merge with the JK version, to know what happens in transport, and
410 // to rework the mesh and neighbouring structures
411 // *****************************************
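// Schematic block structure filled by the loops below (a sketch only): with side
// fluxes u, element pressures p and edge pressures (Lagrange multipliers) l,
//   [ A   B   C ] [ u ]   [ f ]
//   [ B'  D   E ] [ p ] = [ g ]
//   [ C'  E'  F ] [ l ]   [ h ]
// A is the element-local weighted mass matrix of fluxes, B couples sides with the
// element pressure, C couples sides with edge pressures, and D, E, F collect the
// element/edge entries coming from boundary conditions and dimension coupling.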
412 void DarcyFlowMH_Steady::assembly_steady_mh_matrix() {
413  LinSys *ls = schur0;
414  ElementFullIter ele = ELEMENT_FULL_ITER(mesh_, NULL);
415  MHFEValues fe_values;
416 
417  // We use FESideValues for calculating normal vectors.
418  // For initialization of FESideValues some auxiliary objects are needed.
419  MappingP1<1,3> map1;
420  MappingP1<2,3> map2;
421  MappingP1<3,3> map3;
422  QGauss<0> q0(1);
423  QGauss<1> q1(1);
424  QGauss<2> q2(1);
425  FE_P_disc<1,1,3> fe1;
426  FE_P_disc<0,2,3> fe2;
427  FE_P_disc<0,3,3> fe3;
428  FESideValues<1,3> fe_side_values1(map1, q0, fe1, update_normal_vectors);
429  FESideValues<2,3> fe_side_values2(map2, q1, fe2, update_normal_vectors);
430  FESideValues<3,3> fe_side_values3(map3, q2, fe3, update_normal_vectors);
431 
432  class Boundary *bcd;
433  class Neighbour *ngh;
434 
435  bool fill_matrix = schur0->is_preallocated();
436  int el_row, side_row, edge_row, loc_b = 0;
437  int tmp_rows[100];
438  int side_rows[4], edge_rows[4]; // rows for sides and edges of one element
439  double local_vb[4]; // 2x2 matrix
440 
441  // to make space for second schur complement, max. 10 neighbour edges of one el.
442  double zeros[1000];
443  for(int i=0; i<1000; i++) zeros[i]=0.0;
444 
445  double minus_ones[4] = { -1.0, -1.0, -1.0, -1.0 };
446  double loc_side_rhs[4];
447 
448  if (balance_ != nullptr)
449  balance_->start_flux_assembly(water_balance_idx_);
450 
451  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
452 
453  ele = mesh_->element(el_4_loc[i_loc]);
454  el_row = row_4_el[el_4_loc[i_loc]];
455  unsigned int nsides = ele->n_sides();
456  if (fill_matrix) fe_values.update(ele, data_.anisotropy, data_.cross_section, data_.conductivity);
457  double cross_section = data_.cross_section.value(ele->centre(), ele->element_accessor());
458 
459  for (unsigned int i = 0; i < nsides; i++) {
460  side_row = side_rows[i] = side_row_4_id[ mh_dh.side_dof( ele->side(i) ) ];
461  edge_row = edge_rows[i] = row_4_edge[ele->side(i)->edge_idx()];
462  bcd=ele->side(i)->cond();
463 
464  // gravity term on RHS
465  loc_side_rhs[i] = (ele->centre()[ 2 ] - ele->side(i)->centre()[ 2 ]);
466 
467  // set block C and C': side-edge, edge-side
468  double c_val = 1.0;
469 
470  if (bcd) {
471  ElementAccessor<3> b_ele = bcd->element_accessor();
472  EqData::BC_Type type = (EqData::BC_Type)data_.bc_type.value(b_ele.centre(), b_ele);
473  if ( type == EqData::none) {
474  // homogeneous neumann
475  } else if ( type == EqData::dirichlet ) {
476  c_val = 0.0;
477  double bc_pressure = data_.bc_pressure.value(b_ele.centre(), b_ele);
478  loc_side_rhs[i] -= bc_pressure;
479  ls->rhs_set_value(edge_row, -bc_pressure);
480  ls->mat_set_value(edge_row, edge_row, -1.0);
481 
482  } else if ( type == EqData::neumann) {
483  double bc_flux = data_.bc_flux.value(b_ele.centre(), b_ele);
484  ls->rhs_set_value(edge_row, bc_flux * bcd->element()->measure() * cross_section);
485 
486  } else if ( type == EqData::robin) {
487  double bc_pressure = data_.bc_pressure.value(b_ele.centre(), b_ele);
488  double bc_sigma = data_.bc_robin_sigma.value(b_ele.centre(), b_ele);
489  ls->rhs_set_value(edge_row, -bcd->element()->measure() * bc_sigma * bc_pressure * cross_section );
490  ls->mat_set_value(edge_row, edge_row, -bcd->element()->measure() * bc_sigma * cross_section );
491 
492  } else {
493  xprintf(UsrErr, "BC type not supported.\n");
494  }
495 
496  if (balance_ != nullptr)
497  {
498  balance_->add_flux_matrix_values(water_balance_idx_, loc_b, {side_row}, {1});
499  }
500  ++loc_b;
501  }
502  ls->mat_set_value(side_row, edge_row, c_val);
503  ls->mat_set_value(edge_row, side_row, c_val);
504 
505  // assemble matrix for weights in BDDCML
506  // approximation to diagonal of
507  // S = -C - B*inv(A)*B'
508  // as
509  // diag(S) ~ - diag(C) - 1./diag(A)
510  // the weights form a partition of unity to average a discontinuous solution from neighbouring subdomains
511  // to a continuous one
512  // it is important to scale the effect - if conductivity is low for one subdomain and high for the other,
513  // trust more the one with low conductivity - it will be closer to the truth than an arithmetic average
514  if ( typeid(*ls) == typeid(LinSys_BDDC) ) {
515  double val_side = (fe_values.local_matrix())[i*nsides+i];
516  double val_edge = -1./ (fe_values.local_matrix())[i*nsides+i];
517 
518  static_cast<LinSys_BDDC*>(ls)->diagonal_weights_set_value( side_row, val_side );
519  static_cast<LinSys_BDDC*>(ls)->diagonal_weights_set_value( edge_row, val_edge );
520  }
521  }
522 
523  ls->rhs_set_values(nsides, side_rows, loc_side_rhs);
524 
525 
526  // set block A: side-side on one element - block diagonal matrix
527  ls->mat_set_values(nsides, side_rows, nsides, side_rows, fe_values.local_matrix() );
528  // set block B, B': element-side, side-element
529  ls->mat_set_values(1, &el_row, nsides, side_rows, minus_ones);
530  ls->mat_set_values(nsides, side_rows, 1, &el_row, minus_ones);
531 
532 
533  // D block: non-compatible connections and diagonal: element-element
534 
535  ls->mat_set_value(el_row, el_row, 0.0); // maybe this should be in virtual block for schur preallocation
536 
537  if ( typeid(*ls) == typeid(LinSys_BDDC) ) {
538  double val_ele = 1.;
539  static_cast<LinSys_BDDC*>(ls)->diagonal_weights_set_value( el_row, val_ele );
540  }
541 
542  // D, E',E block: compatible connections: element-edge
543 
544  for (unsigned int i = 0; i < ele->n_neighs_vb; i++) {
545  // every compatible connection adds a 2x2 matrix involving
546  // current element pressure and a connected edge pressure
547  ngh= ele->neigh_vb[i];
548  tmp_rows[0]=el_row;
549  tmp_rows[1]=row_4_edge[ ngh->edge_idx() ];
550 
551  // compute normal vector to side
552  arma::vec3 nv;
553  ElementFullIter ele_higher = mesh_->element.full_iter(ngh->side()->element());
554  switch (ele_higher->dim()) {
555  case 1:
556  fe_side_values1.reinit(ele_higher, ngh->side()->el_idx());
557  nv = fe_side_values1.normal_vector(0);
558  break;
559  case 2:
560  fe_side_values2.reinit(ele_higher, ngh->side()->el_idx());
561  nv = fe_side_values2.normal_vector(0);
562  break;
563  case 3:
564  fe_side_values3.reinit(ele_higher, ngh->side()->el_idx());
565  nv = fe_side_values3.normal_vector(0);
566  break;
567  }
568 
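            // Coupling coefficient of the compatible (d-1)-d connection computed below
            // (sketch of the formula): value = sigma * 2*K * (n^T A n) * (delta_h^2 / delta_l) * |side|,
            // where sigma is the transition coefficient, K the scalar conductivity, A the
            // anisotropy tensor, n the unit normal of the higher-dimensional side, and
            // delta_h, delta_l the cross-sections of the higher- and lower-dimensional elements.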
569  double value = data_.sigma.value( ele->centre(), ele->element_accessor()) *
570  2*data_.conductivity.value( ele->centre(), ele->element_accessor()) *
571  arma::dot(data_.anisotropy.value( ele->centre(), ele->element_accessor())*nv, nv) *
572  data_.cross_section.value( ngh->side()->centre(), ele_higher->element_accessor() ) * // cross-section of higher dim. (2d)
573  data_.cross_section.value( ngh->side()->centre(), ele_higher->element_accessor() ) /
574  data_.cross_section.value( ele->centre(), ele->element_accessor() ) * // crossection of lower dim.
575  ngh->side()->measure();
576 
577 
578  local_vb[0] = -value; local_vb[1] = value;
579  local_vb[2] = value; local_vb[3] = -value;
580 
581  ls->mat_set_values(2, tmp_rows, 2, tmp_rows, local_vb);
582 
583  // update matrix for weights in BDDCML
584  if ( typeid(*ls) == typeid(LinSys_BDDC) ) {
585  int ind = tmp_rows[1];
586  // there is -value on diagonal in block C!
587  double new_val = - value;
588  static_cast<LinSys_BDDC*>(ls)->diagonal_weights_set_value( ind, new_val );
589  }
590 
591  if (n_schur_compls == 2) {
592  // for the 2nd Schur complement: the N dim edge is connected with the N dim element =>
593  // there are nonzeros between the N dim edge and the N-1 dim edges of the element
594 
595  ls->mat_set_values(nsides, edge_rows, 1, tmp_rows+1, zeros);
596  ls->mat_set_values(1, tmp_rows+1, nsides, edge_rows, zeros);
597 
598  // save all global edge indices to higher positions
599  tmp_rows[2+i] = tmp_rows[1];
600  }
601  }
602 
603 
604  // add virtual values for schur complement allocation
605  switch (n_schur_compls) {
606  case 2:
607  // Connections between edges of N+1 dim. elements neighboring the actual N dim element 'ele'
608  ASSERT(ele->n_neighs_vb*ele->n_neighs_vb<1000, "Too many values in E block.");
609  ls->mat_set_values(ele->n_neighs_vb, tmp_rows+2,
610  ele->n_neighs_vb, tmp_rows+2, zeros);
611 
612  case 1: // included also for case 2
613  // -(C')*(A-)*B block and its transpose connect an edge with its elements
614  ls->mat_set_values(1, &el_row, nsides, edge_rows, zeros);
615  ls->mat_set_values(nsides, edge_rows, 1, &el_row, zeros);
616  // -(C')*(A-)*C block connects all edges of every element
617  ls->mat_set_values(nsides, edge_rows, nsides, edge_rows, zeros);
618  }
619  }
620 
621  if (balance_ != nullptr)
622  balance_->finish_flux_assembly(water_balance_idx_);
623 
625 
626 
627  if (mortar_method_ == MortarP0) {
628  P0_CouplingAssembler(*this).assembly(*ls);
629  } else if (mortar_method_ == MortarP1) {
630  P1_CouplingAssembler(*this).assembly(*ls);
631  }
632 }
633 
634 
635 void DarcyFlowMH_Steady::assembly_source_term()
636 {
637  if (balance_ != nullptr)
638  balance_->start_source_assembly(water_balance_idx_);
639 
640  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
641 
642  ElementFullIter ele = mesh_->element(el_4_loc[i_loc]);
643  int el_row = row_4_el[el_4_loc[i_loc]];
644 
645  // set sources
646  double source = ele->measure() *
647  data_.cross_section.value(ele->centre(), ele->element_accessor()) *
648  data_.water_source_density.value(ele->centre(), ele->element_accessor());
649  schur0->rhs_set_value(el_row, -1.0 * source );
650 
651  if (balance_ != nullptr)
652  balance_->add_source_rhs_values(water_balance_idx_, ele->region().bulk_idx(), {el_row}, {source});
653  }
654 
655  if (balance_ != nullptr)
656  balance_->finish_source_assembly(water_balance_idx_);
657 }
658 
659 
660 void P0_CouplingAssembler::pressure_diff(int i_ele,
661  vector<int> &dofs, unsigned int &ele_type, double &delta, arma::vec &dirichlet) {
662 
663  const Element *ele;
664 
665  if (i_ele == (int)(ml_it_->size()) ) { // master element .. 1D
666  ele_type = 0;
667  delta = -delta_0;
668  ele=master_;
669  } else {
670  ele_type = 1;
671  const Intersection &isect=intersections_[ (*ml_it_)[i_ele] ];
672  delta = isect.intersection_true_size();
673  ele = isect.slave_iter();
674  }
675 
676  dofs.resize(ele->n_sides());
677  dirichlet.resize(ele->n_sides());
678  dirichlet.zeros();
679 
680  for(unsigned int i_side=0; i_side < ele->n_sides(); i_side++ ) {
681  dofs[i_side]=darcy_.row_4_edge[ele->side(i_side)->edge_idx()];
682  Boundary * bcd = ele->side(i_side)->cond();
683  if (bcd) {
684  ElementAccessor<3> b_ele = bcd->element_accessor();
685  DarcyFlowMH::EqData::BC_Type type = (DarcyFlowMH::EqData::BC_Type)darcy_.data_.bc_type.value(b_ele.centre(), b_ele);
686  //DBGMSG("bcd id: %d sidx: %d type: %d\n", ele->id(), i_side, type);
687  if (type == DarcyFlowMH::EqData::dirichlet) {
688  //DBGMSG("Dirichlet: %d\n", ele->index());
689  dofs[i_side] = -dofs[i_side];
690  double bc_pressure = darcy_.data_.bc_pressure.value(b_ele.centre(), b_ele);
691  dirichlet[i_side] = bc_pressure;
692  }
693  }
694  }
695 
696 }
697 
698 /**
699  * Works well but there is large error next to the boundary.
700  */
701 void P0_CouplingAssembler::assembly(LinSys &ls) {
702  double delta_i, delta_j;
703  arma::mat product;
704  arma::vec dirichlet_i, dirichlet_j;
705  unsigned int ele_type_i, ele_type_j;
706 
707  unsigned int i,j;
708  vector<int> dofs_i,dofs_j;
709 
710  for(ml_it_ = master_list_.begin(); ml_it_ != master_list_.end(); ++ml_it_) {
711 
712  if (ml_it_->size() == 0) continue; // skip empty masters
713 
714 
715  // on the intersection element we consider
716  // * intersection dofs for master and slave
717  // those are dofs of the space into which we interpolate
718  // base functions from individual master and slave elements
719  // For the master dofs both are usually equivalent.
720  // * original dofs - for master same as intersection dofs, for slave
721  // all dofs of slave elements
722 
723  // form list of intersection dofs, in this case pressures in barycenters
724  // but we do not use those from the MH system in order to allow a second Schur complement. We rather map these
725  // dofs to pressure traces, i.e. we use averages of traces as barycentric values.
726 
727 
728  master_ = intersections_[ml_it_->front()].master_iter();
729  delta_0 = master_->measure();
730 
731  double master_sigma=darcy_.data_.sigma.value( master_->centre(), master_->element_accessor());
732 
733  // rows
734  double check_delta_sum=0;
735  for(i = 0; i <= ml_it_->size(); ++i) {
736  pressure_diff(i, dofs_i, ele_type_i, delta_i, dirichlet_i);
737  check_delta_sum+=delta_i;
738  //columns
739  for (j = 0; j <= ml_it_->size(); ++j) {
740  pressure_diff(j, dofs_j, ele_type_j, delta_j, dirichlet_j);
741 
742  double scale = -master_sigma * delta_i * delta_j / delta_0;
743  product = scale * tensor_average[ele_type_i][ele_type_j];
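                // P0 mortar term (sketch): the local contribution is
                // -master_sigma * (delta_i * delta_j / delta_0) * T[ele_type_i][ele_type_j],
                // which, summed over master and slave rows/columns, penalizes the difference
                // between the averaged master and slave pressure traces on the intersection.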
744 
745  arma::vec rhs(dofs_i.size());
746  rhs.zeros();
747  ls.set_values( dofs_i, dofs_j, product, rhs, dirichlet_i, dirichlet_j);
748  auto dofs_i_cp=dofs_i;
749  auto dofs_j_cp=dofs_j;
750  ls.set_values( dofs_i_cp, dofs_j_cp, product, rhs, dirichlet_i, dirichlet_j);
751  }
752  }
753  ASSERT(check_delta_sum < 1E-5*delta_0, "sum err %f > 0\n", check_delta_sum/delta_0);
754  } // loop over master elements
755 }
756 
757 
758 
759  void P1_CouplingAssembler::add_sides(const Element * ele, unsigned int shift, vector<int> &dofs, vector<double> &dirichlet)
760  {
761 
762  for(unsigned int i_side=0; i_side < ele->n_sides(); i_side++ ) {
763  dofs[shift+i_side] = darcy_.row_4_edge[ele->side(i_side)->edge_idx()];
764  Boundary * bcd = ele->side(i_side)->cond();
765 
766  if (bcd) {
767  ElementAccessor<3> b_ele = bcd->element_accessor();
768  DarcyFlowMH::EqData::BC_Type type = (DarcyFlowMH::EqData::BC_Type)darcy_.data_.bc_type.value(b_ele.centre(), b_ele);
769  //DBGMSG("bcd id: %d sidx: %d type: %d\n", ele->id(), i_side, type);
770  if (type == DarcyFlowMH::EqData::dirichlet) {
771  //DBGMSG("Dirichlet: %d\n", ele->index());
772  dofs[shift + i_side] = -dofs[shift + i_side];
773  double bc_pressure = darcy_.data_.bc_pressure.value(b_ele.centre(), b_ele);
774  dirichlet[shift + i_side] = bc_pressure;
775  }
776  }
777  }
778  }
779 
780 
781 /**
782  * P1 connection of different dimensions
783  *
784  * - 20.11. 2014 - very poor convergence, big error in pressure even at internal part of the fracture
785  */
786 
787 void P1_CouplingAssembler::assembly(LinSys &ls) {
788 
789  for (const Intersection &intersec : intersections_) {
790  const Element * master = intersec.master_iter();
791  const Element * slave = intersec.slave_iter();
792 
793  add_sides(slave, 0, dofs, dirichlet);
794  add_sides(master, 3, dofs, dirichlet);
795 
796  double master_sigma=darcy_.data_.sigma.value( master->centre(), master->element_accessor());
797 
798 /*
799  * Local coordinates on 1D
800  * t0
801  * node 0: 0.0
802  * node 1: 1.0
803  *
804  * base fce points
805  * t0 = 0.0 on side 0 node 0
806  * t0 = 1.0 on side 1 node 1
807  *
808  * Local coordinates on 2D
809  * t0 t1
810  * node 0: 0.0 0.0
811  * node 1: 1.0 0.0
812  * node 2: 0.0 1.0
813  *
814  * base fce points
815  * t0=0.5, t1=0.0 on side 0 nodes (0,1)
816  * t0=0.5, t1=0.5 on side 1 nodes (1,2)
817  * t0=0.0, t1=0.5 on side 2 nodes (2,0)
818  */
819 
820 
821 
822  arma::vec point_Y(1);
823  point_Y.fill(1.0);
824  arma::vec point_2D_Y(intersec.map_to_slave(point_Y)); // local coordinates of Y on slave
825  arma::vec point_1D_Y(intersec.map_to_master(point_Y)); // local coordinates of Y on master
826 
827  arma::vec point_X(1);
828  point_X.fill(0.0);
829  arma::vec point_2D_X(intersec.map_to_slave(point_X)); // local coordinates of X on slave
830  arma::vec point_1D_X(intersec.map_to_master(point_X)); // local coordinates of X on master
831 
832  arma::mat base_2D(3, 3);
833  // base fce = a0 + a1*t0 + a2*t1
834  // a0 a1 a2
835  base_2D << 1.0 << 0.0 << -2.0 << arma::endr //point on side 0
836  << -1.0 << 2.0 << 2.0 << arma::endr // point on side 1
837  << 1.0 << -2.0 << 0.0 << arma::endr;// point on side 2
838 
839  arma::mat base_1D(2, 2);
840  // base fce = a0 + a1 * t0
841  // a0 a1
842  base_1D << 1.0 << -1.0 << arma::endr // point on side 0,
843  << 0.0 << 1.0 << arma::endr; // point on side 1,
844 
845 
846  arma::vec difference_in_Y(5);
847  arma::vec difference_in_X(5);
848 
849  // slave sides 0,1,2
850  difference_in_Y.subvec(0, 2) = -base_2D * point_2D_Y;
851  difference_in_X.subvec(0, 2) = -base_2D * point_2D_X;
852  // master sides 3,4
853  difference_in_Y.subvec(3, 4) = base_1D * point_1D_Y;
854  difference_in_X.subvec(3, 4) = base_1D * point_1D_X;
855 
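        // The entries assembled below evaluate (sketch of the quadrature used)
        //   A(i,j) = -master_sigma * |I| * \int_0^1 dphi_i(t) * dphi_j(t) dt,
        // where dphi is the slave-master difference of base functions along the
        // intersection, with endpoint values difference_in_X (t=0) and difference_in_Y (t=1).
        // For linear functions the integral is exactly
        //   ( f_X*g_X + f_Y*g_Y + (f_X*g_Y + f_Y*g_X)/2 ) / 3,
        // which is the expression in the double loop.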
856  // entries of the matrix A[i,j]
857  arma::mat A(5, 5);
858  for (int i = 0; i < 5; ++i) {
859  for (int j = 0; j < 5; ++j) {
860  A(i, j) = -master_sigma * intersec.intersection_true_size() *
861  ( difference_in_Y[i] * difference_in_Y[j]
862  + difference_in_Y[i] * difference_in_X[j]/2
863  + difference_in_X[i] * difference_in_Y[j]/2
864  + difference_in_X[i] * difference_in_X[j]
865  ) * (1.0 / 3.0);
866 
867  }
868  }
869  auto dofs_cp=dofs;
870  ls.set_values( dofs_cp, dofs_cp, A, rhs, dirichlet, dirichlet);
871 
872  }
873 }
874 
875 
876 
877 /*******************************************************************************
878  * COMPOSE WATER MH MATRIX WITHOUT SCHUR COMPLEMENT
879  ******************************************************************************/
880 
881 void DarcyFlowMH_Steady::create_linear_system() {
882 
883  START_TIMER("preallocation");
884 
885  auto in_rec = this->input_record_.val<Input::AbstractRecord>("solver");
886 
887  if (schur0 == NULL) { // create Linear System for MH matrix
888 
889  if (in_rec.type() == LinSys_BDDC::input_type) {
890 #ifdef HAVE_BDDCML
891  xprintf(Warn, "No Schur complements are used with BDDC.");
892  n_schur_compls = 0;
893  LinSys_BDDC *ls = new LinSys_BDDC(global_row_4_sub_row->size(), &(*rows_ds),
894  3, // 3 == la::BddcmlWrapper::SPD_VIA_SYMMETRICGENERAL
895  1, // 1 == number of subdomains per process
896  true); // swap signs of matrix and rhs to make the matrix SPD
897  ls->set_from_input(in_rec);
898  ls->set_solution( NULL );
899  // possible initialization particular to BDDC
900  START_TIMER("BDDC set mesh data");
901  set_mesh_data_for_bddc(ls);
902  schur0=ls;
903  END_TIMER("BDDC set mesh data");
904 #else
905  xprintf(Err, "Flow123d was not built with BDDCML support.\n");
906 #endif
907  }
908  else if (in_rec.type() == LinSys_PETSC::input_type) {
909  // use PETSC for serial case even when user wants BDDC
910  if (n_schur_compls > 2) {
911  xprintf(Warn, "Invalid number of Schur Complements. Using 2.");
912  n_schur_compls = 2;
913  }
914 
915  LinSys_PETSC *schur1, *schur2;
916 
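            // Nesting of the complements built below (sketch): schur0 holds the full MH
            // system; the first SchurComplement eliminates the side-flux unknowns, and the
            // optional second one additionally eliminates element pressures, leaving a
            // reduced system for edge pressures only.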
917  if (n_schur_compls == 0) {
918  LinSys_PETSC *ls = new LinSys_PETSC( &(*rows_ds) );
919 
920  // temporary solution; we have to set the precision also for the sequential case of BDDC
921  // the final solution should probably be a call of a direct solver for the one-process case
922  if (in_rec.type() != LinSys_BDDC::input_type) ls->set_from_input(in_rec);
923  else {
924  ls->LinSys::set_from_input(in_rec); // get only common options
925  }
926 
927  ls->set_solution( NULL );
928  schur0=ls;
929  } else {
930  IS is;
931  ISCreateStride(PETSC_COMM_WORLD, side_ds->lsize(), rows_ds->begin(), 1, &is);
932  //ASSERT(err == 0,"Error in ISCreateStride.");
933 
934  SchurComplement *ls = new SchurComplement(is, &(*rows_ds));
935  ls->set_from_input(in_rec);
936  ls->set_solution( NULL );
937 
938  // make schur1
939  Distribution *ds = ls->make_complement_distribution();
940  if (n_schur_compls==1) {
941  schur1 = new LinSys_PETSC(ds);
942  schur1->set_positive_definite();
943  } else {
944  IS is;
945  ISCreateStride(PETSC_COMM_WORLD, el_ds->lsize(), ls->get_distribution()->begin(), 1, &is);
946  //ASSERT(err == 0,"Error in ISCreateStride.");
947  SchurComplement *ls1 = new SchurComplement(is, ds); // is is deallocated by SchurComplement
948  ls1->set_negative_definite();
949 
950  // make schur2
951  schur2 = new LinSys_PETSC( ls1->make_complement_distribution() );
952  schur2->set_positive_definite();
953  ls1->set_complement( schur2 );
954  schur1 = ls1;
955  }
956  ls->set_complement( schur1 );
957  schur0=ls;
958  }
959 
960  START_TIMER("PETSC PREALLOCATION");
961  schur0->set_symmetric();
962  schur0->start_allocation();
963  assembly_steady_mh_matrix(); // preallocation
964  VecZeroEntries(schur0->get_solution());
965  END_TIMER("PETSC PREALLOCATION");
966  }
967  else {
968  xprintf(Err, "Unknown solver type. Internal error.\n");
969  }
970  }
971 
972 
973  END_TIMER("preallocation");
974 
975  make_serial_scatter();
976 
977 }
978 
979 
980 
981 
982 void DarcyFlowMH_Steady::assembly_linear_system() {
983 
984  data_.set_time(time_->step());
985  //DBGMSG("Assembly linear system\n");
986  if (data_.changed()) {
987  //DBGMSG(" Data changed\n");
988  // currently we have no optimization for cases when just time term data or RHS data are changed
989  START_TIMER("full assembly");
990  if (typeid(*schur0) != typeid(LinSys_BDDC)) {
991  schur0->start_add_assembly(); // finish allocation and create matrix
992  }
993  schur0->mat_zero_entries();
994  schur0->rhs_zero_entries();
995  assembly_steady_mh_matrix(); // fill matrix
996  schur0->finish_assembly();
997  schur0->set_matrix_changed();
998  //MatView( *const_cast<Mat*>(schur0->get_matrix()), PETSC_VIEWER_STDOUT_WORLD );
999  //VecView( *const_cast<Vec*>(schur0->get_rhs()), PETSC_VIEWER_STDOUT_WORLD);
1000 
1001  if (!time_->is_steady()) {
1002  //DBGMSG(" setup time term\n");
1003  // assembly time term and rhs
1004  setup_time_term();
1005  modify_system();
1006  }
1007  else if (balance_ != nullptr)
1008  {
1009  balance_->start_mass_assembly(water_balance_idx_);
1010  balance_->finish_mass_assembly(water_balance_idx_);
1011  }
1012  END_TIMER("full assembly");
1013  } else {
1014  START_TIMER("modify system");
1015  if (!time_->is_steady()) {
1016  modify_system();
1017  } else {
1018  xprintf(PrgErr, "Planned computation time for steady solver, but data are not changed.\n");
1019  }
1020  END_TIMER("modify system");
1021  }
1022 
1023 }
1024 
1025 
1026 
1027 void DarcyFlowMH_Steady::set_mesh_data_for_bddc(LinSys_BDDC * bddc_ls) {
1028  // prepare mesh for BDDCML
1029  // initialize arrays
1030  // auxiliary map for creating coordinates of local dofs and global-to-local numbering
1031  std::map<int,arma::vec3> localDofMap;
1032  // connectivity for the subdomain, i.e. global dof numbers on element, stored element-by-element
1033  // Indices of Nodes on Elements
1034  std::vector<int> inet;
1035  // number of degrees of freedom on elements - determines elementwise chunks of INET array
1036  // Number of Nodes on Elements
1037  std::vector<int> nnet;
1038  // Indices of Subdomain Elements in Global Numbering - for local elements, their global indices
1039  std::vector<int> isegn;
1040 
1041  // This array is currently not used in BDDCML, it was used as an interface scaling alternative to scaling
1042  // by diagonal. It corresponds to the rho-scaling.
1043  std::vector<double> element_permeability;
1044 
1045  // maximal and minimal dimension of elements
1046  int elDimMax = 1;
1047  int elDimMin = 3;
1048  for ( unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++ ) {
1049  // for each element, create local numbering of dofs as fluxes (sides), pressure (element centre), Lagrange multipliers (edges), compatible connections
1050  ElementFullIter el = mesh_->element(el_4_loc[i_loc]);
1051  int e_idx = el.index();
1052 
1053  int elDim = el->dim();
1054  elDimMax = std::max( elDimMax, elDim );
1055  elDimMin = std::min( elDimMin, elDim );
1056 
1057  isegn.push_back( e_idx );
1058  int nne = 0;
1059 
1060  FOR_ELEMENT_SIDES(el,si) {
1061  // insert local side dof
1062  int side_row = side_row_4_id[ mh_dh.side_dof( el->side(si) ) ];
1063  arma::vec3 coord = el->side(si)->centre();
1064 
1065  localDofMap.insert( std::make_pair( side_row, coord ) );
1066  inet.push_back( side_row );
1067  nne++;
1068  }
1069 
1070  // insert local pressure dof
1071  int el_row = row_4_el[ el_4_loc[i_loc] ];
1072  arma::vec3 coord = el->centre();
1073  localDofMap.insert( std::make_pair( el_row, coord ) );
1074  inet.push_back( el_row );
1075  nne++;
1076 
1077  FOR_ELEMENT_SIDES(el,si) {
1078  // insert local edge dof
1079  int edge_row = row_4_edge[ el->side(si)->edge_idx() ];
1080  arma::vec3 coord = el->side(si)->centre();
1081 
1082  localDofMap.insert( std::make_pair( edge_row, coord ) );
1083  inet.push_back( edge_row );
1084  nne++;
1085  }
1086 
1087  // insert dofs related to compatible connections
1088  for ( unsigned int i_neigh = 0; i_neigh < el->n_neighs_vb; i_neigh++) {
1089  int edge_row = row_4_edge[ el->neigh_vb[i_neigh]->edge_idx() ];
1090  arma::vec3 coord = el->neigh_vb[i_neigh]->edge()->side(0)->centre();
1091 
1092  localDofMap.insert( std::make_pair( edge_row, coord ) );
1093  inet.push_back( edge_row );
1094  nne++;
1095  }
1096 
1097  nnet.push_back( nne );
1098 
1099  // version for rho scaling
1100  // trace computation
1101  arma::vec3 centre = el->centre();
1102  double conduct = data_.conductivity.value( centre , el->element_accessor() );
1103  arma::mat33 aniso = data_.anisotropy.value( centre, el->element_accessor() );
1104 
1105  // compute mean on the diagonal
1106  double coef = 0.;
1107  for ( int i = 0; i < 3; i++) {
1108  coef = coef + aniso.at(i,i);
1109  }
1110  // Maybe divide by cs
1111  coef = conduct*coef / 3;
1112 
1113  ASSERT( coef > 0.,
1114  "Zero coefficient of hydrodynamic resistance %f . \n ", coef );
1115  element_permeability.push_back( 1. / coef );
1116  }
1117  //convert set of dofs to vectors
1118  // number of nodes (= dofs) on the subdomain
1119  int numNodeSub = localDofMap.size();
1120  ASSERT_EQUAL( (unsigned int)numNodeSub, global_row_4_sub_row->size() );
1121  // Indices of Subdomain Nodes in Global Numbering - for local nodes, their global indices
1122  std::vector<int> isngn( numNodeSub );
1123  // pseudo-coordinates of local nodes (i.e. dofs)
1124  // they need not be exact, they are used just for some geometrical considerations in BDDCML,
1125  // such as selection of corners maximizing area of a triangle, bounding boxes for subdomains to
1126  // find candidate neighbours etc.
1127  std::vector<double> xyz( numNodeSub * 3 ) ;
1128  int ind = 0;
1129  std::map<int,arma::vec3>::iterator itB = localDofMap.begin();
1130  for ( ; itB != localDofMap.end(); ++itB ) {
1131  isngn[ind] = itB -> first;
1132 
1133  arma::vec3 coord = itB -> second;
1134  for ( int j = 0; j < 3; j++ ) {
1135  xyz[ j*numNodeSub + ind ] = coord[j];
1136  }
1137 
1138  ind++;
1139  }
1140  localDofMap.clear();
1141 
1142  // Number of Nodal Degrees of Freedom
1143  // nndf is trivially one - dofs coincide with nodes
1144  std::vector<int> nndf( numNodeSub, 1 );
1145 
1146  // prepare auxiliary map for renumbering nodes
1147  typedef std::map<int,int> Global2LocalMap_; //! type for storage of global to local map
1148  Global2LocalMap_ global2LocalNodeMap;
1149  for ( unsigned ind = 0; ind < isngn.size(); ++ind ) {
1150  global2LocalNodeMap.insert( std::make_pair( static_cast<unsigned>( isngn[ind] ), ind ) );
1151  }
1152 
1153  // renumber nodes in the inet array to locals
1154  int indInet = 0;
1155  for ( unsigned int iEle = 0; iEle < isegn.size(); iEle++ ) {
1156  int nne = nnet[ iEle ];
1157  for ( int ien = 0; ien < nne; ien++ ) {
1158 
1159  int indGlob = inet[indInet];
1160  // map it to local node
1161  Global2LocalMap_::iterator pos = global2LocalNodeMap.find( indGlob );
1162  ASSERT( pos != global2LocalNodeMap.end(),
1163  "Cannot remap node index %d to local indices. \n ", indGlob );
1164  int indLoc = static_cast<int> ( pos -> second );
1165 
1166  // store the node
1167  inet[ indInet++ ] = indLoc;
1168  }
1169  }
1170 
1171  int numNodes = size;
1172  int numDofsInt = size;
1173  int spaceDim = 3; // TODO: what is the proper value here?
1174  int meshDim = elDimMax;
1175 
1176  bddc_ls -> load_mesh( spaceDim, numNodes, numDofsInt, inet, nnet, nndf, isegn, isngn, isngn, xyz, element_permeability, meshDim );
1177 }
1178 
1179 
1180 
1181 
1182 //=============================================================================
1183 // DESTROY WATER MH SYSTEM STRUCTURE
1184 //=============================================================================
1185 DarcyFlowMH_Steady::~DarcyFlowMH_Steady() {
1186  if (schur0 != NULL) delete schur0;
1187 
1188  delete edge_ds;
1189  delete el_ds;
1190  delete side_ds;
1191 
1192  xfree(el_4_loc);
1193  xfree(row_4_el);
1194  xfree(side_id_4_loc);
1195  xfree(side_row_4_id);
1196  xfree(edge_4_loc);
1197  xfree(row_4_edge);
1198 
1199  if (solution != NULL) {
1200  VecDestroy(&sol_vec);
1201  xfree(solution);
1202  }
1203 
1204  delete output_object;
1205 
1206  VecScatterDestroy(&par_to_all);
1207 }
1208 
1209 
1210 // ================================================
1211 // PARALLEL PART
1212 //
1213 
1214 // ========================================================================
1215 // to finish row_4_id arrays we have to convert individual numberings of
1216 // sides/els/edges to whole numbering of rows. To this end we count shifts
1217 // for sides/els/edges on each proc and then we apply them on row_4_id
1218 // arrays.
1219 // we employ macros to avoid code redundancy
1220 // =======================================================================
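// Resulting global ordering of rows (per processor): [ sides | elements | edges ],
// i.e. each processor owns a contiguous block of side rows, then element rows,
// then edge rows; rows_ds describes this final distribution.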
1221 void DarcyFlowMH_Steady::make_row_numberings() {
1222  int i, shift;
1223  int np = edge_ds->np();
1224  int edge_shift[np], el_shift[np], side_shift[np];
1225  unsigned int rows_starts[np];
1226 
1227  int edge_n_id = mesh_->n_edges(),
1228  el_n_id = mesh_->element.size(),
1229  side_n_id = mesh_->n_sides();
1230 
1231  // compute shifts on every proc
1232  shift = 0; // in this var. we count new starts of arrays chunks
1233  for (i = 0; i < np; i++) {
1234  side_shift[i] = shift - (side_ds->begin(i)); // subtract actual start of the chunk
1235  shift += side_ds->lsize(i);
1236  el_shift[i] = shift - (el_ds->begin(i));
1237  shift += el_ds->lsize(i);
1238  edge_shift[i] = shift - (edge_ds->begin(i));
1239  shift += edge_ds->lsize(i);
1240  rows_starts[i] = shift;
1241  }
1242  // apply shifts
1243  for (i = 0; i < side_n_id; i++) {
1244  int &what = side_row_4_id[i];
1245  if (what >= 0)
1246  what += side_shift[side_ds->get_proc(what)];
1247  }
1248  for (i = 0; i < el_n_id; i++) {
1249  int &what = row_4_el[i];
1250  if (what >= 0)
1251  what += el_shift[el_ds->get_proc(what)];
1252 
1253  }
1254  for (i = 0; i < edge_n_id; i++) {
1255  int &what = row_4_edge[i];
1256  if (what >= 0)
1257  what += edge_shift[edge_ds->get_proc(what)];
1258  }
1259  // make distribution of rows
1260  for (i = np - 1; i > 0; i--)
1261  rows_starts[i] -= rows_starts[i - 1];
1262 
1263  rows_ds = boost::make_shared<Distribution>(&(rows_starts[0]), PETSC_COMM_WORLD);
1264 }
1265 
1266 void DarcyFlowMH_Steady::make_serial_scatter() {
1267  START_TIMER("prepare scatter");
1268  // prepare Scatter from parallel to sequential in the original numbering
1269  {
1270  IS is_loc;
1271  int i, *loc_idx; //, si;
1272 
1273  // create local solution vector
1274  solution = (double *) xmalloc(size * sizeof(double));
1275  VecCreateSeqWithArray(PETSC_COMM_SELF,1, size, solution,
1276  &(sol_vec));
1277 
1278  // create a seq. IS to scatter the parallel solution to a seq. vector in the original order
1279  // use essentially the row_4_id arrays
1280  loc_idx = (int *) xmalloc(size * sizeof(int));
1281  i = 0;
1282  FOR_ELEMENTS(mesh_, ele) {
1283  FOR_ELEMENT_SIDES(ele,si) {
1284  loc_idx[i++] = side_row_4_id[ mh_dh.side_dof( ele->side(si) ) ];
1285  }
1286  }
1287  FOR_ELEMENTS(mesh_, ele) {
1288  loc_idx[i++] = row_4_el[ele.index()];
1289  }
1290  for(unsigned int i_edg=0; i_edg < mesh_->n_edges(); i_edg++) {
1291  loc_idx[i++] = row_4_edge[i_edg];
1292  }
1293  ASSERT( i==size,"Size of array does not match number of fills.\n");
1294  //DBGPRINT_INT("loc_idx",size,loc_idx);
1295  ISCreateGeneral(PETSC_COMM_SELF, size, loc_idx, PETSC_COPY_VALUES, &(is_loc));
1296  xfree(loc_idx);
1297  VecScatterCreate(schur0->get_solution(), is_loc, sol_vec,
1298  PETSC_NULL, &par_to_all);
1299  ISDestroy(&(is_loc));
1300  }
1301  solution_changed_for_scatter=true;
1302 
1303  END_TIMER("prepare scatter");
1304 
1305 }
1306 
1307 // ====================================================================================
1308 // - compute optimal edge partitioning
1309 // - compute appropriate partitioning of elements and sides
1310 // - make arrays: *_id_4_loc and *_row_4_id to allow parallel assembly of the MH matrix
1311 // ====================================================================================
1312 void DarcyFlowMH_Steady::prepare_parallel( const Input::AbstractRecord in_rec) {
1313 
1314  START_TIMER("prepare parallel");
1315 
1316  int *loc_part; // optimal (edge,el) partitioning (local chunk)
1317  int *id_4_old; // map from old idx to ids (edge,el)
1318  int i, loc_i;
1319 
1320  int e_idx;
1321 
1322 
1323  //ierr = MPI_Barrier(PETSC_COMM_WORLD);
1324  //ASSERT(ierr == 0, "Error in MPI_Barrier.");
1325 
1326  id_4_old = new int[mesh_->n_elements()];
1327  i = 0;
1328  FOR_ELEMENTS(mesh_, el) id_4_old[i++] = el.index();
1329 
1330  mesh_->get_part()->id_maps(mesh_->element.size(), id_4_old, el_ds, el_4_loc, row_4_el);
1331  delete[] id_4_old;
1332 
1333  //optimal element part; loc. els. id-> new el. numbering
1334  Distribution init_edge_ds(DistributionLocalized(), mesh_->n_edges(), PETSC_COMM_WORLD);
1335  // partitioning of edges, an edge belongs to the proc of its first element
1336  // this is not optimal but simple
1337  loc_part = new int[init_edge_ds.lsize()];
1338  id_4_old = new int[mesh_->n_edges()];
1339  {
1340  loc_i = 0;
1341  FOR_EDGES(mesh_, edg ) {
1342  unsigned int i_edg = edg - mesh_->edges.begin();
1343  // partition
1344  e_idx = mesh_->element.index(edg->side(0)->element());
1345  if (init_edge_ds.is_local(i_edg)) {
1346  // find (new) proc of the first element of the edge
1347  loc_part[loc_i++] = el_ds->get_proc(row_4_el[e_idx]);
1348  }
1349  // id array
1350  id_4_old[i_edg] = i_edg;
1351  }
1352  }
1353 
1354  Partitioning::id_maps(mesh_->n_edges(), id_4_old, init_edge_ds, loc_part, edge_ds, edge_4_loc, row_4_edge);
1355  delete[] loc_part;
1356  delete[] id_4_old;
1357 
1358  //optimal side part; loc. sides; id-> new side numbering
1359  Distribution init_side_ds(DistributionBlock(), mesh_->n_sides(), PETSC_COMM_WORLD);
1360  // partitioning of sides follows elements
1361  loc_part = new int[init_side_ds.lsize()];
1362  id_4_old = new int[mesh_->n_sides()];
1363  {
1364  int is = 0;
1365  loc_i = 0;
1366  FOR_SIDES(mesh_, side ) {
1367  // partition
1368  if (init_side_ds.is_local(is)) {
1369  // find (new) proc of the element of the side
1370  loc_part[loc_i++] = el_ds->get_proc(
1371  row_4_el[mesh_->element.index(side->element())]);
1372  }
1373  // id array
1374  id_4_old[is++] = mh_dh.side_dof( side );
1375  }
1376  }
1377 
1378  Partitioning::id_maps(mesh_->n_sides(), id_4_old, init_side_ds, loc_part, side_ds,
1379  side_id_4_loc, side_row_4_id);
1380  delete [] loc_part;
1381  delete [] id_4_old;
1382 
1383  // convert row_4_id arrays from separate numberings to global numbering of rows
1384  make_row_numberings();
1385 
1386  // prepare global_row_4_sub_row
1387 
1388 #ifdef HAVE_BDDCML
1389  if (in_rec.type() == LinSys_BDDC::input_type) {
1390  // auxiliary
1391  Element *el;
1392  int side_row, edge_row;
1393 
1394  global_row_4_sub_row = boost::make_shared<LocalToGlobalMap>(rows_ds);
1395 
1396  //
1397  // ordering of dofs
1398  // for each subdomain:
1399  // | velocities (at sides) | pressures (at elements) | L. mult. (at edges) |
1400  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
1401  el = mesh_->element(el_4_loc[i_loc]);
1402  int el_row = row_4_el[el_4_loc[i_loc]];
1403 
1404  global_row_4_sub_row->insert( el_row );
1405 
1406  unsigned int nsides = el->n_sides();
1407  for (unsigned int i = 0; i < nsides; i++) {
1408  side_row = side_row_4_id[ mh_dh.side_dof( el->side(i) ) ];
1409  edge_row = row_4_edge[el->side(i)->edge_idx()];
1410 
1411  global_row_4_sub_row->insert( side_row );
1412  global_row_4_sub_row->insert( edge_row );
1413  }
1414 
1415  for (unsigned int i_neigh = 0; i_neigh < el->n_neighs_vb; i_neigh++) {
1416  // mark this edge
1417  edge_row = row_4_edge[el->neigh_vb[i_neigh]->edge_idx() ];
1418  global_row_4_sub_row->insert( edge_row );
1419  }
1420  }
1421  global_row_4_sub_row->finalize();
1422  }
1423 #endif // HAVE_BDDCML
1424 
1425 }
1426 
1427 
1428 
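// Debugging helper: counts matrix entries in locally owned rows whose column is
// owned by another processor; the counts n_on, n_off and n_off_rows are computed
// but currently not reported.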
1429 void mat_count_off_proc_values(Mat m, Vec v) {
1430  int n, first, last;
1431  const PetscInt *cols;
1432  Distribution distr(v);
1433 
1434  int n_off = 0;
1435  int n_on = 0;
1436  int n_off_rows = 0;
1437  MatGetOwnershipRange(m, &first, &last);
1438  for (int row = first; row < last; row++) {
1439  MatGetRow(m, row, &n, &cols, PETSC_NULL);
1440  bool exists_off = false;
1441  for (int i = 0; i < n; i++)
1442  if (distr.get_proc(cols[i]) != distr.myp())
1443  n_off++, exists_off = true;
1444  else
1445  n_on++;
1446  if (exists_off)
1447  n_off_rows++;
1448  MatRestoreRow(m, row, &n, &cols, PETSC_NULL);
1449  }
1450 }
1451 
1452 
1453 
1454 
1455 
1456 
1457 
1458 
1459 
1460 
1461 
1462 
1463 // ========================
1464 // unsteady
1465 
1466 DarcyFlowMH_Unsteady::DarcyFlowMH_Unsteady(Mesh &mesh_in, const Input::Record in_rec)
1467  : DarcyFlowMH_Steady(mesh_in, in_rec, false)
1468 {
1469  time_ = new TimeGovernor(in_rec.val<Input::Record>("time"));
1470  data_.mark_input_times(this->mark_type());
1471  data_.set_limit_side(LimitSide::right);
1472  data_.set_time(time_->step());
1473 
1474  output_object = new DarcyFlowMHOutput(this, in_rec.val<Input::Record>("output"));
1475  //balance_->units(output_object->get_output_fields().field_ele_pressure.units()*data_.cross_section.units()*data_.storativity.units());
1476 
1477  //time_->fix_dt_until_mark();
1478  create_linear_system();
1479 
1480  VecDuplicate(schur0->get_solution(), &previous_solution);
1481  VecCreateMPI(PETSC_COMM_WORLD,rows_ds->lsize(),PETSC_DETERMINE,&(steady_diagonal));
1482  VecDuplicate(steady_diagonal,& new_diagonal);
1483  VecZeroEntries(new_diagonal);
1484  VecDuplicate(*( schur0->get_rhs()), &steady_rhs);
1485 
1486  assembly_linear_system();
1487  read_init_condition();
1488 
1489  output_data();
1490 }
1491 
1492 void DarcyFlowMH_Unsteady::read_init_condition()
1493 {
1494 
1495  // read initial condition
1496  VecZeroEntries(schur0->get_solution());
1497 
1498  double *local_sol = schur0->get_solution_array();
1499 
1500  // cycle over local element rows
1501  ElementFullIter ele = ELEMENT_FULL_ITER(mesh_, NULL);
1502 
1503  DBGMSG("Setup with dt: %f\n",time_->dt());
1504  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1505  ele = mesh_->element(el_4_loc[i_loc_el]);
1506  int i_loc_row = i_loc_el + side_ds->lsize();
1507 
1508  // set initial condition
1509  local_sol[i_loc_row] = data_.init_pressure.value(ele->centre(),ele->element_accessor());
1510  }
1511 
1512  solution_changed_for_scatter=true;
1513 
1514 }
1515 
1516 void DarcyFlowMH_Unsteady::setup_time_term() {
1517  // save diagonal of steady matrix
1518  MatGetDiagonal(*( schur0->get_matrix() ), steady_diagonal);
1519  // save RHS
1520  VecCopy(*( schur0->get_rhs()), steady_rhs);
1521 
1522 
1523  PetscScalar *local_diagonal;
1524  VecGetArray(new_diagonal,& local_diagonal);
1525 
1526  ElementFullIter ele = ELEMENT_FULL_ITER(mesh_, NULL);
1527  DBGMSG("Setup with dt: %f\n",time_->dt());
1528 
1529  if (balance_ != nullptr)
1530  balance_->start_mass_assembly(water_balance_idx_);
1531 
1532  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1533  ele = mesh_->element(el_4_loc[i_loc_el]);
1534  int i_loc_row = i_loc_el + side_ds->lsize();
1535 
1536  // set new diagonal
1537  double diagonal_coeff = data_.cross_section.value(ele->centre(), ele->element_accessor())
1538  * data_.storativity.value(ele->centre(), ele->element_accessor())
1539  * ele->measure();
1540  local_diagonal[i_loc_row]= - diagonal_coeff / time_->dt();
1541 
1542  if (balance_ != nullptr)
1543  balance_->add_mass_matrix_values(water_balance_idx_, ele->region().bulk_idx(), {i_loc_row}, {diagonal_coeff});
1544  }
1545  VecRestoreArray(new_diagonal,& local_diagonal);
1546  MatDiagonalSet(*( schur0->get_matrix() ), new_diagonal, ADD_VALUES);
1547 
1548  solution_changed_for_scatter=true;
1549  schur0->set_matrix_changed();
1550 
1551  if (balance_ != nullptr)
1552  balance_->finish_mass_assembly(water_balance_idx_);
1553 }
1554 
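// Time discretization note (sketch): setup_time_term() adds -c/dt to the matrix
// diagonal and modify_system() adds -(c/dt) * p_old to the right-hand side, with
// c = cross_section * storativity * |element|; together this realizes an implicit
// (backward) Euler step for the storage term on top of the steady MH system.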
1555 void DarcyFlowMH_Unsteady::modify_system() {
1556  START_TIMER("modify system");
1557  if (time_->is_changed_dt() && time_->step().index()>0) {
1558  double scale_factor=time_->step(-2).length()/time_->step().length();
1559  if (scale_factor != 1.0) {
1560  // if time step has changed and setup_time_term not called
1561  MatDiagonalSet(*( schur0->get_matrix() ),steady_diagonal, INSERT_VALUES);
1562 
1563  VecScale(new_diagonal, time_->last_dt()/time_->dt());
1564  MatDiagonalSet(*( schur0->get_matrix() ),new_diagonal, ADD_VALUES);
1565  schur0->set_matrix_changed();
1566  }
1567  }
1568 
1569  // modify RHS - add previous solution
1570  VecPointwiseMult(*( schur0->get_rhs()), new_diagonal, schur0->get_solution());
1571  VecAXPY(*( schur0->get_rhs()), 1.0, steady_rhs);
1572  schur0->set_rhs_changed();
1573 
1574  // swap solutions
1575  VecSwap(previous_solution, schur0->get_solution());
1576 }
1577 
1578 // ========================
1579 // unsteady
1580 
1581 DarcyFlowLMH_Unsteady::DarcyFlowLMH_Unsteady(Mesh &mesh_in, const Input::Record in_rec)
1582  : DarcyFlowMH_Steady(mesh_in, in_rec,false)
1583 {
1584  time_ = new TimeGovernor(in_rec.val<Input::Record>("time"));
1585  data_.mark_input_times(this->mark_type());
1586 
1587 
1588  data_.set_limit_side(LimitSide::right);
1589  data_.set_time(time_->step());
1590 
1591  output_object = new DarcyFlowMHOutput(this, in_rec.val<Input::Record>("output"));
1592  //balance_->units(output_object->get_output_fields().field_ele_pressure.units()*data_.cross_section.units()*data_.storativity.units());
1593 
1594  //time_->fix_dt_until_mark();
1595  create_linear_system();
1596  VecDuplicate(schur0->get_solution(), &previous_solution);
1597  VecCreateMPI(PETSC_COMM_WORLD,rows_ds->lsize(),PETSC_DETERMINE,&(steady_diagonal));
1598  VecDuplicate(steady_diagonal,& new_diagonal);
1599  VecDuplicate(*( schur0->get_rhs()), &steady_rhs);
1600 
1601  assembly_linear_system();
1602  read_init_condition();
1603  output_data();
1604 }
1605 
1606 void DarcyFlowLMH_Unsteady::read_init_condition()
1607 {
1608  VecZeroEntries(schur0->get_solution());
1609 
1610  // apply initial condition
1611  // cycle over local element rows
1612 
1613  ElementFullIter ele = ELEMENT_FULL_ITER(mesh_, NULL);
1614  double init_value;
1615 
1616  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1617  ele = mesh_->element(el_4_loc[i_loc_el]);
1618 
1619  init_value = data_.init_pressure.value(ele->centre(), ele->element_accessor());
1620 
1621  FOR_ELEMENT_SIDES(ele,i) {
1622  int edge_row = row_4_edge[ele->side(i)->edge_idx()];
1623  VecSetValue(schur0->get_solution(),edge_row,init_value/ele->n_sides(),ADD_VALUES);
1624  }
1625  }
1626  VecAssemblyBegin(schur0->get_solution());
1627  VecAssemblyEnd(schur0->get_solution());
1628 
1629  solution_changed_for_scatter=true;
1630 }
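Note: with ADD_VALUES every edge (Lagrange-multiplier) row accumulates an equal share of the initial pressure from each adjacent local element; schematically,

    \lambda_f^{0} = \sum_{e \supset f} \frac{p^{0}_{\mathrm{init}}(e)}{n_{\mathrm{sides}}(e)},

where $p^{0}_{\mathrm{init}}(e)$ is the init_pressure field evaluated at the element centre.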
1631 
1632 
1633 
1634 void DarcyFlowLMH_Unsteady::setup_time_term()
1635 {
1636  // save diagonal of steady matrix
1637  MatGetDiagonal(*( schur0->get_matrix() ), steady_diagonal);
1638  // save RHS
1639  VecCopy(*( schur0->get_rhs()),steady_rhs);
1640 
1641  VecZeroEntries(new_diagonal);
1642 
1643  // modify matrix diagonal
1644  // cycle over local element rows
1645  ElementFullIter ele = ELEMENT_FULL_ITER(mesh_, NULL);
1646  DBGMSG("setup time term with dt: %f\n", time_->dt());
1647 
1648  if (balance_ != nullptr)
1649  balance_->start_mass_assembly(water_balance_idx_);
1650 
1651  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1652  ele = mesh_->element(el_4_loc[i_loc_el]);
1653 
 1654  data_.init_pressure.value(ele->centre(), ele->element_accessor());  // note: the returned value is not used here
1655 
1656  FOR_ELEMENT_SIDES(ele,i) {
1657  int edge_row = row_4_edge[ele->side(i)->edge_idx()];
1658  // set new diagonal
1659  double diagonal_coef = ele->measure() *
1660  data_.storativity.value(ele->centre(), ele->element_accessor()) *
1661  data_.cross_section.value(ele->centre(), ele->element_accessor())
1662  / ele->n_sides();
1663  VecSetValue(new_diagonal, edge_row, -diagonal_coef / time_->dt(), ADD_VALUES);
1664 
1665  if (balance_ != nullptr)
1666  balance_->add_mass_matrix_values(water_balance_idx_, ele->region().bulk_idx(), {edge_row}, {diagonal_coef});
1667 
1668 
1669  }
1670  }
1671  VecAssemblyBegin(new_diagonal);
1672  VecAssemblyEnd(new_diagonal);
1673 
1674  MatDiagonalSet(*( schur0->get_matrix() ),new_diagonal, ADD_VALUES);
1675 
1676  solution_changed_for_scatter=true;
1677  schur0->set_matrix_changed();
1678 
1679  if (balance_ != nullptr)
1680  balance_->finish_mass_assembly(water_balance_idx_);
1681 }
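Note: in the lumped (LMH) scheme the time term sits on the edge rows, each side carrying an equal share of the element storage; per element $e$ and side/edge $f$ (sketch):

    d_{ef} = \frac{|e|\, S_e\, \delta_e}{n_{\mathrm{sides}}(e)}, \qquad \mathrm{diag}_f \leftarrow \mathrm{diag}_f - \sum_{e \supset f} \frac{d_{ef}}{\Delta t},

with the positive coefficients $d_{ef}$ again handed to the mass balance.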
1682 
1683 void DarcyFlowLMH_Unsteady::modify_system() {
1684  START_TIMER("modify system");
1685  //if (time_->step().index()>0)
1686  // DBGMSG("dt: %f dt-1: %f indexchanged: %d matrix: %d\n", time_->step().length(), time_->step(-1).length(), time_->is_changed_dt(), schur0->is_matrix_changed() );
1687 
1688  if (time_->is_changed_dt() && time_->step().index()>0) {
 1689  // the time step has changed and setup_time_term was not called: rescale the stored time term
1690 
1691  double scale_factor=time_->step(-2).length()/time_->step().length();
1692  if (scale_factor != 1.0) {
1693  MatDiagonalSet(*( schur0->get_matrix() ),steady_diagonal, INSERT_VALUES);
1694 
1695  //DBGMSG("Scale factor: %f\n",scale_factor);
1696  VecScale(new_diagonal, scale_factor);
1697  MatDiagonalSet(*( schur0->get_matrix() ),new_diagonal, ADD_VALUES);
1698  schur0->set_matrix_changed();
1699  }
1700  }
1701 
1702  // modify RHS - add previous solution
1703  VecPointwiseMult(*( schur0->get_rhs()), new_diagonal, schur0->get_solution());
1704  VecAXPY(*( schur0->get_rhs()), 1.0, steady_rhs);
1705  schur0->set_rhs_changed();
1706 
1707  // swap solutions
1708  VecSwap(previous_solution, schur0->get_solution());
1709 }
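Note: both modify_system() variants rely on the same PETSc idiom: remember the steady diagonal, re-insert it when $\Delta t$ changes, then add back the rescaled time term. Below is a minimal, self-contained sketch of that idiom only (assumes just PETSc; the 4x4 test matrix, the constant time term and the scale factor are made-up stand-ins, not values from this solver; error checking omitted):

    #include <petscmat.h>

    int main(int argc, char **argv) {
        PetscInitialize(&argc, &argv, NULL, NULL);

        // Stand-in for the steady MH matrix: a small diagonal test matrix.
        Mat A;
        MatCreateSeqAIJ(PETSC_COMM_SELF, 4, 4, 1, NULL, &A);
        for (PetscInt i = 0; i < 4; i++) MatSetValue(A, i, i, 10.0 + i, INSERT_VALUES);
        MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
        MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);

        Vec steady_diag, time_diag;
        VecCreateSeq(PETSC_COMM_SELF, 4, &steady_diag);
        VecDuplicate(steady_diag, &time_diag);

        // setup_time_term(): remember the steady diagonal, then add the time term.
        MatGetDiagonal(A, steady_diag);
        VecSet(time_diag, -2.0);                    // plays the role of -d_e/dt
        MatDiagonalSet(A, time_diag, ADD_VALUES);

        // modify_system() after a dt change: restore the steady diagonal,
        // rescale the stored time term and add it back.
        PetscScalar scale_factor = 0.5;             // hypothetical old_dt/new_dt
        MatDiagonalSet(A, steady_diag, INSERT_VALUES);
        VecScale(time_diag, scale_factor);
        MatDiagonalSet(A, time_diag, ADD_VALUES);

        MatView(A, PETSC_VIEWER_STDOUT_SELF);

        VecDestroy(&steady_diag);
        VecDestroy(&time_diag);
        MatDestroy(&A);
        PetscFinalize();
        return 0;
    }

Re-inserting the saved steady diagonal before adding the rescaled term avoids re-assembling the whole matrix when only the time step length changes.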
1710 
1711 
1712 void DarcyFlowLMH_Unsteady::assembly_source_term()
1713 {
1714  if (balance_ != nullptr)
1715  balance_->start_source_assembly(water_balance_idx_);
1716 
1717  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++)
1718  {
1719  ElementFullIter ele = mesh_->element(el_4_loc[i_loc]);
1720 
1721  // set lumped source
1722  double diagonal_coef = ele->measure()
1723  * data_.cross_section.value(ele->centre(), ele->element_accessor())
1724  * data_.water_source_density.value(ele->centre(), ele->element_accessor())
1725  / ele->n_sides();
1726 
1727  FOR_ELEMENT_SIDES(ele,i)
1728  {
1729  int edge_row = row_4_edge[ele->side(i)->edge_idx()];
1730 
1731  schur0->rhs_set_value(edge_row, -diagonal_coef);
1732 
1733  if (balance_ != nullptr)
1734  balance_->add_source_rhs_values(water_balance_idx_, ele->region().bulk_idx(), {edge_row}, {diagonal_coef});
1735  }
1736  }
1737 
1738  if (balance_ != nullptr)
1739  balance_->finish_source_assembly(water_balance_idx_);
1740 }
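Note: the lumped source distributes each element's water source equally among its sides; the contribution written for element $e$ on the edge row $f$ of its $i$-th side is (sketch; $q$ = water_source_density, $\delta$ = cross_section)

    -\,\frac{|e|\, \delta_e\, q_e}{n_{\mathrm{sides}}(e)},

while the same value with positive sign is recorded in the source term of the water balance.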
1741 
1742 
1743 void DarcyFlowLMH_Unsteady::postprocess() {
1744  int side_row, loc_edge_row, i;
1745  Edge* edg;
1746  ElementIter ele;
1747  double new_pressure, old_pressure, time_coef;
1748 
1749  PetscScalar *loc_prev_sol;
1750  VecGetArray(previous_solution, &loc_prev_sol);
1751 
1752  // modify side fluxes in parallel
1753  // for every local edge take time term on diagonal and add it to the corresponding flux
1754  for (unsigned int i_loc = 0; i_loc < edge_ds->lsize(); i_loc++) {
1755 
1756  edg = &( mesh_->edges[ edge_4_loc[i_loc] ] );
1757  loc_edge_row = side_ds->lsize() + el_ds->lsize() + i_loc;
1758 
1759  new_pressure = (schur0->get_solution_array())[loc_edge_row];
1760  old_pressure = loc_prev_sol[loc_edge_row];
1761  FOR_EDGE_SIDES(edg,i) {
1762  ele = edg->side(i)->element();
1763  side_row = side_row_4_id[ mh_dh.side_dof( edg->side(i) ) ];
1764  time_coef = - ele->measure() *
1765  data_.cross_section.value(ele->centre(), ele->element_accessor()) *
1766  data_.storativity.value(ele->centre(), ele->element_accessor()) /
1767  time_->dt() / ele->n_sides();
1768  VecSetValue(schur0->get_solution(), side_row, time_coef * (new_pressure - old_pressure), ADD_VALUES);
1769  }
1770  }
1771  VecRestoreArray(previous_solution, &loc_prev_sol);
1772 
1773  VecAssemblyBegin(schur0->get_solution());
1774  VecAssemblyEnd(schur0->get_solution());
1775 
1776  int side_rows[4];
1777  double values[4];
1778 
 1779  // modify side fluxes in parallel
 1780  // for every local element add its lumped water source contribution to the corresponding side fluxes
1781 
1782  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
1783  ele = mesh_->element(el_4_loc[i_loc]);
1784  FOR_ELEMENT_SIDES(ele,i) {
1785  side_rows[i] = side_row_4_id[ mh_dh.side_dof( ele->side(i) ) ];
1786  values[i] = 1.0 * ele->measure() *
1787  data_.cross_section.value(ele->centre(), ele->element_accessor()) *
1788  data_.water_source_density.value(ele->centre(), ele->element_accessor()) /
1789  ele->n_sides();
1790  }
1791  VecSetValues(schur0->get_solution(), ele->n_sides(), side_rows, values, ADD_VALUES);
1792  }
1793  VecAssemblyBegin(schur0->get_solution());
1794  VecAssemblyEnd(schur0->get_solution());
1795 }
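Note: the two loops above correct the side fluxes stored in the solution vector so that they account for the time and source terms of the lumped scheme; per side of element $e$ (sketch, $\lambda_f$ = edge pressure):

    u_{ef} \;+{=}\; -\frac{|e|\,\delta_e\,S_e}{n_{\mathrm{sides}}(e)\,\Delta t}\,\bigl(\lambda_f^{\,n+1} - \lambda_f^{\,n}\bigr) \;+\; \frac{|e|\,\delta_e\,q_e}{n_{\mathrm{sides}}(e)}.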
1796 
1797 //-----------------------------------------------------------------------------
1798 // vim: set cindent: