Flow123d  jenkins-Flow123d-windows32-release-multijob-28
darcy_flow_mh.cc
Go to the documentation of this file.
1 /*!
2  *
3  * Copyright (C) 2007 Technical University of Liberec. All rights reserved.
4  *
5  * Please add a reference to Flow123d on your project site if you use the program for any purpose,
6  * especially for academic research:
7  * Flow123d, Research Centre: Advanced Remedial Technologies, Technical University of Liberec, Czech Republic
8  *
9  * This program is free software; you can redistribute it and/or modify it under the terms
10  * of the GNU General Public License version 3 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
13  * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14  * See the GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License along with this program; if not,
17  * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18  *
19  *
20  * $Id$
21  * $Revision$
22  * $LastChangedBy$
23  * $LastChangedDate$
24  *
25  *
26  * @file
27  * @ingroup flow
28  * @brief Setup and solve linear system of mixed-hybrid discretization of the linear
29  * porous media flow with possible preferential flow in fractures and channels.
30  *
31  */
32 
33 #include "petscmat.h"
34 #include "petscviewer.h"
35 #include "petscerror.h"
36 #include <armadillo>
37 #include <boost/foreach.hpp>
38 
39 
40 #include "system/system.hh"
41 
42 #include "mesh/mesh.h"
43 #include "mesh/intersection.hh"
44 #include "mesh/partitioning.hh"
45 #include "la/distribution.hh"
46 #include "la/linsys.hh"
47 #include "la/linsys_PETSC.hh"
48 #include "la/linsys_BDDC.hh"
49 //#include "la/solve.h"
50 #include "la/schur.hh"
51 #include "la/sparse_graph.hh"
53 
54 #include "system/file_path.hh"
55 #include "flow/mh_fe_values.hh"
56 #include "flow/darcy_flow_mh.hh"
57 
59 
60 #include "fem/mapping_p1.hh"
61 #include "fem/fe_p.hh"
62 #include "fem/fe_values.hh"
64 
65 #include <limits>
66 #include <set>
67 #include <vector>
68 #include <iostream>
69 #include <iterator>
70 #include <algorithm>
71 
73 
75 #include "fields/field.hh"
76 #include "fields/field_values.hh"
77 #include "system/sys_profiler.hh"
78 
79 
80 
81 
// NOTE(review): this is a generated listing — several left-hand-side
// declarator lines (original lines 84, 91, 100, 104–106, 112, 122, 125, 130,
// 133) are missing from this view, so the static Input::Type registrations
// below appear without their declarations. Code kept byte-identical.
82 namespace it = Input::Type;
83 
// Selection describing the available mortar (dimension-coupling) methods.
85  = it::Selection("MH_MortarMethod")
86  .add_value(NoMortar, "None", "Mortar space: P0 on elements of lower dimension.")
87  .add_value(MortarP0, "P0", "Mortar space: P0 on elements of lower dimension.")
88  .add_value(MortarP1, "P1", "Mortar space: P1 on intersections, using non-conforming pressures.");
89 
90 
// Selection of boundary-condition types supported by the MH solver.
92  it::Selection("EqData_bc_Type")
93  .add_value(none, "none", "Homogeneous Neoumann BC.")
94  .add_value(dirichlet, "dirichlet")
95  .add_value(neumann, "neumann")
96  .add_value(robin, "robin")
97  .add_value(total_flux, "total_flux");
98 
99 //new input type with FIELDS
// Abstract input record shared by all DarcyFlowMH implementations.
101  it::AbstractRecord("DarcyFlowMH", "Mixed-Hybrid solver for saturated Darcy flow.")
102  .declare_key("n_schurs", it::Integer(0,2), it::Default("2"),
103  "Number of Schur complements to perform when solving MH sytem.")
105  "Linear solver for MH problem.")
107  "Parameters of output form MH module.")
108  .declare_key("mortar_method", mh_mortar_selection, it::Default("None"),
109  "Method for coupling Darcy flow between dimensions." );
110 
111 
// Input record of the steady MH solver; adds the field list and piezo-head keys.
113  = it::Record("Steady_MH", "Mixed-Hybrid solver for STEADY saturated Darcy flow.")
114  .derive_from(DarcyFlowMH::input_type)
115  .declare_key("input_fields", it::Array(
116  DarcyFlowMH_Steady::EqData().make_field_descriptor_type("DarcyFlowMH")
117  .declare_key("bc_piezo_head", FieldAlgorithmBase< 3, FieldValue<3>::Scalar >::get_input_type(), "Boundary condition for pressure as piezometric head." )
118  .declare_key("init_piezo_head", FieldAlgorithmBase< 3, FieldValue<3>::Scalar >::get_input_type(), "Initial condition for pressure as piezometric head." )
119  .declare_key(OldBcdInput::flow_old_bcd_file_key(), it::FileName::input(), "File with mesh dependent boundary conditions (obsolete).")
120  ), it::Default::obligatory(), "" );
121 
// Input record of the unsteady MH solver; copies all steady keys.
123  = it::Record("Unsteady_MH", "Mixed-Hybrid solver for unsteady saturated Darcy flow.")
124  .derive_from(DarcyFlowMH::input_type)
126  "Time governor setting for the unsteady Darcy flow model.")
127  .copy_keys(DarcyFlowMH_Steady::input_type);
128 
129 
// Input record of the lumped unsteady (LMH) solver; copies all steady keys.
131  = it::Record("Unsteady_LMH", "Lumped Mixed-Hybrid solver for unsteady saturated Darcy flow.")
132  .derive_from(DarcyFlowMH::input_type)
134  "Time governor setting for the unsteady Darcy flow model.")
135  .copy_keys(DarcyFlowMH_Steady::input_type);
136 
137 
138 
139 
140 // gravity vector + constant shift of values
141 arma::vec4 DarcyFlowMH::EqData::gravity_=arma::vec4("0 0 -1 0");
142 
143 
// EqData constructor: registers all input fields of the MH flow model and
// groups them into subsets used to decide what must be reassembled on change.
// NOTE(review): the constructor signature (original line 144) and several
// field-description lines (155-156, 159-160, 164-165, 168-169) are missing
// from this listing; code kept byte-identical.
145 {
146  gravity_ = arma::vec4("0 0 -1 0");
147 
// Bulk fields with defaults (conductivity tensor, geometry, sources).
148  ADD_FIELD(anisotropy, "Anisotropy of the conductivity tensor.", "1.0" );
149  ADD_FIELD(cross_section, "Complement dimension parameter (cross section for 1D, thickness for 2D).", "1.0" );
150  ADD_FIELD(conductivity, "Isotropic conductivity scalar.", "1.0" );
151  ADD_FIELD(sigma, "Transition coefficient between dimensions.", "1.0");
152  ADD_FIELD(water_source_density, "Water source density.", "0.0");
153 
// Boundary-condition fields; bc_type defaults to homogeneous Neumann ("none").
154  ADD_FIELD(bc_type,"Boundary condition type, possible values:", "\"none\"" );
157 
158  ADD_FIELD(bc_pressure,"Dirichlet BC condition value for pressure.");
161 
162 
163  ADD_FIELD(bc_flux,"Flux in Neumman or Robin boundary condition.");
166 
167  ADD_FIELD(bc_robin_sigma,"Conductivity coefficient in Robin boundary condition.");
170 
171  //these are for unsteady
172  ADD_FIELD(init_pressure, "Initial condition as pressure", "0.0" );
173  ADD_FIELD(storativity,"Storativity.", "1.0" );
174 
// Field subsets: time term, main (system) matrix, and right-hand side.
175  time_term_fields = this->subset({"storativity"});
176  main_matrix_fields = this->subset({"anisotropy", "conductivity", "cross_section", "sigma", "bc_type", "bc_robin_sigma"});
177  rhs_fields = this->subset({"water_source_density", "bc_pressure", "bc_flux"});
178 
179 }
180 
181 
182 
183 
184 //=============================================================================
185 // CREATE AND FILL GLOBAL MH MATRIX OF THE WATER MODEL
186 // - do it in parallel:
187 // - initial distribution of elements, edges
188 //
189 /*! @brief CREATE AND FILL GLOBAL MH MATRIX OF THE WATER MODEL
190  *
191  * Parameters {Solver,NSchurs} number of performed Schur
192  * complements (0,1,2) for water flow MH-system
193  *
194  */
195 //=============================================================================
// Constructor: binds input data to the mesh, sizes the MH system
// (elements + sides + edges), prepares the parallel distribution, and
// (optionally, when make_tg is true) creates a steady time governor and the
// output object. NOTE(review): original lines 224-225, 243-244 and 248 are
// missing from this listing; code kept byte-identical.
196 DarcyFlowMH_Steady::DarcyFlowMH_Steady(Mesh &mesh_in, const Input::Record in_rec, bool make_tg )
197 : DarcyFlowMH(mesh_in, in_rec)
198 
199 {
200  using namespace Input;
201  START_TIMER("Darcy constructor");
202 
203  this->eq_data_ = &data_;
204 
205  //connecting data fields with mesh
206  START_TIMER("data init");
207  data_.set_mesh(mesh_in);
208  data_.set_input_list( in_rec.val<Input::Array>("input_fields") );
209 
210 
211 
212 
213  END_TIMER("data init");
214 
215 
// Total size of the MH system: one dof per element, side and edge.
216  size = mesh_->n_elements() + mesh_->n_sides() + mesh_->n_edges();
217  n_schur_compls = in_rec.val<int>("n_schurs");
218 
219  solution = NULL;
220  schur0 = NULL;
221 
222 
223  mortar_method_= in_rec.val<MortarMethod>("mortar_method");
224  if (mortar_method_ != NoMortar) {
226  }
227 
228  mh_dh.reinit(mesh_);
229 
230  prepare_parallel(in_rec.val<AbstractRecord>("solver"));
231 
232  //side_ds->view( std::cout );
233  //el_ds->view( std::cout );
234  //edge_ds->view( std::cout );
235  //rows_ds->view( std::cout );
236 
237 
238 
239 
240  if (make_tg) {
241  // steady time governor
242  time_ = new TimeGovernor();
245  data_.set_time(*time_);
246 
247  output_object = new DarcyFlowMHOutput(this, in_rec.val<Input::Record>("output"));
249 
250  //make_serial_scatter();
251  }
252 
253 
254 }
255 
256 
257 
258 
259 //=============================================================================
260 // COMPOSE and SOLVE WATER MH System possibly through Schur complements
261 //=============================================================================
// NOTE(review): the function signature (original line 262) is missing from
// this listing — presumably DarcyFlowMH_Steady::update_solution(); confirm
// against the repository. Advances time (for unsteady runs), solves the MH
// linear system, post-processes fluxes and writes output.
263  START_TIMER("Solving MH system");
264 
265 
// Nothing to do past the end of simulated time.
266  if (time_->is_end()) return;
267 
268  if (! time_->is_steady()) time_->next_time();
269 
270 
271 
// Solve the (possibly Schur-reduced) system; negative reason means divergence.
273  int convergedReason = schur0->solve();
274 
275  xprintf(MsgLog, "Linear solver ended with reason: %d \n", convergedReason );
276  ASSERT( convergedReason >= 0, "Linear solver failed to converge. Convergence reason %d \n", convergedReason );
277 
278  this -> postprocess();
279 
281 
282  output_data();
283 
284  if (time_->is_steady()) time_->next_time();
285 }
286 
// NOTE(review): the signature (original line 287) is missing from this
// listing — presumably DarcyFlowMH_Steady::postprocess(). The active body is
// only the timer; the flux correction below is commented out.
288 {
289  START_TIMER("postprocess");
290  //ElementFullIter ele = ELEMENT_FULL_ITER(mesh_, NULL);
291 
292  // modify side fluxes in parallel
293  // for every local edge take time term on diagonal and add it to the corresponding flux
294  /*
295  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
296  ele = mesh_->element(el_4_loc[i_loc]);
297  FOR_ELEMENT_SIDES(ele,i) {
298  side_rows[i] = side_row_4_id[ mh_dh.side_dof( ele->side(i) ) ];
299  values[i] = -1.0 * ele->measure() *
300  data.cross_section.value(ele->centre(), ele->element_accessor()) *
301  data.water_source_density.value(ele->centre(), ele->element_accessor()) /
302  ele->n_sides();
303  }
304  VecSetValues(schur0->get_solution(), ele->n_sides(), side_rows, values, ADD_VALUES);
305  }
306  VecAssemblyBegin(schur0->get_solution());
307  VecAssemblyEnd(schur0->get_solution());
308  */
309 }
310 
311 
// NOTE(review): signature (original line 312) missing from this listing —
// presumably DarcyFlowMH_Steady::output_data(). Prints time-governor state
// and delegates to the output object.
313  time_->view("DARCY"); //time governor information output
314  this->output_object->output();
315 }
316 
// NOTE(review): signature (original line 317) missing from this listing —
// presumably double DarcyFlowMH_Steady::solution_precision() const.
// Forwards the precision reported by the linear system.
318 {
319  return schur0->get_solution_precision();
320 }
321 
322 
// Copies the solver solution, reordered to application ordering via
// solver_indices_, into the member buffer solution_ and exposes it through
// the out-parameters vec / vec_size.
// NOTE(review): original line 327 is missing from this listing — the stray
// closing brace before 'vec=' below suggests a dropped opening
// 'if (...) {' (likely solution_changed_for_scatter); confirm against the
// repository. Code kept byte-identical.
323 void DarcyFlowMH_Steady::get_solution_vector(double * &vec, unsigned int &vec_size)
324 {
325  // TODO: make class for vectors (wrapper for PETSC or other) derived from LazyDependency
326  // and use its mechanism to manage dependency between vectors
328  // // scatter solution to all procs
329  // VecScatterBegin(par_to_all, schur0->get_solution(), sol_vec,
330  // INSERT_VALUES, SCATTER_FORWARD);
331  // VecScatterEnd(par_to_all, schur0->get_solution(), sol_vec,
332  // INSERT_VALUES, SCATTER_FORWARD);
333  // solution_changed_for_scatter=false;
334 
335  std::vector<double> sol_disordered(this->size);
336  schur0 -> get_whole_solution( sol_disordered );
337 
338  // reorder solution to application ordering
339  if ( solution_.empty() ) solution_.resize( this->size, 0. );
340  for ( int i = 0; i < this->size; i++ ) {
341  solution_[i] = sol_disordered[solver_indices_[i]];
342  }
343  }
344 
345  vec=&(solution_[0]);
346  vec_size = solution_.size();
347  ASSERT(vec != NULL, "Requested solution is not allocated!\n");
348 }
349 
// Intended to expose the element partitioning; currently a no-op stub —
// the whole body is commented out, so the out-parameters are left untouched.
350 void DarcyFlowMH_Steady::get_partitioning_vector(int * &elem_part, unsigned &lelem_part)
351 {
352 
353 // elem_part=&(element_part[0]);
354 // lelem_part = element_part.size();
355 // ASSERT(elem_part != NULL, "Requested vector is not allocated!\n");
356 }
357 
// NOTE(review): signature (original line 358) missing from this listing —
// presumably the parallel-solution getter taking a Vec& out-parameter.
// Hands out the solver's parallel solution vector directly.
359 {
360  vec=schur0->get_solution();
361  ASSERT(vec != NULL, "Requested solution is not allocated!\n");
362 }
363 
364 
365 
366 // ===========================================================================================
367 //
368 // MATRIX ASSEMBLY - we use abstract assembly routine, where LS Mat/Vec SetValues
369 // are in fact pointers to allocating or filling functions - this is governed by Linsystem routines
370 //
371 // =======================================================================================
372 
373 
374 // ******************************************
375 // ABSTRACT ASSEMBLY OF MH matrix
376 // TODO: matice by se mela sestavovat zvlast pro kazdou dimenzi (objem, pukliny, pruseciky puklin)
377 // konekce by se mely sestavovat cyklem pres konekce, konekce by mely byt paralelizovany podle
378 // distribuce elementu nizssi dimenze
379 // k tomuto je treba nejdriv spojit s JK verzi, aby se vedelo co se deje v transportu a
380 // predelat mesh a neigbouring
381 // *****************************************
// Assembles (or, during preallocation, only allocates) the global MH matrix
// and right-hand side for the local elements: block A (side-side), B/B'
// (element-side), C/C' (side-edge) with boundary conditions on edges, the
// source term, compatible 1D-2D/2D-3D couplings, and virtual zero entries
// needed for Schur-complement allocation. Also feeds diagonal weights to
// BDDCML when that solver is active.
// NOTE(review): the function signature (original line 382) and the 'ele'
// declaration (line 384) are missing from this listing — presumably
// DarcyFlowMH_Steady::assembly_steady_mh_matrix() with an ElementFullIter.
// Code kept byte-identical.
383  LinSys *ls = schur0;
385  MHFEValues fe_values;
386 
387  // We use FESideValues for calculating normal vectors.
388  // For initialization of FESideValues some auxiliary objects are needed.
389  MappingP1<1,3> map1;
390  MappingP1<2,3> map2;
391  MappingP1<3,3> map3;
392  QGauss<0> q0(1);
393  QGauss<1> q1(1);
394  QGauss<2> q2(1);
395  FE_P_disc<1,1,3> fe1;
396  FE_P_disc<0,2,3> fe2;
397  FE_P_disc<0,3,3> fe3;
398  FESideValues<1,3> fe_side_values1(map1, q0, fe1, update_normal_vectors);
399  FESideValues<2,3> fe_side_values2(map2, q1, fe2, update_normal_vectors);
400  FESideValues<3,3> fe_side_values3(map3, q2, fe3, update_normal_vectors);
401 
402  class Boundary *bcd;
403  class Neighbour *ngh;
404 
// During the preallocation pass the local matrices are not computed.
405  bool fill_matrix = schur0->is_preallocated();
406  //DBGMSG("fill_matrix: %d\n", fill_matrix);
407  int el_row, side_row, edge_row;
408  int tmp_rows[100];
409  //int nsides;
410  int side_rows[4], edge_rows[4]; // rows for sides and edges of one element
411  double local_vb[4]; // 2x2 matrix
412 
413  // to make space for second schur complement, max. 10 neighbour edges of one el.
414  double zeros[1000];
415  for(int i=0; i<1000; i++) zeros[i]=0.0;
416 
417  double minus_ones[4] = { -1.0, -1.0, -1.0, -1.0 };
418  double loc_side_rhs[4];
419 
420 
// Loop over elements owned by this process.
421  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
422 
423  ele = mesh_->element(el_4_loc[i_loc]);
424  el_row = row_4_el[el_4_loc[i_loc]];
425  unsigned int nsides = ele->n_sides();
426  if (fill_matrix) fe_values.update(ele, data_.anisotropy, data_.cross_section, data_.conductivity);
427  double cross_section = data_.cross_section.value(ele->centre(), ele->element_accessor());
428 
429  for (unsigned int i = 0; i < nsides; i++) {
430  side_row = side_rows[i] = side_row_4_id[ mh_dh.side_dof( ele->side(i) ) ];
431  edge_row = edge_rows[i] = row_4_edge[ele->side(i)->edge_idx()];
432  bcd=ele->side(i)->cond();
433 
434  // gravity term on RHS
435  loc_side_rhs[i] = (ele->centre()[ 2 ] - ele->side(i)->centre()[ 2 ]);
436 
437  // set block C and C': side-edge, edge-side
438  double c_val = 1.0;
439 
// Boundary sides: modify edge row/RHS according to the BC type.
440  if (bcd) {
441  ElementAccessor<3> b_ele = bcd->element_accessor();
442  EqData::BC_Type type = (EqData::BC_Type)data_.bc_type.value(b_ele.centre(), b_ele);
443  //DBGMSG("BCD id: %d sidx: %d type: %d\n", ele->id(), i, type);
444  if ( type == EqData::none) {
445  // homogeneous neumann
446  } else if ( type == EqData::dirichlet ) {
447  c_val = 0.0;
448  double bc_pressure = data_.bc_pressure.value(b_ele.centre(), b_ele);
449  loc_side_rhs[i] -= bc_pressure;
450  ls->rhs_set_value(edge_row, -bc_pressure);
451  ls->mat_set_value(edge_row, edge_row, -1.0);
452 
453  } else if ( type == EqData::neumann) {
454  double bc_flux = data_.bc_flux.value(b_ele.centre(), b_ele);
455  ls->rhs_set_value(edge_row, bc_flux * bcd->element()->measure() * cross_section);
456  //DBGMSG("neumann edge_row, ele_index,el_idx: %d \t %d \t %d\n", edge_row, ele->index(), ele->side(i)->el_idx());
457 
458  } else if ( type == EqData::robin) {
459  double bc_pressure = data_.bc_pressure.value(b_ele.centre(), b_ele);
460  double bc_sigma = data_.bc_robin_sigma.value(b_ele.centre(), b_ele);
461  ls->rhs_set_value(edge_row, -bcd->element()->measure() * bc_sigma * bc_pressure * cross_section );
462  ls->mat_set_value(edge_row, edge_row, -bcd->element()->measure() * bc_sigma * cross_section );
463 
464  } else {
465  xprintf(UsrErr, "BC type not supported.\n");
466  }
467  }
468  ls->mat_set_value(side_row, edge_row, c_val);
469  ls->mat_set_value(edge_row, side_row, c_val);
470 
471  // assemble matrix for weights in BDDCML
472  // approximation to diagonal of
473  // S = -C - B*inv(A)*B'
474  // as
475  // diag(S) ~ - diag(C) - 1./diag(A)
476  // the weights form a partition of unity to average a discontinuous solution from neighbouring subdomains
477  // to a continuous one
478  // it is important to scale the effect - if conductivity is low for one subdomain and high for the other,
479  // trust more the one with low conductivity - it will be closer to the truth than an arithmetic average
480  if ( typeid(*ls) == typeid(LinSys_BDDC) ) {
481  double val_side = (fe_values.local_matrix())[i*nsides+i];
482  double val_edge = -1./ (fe_values.local_matrix())[i*nsides+i];
483 
484  static_cast<LinSys_BDDC*>(ls)->diagonal_weights_set_value( side_row, val_side );
485  static_cast<LinSys_BDDC*>(ls)->diagonal_weights_set_value( edge_row, val_edge );
486  }
487  }
488 
489  ls->rhs_set_values(nsides, side_rows, loc_side_rhs);
490 
491 
492  // set block A: side-side on one element - block diagonal matrix
493 
494  //std::cout << "subMat in flow" << std::endl;
495  //for ( unsigned i = 0; i < nsides; i++) {
496  // for ( unsigned j = 0; j < nsides; j++) {
497  // std::cout << fe_values.local_matrix()[i*nsides+j] << " ";
498  // }
499  // std::cout << std::endl;
500  //}
501 
502  ls->mat_set_values(nsides, side_rows, nsides, side_rows, fe_values.local_matrix() );
503  // set block B, B': element-side, side-element
504  ls->mat_set_values(1, &el_row, nsides, side_rows, minus_ones);
505  ls->mat_set_values(nsides, side_rows, 1, &el_row, minus_ones);
506 
507 
508  // set sources
509  ls->rhs_set_value(el_row, -1.0 * ele->measure() *
510  data_.cross_section.value(ele->centre(), ele->element_accessor()) *
511  data_.water_source_density.value(ele->centre(), ele->element_accessor()) );
512 
513 
514  // D block: non-compatible connections and diagonal: element-element
515 
516  ls->mat_set_value(el_row, el_row, 0.0); // maybe this should be in virtual block for schur preallocation
517 
518  if ( typeid(*ls) == typeid(LinSys_BDDC) ) {
519  double val_ele = 1.;
520  static_cast<LinSys_BDDC*>(ls)->diagonal_weights_set_value( el_row, val_ele );
521  }
522 
523  // D, E',E block: compatible connections: element-edge
524 
525  for (unsigned int i = 0; i < ele->n_neighs_vb; i++) {
526  // every compatible connection adds a 2x2 matrix involving
527  // current element pressure and a connected edge pressure
528  ngh= ele->neigh_vb[i];
529  tmp_rows[0]=el_row;
530  tmp_rows[1]=row_4_edge[ ngh->edge_idx() ];
531 
532  // compute normal vector to side
533  arma::vec3 nv;
534  ElementFullIter ele_higher = mesh_->element.full_iter(ngh->side()->element());
535  switch (ele_higher->dim()) {
536  case 1:
537  fe_side_values1.reinit(ele_higher, ngh->side()->el_idx());
538  nv = fe_side_values1.normal_vector(0);
539  break;
540  case 2:
541  fe_side_values2.reinit(ele_higher, ngh->side()->el_idx());
542  nv = fe_side_values2.normal_vector(0);
543  break;
544  case 3:
545  fe_side_values3.reinit(ele_higher, ngh->side()->el_idx());
546  nv = fe_side_values3.normal_vector(0);
547  break;
548  }
549 
// Transition coefficient: sigma scaled by normal conductivity and the
// cross-sections of the higher- and lower-dimensional elements.
550  double value = data_.sigma.value( ele->centre(), ele->element_accessor()) *
551  2*data_.conductivity.value( ele->centre(), ele->element_accessor()) *
552  arma::dot(data_.anisotropy.value( ele->centre(), ele->element_accessor())*nv, nv) *
553  data_.cross_section.value( ngh->side()->centre(), ele_higher->element_accessor() ) * // cross-section of higher dim. (2d)
554  data_.cross_section.value( ngh->side()->centre(), ele_higher->element_accessor() ) /
555  data_.cross_section.value( ele->centre(), ele->element_accessor() ) * // cross-section of lower dim.
556  ngh->side()->measure();
557 
558 
559  local_vb[0] = -value; local_vb[1] = value;
560  local_vb[2] = value; local_vb[3] = -value;
561 
562  ls->mat_set_values(2, tmp_rows, 2, tmp_rows, local_vb);
563 
564  // update matrix for weights in BDDCML
565  if ( typeid(*ls) == typeid(LinSys_BDDC) ) {
566  int ind = tmp_rows[1];
567  // there is -value on diagonal in block C!
568  double new_val = - value;
569  static_cast<LinSys_BDDC*>(ls)->diagonal_weights_set_value( ind, new_val );
570  }
571 
572  if (n_schur_compls == 2) {
573  // for 2. Schur: N dim edge is connected with N dim element =>
574  // there are nz between N dim edge and N-1 dim edges of the element
575 
576  ls->mat_set_values(nsides, edge_rows, 1, tmp_rows+1, zeros);
577  ls->mat_set_values(1, tmp_rows+1, nsides, edge_rows, zeros);
578 
579  // save all global edge indices to higher positions
580  tmp_rows[2+i] = tmp_rows[1];
581  }
582  }
583 
584 
585  // add virtual values for schur complement allocation
586  switch (n_schur_compls) {
587  case 2:
588  // Connections between edges of N+1 dim. elements neighboring with actual N dim element 'ele'
589  ASSERT(ele->n_neighs_vb*ele->n_neighs_vb<1000, "Too many values in E block.");
590  ls->mat_set_values(ele->n_neighs_vb, tmp_rows+2,
591  ele->n_neighs_vb, tmp_rows+2, zeros);
592 
593  // ls->mat_set_values(nsides, edge_rows, ele->e_row_count, tmp_rows, zeros);
594  // ls->mat_set_values(ele->e_row_count, tmp_rows, nsides, edge_rows, zeros);
// NOTE(review): deliberate fallthrough — case 1 entries are needed for case 2 as well.
595  case 1: // included also for case 2
596  // -(C')*(A-)*B block and its transpose connect edge with its elements
597  ls->mat_set_values(1, &el_row, nsides, edge_rows, zeros);
598  ls->mat_set_values(nsides, edge_rows, 1, &el_row, zeros);
599  // -(C')*(A-)*C block connect all edges of every element
600  ls->mat_set_values(nsides, edge_rows, nsides, edge_rows, zeros);
601  }
602  }
603 
604  //if (! mtx->ins_mod == ALLOCATE ) {
605  // MatAssemblyBegin(mtx->A,MAT_FINAL_ASSEMBLY);
606  // MatAssemblyEnd(mtx->A,MAT_FINAL_ASSEMBLY);
607  // }
608  // set block F - diagonal: edge-edge from Newton BC
609  // also Dirichlet BC
610  /*
611  for (i_loc = 0; i_loc < edge_ds->lsize(); i_loc++) {
612  edge_row = row_4_edge[edge_4_loc[i_loc]];
613  EdgeFullIter edg = mesh_->edge(edge_4_loc[i_loc]);
614 
615  //xprintf(Msg,"F: %d %f\n",old_4_new[edge_row],edg->f_val);
616  //ls->mat_set_value(edge_row, edge_row, edg->f_val);
617  //ls->rhs_set_value(edge_row, edg->f_rhs);
618  }*/
619 
620 
// Add mortar coupling terms according to the selected method.
621  if (mortar_method_ == MortarP0) {
622  P0_CouplingAssembler(*this).assembly(*ls);
623  } else if (mortar_method_ == MortarP1) {
624  P1_CouplingAssembler(*this).assembly(*ls);
625  }
626 }
627 
// P0_CouplingAssembler::pressure_diff: for entry i_ele of the current master
// list fills the edge-dof indices, the element type (0 = master, 1 = slave),
// the weight delta (-delta_0 for the master, intersection size for a slave)
// and Dirichlet values where a side carries a Dirichlet BC (marked by
// negating the dof index). NOTE(review): the first signature line (original
// line 628) is missing from this listing; code kept byte-identical.
629  vector<int> &dofs, unsigned int &ele_type, double &delta, arma::vec &dirichlet) {
630 
631  const Element *ele;
632 
633  if (i_ele == (int)(ml_it_->size()) ) { // master element .. 1D
634  ele_type = 0;
635  delta = -delta_0;
636  ele=master_;
637  } else {
638  ele_type = 1;
639  const Intersection &isect=intersections_[ (*ml_it_)[i_ele] ];
640  delta = isect.intersection_true_size();
641  ele = isect.slave_iter();
642  }
643 
644  dofs.resize(ele->n_sides());
645 
646  for(unsigned int i_side=0; i_side < ele->n_sides(); i_side++ ) {
647  dofs[i_side]=darcy_.row_4_edge[ele->side(i_side)->edge_idx()];
648  Boundary * bcd = ele->side(i_side)->cond();
649  if (bcd) {
650  dirichlet.resize(ele->n_sides());
651  ElementAccessor<3> b_ele = bcd->element_accessor();
652  DarcyFlowMH::EqData::BC_Type type = (DarcyFlowMH::EqData::BC_Type)darcy_.data_.bc_type.value(b_ele.centre(), b_ele);
653  //DBGMSG("bcd id: %d sidx: %d type: %d\n", ele->id(), i_side, type);
654  if (type == DarcyFlowMH::EqData::dirichlet) {
655  //DBGMSG("Dirichlet: %d\n", ele->index());
// Negative dof index flags a Dirichlet side for LinSys::set_values.
656  dofs[i_side] = -dofs[i_side];
657  double bc_pressure = darcy_.data_.bc_pressure.value(b_ele.centre(), b_ele);
658  dirichlet[i_side] = bc_pressure;
659  }
660  }
661  }
662 
663 }
664 
665 /**
666  * Works well but there is large error next to the boundary.
667  */
// P0 mortar assembly: for every non-empty master list builds the pairwise
// coupling matrix between the master element and its intersecting slaves,
// scaled by sigma and the intersection sizes, and inserts it into the
// linear system. NOTE(review): the function signature (original line 668)
// is missing from this listing — presumably
// P0_CouplingAssembler::assembly(LinSys &ls). Code kept byte-identical.
669  double delta_i, delta_j;
670  arma::mat product;
671  arma::vec dirichlet_i, dirichlet_j;
672  unsigned int ele_type_i, ele_type_j;
673 
674  unsigned int i,j;
675  vector<int> dofs_i,dofs_j;
676 
677  for(ml_it_ = master_list_.begin(); ml_it_ != master_list_.end(); ++ml_it_) {
678 
679  if (ml_it_->size() == 0) continue; // skip empty masters
680 
681 
682  // on the intersection element we consider
683  // * intersection dofs for master and slave
684  // those are dofs of the space into which we interpolate
685  // base functions from individual master and slave elements
686  // For the master dofs both are usually equivalent.
687  // * original dofs - for master same as intersection dofs, for slave
688  // all dofs of slave elements
689 
690  // form list of intersection dofs, in this case pressures in barycenters
691  // but we do not use those from MH system in order to allow second schur complement. We rather map these
692  // dofs to pressure traces, i.e. we use averages of traces as barycentric values.
693 
694 
695  master_ = intersections_[ml_it_->front()].master_iter();
696  delta_0 = master_->measure();
697 
698  double master_sigma=darcy_.data_.sigma.value( master_->centre(), master_->element_accessor());
699 
700  // rows
// Note: loops run to size() inclusive — the last index denotes the master itself.
701  for(i = 0; i <= ml_it_->size(); ++i) {
702  //cout << "Intersection:" << master_->index() << ", "
703  // << intersections_[ (*ml_it_)[i] ].slave_iter()->index();
704  pressure_diff(i, dofs_i, ele_type_i, delta_i, dirichlet_i);
705  //columns
706  for (j = 0; j <= ml_it_->size(); ++j) {
707  pressure_diff(j, dofs_j, ele_type_j, delta_j, dirichlet_j);
708 
709  double scale = -master_sigma * delta_i * delta_j / delta_0;
710  product = scale * tensor_average[ele_type_i][ele_type_j];
711 
712 
713  //cout << product << endl;
714 
715 
716  arma::vec rhs(dofs_i.size());
717  rhs.zeros();
718  ls.set_values( dofs_i, dofs_j, product, rhs, dirichlet_i, dirichlet_j);
719  }
720  }
721  } // loop over master elements
722 }
723 
724 
725 
// Fills dofs[shift..shift+n_sides) with the edge-row indices of the given
// element's sides; where a side carries a Dirichlet BC, the dof index is
// negated (flag for LinSys::set_values) and the prescribed pressure is
// stored in dirichlet at the same position. Assumes dofs/dirichlet are
// already sized by the caller.
726  void P1_CouplingAssembler::add_sides(const Element * ele, unsigned int shift, vector<int> &dofs, vector<double> &dirichlet)
727  {
728 
729  for(unsigned int i_side=0; i_side < ele->n_sides(); i_side++ ) {
730  dofs[shift+i_side] = darcy_.row_4_edge[ele->side(i_side)->edge_idx()];
731  Boundary * bcd = ele->side(i_side)->cond();
732 
733  if (bcd) {
734  ElementAccessor<3> b_ele = bcd->element_accessor();
735  DarcyFlowMH::EqData::BC_Type type = (DarcyFlowMH::EqData::BC_Type)darcy_.data_.bc_type.value(b_ele.centre(), b_ele);
736  //DBGMSG("bcd id: %d sidx: %d type: %d\n", ele->id(), i_side, type);
737  if (type == DarcyFlowMH::EqData::dirichlet) {
738  //DBGMSG("Dirichlet: %d\n", ele->index());
739  dofs[shift + i_side] = -dofs[shift + i_side];
740  double bc_pressure = darcy_.data_.bc_pressure.value(b_ele.centre(), b_ele);
741  dirichlet[shift + i_side] = bc_pressure;
742  }
743  }
744  }
745  }
746 
747 
748 /**
749  * P1 connection of different dimensions
750  * - demonstrated convergence, but still major open questions:
751  * ? in all test cases the error on the fracture is less on the left and greater on the right
752  * with increasing trend
753  * tried:
754  * - changed order of 1d elements (no change)
755  * - higher precision of ngh output and linear solver (no change)
756  * ? seems that there should be some factor 6.0 in the communication term
757  * ? in the case of infinite k2 -> simplest 1d-constant communication, the biggest difference on borders,
758  * numerical solution greater then analytical
759  *
760  * TODO:
761  * * full implementation of Dirichlet BC ( also for 2d sides)
762  */
763 
765 
// P1 mortar assembly: for every 1D-2D intersection builds a 5x5 coupling
// matrix (3 slave side dofs + 2 master side dofs) from the difference of P1
// base functions evaluated at the intersection end points X and Y, and
// inserts it into the linear system. NOTE(review): the function signature
// (original line 764) and the local declarations of dofs/dirichlet/rhs are
// missing from this listing — presumably
// P1_CouplingAssembler::assembly(LinSys &ls). Code kept byte-identical.
766  for (const Intersection &intersec : intersections_) {
767  const Element * master = intersec.master_iter();
768  const Element * slave = intersec.slave_iter();
769 
770  add_sides(master, 0, dofs, dirichlet);
771  add_sides(slave, 2, dofs, dirichlet);
772  double master_sigma=darcy_.data_.sigma.value( master->centre(), master->element_accessor());
773 
774 /*
775  * Local coordinates on 1D
776  * t0
777  * node 0: 0.0
778  * node 1: 1.0
779  *
780  * base function points
781  * t0 = 0.0 on side 0 node 0
782  * t0 = 1.0 on side 1 node 1
783  *
784  * Local coordinates on 2D
785  * t0 t1
786  * node 0: 0.0 0.0
787  * node 1: 1.0 0.0
788  * node 2: 0.0 1.0
789  *
790  * base function points
791  * t0=0.5, t1=0.0 on side 0 nodes (0,1)
792  * t0=0.5, t1=0.5 on side 1 nodes (1,2)
793  * t0=0.0, t1=0.5 on side 2 nodes (2,0)
794  */
795 
796 
797 
798  arma::vec point_Y(1);
799  point_Y.fill(1.0);
800  arma::vec point_2D_Y(intersec.map_to_slave(point_Y)); // local coordinates of Y on slave
801  arma::vec point_1D_Y(intersec.map_to_master(point_Y)); // local coordinates of Y on master
802 
803  arma::vec point_X(1);
804  point_X.fill(0.0);
805  arma::vec point_2D_X(intersec.map_to_slave(point_X)); // local coordinates of X on slave
806  arma::vec point_1D_X(intersec.map_to_master(point_X)); // local coordinates of X on master
807 
808  arma::mat base_2D(3, 3);
809  // base function = a0 + a1*t0 + a2*t1
810  // a0 a1 a2
811  base_2D << 1.0 << 0.0 << -2.0 << arma::endr //point on side 0
812  << -1.0 << 2.0 << 2.0 << arma::endr // point on side 1
813  << 1.0 << -2.0 << 0.0 << arma::endr;// point on side 2
814 
815  arma::mat base_1D(2, 2);
816  // base function = a0 + a1 * t0
817  // a0 a1
818  base_1D << 1.0 << -1.0 << arma::endr // point on side 0,
819  << 0.0 << 1.0 << arma::endr; // point on side 1,
820 
821 
822  //base_1D.print("base_1D: ");
823  //base_2D.print("base_2D: ");
824  //point_2D_X.print("point_2D_X: ");
825  //point_2D_Y.print("point_2D_Y: ");
826 
// Pressure-difference coefficients at the two intersection end points.
827  arma::vec difference_in_Y(5);
828  arma::vec difference_in_X(5);
829 
830  // slave sides 0,1,2
831  difference_in_Y.subvec(0, 2) = -base_2D * point_2D_Y;
832  difference_in_X.subvec(0, 2) = -base_2D * point_2D_X;
833  // master sides 3,4
834  difference_in_Y.subvec(3, 4) = base_1D * point_1D_Y;
835  difference_in_X.subvec(3, 4) = base_1D * point_1D_X;
836 
837  //difference_in_X.print("difference_in_X: ");
838  //difference_in_Y.print("difference_in_Y: ");
839 
840  // entries of the matrix A[i,j]
841  arma::mat A(5, 5);
842  for (int i = 0; i < 5; ++i) {
843  for (int j = 0; j < 5; ++j) {
// Exact integral of the product of two linear functions over the intersection.
844  A(i, j) = -master_sigma * intersec.intersection_true_size() *
845  ( difference_in_Y[i] * difference_in_Y[j]
846  + difference_in_Y[i] * difference_in_X[j]/2
847  + difference_in_X[i] * difference_in_Y[j]/2
848  + difference_in_X[i] * difference_in_X[j]
849  ) * (1.0 / 3.0);
850 
851  }
852  }
853  //for(int i=0;i<5;i++) DBGMSG("idx %d: %d\n",i, dofs[i]);
854  //A.print("A:");
855 
856 
857  ls.set_values( dofs, dofs, A, rhs, dirichlet, dirichlet);
858 
859  }
860 }
861 
862 
863 
864 /*******************************************************************************
865  * COMPOSE WATER MH MATRIX WITHOUT SCHUR COMPLEMENT
866  ******************************************************************************/
867 
869 
// Creates the linear system for the MH matrix according to the solver chosen
// in the input record: either a BDDCML system (no Schur complements) or a
// PETSc system with 0, 1 or 2 nested Schur complements; finishes with the
// preallocation pass of the assembly. NOTE(review): the function signature
// (original line 868) and lines 896, 955-956 are missing from this listing —
// presumably DarcyFlowMH_Steady::create_linear_system().
// Code kept byte-identical.
870  START_TIMER("preallocation");
871 
872  //xprintf(Msg,"****************** problem statistics \n");
873  //xprintf(Msg,"edges: %d \n",mesh_->n_edges());
874  //xprintf(Msg,"sides: %d \n",mesh_->n_sides());
875  //xprintf(Msg,"elements: %d \n",mesh_->n_elements());
876  //xprintf(Msg,"************************************* \n");
877  //xprintf(Msg,"problem size: %d \n",this->size);
878  //xprintf(Msg,"****************** problem statistics \n");
879 
880  auto in_rec = this->input_record_.val<Input::AbstractRecord>("solver");
881 
882  if (schur0 == NULL) { // create Linear System for MH matrix
883 
884  if (in_rec.type() == LinSys_BDDC::input_type) {
885 #ifdef HAVE_BDDCML
886  xprintf(Warn, "For BDDC is using no Schur complements.");
887  n_schur_compls = 0;
888  LinSys_BDDC *ls = new LinSys_BDDC(global_row_4_sub_row->size(), &(*rows_ds),
889  3, // 3 == la::BddcmlWrapper::SPD_VIA_SYMMETRICGENERAL
890  1, // 1 == number of subdomains per process
891  true); // swap signs of matrix and rhs to make the matrix SPD
892  ls->set_from_input(in_rec);
893  ls->set_solution( NULL );
894  // possible initialization particular to BDDC
895  START_TIMER("BDDC set mesh data");
897  schur0=ls;
898  END_TIMER("BDDC set mesh data");
899 #else
900  xprintf(Err, "Flow123d was not build with BDDCML support.\n");
901 #endif
902  }
903  else if (in_rec.type() == LinSys_PETSC::input_type) {
904  // use PETSC for serial case even when user wants BDDC
905  if (n_schur_compls > 2) {
906  xprintf(Warn, "Invalid number of Schur Complements. Using 2.");
907  n_schur_compls = 2;
908  }
909 
910  LinSys_PETSC *schur1, *schur2;
911 
912  if (n_schur_compls == 0) {
913  LinSys_PETSC *ls = new LinSys_PETSC( &(*rows_ds) );
914 
915  // temporary solution; we have to set precision also for sequential case of BDDC
916  // final solution should be probably call of direct solver for oneproc case
917  if (in_rec.type() != LinSys_BDDC::input_type) ls->set_from_input(in_rec);
918  else {
919  ls->LinSys::set_from_input(in_rec); // get only common options
920  }
921 
922  ls->set_solution( NULL );
923  schur0=ls;
924  } else {
// First Schur complement: eliminate the side (velocity) block.
925  IS is;
926  ISCreateStride(PETSC_COMM_WORLD, side_ds->lsize(), rows_ds->begin(), 1, &is);
927  //ASSERT(err == 0,"Error in ISCreateStride.");
928 
929  SchurComplement *ls = new SchurComplement(is, &(*rows_ds));
930  ls->set_from_input(in_rec);
931  ls->set_solution( NULL );
932  ls->set_positive_definite();
933 
934  // make schur1
935  Distribution *ds = ls->make_complement_distribution();
936  if (n_schur_compls==1) {
937  schur1 = new LinSys_PETSC(ds);
938  } else {
// Second Schur complement: further eliminate the element block.
939  IS is;
940  ISCreateStride(PETSC_COMM_WORLD, el_ds->lsize(), ls->get_distribution()->begin(), 1, &is);
941  //ASSERT(err == 0,"Error in ISCreateStride.");
942  SchurComplement *ls1 = new SchurComplement(is, ds); // is is deallocated by SchurComplement
943  ls1->set_negative_definite();
944 
945  // make schur2
946  schur2 = new LinSys_PETSC( ls1->make_complement_distribution() );
947  ls1->set_complement( schur2 );
948  schur1 = ls1;
949  }
950  ls->set_complement( schur1 );
951  schur0=ls;
952  }
953 
954  START_TIMER("PETSC PREALLOCATION");
// First assembly pass only counts nonzeros (preallocation mode).
957  assembly_steady_mh_matrix(); // preallocation
958  VecZeroEntries(schur0->get_solution());
959  END_TIMER("PETSC PREALLOCATION");
960  }
961  else {
962  xprintf(Err, "Unknown solver type. Internal error.\n");
963  }
964  }
965 
966 
967  END_TIMER("preallocation");
968 }
969 
970 
971 
972 
// NOTE(review): the function signature (original line 973) was dropped by the
// doc extraction; from the body this is the per-time-step assembly driver
// (presumably DarcyFlowMH_Steady::assembly_linear_system()): full reassembly
// when field data changed, otherwise only the time-term modification.
974 
975  data_.set_time(*time_);
976  DBGMSG("Assembly linear system\n");
977  if (data_.changed()) {
978  DBGMSG(" Data changed\n");
979  // currently we have no optimization for cases when just time term data or RHS data are changed
980  START_TIMER("full assembly");
981  if (typeid(*schur0) != typeid(LinSys_BDDC)) {
982  schur0->start_add_assembly(); // finish allocation and create matrix
983  }
// NOTE(review): original lines 984-985 and 987-988 are missing from this
// extraction (likely zeroing of matrix/rhs entries around the fill call).
986  assembly_steady_mh_matrix(); // fill matrix
989 
990  if (!time_->is_steady()) {
991  DBGMSG(" setup time term\n");
992  // assembly time term and rhs
993  setup_time_term();
994  modify_system();
995  }
996  END_TIMER("full assembly");
997  } else {
998  START_TIMER("modify system");
999  if (!time_->is_steady()) {
1000  modify_system();
1001  } else {
1002  xprintf(PrgErr, "Planned computation time for steady solver, but data are not changed.\n");
1003  }
// NOTE(review): BUG — the tag below ("modiffy system") does not match the
// START_TIMER("modify system") above; the misspelled END_TIMER likely closes
// the wrong (nonexistent) timer frame. Fix the literal in the real source.
1004  END_TIMER("modiffy system");
1005  }
1006 
1007 
1008  //schur0->view_local_matrix();
1009  //PetscViewer myViewer;
1010  //PetscViewerASCIIOpen(PETSC_COMM_WORLD,"matis.m",&myViewer);
1011  //PetscViewerSetFormat(myViewer,PETSC_VIEWER_ASCII_MATLAB);
1012 
1013  //MatView( schur0->get_matrix(),PETSC_VIEWER_STDOUT_WORLD );
1014  //DBGMSG("RHS\n");
1015  //VecView(schur0->get_rhs(), PETSC_VIEWER_STDOUT_WORLD);
1016  //VecView(schur0->get_solution(), PETSC_VIEWER_STDOUT_WORLD);
1017 
1018  //PetscViewerDestroy(myViewer);
1019 
1020 }
1021 
1022 
1023 
// NOTE(review): signature (original line 1024) dropped by the doc extraction;
// the final call `bddc_ls->load_mesh(...)` shows this routine collects the
// subdomain dof/connectivity/coordinate arrays and hands them to the BDDCML
// wrapper — presumably set_mesh_data_for_bddc(LinSys_BDDC *bddc_ls).
// Dof ordering per element: side fluxes, element pressure, edge Lagrange
// multipliers, then edges of compatible (lower-dim) connections.
1025  // prepare mesh for BDDCML
1026  // initialize arrays
1027  // auxiliary map for creating coordinates of local dofs and global-to-local numbering
1028  std::map<int,arma::vec3> localDofMap;
1029  // connectivity for the subdomain, i.e. global dof numbers on element, stored element-by-element
1030  // Indices of Nodes on Elements
1031  std::vector<int> inet;
1032  // number of degrees of freedom on elements - determines elementwise chunks of INET array
1033  // Number of Nodes on Elements
1034  std::vector<int> nnet;
1035  // Indices of Subdomain Elements in Global Numbering - for local elements, their global indices
1036  std::vector<int> isegn;
1037 
1038  // This array is currently not used in BDDCML, it was used as an interface scaling alternative to scaling
1039  // by diagonal. It corresponds to the rho-scaling.
1040  std::vector<double> element_permeability;
1041 
1042  // maximal and minimal dimension of elements
1043  int elDimMax = 1;
1044  int elDimMin = 3;
1045  for ( unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++ ) {
1046  // for each element, create local numbering of dofs as fluxes (sides), pressure (element centre), Lagrange multipliers (edges), compatible connections
1047  ElementFullIter el = mesh_->element(el_4_loc[i_loc]);
1048  int e_idx = el.index();
1049 
1050  int elDim = el->dim();
1051  elDimMax = std::max( elDimMax, elDim );
1052  elDimMin = std::min( elDimMin, elDim );
1053 
1054  isegn.push_back( e_idx );
1055  int nne = 0;
1056 
1057  FOR_ELEMENT_SIDES(el,si) {
1058  // insert local side dof
1059  int side_row = side_row_4_id[ mh_dh.side_dof( el->side(si) ) ];
1060  arma::vec3 coord = el->side(si)->centre();
1061 
1062  localDofMap.insert( std::make_pair( side_row, coord ) );
1063  inet.push_back( side_row );
1064  nne++;
1065  }
1066 
1067  // insert local pressure dof
1068  int el_row = row_4_el[ el_4_loc[i_loc] ];
1069  arma::vec3 coord = el->centre();
1070  localDofMap.insert( std::make_pair( el_row, coord ) );
1071  inet.push_back( el_row );
1072  nne++;
1073 
1074  FOR_ELEMENT_SIDES(el,si) {
1075  //Edge *edg=el->side(si)->edge();
1076 
1077  // insert local edge dof
1078  int edge_row = row_4_edge[ el->side(si)->edge_idx() ];
1079  arma::vec3 coord = el->side(si)->centre();
1080 
1081  localDofMap.insert( std::make_pair( edge_row, coord ) );
1082  inet.push_back( edge_row );
1083  nne++;
1084  }
1085 
1086  // insert dofs related to compatible connections
1087  for ( unsigned int i_neigh = 0; i_neigh < el->n_neighs_vb; i_neigh++) {
1088  int edge_row = row_4_edge[ el->neigh_vb[i_neigh]->edge_idx() ];
1089  arma::vec3 coord = el->neigh_vb[i_neigh]->edge()->side(0)->centre();
1090 
1091  localDofMap.insert( std::make_pair( edge_row, coord ) );
1092  inet.push_back( edge_row );
1093  nne++;
1094  }
1095 
1096  nnet.push_back( nne );
1097 
1098  // version for rho scaling
1099  // trace computation
1100  arma::vec3 centre = el->centre();
1101  double conduct = data_.conductivity.value( centre , el->element_accessor() );
1102  //double cs = data_.cross_section.value( centre, el->element_accessor() );
1103  arma::mat33 aniso = data_.anisotropy.value( centre, el->element_accessor() );
1104 
1105  // compute mean on the diagonal
1106  double coef = 0.;
1107  for ( int i = 0; i < 3; i++) {
1108  coef = coef + aniso.at(i,i);
1109  }
1110  // Maybe divide by cs
1111  coef = conduct*coef / 3;
1112 
1113  ASSERT( coef > 0.,
1114  "Zero coefficient of hydrodynamic resistance %f . \n ", coef );
1115  element_permeability.push_back( 1. / coef );
1116  }
1117  //convert set of dofs to vectors
1118  // number of nodes (= dofs) on the subdomain
1119  int numNodeSub = localDofMap.size();
1120  ASSERT_EQUAL( (unsigned int)numNodeSub, global_row_4_sub_row->size() );
1121  // Indices of Subdomain Nodes in Global Numbering - for local nodes, their global indices
1122  std::vector<int> isngn( numNodeSub );
1123  // pseudo-coordinates of local nodes (i.e. dofs)
1124  // they need not be exact, they are used just for some geometrical considerations in BDDCML,
1125  // such as selection of corners maximizing area of a triangle, bounding boxes for subdomains to
1126  // find candidate neighbours etc.
1127  std::vector<double> xyz( numNodeSub * 3 ) ;
1128  int ind = 0;
1129  std::map<int,arma::vec3>::iterator itB = localDofMap.begin();
1130  for ( ; itB != localDofMap.end(); ++itB ) {
1131  isngn[ind] = itB -> first;
1132 
1133  arma::vec3 coord = itB -> second;
1134  for ( int j = 0; j < 3; j++ ) {
// xyz is stored component-major: all x's, then all y's, then all z's.
1135  xyz[ j*numNodeSub + ind ] = coord[j];
1136  }
1137 
1138  ind++;
1139  }
1140  localDofMap.clear();
1141 
1142  // Number of Nodal Degrees of Freedom
1143  // nndf is trivially one - dofs coincide with nodes
1144  std::vector<int> nndf( numNodeSub, 1 );
1145 
1146  // prepare auxiliary map for renumbering nodes
1147  typedef std::map<int,int> Global2LocalMap_; //! type for storage of global to local map
1148  Global2LocalMap_ global2LocalNodeMap;
1149  for ( unsigned ind = 0; ind < isngn.size(); ++ind ) {
1150  global2LocalNodeMap.insert( std::make_pair( static_cast<unsigned>( isngn[ind] ), ind ) );
1151  }
1152 
1153  //std::cout << "INET: \n";
1154  //std::copy( inet.begin(), inet.end(), std::ostream_iterator<int>( std::cout, " " ) );
1155  //std::cout << std::endl;
1156  //std::cout << "ISNGN: \n";
1157  //std::copy( isngn.begin(), isngn.end(), std::ostream_iterator<int>( std::cout, " " ) );
1158  //std::cout << std::endl << std::flush;
1159  //std::cout << "ISEGN: \n";
1160  //std::copy( isegn.begin(), isegn.end(), std::ostream_iterator<int>( std::cout, " " ) );
1161  //std::cout << std::endl << std::flush;
1162  //MPI_Barrier( PETSC_COMM_WORLD );
1163 
1164  // renumber nodes in the inet array to locals
1165  int indInet = 0;
1166  for ( unsigned int iEle = 0; iEle < isegn.size(); iEle++ ) {
1167  int nne = nnet[ iEle ];
1168  for ( int ien = 0; ien < nne; ien++ ) {
1169 
1170  int indGlob = inet[indInet];
1171  // map it to local node
1172  Global2LocalMap_::iterator pos = global2LocalNodeMap.find( indGlob );
1173  ASSERT( pos != global2LocalNodeMap.end(),
1174  "Cannot remap node index %d to local indices. \n ", indGlob );
1175  int indLoc = static_cast<int> ( pos -> second );
1176 
1177  // store the node
1178  inet[ indInet++ ] = indLoc;
1179  }
1180  }
1181 
1182  int numNodes = size;
1183  int numDofsInt = size;
1184  int spaceDim = 3; // TODO: what is the proper value here?
1185  int meshDim = elDimMax;
1186  //std::cout << "I have identified following dimensions: max " << elDimMax << ", min " << elDimMin << std::endl;
1187 
1188  bddc_ls -> load_mesh( spaceDim, numNodes, numDofsInt, inet, nnet, nndf, isegn, isngn, isngn, xyz, element_permeability, meshDim );
1189 }
1190 
1191 
1192 
1193 
1194 //=============================================================================
1195 // DESTROY WATER MH SYSTEM STRUCTURE
1196 //=============================================================================
// NOTE(review): the destructor signature (original line 1197) was dropped by
// the doc extraction. Frees the linear system, the three Distributions, the
// parallel index maps, and the sequential solution copy.
1198  //if (schur2 != NULL) {
1199  // delete schur2;
1200  //ISDestroy(&IS2);
1201  //}
1202  //if (schur1 != NULL) {
1203  // delete schur1;
1204  //ISDestroy(&IS1);
1205  //}
1206  if (schur0 != NULL) delete schur0;
1207 
1208  delete edge_ds;
1209  delete el_ds;
1210  delete side_ds;
1211 
1212  xfree(el_4_loc);
1213  xfree(row_4_el);
// NOTE(review): original lines 1214-1215 are missing from this extraction —
// presumably the xfree calls for the side arrays (side_id_4_loc/side_row_4_id).
1216  xfree(edge_4_loc);
1217  xfree(row_4_edge);
1218 
1219  if (solution != NULL) {
1220  VecDestroy(&sol_vec);
1221  xfree(solution);
1222  }
1223 
1224  delete output_object;
1225 
1226  //VecScatterDestroy(&par_to_all);
1227 
1228 }
1229 
1230 
1231 // ================================================
1232 // PARALLEL PART
1233 //
1234 
1235 /**
1236  * Make connectivity graph of the second Schur complement and compute optimal partitioning.
1237  * This version assigns 1D and 2D edges to one processor, represents them as
1238  * a weighted vertex, and 2D-3D neighbourings as weighted edges. 3D edges are
1239  * then distributed over all processors.
1240  */
1241 /*
1242 void make_edge_conection_graph(Mesh *mesh, SparseGraph * &graph) {
1243 
1244  Distribution edistr = graph->get_distr();
1245  //Edge *edg;
1246  Element *ele;
1247  int li, eid, i_edg;
1248  unsigned int si, i_neigh;
1249  int e_weight;
1250 
1251  int edge_dim_weights[3] = { 100, 10, 1 };
1252 
1253  i_edg=0;
1254  FOR_EDGES(mesh, edg) {
1255 
1256  // skip non-local edges
1257  if (!edistr.is_local(i_edg)) continue;
1258 
1259  e_weight = edge_dim_weights[edg->side(0)->element()->dim() - 1];
1260  // for all connected elements
1261  FOR_EDGE_SIDES( edg, li ) {
1262  ASSERT( edg->side(li)->valid(),"NULL side of edge.");
1263  ele = edg->side(li)->element();
1264  ASSERT(NONULL(ele),"NULL element of side.");
1265 
1266  // for sides of connected element, excluding edge itself
1267  for(si=0; si<ele->n_sides(); si++) {
1268  eid = ele->side(si)->edge_idx();
1269  if (eid != i_edg)
1270  graph->set_edge(i_edg, eid, e_weight);
1271  }
1272 
1273  // include connections from lower dim. edge
1274  // to the higher dimension
1275  for (i_neigh = 0; i_neigh < ele->n_neighs_vb; i_neigh++) {
1276  eid = ele->neigh_vb[i_neigh]->edge_idx();
1277  graph->set_edge(i_edg, eid, e_weight);
1278  graph->set_edge(eid, i_edg, e_weight);
1279  }
1280  }
1281  i_edg++;
1282  }
1283 
1284  graph->finalize();
1285 }
1286 */
1287 /**
1288  * Make connectivity graph of elements of mesh - dual graph: elements vertices of graph.
1289  */
1290 /*
1291 void make_element_connection_graph(Mesh *mesh, SparseGraph * &graph, bool neigh_on) {
1292 
1293  Distribution edistr = graph->get_distr();
1294 
1295  Edge *edg;
1296  int li, e_idx, i_neigh;
1297  int i_s, n_s;
1298 
1299  //int elDimMax = 1;
1300  //int elDimMin = 3;
1301  //FOR_ELEMENTS(mesh, ele) {
1302  // //xprintf(Msg,"Element id %d , its index %d.\n",ele.id(), i_ele);
1303  // int elDim = ele->dim();
1304  // elDimMax = std::max( elDimMax, elDim );
1305  // elDimMin = std::min( elDimMin, elDim );
1306  //}
1307  //std::cout << "max and min element dimensions: " << elDimMax << " " << elDimMin << std::endl;
1308 
1309  FOR_ELEMENTS(mesh, ele) {
1310  //xprintf(Msg,"Element id %d , its index %d.\n",ele.id(), i_ele);
1311 
1312  // skip non-local elements
1313  if (!edistr.is_local(ele.index()))
1314  continue;
1315 
1316  // for all connected elements
1317  FOR_ELEMENT_SIDES( ele, si ) {
1318  edg = ele->side(si)->edge();
1319 
1320  FOR_EDGE_SIDES( edg, li ) {
1321  ASSERT(edg->side(li)->valid(),"NULL side of edge.");
1322  e_idx = ELEMENT_FULL_ITER(mesh, edg->side(li)->element()).index();
1323 
1324  // for elements of connected elements, excluding element itself
1325  if (e_idx != ele.index()) {
1326  graph->set_edge(ele.index(), e_idx);
1327  }
1328  }
1329  }
1330 
1331  // include connections from lower dim. edge
1332  // to the higher dimension
1333  if (neigh_on) {
1334  for (i_neigh = 0; i_neigh < ele->n_neighs_vb; i_neigh++) {
1335  n_s = ele->neigh_vb[i_neigh]->edge()->n_sides;
1336  for (i_s = 0; i_s < n_s; i_s++) {
1337  e_idx=ELEMENT_FULL_ITER(mesh, ele->neigh_vb[i_neigh]->edge()->side(i_s)->element()).index();
1338  graph->set_edge(ele.index(), e_idx);
1339  graph->set_edge(e_idx, ele.index());
1340  }
1341  }
1342  }
1343  }
1344  graph->finalize();
1345 }*/
1346 
1347 
1348 // ========================================================================
1349 // to finish row_4_id arrays we have to convert individual numberings of
1350 // sides/els/edges to whole numbering of rows. To this end we count shifts
1351 // for sides/els/edges on each proc and then we apply them on row_4_id
1352 // arrays.
1353 // we employ macros to avoid code redundancy
1354 // =======================================================================
// NOTE(review): the function signature (original line 1355) was dropped by
// the doc extraction — presumably make_row_numberings(). Merges the three
// separate numberings (sides, elements, edges) into one global row numbering
// ordered per process as | sides | elements | edges |, and builds rows_ds.
1356  int i, shift;
1357  int np = edge_ds->np();
1358  int edge_shift[np], el_shift[np], side_shift[np];
1359  unsigned int rows_starts[np];
1360 
1361  int edge_n_id = mesh_->n_edges(),
1362  el_n_id = mesh_->element.size(),
1363  side_n_id = mesh_->n_sides();
1364 
1365  // compute shifts on every proc
1366  shift = 0; // in this var. we count new starts of arrays chunks
1367  for (i = 0; i < np; i++) {
1368  side_shift[i] = shift - (side_ds->begin(i)); // subtract actual start of the chunk
1369  shift += side_ds->lsize(i);
1370  el_shift[i] = shift - (el_ds->begin(i));
1371  shift += el_ds->lsize(i);
1372  edge_shift[i] = shift - (edge_ds->begin(i));
1373  shift += edge_ds->lsize(i);
1374  rows_starts[i] = shift;  // cumulative end of proc i's rows
1375  }
1376  //DBGPRINT_INT("side_shift",np,side_shift);
1377  //DBGPRINT_INT("el_shift",np,el_shift);
1378  //DBGPRINT_INT("edge_shift",np,edge_shift);
1379  // apply shifts
1380  for (i = 0; i < side_n_id; i++) {
1381  int &what = side_row_4_id[i];
1382  if (what >= 0)
1383  what += side_shift[side_ds->get_proc(what)];
1384  }
1385  for (i = 0; i < el_n_id; i++) {
1386  int &what = row_4_el[i];
1387  if (what >= 0)
1388  what += el_shift[el_ds->get_proc(what)];
1389 
1390  }
1391  for (i = 0; i < edge_n_id; i++) {
1392  int &what = row_4_edge[i];
1393  if (what >= 0)
1394  what += edge_shift[edge_ds->get_proc(what)];
1395  }
1396  // make distribution of rows
// Difference the cumulative ends to get per-process local sizes.
1397  for (i = np - 1; i > 0; i--)
1398  rows_starts[i] -= rows_starts[i - 1];
1399 
1400  rows_ds = boost::make_shared<Distribution>(&(rows_starts[0]), PETSC_COMM_WORLD);
1401 }
1402 
// NOTE(review): the function signature (original line 1403) was dropped by
// the doc extraction. Builds the sequential solution buffer (`solution`,
// `sol_vec`) and the VecScatter `par_to_all` that gathers the parallel
// solution into the original side/element/edge ordering.
1404  START_TIMER("prepare scatter");
1405  // prepare Scatter from parallel to sequential in original numbering
1406  {
1407  IS is_loc;
1408  int i, *loc_idx; //, si;
1409 
1410  // create local solution vector
1411  solution = (double *) xmalloc(size * sizeof(double));
1412  VecCreateSeqWithArray(PETSC_COMM_SELF,1, size, solution,
1413  &(sol_vec));
1414 
1415  // create seq. IS to scatter par solution to seq. vec. in original order
1416  // use essentially row_4_id arrays
1417  loc_idx = (int *) xmalloc(size * sizeof(int));
1418  i = 0;
1419  FOR_ELEMENTS(mesh_, ele) {
1420  FOR_ELEMENT_SIDES(ele,si) {
1421  loc_idx[i++] = side_row_4_id[ mh_dh.side_dof( ele->side(si) ) ];
1422  }
1423  }
1424  FOR_ELEMENTS(mesh_, ele) {
1425  loc_idx[i++] = row_4_el[ele.index()];
1426  }
1427  for(unsigned int i_edg=0; i_edg < mesh_->n_edges(); i_edg++) {
1428  loc_idx[i++] = row_4_edge[i_edg];
1429  }
1430  ASSERT( i==size,"Size of array does not match number of fills.\n");
1431  //DBGPRINT_INT("loc_idx",size,loc_idx);
1432  ISCreateGeneral(PETSC_COMM_SELF, size, loc_idx, PETSC_COPY_VALUES, &(is_loc));
1433  xfree(loc_idx);
1434  VecScatterCreate(schur0->get_solution(), is_loc, sol_vec,
1435  PETSC_NULL, &par_to_all);
1436  ISDestroy(&(is_loc));
1437  }
// NOTE(review): original line 1438 is missing from this extraction.
1439 
1440  END_TIMER("prepare scatter");
1441 
1442 }
1443 
1444 // ====================================================================================
1445 // - compute optimal edge partitioning
1446 // - compute appropriate partitioning of elements and sides
1447 // - make arrays: *_id_4_loc and *_row_4_id to allow parallel assembly of the MH matrix
1448 // ====================================================================================
// NOTE(review): the function signature (original line 1449) was dropped by
// the doc extraction — presumably prepare_parallel(const Input::AbstractRecord
// &in_rec), since `in_rec` is used below for the BDDC branch.
1450 
1451  START_TIMER("prepare parallel");
1452 
1453  int *loc_part; // optimal (edge,el) partitioning (local chunk)
1454  int *id_4_old; // map from old idx to ids (edge,el)
1455  int i, loc_i;
1456 
1457  int e_idx;
1458 
1459 
1460  //ierr = MPI_Barrier(PETSC_COMM_WORLD);
1461  //ASSERT(ierr == 0, "Error in MPI_Barrier.");
1462 
1463  id_4_old = new int[mesh_->n_elements()];
1464  i = 0;
1465  FOR_ELEMENTS(mesh_, el) id_4_old[i++] = el.index();
1466 
1467  mesh_->get_part()->id_maps(mesh_->element.size(), id_4_old, el_ds, el_4_loc, row_4_el);
1468  //DBGPRINT_INT("el_4_loc",el_ds->lsize(),el_4_loc);
1469  //xprintf(Msg,"Number of elements in subdomain %d \n",el_ds->lsize());
1470  delete[] id_4_old;
1471 
1472  //el_ds->view( std::cout );
1473  //
1474  //DBGMSG("Compute appropriate edge partitioning ...\n");
1475  //optimal element part; loc. els. id-> new el. numbering
1476  Distribution init_edge_ds(DistributionLocalized(), mesh_->n_edges(), PETSC_COMM_WORLD);
1477  // partitioning of edges, edge belongs to the proc of his first element
1478  // this is not optimal but simple
1479  loc_part = new int[init_edge_ds.lsize()];
1480  id_4_old = new int[mesh_->n_edges()];
1481  {
1482  loc_i = 0;
1483  FOR_EDGES(mesh_, edg ) {
1484  unsigned int i_edg = edg - mesh_->edges.begin();
1485  // partition
1486  e_idx = mesh_->element.index(edg->side(0)->element());
1487  //xprintf(Msg,"Index of edge: %d first element: %d \n",edgid,e_idx);
1488  if (init_edge_ds.is_local(i_edg)) {
1489  // find (new) proc of the first element of the edge
1490  loc_part[loc_i++] = el_ds->get_proc(row_4_el[e_idx]);
1491  }
1492  // id array
1493  id_4_old[i_edg] = i_edg;
1494  }
1495  }
1496 
1497  Partitioning::id_maps(mesh_->n_edges(), id_4_old, init_edge_ds, loc_part, edge_ds, edge_4_loc, row_4_edge);
1498  delete[] loc_part;
1499  delete[] id_4_old;
1500 
1501  //optimal side part; loc. sides; id-> new side numbering
1502  Distribution init_side_ds(DistributionBlock(), mesh_->n_sides(), PETSC_COMM_WORLD);
1503  // partitioning of sides follows elements
1504  loc_part = new int[init_side_ds.lsize()];
1505  id_4_old = new int[mesh_->n_sides()];
1506  {
1507  int is = 0;
1508  loc_i = 0;
1509  FOR_SIDES(mesh_, side ) {
1510  // partition
1511  if (init_side_ds.is_local(is)) {
1512  // find (new) proc of the element of the side
1513  loc_part[loc_i++] = el_ds->get_proc(
// NOTE(review): original line 1514 (the argument of get_proc, presumably
// row_4_el of the side's element) is missing from this extraction.
1515  }
1516  // id array
1517  id_4_old[is++] = mh_dh.side_dof( side );
1518  }
1519  }
1520 
1521  Partitioning::id_maps(mesh_->n_sides(), id_4_old, init_side_ds, loc_part, side_ds,
// NOTE(review): original line 1522 (remaining id_maps arguments, presumably
// side_id_4_loc and side_row_4_id) is missing from this extraction.
1523  delete [] loc_part;
1524  delete [] id_4_old;
1525 
1526  //edge_ds->view( std::cout );
1527  //side_ds->view( std::cout );
1528 
1529  /*
1530  DBGPRINT_INT("edge_id_4_loc",edge_ds->lsize,edge_id_4_loc);
1531  DBGPRINT_INT("el_4_loc",el_ds->lsize,el_4_loc);
1532  DBGPRINT_INT("side_id_4_loc",side_ds->lsize,side_id_4_loc);
1533  DBGPRINT_INT("edge_row_4_id",mesh_->n_edges,edge_row_4_id);
1534  DBGPRINT_INT("el_row_4_id",mesh_->max_elm_id+1,el_row_4_id);
1535  DBGPRINT_INT("side_row_4_id",mesh_->max_side_id+1,side_row_4_id);
1536  */
1537  // convert row_4_id arrays from separate numberings to global numbering of rows
// NOTE(review): original line 1538 (presumably the make_row_numberings() call)
// is missing from this extraction.
1539  //DBGPRINT_INT("edge_row_4_id",mesh_->n_edges,edge_row_4_id);
1540  //DBGPRINT_INT("el_row_4_id",mesh_->max_elm_id+1,el_row_4_id);
1541  //DBGPRINT_INT("side_row_4_id",mesh_->max_side_id+1,side_row_4_id);
1542 
1543  //lsize = side_ds->lsize() + el_ds->lsize() + edge_ds->lsize();
1544 
1545 
1546  // prepare global_row_4_sub_row
1547 
1548 #ifdef HAVE_BDDCML
1549  if (in_rec.type() == LinSys_BDDC::input_type) {
1550  // auxiliary
1551  Element *el;
1552  int side_row, edge_row;
1553 
1554  //xprintf(Msg,"Compute mapping of local subdomain rows to global rows.\n");
1555 
1556  global_row_4_sub_row = boost::make_shared<LocalToGlobalMap>(rows_ds);
1557 
1558  //
1559  // ordering of dofs
1560  // for each subdomain:
1561  // | velocities (at sides) | pressures (at elements) | L. mult. (at edges) |
1562  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
1563  el = mesh_->element(el_4_loc[i_loc]);
1564  int el_row = row_4_el[el_4_loc[i_loc]];
1565 
1566  global_row_4_sub_row->insert( el_row );
1567 
1568  unsigned int nsides = el->n_sides();
1569  for (unsigned int i = 0; i < nsides; i++) {
1570  side_row = side_row_4_id[ mh_dh.side_dof( el->side(i) ) ];
1571  edge_row = row_4_edge[el->side(i)->edge_idx()];
1572 
1573  global_row_4_sub_row->insert( side_row );
1574  global_row_4_sub_row->insert( edge_row );
1575 
1576  // edge neighbouring overlap
1577  //if (edg->neigh_vb != NULL) {
1578  // int neigh_el_row=row_4_el[mesh_->element.index(edg->neigh_vb->element[0])];
1579  //}
1580  }
1581 
1582  for (unsigned int i_neigh = 0; i_neigh < el->n_neighs_vb; i_neigh++) {
1583  // mark this edge
1584  edge_row = row_4_edge[el->neigh_vb[i_neigh]->edge_idx() ];
1585  global_row_4_sub_row->insert( edge_row );
1586  }
1587  }
1588  global_row_4_sub_row->finalize();
1589  }
1590 #endif // HAVE_BDDCML
1591 
1592  // common to both solvers - create renumbering of unknowns
1593  solver_indices_.reserve(size);
1594  FOR_ELEMENTS(mesh_, ele) {
1595  FOR_ELEMENT_SIDES(ele,si) {
1596  solver_indices_.push_back( side_row_4_id[ mh_dh.side_dof( ele->side(si) ) ] );
1597  }
1598  }
1599  FOR_ELEMENTS(mesh_, ele) {
1600  solver_indices_.push_back( row_4_el[ele.index()] );
1601  }
1602 
1603  unsigned int i_edg=0;
1604  FOR_EDGES(mesh_, edg) {
1605  solver_indices_.push_back( row_4_edge[i_edg++] );
1606  }
1607  ASSERT( solver_indices_.size() == (unsigned int)size, "Size of array does not match number of fills.\n" );
1608  //std::cout << "Solve rindices:" << std::endl;
1609  //std::copy( solver_indices_.begin(), solver_indices_.end(), std::ostream_iterator<int>( std::cout, " " ) );
1610 }
1611 
1612 
1613 
1614 void mat_count_off_proc_values(Mat m, Vec v) {
1615  int n, first, last;
1616  const PetscInt *cols;
1617  Distribution distr(v);
1618 
1619  int n_off = 0;
1620  int n_on = 0;
1621  int n_off_rows = 0;
1622  MatGetOwnershipRange(m, &first, &last);
1623  for (int row = first; row < last; row++) {
1624  MatGetRow(m, row, &n, &cols, PETSC_NULL);
1625  bool exists_off = false;
1626  for (int i = 0; i < n; i++)
1627  if (distr.get_proc(cols[i]) != distr.myp())
1628  n_off++, exists_off = true;
1629  else
1630  n_on++;
1631  if (exists_off)
1632  n_off_rows++;
1633  MatRestoreRow(m, row, &n, &cols, PETSC_NULL);
1634  }
1635  //printf("[%d] rows: %d off_rows: %d on: %d off: %d\n",distr.myp(),last-first,n_off_rows,n_on,n_off);
1636 }
1637 
1638 
1639 
1640 
1641 
1642 
1643 
1644 
1645 
1646 
1647 
1648 
1649 // ========================
1650 // unsteady
1651 
// NOTE(review): the constructor signature (original line 1652) was dropped by
// the doc extraction — presumably DarcyFlowMH_Unsteady::DarcyFlowMH_Unsteady(
// Mesh &mesh_in, const Input::Record in_rec). Sets up time governor, output,
// and the auxiliary PETSc vectors of the time term.
1653  : DarcyFlowMH_Steady(mesh_in, in_rec, false)
1654 {
1655  time_ = new TimeGovernor(in_rec.val<Input::Record>("time"));
1656  data_.mark_input_times(this->mark_type());
// NOTE(review): original line 1657 missing from this extraction.
1658  data_.set_time(*time_);
1659 
1660  output_object = new DarcyFlowMHOutput(this, in_rec.val<Input::Record>("output"));
1661 
// NOTE(review): original lines 1662-1663 missing from this extraction
// (likely create_linear_system()/read_init_condition() calls).
1664 
1665  VecDuplicate(schur0->get_solution(), &previous_solution);
1666  VecCreateMPI(PETSC_COMM_WORLD,rows_ds->lsize(),PETSC_DETERMINE,&(steady_diagonal));
1667  VecDuplicate(steady_diagonal,& new_diagonal);
1668  VecZeroEntries(new_diagonal);
1669  VecDuplicate(*( schur0->get_rhs()), &steady_rhs);
1670 
// NOTE(review): original lines 1671-1672 missing from this extraction.
1673 
1674 
1675 /*
1676  VecDuplicate(*( schur0->get_rhs()), &time_term);
1677  */
1678  //setup_time_term();
1679  output_data();
1680 }
1681 
// NOTE(review): signature (original line 1682) dropped by the doc extraction —
// presumably DarcyFlowMH_Unsteady::read_init_condition(). Writes the initial
// pressure field into the element-pressure entries of the solution vector.
1683 {
1684 
1685  // read initial condition
1686  VecZeroEntries(schur0->get_solution());
1687 
1688  double *local_sol = schur0->get_solution_array();
1689 
1690  // cycle over local element rows
// NOTE(review): original line 1691 missing from this extraction — presumably
// the declaration of the element iterator `ele` used below.
1692 
1693  DBGMSG("Setup with dt: %f\n",time_->dt());
1694  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1695  ele = mesh_->element(el_4_loc[i_loc_el]);
// element-pressure rows sit after the local side rows in the local ordering
1696  int i_loc_row = i_loc_el + side_ds->lsize();
1697 
1698  // set initial condition
1699  local_sol[i_loc_row] = data_.init_pressure.value(ele->centre(),ele->element_accessor());
1700  }
1701 
// NOTE(review): original line 1702 missing from this extraction.
1703 
1704 }
1705 
// NOTE(review): signature (original line 1706) dropped by the doc extraction —
// presumably DarcyFlowMH_Unsteady::setup_time_term(). Saves the steady matrix
// diagonal and RHS, then adds the storativity time term -S*|e|/dt to the
// element-pressure diagonal entries.
1707  // save diagonal of steady matrix
1708  MatGetDiagonal(*( schur0->get_matrix() ), steady_diagonal);
1709  // save RHS
1710  VecCopy(*( schur0->get_rhs()), steady_rhs);
1711 
1712 
1713  PetscScalar *local_diagonal;
1714  VecGetArray(new_diagonal,& local_diagonal);
1715 
// NOTE(review): original line 1716 missing from this extraction — presumably
// the declaration of the element iterator `ele` used below.
1717  DBGMSG("Setup with dt: %f\n",time_->dt());
1718  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1719  ele = mesh_->element(el_4_loc[i_loc_el]);
1720  int i_loc_row = i_loc_el + side_ds->lsize();
1721 
1722  // set new diagonal
1723  local_diagonal[i_loc_row]= - data_.storativity.value(ele->centre(), ele->element_accessor()) *
1724  ele->measure() / time_->dt();
1725  }
1726  VecRestoreArray(new_diagonal,& local_diagonal);
1727  MatDiagonalSet(*( schur0->get_matrix() ), new_diagonal, ADD_VALUES);
1728 
// NOTE(review): original lines 1729-1730 missing from this extraction.
1731 }
1732 
// NOTE(review): signature (original line 1733) dropped by the doc extraction —
// presumably DarcyFlowMH_Unsteady::modify_system(). Rescales the time-term
// diagonal when dt changed, folds the previous solution into the RHS, and
// swaps solution vectors for the next time step.
1734  START_TIMER("modify system");
1735  if (time_->is_changed_dt() && !schur0->is_matrix_changed()) {
1736  // if time step has changed and setup_time_term not called
1737  MatDiagonalSet(*( schur0->get_matrix() ),steady_diagonal, INSERT_VALUES);
1738 
// new_diagonal was built with the previous dt; rescale by last_dt/dt
1739  VecScale(new_diagonal, time_->last_dt()/time_->dt());
1740  MatDiagonalSet(*( schur0->get_matrix() ),new_diagonal, ADD_VALUES);
// NOTE(review): original line 1741 missing from this extraction.
1742  }
1743 
1744  // modify RHS - add previous solution
1745  VecPointwiseMult(*( schur0->get_rhs()), new_diagonal, schur0->get_solution());
1746  VecAXPY(*( schur0->get_rhs()), 1.0, steady_rhs);
// NOTE(review): original line 1747 missing from this extraction.
1748 
1749  // swap solutions
1750  VecSwap(previous_solution, schur0->get_solution());
1751 }
1752 
1753 // ========================
1754 // unsteady
1755 
// NOTE(review): the constructor signature (original line 1756) was dropped by
// the doc extraction — presumably DarcyFlowLMH_Unsteady::DarcyFlowLMH_Unsteady(
// Mesh &mesh_in, const Input::Record in_rec). Lumped-MH variant; mirrors the
// MH unsteady constructor above.
1757  : DarcyFlowMH_Steady(mesh_in, in_rec,false)
1758 {
1759  time_ = new TimeGovernor(in_rec.val<Input::Record>("time"));
1760  data_.mark_input_times(this->mark_type());
1761 
1762 
// NOTE(review): original line 1763 missing from this extraction.
1764  data_.set_time(*time_);
1765 
1766  output_object = new DarcyFlowMHOutput(this, in_rec.val<Input::Record>("output"));
1767 
// NOTE(review): original lines 1768-1769 missing from this extraction
// (likely create_linear_system()/read_init_condition() calls).
1770  VecDuplicate(schur0->get_solution(), &previous_solution);
1771  VecCreateMPI(PETSC_COMM_WORLD,rows_ds->lsize(),PETSC_DETERMINE,&(steady_diagonal));
1772  VecDuplicate(steady_diagonal,& new_diagonal);
1773  VecDuplicate(*( schur0->get_rhs()), &steady_rhs);
1774 
// NOTE(review): original lines 1775-1776 missing from this extraction.
1777  output_data();
1778 }
1779 
// NOTE(review): signature (original line 1780) dropped by the doc extraction —
// presumably DarcyFlowLMH_Unsteady::read_init_condition(). Distributes the
// element initial pressure onto the edge unknowns, each edge receiving
// init_value/n_sides from every adjacent element (ADD_VALUES accumulates).
1781 {
1782  VecZeroEntries(schur0->get_solution());
1783 
1784  // apply initial condition
1785  // cycle over local element rows
1786 
// NOTE(review): original line 1787 missing from this extraction — presumably
// the declaration of the element iterator `ele` used below.
1788  double init_value;
1789 
1790  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1791  ele = mesh_->element(el_4_loc[i_loc_el]);
1792 
1793  init_value = data_.init_pressure.value(ele->centre(), ele->element_accessor());
1794 
1795  FOR_ELEMENT_SIDES(ele,i) {
1796  int edge_row = row_4_edge[ele->side(i)->edge_idx()];
1797  VecSetValue(schur0->get_solution(),edge_row,init_value/ele->n_sides(),ADD_VALUES);
1798  }
1799  }
1800  VecAssemblyBegin(schur0->get_solution());
1801  VecAssemblyEnd(schur0->get_solution());
1802 
// NOTE(review): original line 1803 missing from this extraction.
1804 }
1805 
1806 
1807 
// NOTE(review): signature (original line 1808) dropped by the doc extraction —
// presumably DarcyFlowLMH_Unsteady::setup_time_term(). Saves the steady
// diagonal/RHS and accumulates the lumped time term -|e|*S*cs/(dt*n_sides)
// onto the edge diagonal entries.
1809 {
1810  // save diagonal of steady matrix
1811  MatGetDiagonal(*( schur0->get_matrix() ), steady_diagonal);
1812  // save RHS
1813  VecCopy(*( schur0->get_rhs()),steady_rhs);
1814 
1815  VecZeroEntries(new_diagonal);
1816 
1817  // modify matrix diagonal
1818  // cycle over local element rows
// NOTE(review): original line 1819 missing from this extraction — presumably
// the declaration of the element iterator `ele` used below.
1820 
1821  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1822  ele = mesh_->element(el_4_loc[i_loc_el]);
1823 
// NOTE(review): the result of this field evaluation is discarded — looks like
// a dead statement (leftover from read_init_condition?); confirm and remove.
1824  data_.init_pressure.value(ele->centre(), ele->element_accessor());
1825 
1826  FOR_ELEMENT_SIDES(ele,i) {
1827  int edge_row = row_4_edge[ele->side(i)->edge_idx()];
1828  // set new diagonal
1829  VecSetValue(new_diagonal,edge_row, - ele->measure() *
1830  data_.storativity.value(ele->centre(), ele->element_accessor()) *
1831  data_.cross_section.value(ele->centre(), ele->element_accessor()) /
1832  time_->dt() / ele->n_sides(),ADD_VALUES);
1833  }
1834  }
1835  VecAssemblyBegin(new_diagonal);
1836  VecAssemblyEnd(new_diagonal);
1837 
1838  MatDiagonalSet(*( schur0->get_matrix() ),new_diagonal, ADD_VALUES);
1839 
// NOTE(review): original lines 1840-1841 missing from this extraction.
1842 }
1843 
// NOTE(review): signature (original line 1844) dropped by the doc extraction —
// presumably DarcyFlowLMH_Unsteady::modify_system(). Same structure as the
// MH-unsteady variant: rescale time-term diagonal on dt change, fold previous
// solution into RHS, swap solution vectors.
1845  START_TIMER("modify system");
1846  if (time_->is_changed_dt() && !schur0->is_matrix_changed()) {
1847  // if time step has changed and setup_time_term not called
1848 
1849  MatDiagonalSet(*( schur0->get_matrix() ),steady_diagonal, INSERT_VALUES);
1850  VecScale(new_diagonal, time_->last_dt()/time_->dt());
1851  MatDiagonalSet(*( schur0->get_matrix() ),new_diagonal, ADD_VALUES);
// NOTE(review): original line 1852 missing from this extraction.
1853  }
1854 
1855  // modify RHS - add previous solution
1856  VecPointwiseMult(*( schur0->get_rhs()), new_diagonal, schur0->get_solution());
1857  VecAXPY(*( schur0->get_rhs()), 1.0, steady_rhs);
// NOTE(review): original line 1858 missing from this extraction.
1859 
1860  // swap solutions
1861  VecSwap(previous_solution, schur0->get_solution());
1862 }
1863 
1864 
1865 
// NOTE(review): signature (original line 1866) dropped by the doc extraction —
// presumably DarcyFlowLMH_Unsteady::postprocess(). Corrects side fluxes for
// the lumped time term: first per-edge from the pressure change, then a
// per-element source-like contribution.
1867  int side_row, loc_edge_row, i;
1868  Edge* edg;
1869  ElementIter ele;
1870  double new_pressure, old_pressure, time_coef;
1871 
1872  PetscScalar *loc_prev_sol;
1873  VecGetArray(previous_solution, &loc_prev_sol);
1874 
1875  // modify side fluxes in parallel
1876  // for every local edge take time term on diagonal and add it to the corresponding flux
1877  for (unsigned int i_loc = 0; i_loc < edge_ds->lsize(); i_loc++) {
1878 
1879  edg = &( mesh_->edges[ edge_4_loc[i_loc] ] );
// local row of this edge: edge rows follow side and element rows
1880  loc_edge_row = side_ds->lsize() + el_ds->lsize() + i_loc;
1881 
1882  new_pressure = (schur0->get_solution_array())[loc_edge_row];
1883  old_pressure = loc_prev_sol[loc_edge_row];
1884  FOR_EDGE_SIDES(edg,i) {
1885  ele = edg->side(i)->element();
1886  side_row = side_row_4_id[ mh_dh.side_dof( edg->side(i) ) ];
1887  time_coef = - ele->measure() *
1888  data_.cross_section.value(ele->centre(), ele->element_accessor()) *
1889  data_.storativity.value(ele->centre(), ele->element_accessor()) /
1890  time_->dt() / ele->n_sides();
1891  VecSetValue(schur0->get_solution(), side_row, time_coef * (new_pressure - old_pressure), ADD_VALUES);
1892  }
1893  }
1894  VecRestoreArray(previous_solution, &loc_prev_sol);
1895 
1896  VecAssemblyBegin(schur0->get_solution());
1897  VecAssemblyEnd(schur0->get_solution());
1898 
1899  //DarcyFlowMH_Steady::postprocess();
1900 
1901  int side_rows[4];
1902  double values[4];
1903  //ElementFullIter ele = ELEMENT_FULL_ITER(mesh_, NULL);
1904 
1905  // modify side fluxes in parallel
1906  // for every local edge take time term on diagonal and add it to the corresponding flux
1907 
1908  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
1909  ele = mesh_->element(el_4_loc[i_loc]);
1910  FOR_ELEMENT_SIDES(ele,i) {
1911  side_rows[i] = side_row_4_id[ mh_dh.side_dof( ele->side(i) ) ];
1912  values[i] = -1.0 * ele->measure() *
1913  data_.cross_section.value(ele->centre(), ele->element_accessor()) *
// NOTE(review): original line 1914 missing from this extraction — presumably
// the water-source term factor of this product; verify in VCS.
1915  ele->n_sides();
1916  }
1917  VecSetValues(schur0->get_solution(), ele->n_sides(), side_rows, values, ADD_VALUES);
1918  }
1919  VecAssemblyBegin(schur0->get_solution());
1920  VecAssemblyEnd(schur0->get_solution());
1921 }
1922 
1923 //-----------------------------------------------------------------------------
1924 // vim: set cindent:
void set_input_list(Input::Array input_list)
Definition: field_set.hh:164
Class MappingP1 implements the affine transformation of the unit cell onto the actual cell...
unsigned int get_proc(unsigned int idx) const
get processor of the given index
FieldSet * eq_data_
Definition: equation.hh:237
void set_limit_side(LimitSide side)
Definition: field_set.hh:171
#define DBGMSG(...)
Definition: global_defs.h:195
void reinit(Mesh *mesh)
double measure() const
Definition: elements.cc:107
Output class for darcy_flow_mh model.
virtual void postprocess()
postprocess velocity field (add sources)
virtual void get_solution_vector(double *&vec, unsigned int &vec_size)
Accessor to input data conforming to declared Array.
Definition: accessors.hh:521
SchurComplement SchurComplement
Definition: linsys.hh:89
virtual void setup_time_term()
void set_symmetric(bool flag=true)
Definition: linsys.hh:489
Solver based on the original PETSc solver using MPIAIJ matrix and successive Schur complement construc...
void make_intersec_elements()
Definition: mesh.cc:691
void id_maps(int n_ids, int *id_4_old, Distribution *&new_ds, int *&id_4_loc, int *&new_4_id)
static Input::Type::Record input_type
#define FOR_EDGE_SIDES(i, j)
Definition: edges.h:53
Class Input::Type::Default specifies default value of keys of a Input::Type::Record.
Definition: type_record.hh:39
#define FOR_ELEMENTS(_mesh_, __i)
Definition: mesh.h:359
unsigned int edge_idx() const
Definition: side_impl.hh:52
MortarMethod mortar_method_
virtual void start_add_assembly()
Definition: linsys.hh:323
Boundary * cond() const
Definition: side_impl.hh:61
FieldBasePtr(* read_field_descriptor_hook)(Input::Record rec, const FieldCommon &field)
Definition: field.hh:71
double fix_dt_until_mark()
Fixing time step until fixed time mark.
virtual PetscErrorCode mat_zero_entries()
Definition: linsys.hh:248
Wrappers for linear systems based on MPIAIJ and MATIS format.
virtual void rhs_set_values(int nrow, int *rows, double *vals)=0
friend class P1_CouplingAssembler
void mat_count_off_proc_values(Mat m, Vec v)
bool is_end() const
Returns true if the actual time is greater than or equal to the end time.
static Input::Type::Selection mh_mortar_selection
void assembly(LinSys &ls)
void next_time()
Proceed to the next time according to current estimated time step.
bool solution_changed_for_scatter
const arma::vec::fixed< spacedim > normal_vector(unsigned int point_no)
Returns the normal vector to a side at given quadrature point.
Definition: fe_values.cc:196
void set_rhs_changed()
Definition: linsys.hh:211
arma::vec3 centre() const
Definition: sides.cc:139
static Default obligatory()
Definition: type_record.hh:87
???
friend class P0_CouplingAssembler
bool is_matrix_changed()
Definition: linsys.hh:217
#define ELEMENT_FULL_ITER(_mesh_, i)
Definition: mesh.h:367
void assembly(LinSys &ls)
Definition: mesh.h:108
virtual void start_allocation()
Definition: linsys.hh:315
static Input::Type::Record input_type
Class FEValues calculates finite element data on the actual cells such as shape function values...
#define FOR_ELEMENT_SIDES(i, j)
Definition: elements.h:151
boost::shared_ptr< Distribution > rows_ds
Record & derive_from(AbstractRecord &parent)
Definition: type_record.cc:172
int index() const
Definition: sys_vector.hh:88
virtual void finish_assembly()=0
virtual double get_solution_precision()=0
static Input::Type::Record input_type
Definition: linsys_PETSC.hh:46
Definition: edges.h:38
BCField< 3, FieldValue< 3 >::Scalar > bc_flux
void pressure_diff(int i_ele, vector< int > &dofs, unsigned int &ele_type, double &delta, arma::vec &dirichlet)
static Input::Type::Record input_type
void set_from_input(const Input::Record in_rec)
BCField< 3, FieldValue< 3 >::Enum > bc_type
Field< 3, FieldValue< 3 >::Scalar > storativity
Class for declaration of the integral input data.
Definition: type_base.hh:341
#define ADD_FIELD(name,...)
Definition: field_set.hh:260
Basic time management functionality for unsteady (and steady) solvers (class Equation).
Partitioning * get_part()
Definition: mesh.cc:163
unsigned int n_sides()
Definition: mesh.cc:152
Definition: system.hh:75
Basic time management class.
Class for declaration of inputs sequences.
Definition: type_base.hh:230
static arma::vec4 gravity_
void view(const char *name="") const
void modify_system() override
Symmetric Gauss-Legendre quadrature formulae on simplices.
BCField< 3, FieldValue< 3 >::Scalar > bc_pressure
bool is_preallocated()
Definition: linsys.hh:537
Assembly explicit Schur complement for the given linear system. Provides method for resolution of the...
void mat_set_value(int row, int col, double val)
Definition: linsys.hh:351
unsigned int size() const
Returns size of the container. This is independent of the allocated space.
Definition: sys_vector.hh:412
virtual void update_solution()
unsigned int n_elements() const
Definition: mesh.h:137
virtual void get_parallel_solution_vector(Vec &vector)
virtual void modify_system()
#define ASSERT(...)
Definition: global_defs.h:120
Definition: system.hh:75
static FileName input()
Definition: type_base.hh:443
static Input::Type::AbstractRecord input_type
Definition: linsys.hh:92
double * get_solution_array()
Definition: linsys.hh:290
void update(ElementFullIter ele, FieldType &cond_anisothropy, FieldType_Scalar &cross_section, FieldType_Scalar &conductivity)
Definition: mh_fe_values.cc:34
unsigned int edge_idx()
#define ASSERT_EQUAL(a, b)
Definition: global_defs.h:135
Neighbour ** neigh_vb
Definition: elements.h:129
const Vec & get_solution()
Definition: linsys.hh:266
Input::Type::Record type() const
Definition: accessors.cc:228
Definitions of basic Lagrangean finite elements with polynomial shape functions.
Field< 3, FieldValue< 3 >::TensorFixed > anisotropy
unsigned int begin(int proc) const
get starting local index
SideIter side()
Selection & add_value(const int value, const std::string &key, const std::string &description="")
Accessor to the data with type Type::Record.
Definition: accessors.hh:308
FieldCommon & input_selection(const Input::Type::Selection *element_selection)
void modify_system() override
const Ret val(const string &key) const
unsigned int n_sides() const
#define xprintf(...)
Definition: system.hh:104
static std::shared_ptr< FieldAlgorithmBase< 3, FieldValue< 3 >::Scalar > > bc_piezo_head_hook(Input::Record rec, const FieldCommon &field)
const Element * slave_iter() const
bool is_changed_dt() const
#define START_TIMER(tag)
Starts a timer with specified tag.
Class for declaration of polymorphic Record.
Definition: type_record.hh:467
Mesh * mesh_
Definition: equation.hh:227
Element * element()
Definition: boundaries.cc:47
unsigned int index(const T *pointer) const
Definition: sys_vector.hh:394
virtual Value::return_type const & value(const Point &p, const ElementAccessor< spacedim > &elm) const
Definition: field.hh:399
void read_init_condition() override
static string flow_old_bcd_file_key()
Definition: old_bcd.hh:53
unsigned int side_dof(const SideIter side) const
SideIter side(const unsigned int loc_index)
Mixed-hybrid model of linear Darcy flow, possibly unsteady.
void mark_input_times(TimeMark::Type mark_type)
Definition: field_set.hh:201
static Input::Type::Record input_type
unsigned int np() const
get num of processors
double solution_precision() const
AbstractRecord & declare_key(const string &key, const KeyType &type, const Default &default_value, const string &description)
Definition: type_record.cc:426
void set_solution(double *sol_array)
Definition: linsys.hh:274
static FieldBaseEnum flow_type_hook(Input::Record rec, const FieldCommon &field)
Definition: old_bcd.hh:81
double measure() const
Definition: sides.cc:42
bool is_steady() const
Returns true if the time governor is used for steady problem.
virtual const Vec * get_rhs()
Definition: linsys.hh:196
void * xmalloc(size_t size)
Memory allocation with checking.
Definition: system.cc:218
void set_time(const TimeGovernor &time)
Definition: field_set.hh:186
Accessor to the polymorphic input data of a type given by an AbstractRecord object.
Definition: accessors.hh:423
Normal vectors.
virtual PetscErrorCode rhs_zero_entries()
Definition: linsys.hh:257
double * local_matrix()
Definition: mh_fe_values.cc:64
Record & copy_keys(const Record &other)
Definition: type_record.cc:186
unsigned int myp() const
get my processor
Definition: system.hh:75
Support classes for parallel programing.
Field< 3, FieldValue< 3 >::Scalar > conductivity
static Input::Type::AbstractRecord input_type
void set_values(int nrow, int *rows, int ncol, int *cols, PetscScalar *mat_vals, PetscScalar *rhs_vals)
Set values in the system matrix and values in the right-hand side vector on corresponding rows...
Definition: linsys.hh:368
Distribution * el_ds
static Input::Type::Record input_type
Definition: linsys_BDDC.hh:54
void get_partitioning_vector(int *&elem_part, unsigned &lelem_part)
Field< 3, FieldValue< 3 >::Scalar > sigma
double intersection_true_size() const
Definition: intersection.cc:91
virtual void postprocess()
postprocess velocity field (add sources)
Input::Record input_record_
Definition: equation.hh:230
auto disable_where(const Field< spacedim, typename FieldValue< spacedim >::Enum > &control_field, const vector< FieldEnum > &value_list) -> Field &
Definition: field.impl.hh:149
MH_DofHandler mh_dh
ElementFullIter element() const
Definition: side_impl.hh:43
boost::shared_ptr< LocalToGlobalMap > global_row_4_sub_row
double dt() const
arma::vec3 centre() const
Definition: elements.cc:138
Definition: system.hh:75
void reinit(ElementFullIter &cell, unsigned int sid)
Update cell-dependent data (gradients, Jacobians etc.)
Definition: fe_values.cc:313
void assembly_steady_mh_matrix()
std::vector< Edge > edges
Vector of MH edges, this should not be part of the geometrical mesh.
Definition: mesh.h:209
friend class DarcyFlowMHOutput
Distributed sparse graphs, partitioning.
FullIter full_iter(Iter it)
Definition: sys_vector.hh:449
Abstract linear system class.
Mixed-hybrid of steady Darcy flow with sources and variable density.
unsigned int n_neighs_vb
Definition: elements.h:127
Definitions of particular quadrature rules on simplices.
BCField< 3, FieldValue< 3 >::Scalar > bc_robin_sigma
#define END_TIMER(tag)
Ends a timer with specified tag.
static Input::Type::Record input_type
TimeMark::Type mark_type()
Definition: equation.hh:188
arma::vec::fixed< spacedim > centre() const
Definition: accessors.hh:81
void set_mesh_data_for_bddc(LinSys_BDDC *bddc_ls)
Definition: system.hh:75
void set_matrix_changed()
Definition: linsys.hh:205
ElementAccessor< 3 > element_accessor() const
Gets ElementAccessor of this element.
Definition: elements.cc:167
mixed-hybrid model of linear Darcy flow, possibly unsteady.
unsigned int el_idx() const
Definition: side_impl.hh:72
void set_mesh(const Mesh &mesh)
Definition: field_set.hh:157
void read_init_condition() override
void add_sides(const Element *ele, unsigned int shift, vector< int > &dofs, vector< double > &dirichlet)
Record type proxy class.
Definition: type_record.hh:161
virtual void output_data() override
Write computed fields.
Field< 3, FieldValue< 3 >::Scalar > cross_section
Distribution * side_ds
virtual void mat_set_values(int nrow, int *rows, int ncol, int *cols, double *vals)=0
Distribution * edge_ds
unsigned int n_edges() const
Definition: mesh.h:145
static Input::Type::Selection bc_type_selection
#define xfree(p)
Definition: system.hh:112
std::vector< double > solution_
Field< 3, FieldValue< 3 >::Scalar > init_pressure
virtual const Mat * get_matrix()
Definition: linsys.hh:180
double last_dt() const
DarcyFlowMHOutput * output_object
DarcyFlowMH_Steady(Mesh &mesh, const Input::Record in_rec, bool make_tg=true)
CREATE AND FILL GLOBAL MH MATRIX OF THE WATER MODEL.
#define FOR_SIDES(_mesh_, it)
Definition: mesh.h:403
static FieldBaseScalar flow_flux_hook(Input::Record rec, const FieldCommon &field)
Definition: old_bcd.hh:99
void set_from_input(const Input::Record in_rec)
Definition: linsys_BDDC.cc:308
EqData()
Collect all fields.
#define FOR_EDGES(_mesh_, __i)
Definition: mesh.h:398
SideIter side(const unsigned int i) const
Definition: edges.h:43
bool changed() const
Definition: field_set.cc:121
Template for classes storing finite set of named values.
std::vector< unsigned > solver_indices_
virtual int solve()=0
Field< 3, FieldValue< 3 >::Scalar > water_source_density
void prepare_parallel(const Input::AbstractRecord in_rec)
ElementAccessor< 3 > element_accessor()
Definition: boundaries.cc:55
void rhs_set_value(int row, double val)
Definition: linsys.hh:363
static FieldBaseScalar flow_sigma_hook(Input::Record rec, const FieldCommon &field)
Definition: old_bcd.hh:108
Solver based on Multilevel BDDC - using corresponding class of OpenFTL package.
TimeGovernor * time_
Definition: equation.hh:228
ElementVector element
Vector of elements of the mesh.
Definition: mesh.h:198
void output()
Calculate values for output.
Calculates finite element data on a side.
Definition: fe_values.hh:415
unsigned int lsize(int proc) const
get local size
Record & declare_key(const string &key, const KeyType &type, const Default &default_value, const string &description)
Definition: type_record.cc:390