Flow123d
darcy_flow_mh.cc
Go to the documentation of this file.
1 /*!
2  *
3  * Copyright (C) 2007 Technical University of Liberec. All rights reserved.
4  *
5  * Please add a reference to Flow123d on your project site if you use the program for any purpose,
6  * especially for academic research:
7  * Flow123d, Research Centre: Advanced Remedial Technologies, Technical University of Liberec, Czech Republic
8  *
9  * This program is free software; you can redistribute it and/or modify it under the terms
10  * of the GNU General Public License version 3 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
13  * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14  * See the GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License along with this program; if not,
17  * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18  *
19  *
20  * $Id$
21  * $Revision$
22  * $LastChangedBy$
23  * $LastChangedDate$
24  *
25  *
26  * @file
27  * @ingroup flow
28  * @brief Setup and solve linear system of mixed-hybrid discretization of the linear
29  * porous media flow with possible preferential flow in fractures and channels.
30  *
31  */
32 
33 #include "petscmat.h"
34 #include "petscviewer.h"
35 #include "petscerror.h"
36 #include <armadillo>
37 #include <boost/foreach.hpp>
38 
39 
40 #include "system/system.hh"
41 
42 #include "mesh/mesh.h"
43 #include "mesh/intersection.hh"
44 #include "mesh/partitioning.hh"
45 #include "la/distribution.hh"
46 #include "la/linsys.hh"
47 #include "la/linsys_PETSC.hh"
48 #include "la/linsys_BDDC.hh"
49 //#include "la/solve.h"
50 #include "la/schur.hh"
51 #include "la/sparse_graph.hh"
53 
54 #include "system/file_path.hh"
55 #include "flow/mh_fe_values.hh"
56 #include "flow/darcy_flow_mh.hh"
57 
59 
60 #include "fem/mapping_p1.hh"
61 #include "fem/fe_p.hh"
62 #include "fem/fe_values.hh"
64 
65 #include <limits>
66 #include <set>
67 #include <vector>
68 #include <iostream>
69 #include <iterator>
70 #include <algorithm>
71 
73 
74 #include "fields/field_base.hh"
75 #include "fields/field.hh"
76 #include "fields/field_values.hh"
77 #include "system/sys_profiler.hh"
78 
79 
80 
81 
82 namespace it = Input::Type;
83 
85  = it::Selection("MH_MortarMethod")
86  .add_value(NoMortar, "None", "Mortar space: P0 on elements of lower dimension.")
87  .add_value(MortarP0, "P0", "Mortar space: P0 on elements of lower dimension.")
88  .add_value(MortarP1, "P1", "Mortar space: P1 on intersections, using non-conforming pressures.");
89 
90 
92  it::Selection("EqData_bc_Type")
93  .add_value(none, "none", "Homogeneous Neoumann BC.")
94  .add_value(dirichlet, "dirichlet")
95  .add_value(neumann, "neumann")
96  .add_value(robin, "robin")
97  .add_value(total_flux, "total_flux");
98 
99 //new input type with FIELDS
101  it::AbstractRecord("DarcyFlowMH", "Mixed-Hybrid solver for saturated Darcy flow.")
102  .declare_key("n_schurs", it::Integer(0,2), it::Default("2"),
103  "Number of Schur complements to perform when solving MH sytem.")
105  "Linear solver for MH problem.")
107  "Parameters of output form MH module.")
108  .declare_key("mortar_method", mh_mortar_selection, it::Default("None"),
109  "Method for coupling Darcy flow between dimensions." );
110 
111 
113  = it::Record("Steady_MH", "Mixed-Hybrid solver for STEADY saturated Darcy flow.")
114  .derive_from(DarcyFlowMH::input_type)
115  .declare_key("input_fields", it::Array(
116  DarcyFlowMH_Steady::EqData().make_field_descriptor_type("DarcyFlowMH")
117  .declare_key("bc_piezo_head", FieldBase< 3, FieldValue<3>::Scalar >::get_input_type(), "Boundary condition for pressure as piezometric head." )
118  .declare_key("init_piezo_head", FieldBase< 3, FieldValue<3>::Scalar >::get_input_type(), "Initial condition for pressure as piezometric head." )
119  .declare_key(OldBcdInput::flow_old_bcd_file_key(), it::FileName::input(), "File with mesh dependent boundary conditions (obsolete).")
120  ), it::Default::obligatory(), "" );
121 
123  = it::Record("Unsteady_MH", "Mixed-Hybrid solver for unsteady saturated Darcy flow.")
124  .derive_from(DarcyFlowMH::input_type)
126  "Time governor setting for the unsteady Darcy flow model.")
127  .copy_keys(DarcyFlowMH_Steady::input_type);
128 
129 
131  = it::Record("Unsteady_LMH", "Lumped Mixed-Hybrid solver for unsteady saturated Darcy flow.")
132  .derive_from(DarcyFlowMH::input_type)
134  "Time governor setting for the unsteady Darcy flow model.")
135  .copy_keys(DarcyFlowMH_Steady::input_type);
136 
137 
138 
139 
140 // gravity vector + constant shift of values
// Static member of EqData: 4-component vector; by default gravity acts in -z
// with zero constant shift (fourth component).
141 arma::vec4 DarcyFlowMH::EqData::gravity_=arma::vec4("0 0 -1 0");
142 
143 
// Constructor body of DarcyFlowMH::EqData (its signature line, original line
// 144, is absent from this generated listing). Registers the input fields of
// the MH Darcy flow model and groups them into subsets that mark which parts
// of the linear system each field influences.
// NOTE(review): several continuation lines (e.g. originals 155-156, 159-160,
// 164-165, 168-169) are missing here — presumably chained selection/units
// calls on the BC fields; confirm against the repository.
145 {
146  gravity_ = arma::vec4("0 0 -1 0");
147 
 // Bulk data fields; the third ADD_FIELD argument is the textual default value.
148  ADD_FIELD(anisotropy, "Anisotropy of the conductivity tensor.", "1.0" );
149  ADD_FIELD(cross_section, "Complement dimension parameter (cross section for 1D, thickness for 2D).", "1.0" );
150  ADD_FIELD(conductivity, "Isotropic conductivity scalar.", "1.0" );
151  ADD_FIELD(sigma, "Transition coefficient between dimensions.", "1.0");
152  ADD_FIELD(water_source_density, "Water source density.", "0.0");
153 
 // Boundary-condition fields; bc_type defaults to "none" (homogeneous Neumann).
154  ADD_FIELD(bc_type,"Boundary condition type, possible values:", "\"none\"" );
157 
158  ADD_FIELD(bc_pressure,"Dirichlet BC condition value for pressure.");
161 
162 
163  ADD_FIELD(bc_flux,"Flux in Neumman or Robin boundary condition.");
166 
167  ADD_FIELD(bc_robin_sigma,"Conductivity coefficient in Robin boundary condition.");
170 
171  //these are for unsteady
172  ADD_FIELD(init_pressure, "Initial condition as pressure", "0.0" );
173  ADD_FIELD(storativity,"Storativity.", "1.0" );
174 
 // Field subsets: which fields enter the time term, the main matrix, and the RHS.
175  time_term_fields = this->subset({"storativity"});
176  main_matrix_fields = this->subset({"anisotropy", "conductivity", "cross_section", "sigma", "bc_type", "bc_robin_sigma"});
177  rhs_fields = this->subset({"water_source_density", "bc_pressure", "bc_flux"});
178 
179 }
180 
181 
182 
183 
184 //=============================================================================
185 // CREATE AND FILL GLOBAL MH MATRIX OF THE WATER MODEL
186 // - do it in parallel:
187 // - initial distribution of elements, edges
188 //
189 /*! @brief CREATE AND FILL GLOBAL MH MATRIX OF THE WATER MODEL
190  *
191  * Parameters {Solver,NSchurs} number of performed Schur
192  * complements (0,1,2) for water flow MH-system
193  *
194  */
195 //=============================================================================
/**
 * Constructor of the steady Mixed-Hybrid Darcy flow solver.
 *
 * @param mesh_in  computational mesh; data fields and the MH DOF handler are bound to it
 * @param in_rec   input record, read below for keys: "input_fields", "n_schurs",
 *                 "mortar_method", "solver" and (when make_tg) "output"
 * @param make_tg  when true, create a steady TimeGovernor, set field time limits
 *                 and construct the output object
 *
 * NOTE(review): this generated listing drops original lines 225, 243 and 248,
 * so some statements (e.g. the mortar branch body) are not visible here.
 */
196 DarcyFlowMH_Steady::DarcyFlowMH_Steady(Mesh &mesh_in, const Input::Record in_rec, bool make_tg )
197 : DarcyFlowMH(mesh_in, in_rec)
198 
199 {
200  using namespace Input;
201  START_TIMER("Darcy constructor");
202 
203  this->eq_data_ = &data_;
204 
205  //connecting data fields with mesh
206  START_TIMER("data init");
207  data_.set_mesh(mesh_in);
208  data_.set_input_list( in_rec.val<Input::Array>("input_fields") );
209 
210 
211 
212 
213  END_TIMER("data init");
214 
215 
 // Total size of the MH system: one DOF per element, per side and per edge.
216  size = mesh_->n_elements() + mesh_->n_sides() + mesh_->n_edges();
217  n_schur_compls = in_rec.val<int>("n_schurs");
218 
219  solution = NULL;
220  schur0 = NULL;
221 
222 
223  mortar_method_= in_rec.val<MortarMethod>("mortar_method");
224  if (mortar_method_ != NoMortar) {
 // (branch body missing from this listing, original line 225 — presumably
 //  computes mesh intersections for the mortar coupling; confirm in repository)
226  }
227 
228  mh_dh.reinit(mesh_);
229 
 // Build parallel distributions/orderings used when assembling the linear system.
230  prepare_parallel(in_rec.val<AbstractRecord>("solver"));
231 
232  //side_ds->view( std::cout );
233  //el_ds->view( std::cout );
234  //edge_ds->view( std::cout );
235  //rows_ds->view( std::cout );
236 
237 
238 
239 
240  if (make_tg) {
241  // steady time governor
242  time_ = new TimeGovernor();
244  data_.set_limit_side(LimitSide::right);
245  data_.set_time(*time_);
246 
247  output_object = new DarcyFlowMHOutput(this, in_rec.val<Input::Record>("output"));
249 
250  //make_serial_scatter();
251  }
252 
253 
254 }
255 
256 
257 
258 
259 //=============================================================================
260 // COMPOSE and SOLVE WATER MH System possibly through Schur complements
261 //=============================================================================
// Body of the MH solve/update routine (its signature line, original line 262,
// is missing from this generated listing).
263  START_TIMER("Solving MH system");
264  F_ENTRY;
265 
266 
267 
 // Nothing to do past the end of the simulated time interval.
268  if (time_->is_end()) return;
269 
 // Unsteady case: advance time before solving this step.
270  if (! time_->is_steady()) time_->next_time();
271 
272 
273 
 // Solve the (possibly Schur-reduced) MH system. Original line 274 is absent
 // here — presumably it (re)assembles the system; confirm in the repository.
275  int convergedReason = schur0->solve();
276 
277  xprintf(MsgLog, "Linear solver ended with reason: %d \n", convergedReason );
278  ASSERT( convergedReason >= 0, "Linear solver failed to converge. Convergence reason %d \n", convergedReason );
279 
280  this -> postprocess();
281 
283 
284  output_data();
285 
 // Steady case: mark the single step as finished only after output.
286  if (time_->is_steady()) time_->next_time();
287 }
288 
// Body of DarcyFlowMH_Steady::postprocess() (its signature line, original
// line 289, is missing from this generated listing). The flux-correction code
// below is entirely commented out, so the function currently only starts its
// timer; side_rows/values are declared but left unused.
290 {
291  START_TIMER("postprocess");
292  int side_rows[4];
293  double values[4];
295 
296  // modify side fluxes in parallel
297  // for every local edge take time term on diagonal and add it to the corresponding flux
298  /*
299  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
300  ele = mesh_->element(el_4_loc[i_loc]);
301  FOR_ELEMENT_SIDES(ele,i) {
302  side_rows[i] = side_row_4_id[ mh_dh.side_dof( ele->side(i) ) ];
303  values[i] = -1.0 * ele->measure() *
304  data.cross_section.value(ele->centre(), ele->element_accessor()) *
305  data.water_source_density.value(ele->centre(), ele->element_accessor()) /
306  ele->n_sides();
307  }
308  VecSetValues(schur0->get_solution(), ele->n_sides(), side_rows, values, ADD_VALUES);
309  }
310  VecAssemblyBegin(schur0->get_solution());
311  VecAssemblyEnd(schur0->get_solution());
312  */
313 }
314 
315 
317  time_->view("DARCY"); //time governor information output
318  this->output_object->output();
319 }
320 
322 {
323  return schur0->get_solution_precision();
324 }
325 
326 
/**
 * Returns a pointer to the solution vector in application ordering together
 * with its size.
 *
 * @param vec       out: pointer to the first element of solution_
 * @param vec_size  out: number of entries in solution_
 *
 * NOTE(review): original line 331 is missing from this listing — judging from
 * the orphan closing brace below (original line 347) it opened a conditional,
 * probably "if (solution_changed_for_scatter) {" as hinted by the commented
 * scatter code; confirm against the repository.
 */
327 void DarcyFlowMH_Steady::get_solution_vector(double * &vec, unsigned int &vec_size)
328 {
329  // TODO: make class for vectors (wrapper for PETSC or other) derived from LazyDependency
330  // and use its mechanism to manage dependency between vectors
332  // // scatter solution to all procs
333  // VecScatterBegin(par_to_all, schur0->get_solution(), sol_vec,
334  // INSERT_VALUES, SCATTER_FORWARD);
335  // VecScatterEnd(par_to_all, schur0->get_solution(), sol_vec,
336  // INSERT_VALUES, SCATTER_FORWARD);
337  // solution_changed_for_scatter=false;
338 
 // Fetch the whole solution in solver ordering ...
339  std::vector<double> sol_disordered(this->size);
340  schur0 -> get_whole_solution( sol_disordered );
341 
342  // reorder solution to application ordering
343  if ( solution_.empty() ) solution_.resize( this->size, 0. );
344  for ( int i = 0; i < this->size; i++ ) {
345  solution_[i] = sol_disordered[solver_indices_[i]];
346  }
347  }
348 
349  vec=&(solution_[0]);
350  vec_size = solution_.size();
351  ASSERT(vec != NULL, "Requested solution is not allocated!\n");
352 }
353 
/**
 * Intended to expose the element partitioning array; the implementation is
 * currently disabled (every statement is commented out), so the output
 * arguments are left untouched — callers must not rely on elem_part or
 * lelem_part being set by this call.
 */
354 void DarcyFlowMH_Steady::get_partitioning_vector(int * &elem_part, unsigned &lelem_part)
355 {
356 
357 // elem_part=&(element_part[0]);
358 // lelem_part = element_part.size();
359 // ASSERT(elem_part != NULL, "Requested vector is not allocated!\n");
360 }
361 
363 {
364  vec=schur0->get_solution();
365  ASSERT(vec != NULL, "Requested solution is not allocated!\n");
366 }
367 
368 
369 
370 // ===========================================================================================
371 //
372 // MATRIX ASSEMBLY - we use abstract assembly routine, where LS Mat/Vec SetValues
373 // are in fact pointers to allocating or filling functions - this is governed by Linsystem routines
374 //
375 // =======================================================================================
376 
377 
378 // ******************************************
379 // ABSTRACT ASSEMBLY OF MH matrix
380 // TODO: matice by se mela sestavovat zvlast pro kazdou dimenzi (objem, pukliny, pruseciky puklin)
381 // konekce by se mely sestavovat cyklem pres konekce, konekce by mely byt paralelizovany podle
382 // distribuce elementu nizssi dimenze
383 // k tomuto je treba nejdriv spojit s JK verzi, aby se vedelo co se deje v transportu a
384 // predelat mesh a neigbouring
385 // *****************************************
387  LinSys *ls = schur0;
389  MHFEValues fe_values;
390 
391  // We use FESideValues for calculating normal vectors.
392  // For initialization of FESideValues some auxiliary objects are needed.
393  MappingP1<1,3> map1;
394  MappingP1<2,3> map2;
395  MappingP1<3,3> map3;
396  QGauss<0> q0(1);
397  QGauss<1> q1(1);
398  QGauss<2> q2(1);
399  FE_P_disc<1,1,3> fe1;
400  FE_P_disc<0,2,3> fe2;
401  FE_P_disc<0,3,3> fe3;
402  FESideValues<1,3> fe_side_values1(map1, q0, fe1, update_normal_vectors);
403  FESideValues<2,3> fe_side_values2(map2, q1, fe2, update_normal_vectors);
404  FESideValues<3,3> fe_side_values3(map3, q2, fe3, update_normal_vectors);
405 
406  class Boundary *bcd;
407  class Neighbour *ngh;
408 
409  bool fill_matrix = schur0->is_preallocated();
410  //DBGMSG("fill_matrix: %d\n", fill_matrix);
411  int el_row, side_row, edge_row;
412  int tmp_rows[100];
413  //int nsides;
414  int side_rows[4], edge_rows[4]; // rows for sides and edges of one element
415  double local_vb[4]; // 2x2 matrix
416 
417  // to make space for second schur complement, max. 10 neighbour edges of one el.
418  double zeros[1000];
419  for(int i=0; i<1000; i++) zeros[i]=0.0;
420 
421  double minus_ones[4] = { -1.0, -1.0, -1.0, -1.0 };
422  double loc_side_rhs[4];
423  F_ENTRY;
424 
425 
426  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
427 
428  ele = mesh_->element(el_4_loc[i_loc]);
429  el_row = row_4_el[el_4_loc[i_loc]];
430  unsigned int nsides = ele->n_sides();
431  if (fill_matrix) fe_values.update(ele, data_.anisotropy, data_.cross_section, data_.conductivity);
432  double cross_section = data_.cross_section.value(ele->centre(), ele->element_accessor());
433 
434  for (unsigned int i = 0; i < nsides; i++) {
435  side_row = side_rows[i] = side_row_4_id[ mh_dh.side_dof( ele->side(i) ) ];
436  edge_row = edge_rows[i] = row_4_edge[ele->side(i)->edge_idx()];
437  bcd=ele->side(i)->cond();
438 
439  // gravity term on RHS
440  loc_side_rhs[i] = (ele->centre()[ 2 ] - ele->side(i)->centre()[ 2 ]);
441 
442  // set block C and C': side-edge, edge-side
443  double c_val = 1.0;
444 
445  if (bcd) {
446  ElementAccessor<3> b_ele = bcd->element_accessor();
447  EqData::BC_Type type = (EqData::BC_Type)data_.bc_type.value(b_ele.centre(), b_ele);
448  //DBGMSG("BCD id: %d sidx: %d type: %d\n", ele->id(), i, type);
449  if ( type == EqData::none) {
450  // homogeneous neumann
451  } else if ( type == EqData::dirichlet ) {
452  c_val = 0.0;
453  double bc_pressure = data_.bc_pressure.value(b_ele.centre(), b_ele);
454  loc_side_rhs[i] -= bc_pressure;
455  ls->rhs_set_value(edge_row, -bc_pressure);
456  ls->mat_set_value(edge_row, edge_row, -1.0);
457 
458  } else if ( type == EqData::neumann) {
459  double bc_flux = data_.bc_flux.value(b_ele.centre(), b_ele);
460  ls->rhs_set_value(edge_row, bc_flux * bcd->element()->measure() * cross_section);
461  //DBGMSG("neumann edge_row, ele_index,el_idx: %d \t %d \t %d\n", edge_row, ele->index(), ele->side(i)->el_idx());
462 
463  } else if ( type == EqData::robin) {
464  double bc_pressure = data_.bc_pressure.value(b_ele.centre(), b_ele);
465  double bc_sigma = data_.bc_robin_sigma.value(b_ele.centre(), b_ele);
466  ls->rhs_set_value(edge_row, -bcd->element()->measure() * bc_sigma * bc_pressure * cross_section );
467  ls->mat_set_value(edge_row, edge_row, -bcd->element()->measure() * bc_sigma * cross_section );
468 
469  } else {
470  xprintf(UsrErr, "BC type not supported.\n");
471  }
472  }
473  ls->mat_set_value(side_row, edge_row, c_val);
474  ls->mat_set_value(edge_row, side_row, c_val);
475 
476  // assemble matrix for weights in BDDCML
477  // approximation to diagonal of
478  // S = -C - B*inv(A)*B'
479  // as
480  // diag(S) ~ - diag(C) - 1./diag(A)
481  // the weights form a partition of unity to average a discontinuous solution from neighbouring subdomains
482  // to a continuous one
483  // it is important to scale the effect - if conductivity is low for one subdomain and high for the other,
484  // trust more the one with low conductivity - it will be closer to the truth than an arithmetic average
485  if ( typeid(*ls) == typeid(LinSys_BDDC) ) {
486  double val_side = (fe_values.local_matrix())[i*nsides+i];
487  double val_edge = -1./ (fe_values.local_matrix())[i*nsides+i];
488 
489  static_cast<LinSys_BDDC*>(ls)->diagonal_weights_set_value( side_row, val_side );
490  static_cast<LinSys_BDDC*>(ls)->diagonal_weights_set_value( edge_row, val_edge );
491  }
492  }
493 
494  ls->rhs_set_values(nsides, side_rows, loc_side_rhs);
495 
496 
497  // set block A: side-side on one element - block diagonal matrix
498 
499  //std::cout << "subMat in flow" << std::endl;
500  //for ( unsigned i = 0; i < nsides; i++) {
501  // for ( unsigned j = 0; j < nsides; j++) {
502  // std::cout << fe_values.local_matrix()[i*nsides+j] << " ";
503  // }
504  // std::cout << std::endl;
505  //}
506 
507  ls->mat_set_values(nsides, side_rows, nsides, side_rows, fe_values.local_matrix() );
508  // set block B, B': element-side, side-element
509  ls->mat_set_values(1, &el_row, nsides, side_rows, minus_ones);
510  ls->mat_set_values(nsides, side_rows, 1, &el_row, minus_ones);
511 
512 
513  // set sources
514  ls->rhs_set_value(el_row, -1.0 * ele->measure() *
515  data_.cross_section.value(ele->centre(), ele->element_accessor()) *
516  data_.water_source_density.value(ele->centre(), ele->element_accessor()) );
517 
518 
519  // D block: non-compatible conections and diagonal: element-element
520 
521  ls->mat_set_value(el_row, el_row, 0.0); // maybe this should be in virtual block for schur preallocation
522 
523  if ( typeid(*ls) == typeid(LinSys_BDDC) ) {
524  double val_ele = 1.;
525  static_cast<LinSys_BDDC*>(ls)->diagonal_weights_set_value( el_row, val_ele );
526  }
527 
528  // D, E',E block: compatible connections: element-edge
529 
530  for (unsigned int i = 0; i < ele->n_neighs_vb; i++) {
531  // every compatible connection adds a 2x2 matrix involving
532  // current element pressure and a connected edge pressure
533  ngh= ele->neigh_vb[i];
534  tmp_rows[0]=el_row;
535  tmp_rows[1]=row_4_edge[ ngh->edge_idx() ];
536 
537  // compute normal vector to side
538  arma::vec3 nv;
539  ElementFullIter ele_higher = mesh_->element.full_iter(ngh->side()->element());
540  switch (ele_higher->dim()) {
541  case 1:
542  fe_side_values1.reinit(ele_higher, ngh->side()->el_idx());
543  nv = fe_side_values1.normal_vector(0);
544  break;
545  case 2:
546  fe_side_values2.reinit(ele_higher, ngh->side()->el_idx());
547  nv = fe_side_values2.normal_vector(0);
548  break;
549  case 3:
550  fe_side_values3.reinit(ele_higher, ngh->side()->el_idx());
551  nv = fe_side_values3.normal_vector(0);
552  break;
553  }
554 
555  double value = data_.sigma.value( ele->centre(), ele->element_accessor()) *
556  2*data_.conductivity.value( ele->centre(), ele->element_accessor()) *
557  arma::dot(data_.anisotropy.value( ele->centre(), ele->element_accessor())*nv, nv) *
558  data_.cross_section.value( ngh->side()->centre(), ele_higher->element_accessor() ) * // cross-section of higher dim. (2d)
559  data_.cross_section.value( ngh->side()->centre(), ele_higher->element_accessor() ) /
560  data_.cross_section.value( ele->centre(), ele->element_accessor() ) * // crossection of lower dim.
561  ngh->side()->measure();
562 
563 
564  local_vb[0] = -value; local_vb[1] = value;
565  local_vb[2] = value; local_vb[3] = -value;
566 
567  ls->mat_set_values(2, tmp_rows, 2, tmp_rows, local_vb);
568 
569  // update matrix for weights in BDDCML
570  if ( typeid(*ls) == typeid(LinSys_BDDC) ) {
571  int ind = tmp_rows[1];
572  // there is -value on diagonal in block C!
573  double new_val = - value;
574  static_cast<LinSys_BDDC*>(ls)->diagonal_weights_set_value( ind, new_val );
575  }
576 
577  if (n_schur_compls == 2) {
578  // for 2. Schur: N dim edge is conected with N dim element =>
579  // there are nz between N dim edge and N-1 dim edges of the element
580 
581  ls->mat_set_values(nsides, edge_rows, 1, tmp_rows+1, zeros);
582  ls->mat_set_values(1, tmp_rows+1, nsides, edge_rows, zeros);
583 
584  // save all global edge indices to higher positions
585  tmp_rows[2+i] = tmp_rows[1];
586  }
587  }
588 
589 
590  // add virtual values for schur complement allocation
591  switch (n_schur_compls) {
592  case 2:
593  // Connections between edges of N+1 dim. elements neighboring with actual N dim element 'ele'
594  ASSERT(ele->n_neighs_vb*ele->n_neighs_vb<1000, "Too many values in E block.");
595  ls->mat_set_values(ele->n_neighs_vb, tmp_rows+2,
596  ele->n_neighs_vb, tmp_rows+2, zeros);
597 
598  // ls->mat_set_values(nsides, edge_rows, ele->e_row_count, tmp_rows, zeros);
599  // ls->mat_set_values(ele->e_row_count, tmp_rows, nsides, edge_rows, zeros);
600  case 1: // included also for case 2
601  // -(C')*(A-)*B block and its transpose conect edge with its elements
602  ls->mat_set_values(1, &el_row, nsides, edge_rows, zeros);
603  ls->mat_set_values(nsides, edge_rows, 1, &el_row, zeros);
604  // -(C')*(A-)*C block conect all edges of every element
605  ls->mat_set_values(nsides, edge_rows, nsides, edge_rows, zeros);
606  }
607  }
608 
609  //if (! mtx->ins_mod == ALLOCATE ) {
610  // MatAssemblyBegin(mtx->A,MAT_FINAL_ASSEMBLY);
611  // MatAssemblyEnd(mtx->A,MAT_FINAL_ASSEMBLY);
612  // }
613  // set block F - diagonal: edge-edge from Newton BC
614  // also Dirichlet BC
615  /*
616  for (i_loc = 0; i_loc < edge_ds->lsize(); i_loc++) {
617  edge_row = row_4_edge[edge_4_loc[i_loc]];
618  EdgeFullIter edg = mesh_->edge(edge_4_loc[i_loc]);
619 
620  //xprintf(Msg,"F: %d %f\n",old_4_new[edge_row],edg->f_val);
621  //ls->mat_set_value(edge_row, edge_row, edg->f_val);
622  //ls->rhs_set_value(edge_row, edg->f_rhs);
623  }*/
624 
625 
626  if (mortar_method_ == MortarP0) {
627  P0_CouplingAssembler(*this).assembly(*ls);
628  } else if (mortar_method_ == MortarP1) {
629  P1_CouplingAssembler(*this).assembly(*ls);
630  }
631 }
632 
634  vector<int> &dofs, unsigned int &ele_type, double &delta, arma::vec &dirichlet) {
635 
636  const Element *ele;
637 
638  if (i_ele == ml_it_->size() ) { // master element .. 1D
639  ele_type = 0;
640  delta = -delta_0;
641  ele=master_;
642  } else {
643  ele_type = 1;
644  const Intersection &isect=intersections_[ (*ml_it_)[i_ele] ];
645  delta = isect.intersection_true_size();
646  ele = isect.slave_iter();
647  }
648 
649  dofs.resize(ele->n_sides());
650 
651  for(unsigned int i_side=0; i_side < ele->n_sides(); i_side++ ) {
652  dofs[i_side]=darcy_.row_4_edge[ele->side(i_side)->edge_idx()];
653  Boundary * bcd = ele->side(i_side)->cond();
654  if (bcd) {
655  dirichlet.resize(ele->n_sides());
656  ElementAccessor<3> b_ele = bcd->element_accessor();
657  DarcyFlowMH::EqData::BC_Type type = (DarcyFlowMH::EqData::BC_Type)darcy_.data_.bc_type.value(b_ele.centre(), b_ele);
658  //DBGMSG("bcd id: %d sidx: %d type: %d\n", ele->id(), i_side, type);
659  if (type == DarcyFlowMH::EqData::dirichlet) {
660  //DBGMSG("Dirichlet: %d\n", ele->index());
661  dofs[i_side] = -dofs[i_side];
662  double bc_pressure = darcy_.data_.bc_pressure.value(b_ele.centre(), b_ele);
663  dirichlet[i_side] = bc_pressure;
664  }
665  }
666  }
667 
668 }
669 
670 /**
671  * Works well but there is large error next to the boundary.
672  */
674  double delta_i, delta_j;
675  arma::mat product;
676  arma::vec dirichlet_i, dirichlet_j;
677  unsigned int ele_type_i, ele_type_j;
678 
679  unsigned int i,j;
680  vector<int> dofs_i,dofs_j;
681 
682  for(ml_it_ = master_list_.begin(); ml_it_ != master_list_.end(); ++ml_it_) {
683 
684  if (ml_it_->size() == 0) continue; // skip empty masters
685 
686 
687  // on the intersection element we consider
688  // * intersection dofs for master and slave
689  // those are dofs of the space into which we interpolate
690  // base functions from individual master and slave elements
691  // For the master dofs both are usualy eqivalent.
692  // * original dofs - for master same as intersection dofs, for slave
693  // all dofs of slave elements
694 
695  // form list of intersection dofs, in this case pressures in barycenters
696  // but we do not use those form MH system in order to allow second schur somplement. We rather map these
697  // dofs to pressure traces, i.e. we use averages of traces as barycentric values.
698 
699 
700  master_ = intersections_[ml_it_->front()].master_iter();
701  delta_0 = master_->measure();
702 
703  double master_sigma=darcy_.data_.sigma.value( master_->centre(), master_->element_accessor());
704 
705  // rows
706  for(i = 0; i <= ml_it_->size(); ++i) {
707  //cout << "Intersection:" << master_->index() << ", "
708  // << intersections_[ (*ml_it_)[i] ].slave_iter()->index();
709  pressure_diff(i, dofs_i, ele_type_i, delta_i, dirichlet_i);
710  //columns
711  for (j = 0; j <= ml_it_->size(); ++j) {
712  pressure_diff(j, dofs_j, ele_type_j, delta_j, dirichlet_j);
713 
714  double scale = -master_sigma * delta_i * delta_j / delta_0;
715  product = scale * tensor_average[ele_type_i][ele_type_j];
716 
717 
718  //cout << product << endl;
719 
720 
721  arma::vec rhs(dofs_i.size());
722  rhs.zeros();
723  ls.set_values( dofs_i, dofs_j, product, rhs, dirichlet_i, dirichlet_j);
724  }
725  }
726  } // loop over master elements
727 }
728 
729 
730 
/**
 * Fill dofs[shift .. shift+n_sides-1] with the global edge-row indices of the
 * element's sides, for use by the P1 mortar coupling assembly.
 *
 * For a side carrying a Dirichlet boundary condition the dof index is negated
 * and the prescribed boundary pressure is stored at the matching position of
 * `dirichlet` — apparently a sign convention interpreted by LinSys::set_values
 * (TODO confirm: the same negate-and-store pattern appears in
 * P0_CouplingAssembler::pressure_diff).
 */
731  void P1_CouplingAssembler::add_sides(const Element * ele, unsigned int shift, vector<int> &dofs, vector<double> &dirichlet)
732  {
733 
734  for(unsigned int i_side=0; i_side < ele->n_sides(); i_side++ ) {
735  dofs[shift+i_side] = darcy_.row_4_edge[ele->side(i_side)->edge_idx()];
736  Boundary * bcd = ele->side(i_side)->cond();
737 
738  if (bcd) {
739  ElementAccessor<3> b_ele = bcd->element_accessor();
740  DarcyFlowMH::EqData::BC_Type type = (DarcyFlowMH::EqData::BC_Type)darcy_.data_.bc_type.value(b_ele.centre(), b_ele);
741  //DBGMSG("bcd id: %d sidx: %d type: %d\n", ele->id(), i_side, type);
742  if (type == DarcyFlowMH::EqData::dirichlet) {
743  //DBGMSG("Dirichlet: %d\n", ele->index());
744  dofs[shift + i_side] = -dofs[shift + i_side];
745  double bc_pressure = darcy_.data_.bc_pressure.value(b_ele.centre(), b_ele);
746  dirichlet[shift + i_side] = bc_pressure;
747  }
748  }
749  }
750  }
751 
752 
753 /**
754  * P1 connection of different dimensions
755  * - demonstrated convergence, but still major open questions:
756  * ? in all test cases the error on the fracture is less on the left and greater on the right
757  * with increasing trend
758  * tried:
759  * - changed order of 1d elements (no change)
760  * - higher precision of ngh output and linear solver (no change)
761  * ? seems that there should be some factor 6.0 in the communication term
762  * ? in the case of infinite k2 -> simplest 1d-constant communication, the biggest difference on borders,
763  * numerical solution greater then analytical
764  *
765  * TODO:
766  * * full implementation of Dirichlet BC ( also for 2d sides)
767  */
768 
770 
771  for (const Intersection &intersec : intersections_) {
772  const Element * master = intersec.master_iter();
773  const Element * slave = intersec.slave_iter();
774 
775  add_sides(master, 0, dofs, dirichlet);
776  add_sides(slave, 2, dofs, dirichlet);
777  double master_sigma=darcy_.data_.sigma.value( master->centre(), master->element_accessor());
778 
779 /*
780  * Local coordinates on 1D
781  * t0
782  * node 0: 0.0
783  * node 1: 1.0
784  *
785  * base fce points
786  * t0 = 0.0 on side 0 node 0
787  * t0 = 1.0 on side 1 node 1
788  *
789  * Local coordinates on 2D
790  * t0 t1
791  * node 0: 0.0 0.0
792  * node 1: 1.0 0.0
793  * node 2: 0.0 1.0
794  *
795  * base fce points
796  * t0=0.5, t1=0.0 on side 0 nodes (0,1)
797  * t0=0.5, t1=0.5 on side 1 nodes (1,2)
798  * t0=0.0, t1=0.5 on side 2 nodes (2,0)
799  */
800 
801 
802 
803  arma::vec point_Y(1);
804  point_Y.fill(1.0);
805  arma::vec point_2D_Y(intersec.map_to_slave(point_Y)); // local coordinates of Y on slave
806  arma::vec point_1D_Y(intersec.map_to_master(point_Y)); // local coordinates of Y on master
807 
808  arma::vec point_X(1);
809  point_X.fill(0.0);
810  arma::vec point_2D_X(intersec.map_to_slave(point_X)); // local coordinates of X on slave
811  arma::vec point_1D_X(intersec.map_to_master(point_X)); // local coordinates of X on master
812 
813  arma::mat base_2D(3, 3);
814  // base fce = a0 + a1*t0 + a2*t1
815  // a0 a1 a2
816  base_2D << 1.0 << 0.0 << -2.0 << arma::endr //point on side 0
817  << -1.0 << 2.0 << 2.0 << arma::endr // point on side 1
818  << 1.0 << -2.0 << 0.0 << arma::endr;// point on side 2
819 
820  arma::mat base_1D(2, 2);
821  // base fce = a0 + a1 * t0
822  // a0 a1
823  base_1D << 1.0 << -1.0 << arma::endr // point on side 0,
824  << 0.0 << 1.0 << arma::endr; // point on side 1,
825 
826 
827  //base_1D.print("base_1D: ");
828  //base_2D.print("base_2D: ");
829  //point_2D_X.print("point_2D_X: ");
830  //point_2D_Y.print("point_2D_Y: ");
831 
832  arma::vec difference_in_Y(5);
833  arma::vec difference_in_X(5);
834 
835  // slave sides 0,1,2
836  difference_in_Y.subvec(0, 2) = -base_2D * point_2D_Y;
837  difference_in_X.subvec(0, 2) = -base_2D * point_2D_X;
838  // master sides 3,4
839  difference_in_Y.subvec(3, 4) = base_1D * point_1D_Y;
840  difference_in_X.subvec(3, 4) = base_1D * point_1D_X;
841 
842  //difference_in_X.print("difference_in_X: ");
843  //difference_in_Y.print("difference_in_Y: ");
844 
845  //prvky matice A[i,j]
846  arma::mat A(5, 5);
847  for (int i = 0; i < 5; ++i) {
848  for (int j = 0; j < 5; ++j) {
849  A(i, j) = -master_sigma * intersec.intersection_true_size() *
850  ( difference_in_Y[i] * difference_in_Y[j]
851  + difference_in_Y[i] * difference_in_X[j]/2
852  + difference_in_X[i] * difference_in_Y[j]/2
853  + difference_in_X[i] * difference_in_X[j]
854  ) * (1.0 / 3.0);
855 
856  }
857  }
858  //for(int i=0;i<5;i++) DBGMSG("idx %d: %d\n",i, dofs[i]);
859  //A.print("A:");
860 
861 
862  ls.set_values( dofs, dofs, A, rhs, dirichlet, dirichlet);
863 
864  }
865 }
866 
867 
868 
869 /*******************************************************************************
870  * COMPOSE WATER MH MATRIX WITHOUT SCHUR COMPLEMENT
871  ******************************************************************************/
872 
874 
875  START_TIMER("preallocation");
876  int i_loc, el_row;
877  Element *ele;
878  Vec aux;
879  PetscErrorCode err;
880 
881  F_ENTRY;
882 
883  //xprintf(Msg,"****************** problem statistics \n");
884  //xprintf(Msg,"edges: %d \n",mesh_->n_edges());
885  //xprintf(Msg,"sides: %d \n",mesh_->n_sides());
886  //xprintf(Msg,"elements: %d \n",mesh_->n_elements());
887  //xprintf(Msg,"************************************* \n");
888  //xprintf(Msg,"problem size: %d \n",this->size);
889  //xprintf(Msg,"****************** problem statistics \n");
890 
891  auto in_rec = this->input_record_.val<Input::AbstractRecord>("solver");
892 
893  if (schur0 == NULL) { // create Linear System for MH matrix
894 
895  if (in_rec.type() == LinSys_BDDC::input_type) {
896 #ifdef HAVE_BDDCML
897  xprintf(Warn, "For BDDC is using no Schur complements.");
898  n_schur_compls = 0;
899  LinSys_BDDC *ls = new LinSys_BDDC(global_row_4_sub_row->size(), &(*rows_ds),
900  3, // 3 == la::BddcmlWrapper::SPD_VIA_SYMMETRICGENERAL
901  1, // 1 == number of subdomains per process
902  true); // swap signs of matrix and rhs to make the matrix SPD
903  ls->set_from_input(in_rec);
904  ls->set_solution( NULL );
905  // possible initialization particular to BDDC
906  START_TIMER("BDDC set mesh data");
908  schur0=ls;
909  END_TIMER("BDDC set mesh data");
910 #else
911  xprintf(Err, "Flow123d was not build with BDDCML support.\n");
912 #endif
913  }
914  else if (in_rec.type() == LinSys_PETSC::input_type) {
915  // use PETSC for serial case even when user wants BDDC
916  if (n_schur_compls > 2) {
917  xprintf(Warn, "Invalid number of Schur Complements. Using 2.");
918  n_schur_compls = 2;
919  }
920 
921  LinSys_PETSC *schur1, *schur2;
922 
923  if (n_schur_compls == 0) {
924  LinSys_PETSC *ls = new LinSys_PETSC( &(*rows_ds) );
925 
926  // temporary solution; we have to set precision also for sequantial case of BDDC
927  // final solution should be probably call of direct solver for oneproc case
928  if (in_rec.type() != LinSys_BDDC::input_type) ls->set_from_input(in_rec);
929  else {
930  ls->LinSys::set_from_input(in_rec); // get only common options
931  }
932 
933  ls->set_solution( NULL );
934  schur0=ls;
935  } else {
936  IS is;
937  err = ISCreateStride(PETSC_COMM_WORLD, side_ds->lsize(), rows_ds->begin(), 1, &is);
938  ASSERT(err == 0,"Error in ISCreateStride.");
939 
940  SchurComplement *ls = new SchurComplement(is, &(*rows_ds));
941  ls->set_from_input(in_rec);
942  ls->set_solution( NULL );
943  ls->set_positive_definite();
944 
945  // make schur1
947  if (n_schur_compls==1) {
948  schur1 = new LinSys_PETSC(ds);
949  } else {
950  IS is;
951  err = ISCreateStride(PETSC_COMM_WORLD, el_ds->lsize(), ls->get_distribution()->begin(), 1, &is);
952  ASSERT(err == 0,"Error in ISCreateStride.");
953  SchurComplement *ls1 = new SchurComplement(is, ds); // is is deallocated by SchurComplement
954  ls1->set_negative_definite();
955 
956  // make schur2
957  schur2 = new LinSys_PETSC( ls1->make_complement_distribution() );
958  ls1->set_complement( schur2 );
959  schur1 = ls1;
960  }
961  ls->set_complement( schur1 );
962  schur0=ls;
963  }
964 
965  START_TIMER("PETSC PREALLOCATION");
968  assembly_steady_mh_matrix(); // preallocation
969  VecZeroEntries(schur0->get_solution());
970  END_TIMER("PETSC PREALLOCATION");
971  }
972  else {
973  xprintf(Err, "Unknown solver type. Internal error.\n");
974  }
975  }
976 
977 
978  END_TIMER("preallocation");
979 }
980 
981 
982 
983 
985 
986  data_.set_time(*time_);
987  DBGMSG("Assembly linear system\n");
988  if (data_.changed()) {
989  DBGMSG(" Data changed\n");
990  // currently we have no optimization for cases when just time term data or RHS data are changed
991  START_TIMER("full assembly");
992  if (typeid(*schur0) != typeid(LinSys_BDDC)) {
993  schur0->start_add_assembly(); // finish allocation and create matrix
994  }
997  assembly_steady_mh_matrix(); // fill matrix
1000 
1001  if (!time_->is_steady()) {
1002  DBGMSG(" setup time term\n");
1003  // assembly time term and rhs
1004  setup_time_term();
1005  modify_system();
1006  }
1007  END_TIMER("full assembly");
1008  } else {
1009  START_TIMER("modify system");
1010  if (!time_->is_steady()) {
1011  modify_system();
1012  } else {
1013  xprintf(PrgErr, "Planned computation time for steady solver, but data are not changed.\n");
1014  }
1015  END_TIMER("modiffy system");
1016  }
1017 
1018 
1019  //schur0->view_local_matrix();
1020  //PetscViewer myViewer;
1021  //PetscViewerASCIIOpen(PETSC_COMM_WORLD,"matis.m",&myViewer);
1022  //PetscViewerSetFormat(myViewer,PETSC_VIEWER_ASCII_MATLAB);
1023 
1024  //MatView( schur0->get_matrix(),PETSC_VIEWER_STDOUT_WORLD );
1025  //DBGMSG("RHS\n");
1026  //VecView(schur0->get_rhs(), PETSC_VIEWER_STDOUT_WORLD);
1027  //VecView(schur0->get_solution(), PETSC_VIEWER_STDOUT_WORLD);
1028 
1029  //PetscViewerDestroy(myViewer);
1030 
1031 }
1032 
1033 
1034 
1036  // prepare mesh for BDDCML
1037  // initialize arrays
1038  // auxiliary map for creating coordinates of local dofs and global-to-local numbering
1039  std::map<int,arma::vec3> localDofMap;
1040  // connectivity for the subdomain, i.e. global dof numbers on element, stored element-by-element
1041  // Indices of Nodes on Elements
1042  std::vector<int> inet;
1043  // number of degrees of freedom on elements - determines elementwise chunks of INET array
1044  // Number of Nodes on Elements
1045  std::vector<int> nnet;
1046  // Indices of Subdomain Elements in Global Numbering - for local elements, their global indices
1047  std::vector<int> isegn;
1048 
1049  // This array is currently not used in BDDCML, it was used as an interface scaling alternative to scaling
1050  // by diagonal. It corresponds to the rho-scaling.
1051  std::vector<double> element_permeability;
1052 
1053  // maximal and minimal dimension of elements
1054  int elDimMax = 1;
1055  int elDimMin = 3;
1056  for ( int i_loc = 0; i_loc < el_ds->lsize(); i_loc++ ) {
1057  // for each element, create local numbering of dofs as fluxes (sides), pressure (element centre), Lagrange multipliers (edges), compatible connections
1058  ElementFullIter el = mesh_->element(el_4_loc[i_loc]);
1059  int e_idx = el.index();
1060 
1061  int elDim = el->dim();
1062  elDimMax = std::max( elDimMax, elDim );
1063  elDimMin = std::min( elDimMin, elDim );
1064 
1065  isegn.push_back( e_idx );
1066  int nne = 0;
1067 
1068  FOR_ELEMENT_SIDES(el,si) {
1069  // insert local side dof
1070  int side_row = side_row_4_id[ mh_dh.side_dof( el->side(si) ) ];
1071  arma::vec3 coord = el->side(si)->centre();
1072 
1073  localDofMap.insert( std::make_pair( side_row, coord ) );
1074  inet.push_back( side_row );
1075  nne++;
1076  }
1077 
1078  // insert local pressure dof
1079  int el_row = row_4_el[ el_4_loc[i_loc] ];
1080  arma::vec3 coord = el->centre();
1081  localDofMap.insert( std::make_pair( el_row, coord ) );
1082  inet.push_back( el_row );
1083  nne++;
1084 
1085  FOR_ELEMENT_SIDES(el,si) {
1086  Edge *edg=el->side(si)->edge();
1087 
1088  // insert local edge dof
1089  int edge_row = row_4_edge[ el->side(si)->edge_idx() ];
1090  arma::vec3 coord = el->side(si)->centre();
1091 
1092  localDofMap.insert( std::make_pair( edge_row, coord ) );
1093  inet.push_back( edge_row );
1094  nne++;
1095  }
1096 
1097  // insert dofs related to compatible connections
1098  for ( int i_neigh = 0; i_neigh < el->n_neighs_vb; i_neigh++) {
1099  int edge_row = row_4_edge[ el->neigh_vb[i_neigh]->edge_idx() ];
1100  arma::vec3 coord = el->neigh_vb[i_neigh]->edge()->side(0)->centre();
1101 
1102  localDofMap.insert( std::make_pair( edge_row, coord ) );
1103  inet.push_back( edge_row );
1104  nne++;
1105  }
1106 
1107  nnet.push_back( nne );
1108 
1109  // version for rho scaling
1110  // trace computation
1111  arma::vec3 centre = el->centre();
1112  double conduct = data_.conductivity.value( centre , el->element_accessor() );
1113  double cs = data_.cross_section.value( centre, el->element_accessor() );
1114  arma::mat33 aniso = data_.anisotropy.value( centre, el->element_accessor() );
1115 
1116  // compute mean on the diagonal
1117  double coef = 0.;
1118  for ( int i = 0; i < 3; i++) {
1119  coef = coef + aniso.at(i,i);
1120  }
1121  // Maybe divide by cs
1122  coef = conduct*coef / 3;
1123 
1124  ASSERT( coef > 0.,
1125  "Zero coefficient of hydrodynamic resistance %f . \n ", coef );
1126  element_permeability.push_back( 1. / coef );
1127  }
1128  //convert set of dofs to vectors
1129  // number of nodes (= dofs) on the subdomain
1130  int numNodeSub = localDofMap.size();
1131  ASSERT_EQUAL( numNodeSub, global_row_4_sub_row->size() );
1132  // Indices of Subdomain Nodes in Global Numbering - for local nodes, their global indices
1133  std::vector<int> isngn( numNodeSub );
1134  // pseudo-coordinates of local nodes (i.e. dofs)
1135  // they need not be exact, they are used just for some geometrical considerations in BDDCML,
1136  // such as selection of corners maximizing area of a triangle, bounding boxes fro subdomains to
1137  // find candidate neighbours etc.
1138  std::vector<double> xyz( numNodeSub * 3 ) ;
1139  int ind = 0;
1140  std::map<int,arma::vec3>::iterator itB = localDofMap.begin();
1141  for ( ; itB != localDofMap.end(); ++itB ) {
1142  isngn[ind] = itB -> first;
1143 
1144  arma::vec3 coord = itB -> second;
1145  for ( int j = 0; j < 3; j++ ) {
1146  xyz[ j*numNodeSub + ind ] = coord[j];
1147  }
1148 
1149  ind++;
1150  }
1151  localDofMap.clear();
1152 
1153  // Number of Nodal Degrees of Freedom
1154  // nndf is trivially one - dofs coincide with nodes
1155  std::vector<int> nndf( numNodeSub, 1 );
1156 
1157  // prepare auxiliary map for renumbering nodes
1158  typedef std::map<int,int> Global2LocalMap_; //! type for storage of global to local map
1159  Global2LocalMap_ global2LocalNodeMap;
1160  for ( unsigned ind = 0; ind < isngn.size(); ++ind ) {
1161  global2LocalNodeMap.insert( std::make_pair( static_cast<unsigned>( isngn[ind] ), ind ) );
1162  }
1163 
1164  //std::cout << "INET: \n";
1165  //std::copy( inet.begin(), inet.end(), std::ostream_iterator<int>( std::cout, " " ) );
1166  //std::cout << std::endl;
1167  //std::cout << "ISNGN: \n";
1168  //std::copy( isngn.begin(), isngn.end(), std::ostream_iterator<int>( std::cout, " " ) );
1169  //std::cout << std::endl << std::flush;
1170  //std::cout << "ISEGN: \n";
1171  //std::copy( isegn.begin(), isegn.end(), std::ostream_iterator<int>( std::cout, " " ) );
1172  //std::cout << std::endl << std::flush;
1173  //MPI_Barrier( PETSC_COMM_WORLD );
1174 
1175  // renumber nodes in the inet array to locals
1176  int indInet = 0;
1177  for ( int iEle = 0; iEle < isegn.size(); iEle++ ) {
1178  int nne = nnet[ iEle ];
1179  for ( unsigned ien = 0; ien < nne; ien++ ) {
1180 
1181  int indGlob = inet[indInet];
1182  // map it to local node
1183  Global2LocalMap_::iterator pos = global2LocalNodeMap.find( indGlob );
1184  ASSERT( pos != global2LocalNodeMap.end(),
1185  "Cannot remap node index %d to local indices. \n ", indGlob );
1186  int indLoc = static_cast<int> ( pos -> second );
1187 
1188  // store the node
1189  inet[ indInet++ ] = indLoc;
1190  }
1191  }
1192 
1193  int numNodes = size;
1194  int numDofsInt = size;
1195  int spaceDim = 3; // TODO: what is the proper value here?
1196  int meshDim = elDimMax;
1197  //std::cout << "I have identified following dimensions: max " << elDimMax << ", min " << elDimMin << std::endl;
1198 
1199  bddc_ls -> load_mesh( spaceDim, numNodes, numDofsInt, inet, nnet, nndf, isegn, isngn, isngn, xyz, element_permeability, meshDim );
1200 }
1201 
1202 
1203 
1204 
1205 //=============================================================================
1206 // DESTROY WATER MH SYSTEM STRUCTURE
1207 //=============================================================================
1209  //if (schur2 != NULL) {
1210  // delete schur2;
1211  //ISDestroy(&IS2);
1212  //}
1213  //if (schur1 != NULL) {
1214  // delete schur1;
1215  //ISDestroy(&IS1);
1216  //}
1217  if (schur0 != NULL) delete schur0;
1218 
1219  delete edge_ds;
1220  delete el_ds;
1221  delete side_ds;
1222 
1223  xfree(el_4_loc);
1224  xfree(row_4_el);
1227  xfree(edge_4_loc);
1228  xfree(row_4_edge);
1229 
1230  if (solution != NULL) {
1231  VecDestroy(&sol_vec);
1232  xfree(solution);
1233  }
1234 
1235  delete output_object;
1236 
1237  //VecScatterDestroy(&par_to_all);
1238 
1239 }
1240 
1241 
1242 // ================================================
 1243 // PARALLEL PART
1244 //
1245 
 1246 /**
 1247  * Make connectivity graph of the second Schur complement and compute optimal partitioning.
 1248  * This version assigns 1D and 2D edges to one processor, represents them as
 1249  * a weighted vertex, and 2D-3D neighbourings as weighted edges. 3D edges are
 1250  * then distributed over all processors.
 1251  */
1252 /*
1253 void make_edge_conection_graph(Mesh *mesh, SparseGraph * &graph) {
1254 
1255  Distribution edistr = graph->get_distr();
1256  //Edge *edg;
1257  Element *ele;
1258  int li, eid, , i_edg;
1259  unsigned int si, i_neigh;
1260  int e_weight;
1261 
1262  int edge_dim_weights[3] = { 100, 10, 1 };
1263  F_ENTRY;
1264 
1265  i_edg=0;
1266  FOR_EDGES(mesh, edg) {
1267 
1268  // skip non-local edges
1269  if (!edistr.is_local(i_edg)) continue;
1270 
1271  e_weight = edge_dim_weights[edg->side(0)->element()->dim() - 1];
1272  // for all connected elements
1273  FOR_EDGE_SIDES( edg, li ) {
1274  ASSERT( edg->side(li)->valid(),"NULL side of edge.");
1275  ele = edg->side(li)->element();
1276  ASSERT(NONULL(ele),"NULL element of side.");
1277 
1278  // for sides of connected element, excluding edge itself
1279  for(si=0; si<ele->n_sides(); si++) {
1280  eid = ele->side(si)->edge_idx();
1281  if (eid != i_edg)
1282  graph->set_edge(i_edg, eid, e_weight);
1283  }
1284 
1285  // include connections from lower dim. edge
1286  // to the higher dimension
1287  for (i_neigh = 0; i_neigh < ele->n_neighs_vb; i_neigh++) {
1288  eid = ele->neigh_vb[i_neigh]->edge_idx();
1289  graph->set_edge(i_edg, eid, e_weight);
1290  graph->set_edge(eid, i_edg, e_weight);
1291  }
1292  }
1293  i_edg++;
1294  }
1295 
1296  graph->finalize();
1297 }
1298 */
1299 /**
1300  * Make connectivity graph of elements of mesh - dual graph: elements vertices of graph.
1301  */
1302 /*
1303 void make_element_connection_graph(Mesh *mesh, SparseGraph * &graph, bool neigh_on) {
1304 
1305  Distribution edistr = graph->get_distr();
1306 
1307  Edge *edg;
1308  int li, e_idx, i_neigh;
1309  int i_s, n_s;
1310  F_ENTRY;
1311 
1312  //int elDimMax = 1;
1313  //int elDimMin = 3;
1314  //FOR_ELEMENTS(mesh, ele) {
1315  // //xprintf(Msg,"Element id %d , its index %d.\n",ele.id(), i_ele);
1316  // int elDim = ele->dim();
1317  // elDimMax = std::max( elDimMax, elDim );
1318  // elDimMin = std::min( elDimMin, elDim );
1319  //}
1320  //std::cout << "max and min element dimensions: " << elDimMax << " " << elDimMin << std::endl;
1321 
1322  FOR_ELEMENTS(mesh, ele) {
1323  //xprintf(Msg,"Element id %d , its index %d.\n",ele.id(), i_ele);
1324 
1325  // skip non-local elements
1326  if (!edistr.is_local(ele.index()))
1327  continue;
1328 
1329  // for all connected elements
1330  FOR_ELEMENT_SIDES( ele, si ) {
1331  edg = ele->side(si)->edge();
1332 
1333  FOR_EDGE_SIDES( edg, li ) {
1334  ASSERT(edg->side(li)->valid(),"NULL side of edge.");
1335  e_idx = ELEMENT_FULL_ITER(mesh, edg->side(li)->element()).index();
1336 
1337  // for elements of connected elements, excluding element itself
1338  if (e_idx != ele.index()) {
1339  graph->set_edge(ele.index(), e_idx);
1340  }
1341  }
1342  }
1343 
1344  // include connections from lower dim. edge
1345  // to the higher dimension
1346  if (neigh_on) {
1347  for (i_neigh = 0; i_neigh < ele->n_neighs_vb; i_neigh++) {
1348  n_s = ele->neigh_vb[i_neigh]->edge()->n_sides;
1349  for (i_s = 0; i_s < n_s; i_s++) {
1350  e_idx=ELEMENT_FULL_ITER(mesh, ele->neigh_vb[i_neigh]->edge()->side(i_s)->element()).index();
1351  graph->set_edge(ele.index(), e_idx);
1352  graph->set_edge(e_idx, ele.index());
1353  }
1354  }
1355  }
1356  }
1357  graph->finalize();
1358 }*/
1359 
1360 
1361 // ========================================================================
1362 // to finish row_4_id arrays we have to convert individual numberings of
1363 // sides/els/edges to whole numbering of rows. To this end we count shifts
1364 // for sides/els/edges on each proc and then we apply them on row_4_id
1365 // arrays.
1366 // we employ macros to avoid code redundancy
1367 // =======================================================================
1369  int i, shift;
1370  int np = edge_ds->np();
1371  int edge_shift[np], el_shift[np], side_shift[np];
1372  unsigned int rows_starts[np];
1373 
1374  int edge_n_id = mesh_->n_edges(),
1375  el_n_id = mesh_->element.size(),
1376  side_n_id = mesh_->n_sides();
1377 
1378  // compute shifts on every proc
1379  shift = 0; // in this var. we count new starts of arrays chunks
1380  for (i = 0; i < np; i++) {
1381  side_shift[i] = shift - (side_ds->begin(i)); // subtract actual start of the chunk
1382  shift += side_ds->lsize(i);
1383  el_shift[i] = shift - (el_ds->begin(i));
1384  shift += el_ds->lsize(i);
1385  edge_shift[i] = shift - (edge_ds->begin(i));
1386  shift += edge_ds->lsize(i);
1387  rows_starts[i] = shift;
1388  }
1389  //DBGPRINT_INT("side_shift",np,side_shift);
1390  //DBGPRINT_INT("el_shift",np,el_shift);
1391  //DBGPRINT_INT("edge_shift",np,edge_shift);
1392  // apply shifts
1393  for (i = 0; i < side_n_id; i++) {
1394  int &what = side_row_4_id[i];
1395  if (what >= 0)
1396  what += side_shift[side_ds->get_proc(what)];
1397  }
1398  for (i = 0; i < el_n_id; i++) {
1399  int &what = row_4_el[i];
1400  if (what >= 0)
1401  what += el_shift[el_ds->get_proc(what)];
1402 
1403  }
1404  for (i = 0; i < edge_n_id; i++) {
1405  int &what = row_4_edge[i];
1406  if (what >= 0)
1407  what += edge_shift[edge_ds->get_proc(what)];
1408  }
1409  // make distribution of rows
1410  for (i = np - 1; i > 0; i--)
1411  rows_starts[i] -= rows_starts[i - 1];
1412 
1413  rows_ds = boost::make_shared<Distribution>(&(rows_starts[0]), PETSC_COMM_WORLD);
1414 }
1415 
1417  START_TIMER("prepare scatter");
1418  // prepare Scatter form parallel to sequantial in original numbering
1419  {
1420  IS is_loc;
1421  int i, *loc_idx; //, si;
1422 
1423  // create local solution vector
1424  solution = (double *) xmalloc(size * sizeof(double));
1425  VecCreateSeqWithArray(PETSC_COMM_SELF,1, size, solution,
1426  &(sol_vec));
1427 
1428  // create seq. IS to scatter par solutin to seq. vec. in original order
1429  // use essentialy row_4_id arrays
1430  loc_idx = (int *) xmalloc(size * sizeof(int));
1431  i = 0;
1432  FOR_ELEMENTS(mesh_, ele) {
1433  FOR_ELEMENT_SIDES(ele,si) {
1434  loc_idx[i++] = side_row_4_id[ mh_dh.side_dof( ele->side(si) ) ];
1435  }
1436  }
1437  FOR_ELEMENTS(mesh_, ele) {
1438  loc_idx[i++] = row_4_el[ele.index()];
1439  }
1440  for(unsigned int i_edg=0; i_edg < mesh_->n_edges(); i_edg++) {
1441  loc_idx[i++] = row_4_edge[i_edg];
1442  }
1443  ASSERT( i==size,"Size of array does not match number of fills.\n");
1444  //DBGPRINT_INT("loc_idx",size,loc_idx);
1445  ISCreateGeneral(PETSC_COMM_SELF, size, loc_idx, PETSC_COPY_VALUES, &(is_loc));
1446  xfree(loc_idx);
1447  VecScatterCreate(schur0->get_solution(), is_loc, sol_vec,
1448  PETSC_NULL, &par_to_all);
1449  ISDestroy(&(is_loc));
1450  }
1452 
1453  END_TIMER("prepare scatter");
1454 
1455 }
1456 
1457 // ====================================================================================
1458 // - compute optimal edge partitioning
1459 // - compute appropriate partitioning of elements and sides
1460 // - make arrays: *_id_4_loc and *_row_4_id to allow parallel assembly of the MH matrix
1461 // ====================================================================================
1463 
1464  START_TIMER("prepare parallel");
1465 
1466  int *loc_part; // optimal (edge,el) partitioning (local chunk)
1467  int *id_4_old; // map from old idx to ids (edge,el)
1468  int i, loc_i;
1469 
1470  int e_idx;
1471 
1472 
1473  F_ENTRY;
1474 
1475  //ierr = MPI_Barrier(PETSC_COMM_WORLD);
1476  //ASSERT(ierr == 0, "Error in MPI_Barrier.");
1477 
1478  id_4_old = new int[mesh_->n_elements()];
1479  i = 0;
1480  FOR_ELEMENTS(mesh_, el) id_4_old[i++] = el.index();
1481 
1482  mesh_->get_part()->id_maps(mesh_->element.size(), id_4_old, el_ds, el_4_loc, row_4_el);
1483  //DBGPRINT_INT("el_4_loc",el_ds->lsize(),el_4_loc);
1484  //xprintf(Msg,"Number of elements in subdomain %d \n",el_ds->lsize());
1485  delete[] id_4_old;
1486 
1487  //el_ds->view( std::cout );
1488  //
1489  //DBGMSG("Compute appropriate edge partitioning ...\n");
1490  //optimal element part; loc. els. id-> new el. numbering
1491  Distribution init_edge_ds(DistributionLocalized(), mesh_->n_edges(), PETSC_COMM_WORLD);
1492  // partitioning of edges, edge belongs to the proc of his first element
1493  // this is not optimal but simple
1494  loc_part = new int[init_edge_ds.lsize()];
1495  id_4_old = new int[mesh_->n_edges()];
1496  {
1497  loc_i = 0;
1498  FOR_EDGES(mesh_, edg ) {
1499  unsigned int i_edg = edg - mesh_->edges.begin();
1500  // partition
1501  e_idx = mesh_->element.index(edg->side(0)->element());
1502  //xprintf(Msg,"Index of edge: %d first element: %d \n",edgid,e_idx);
1503  if (init_edge_ds.is_local(i_edg)) {
1504  // find (new) proc of the first element of the edge
1505  loc_part[loc_i++] = el_ds->get_proc(row_4_el[e_idx]);
1506  }
1507  // id array
1508  id_4_old[i_edg] = i_edg;
1509  }
1510  }
1511 
1512  Partitioning::id_maps(mesh_->n_edges(), id_4_old, init_edge_ds, loc_part, edge_ds, edge_4_loc, row_4_edge);
1513  delete[] loc_part;
1514  delete[] id_4_old;
1515 
1516  //optimal side part; loc. sides; id-> new side numbering
1517  Distribution init_side_ds(DistributionBlock(), mesh_->n_sides(), PETSC_COMM_WORLD);
1518  // partitioning of sides follows elements
1519  loc_part = new int[init_side_ds.lsize()];
1520  id_4_old = new int[mesh_->n_sides()];
1521  {
1522  int is = 0;
1523  loc_i = 0;
1524  FOR_SIDES(mesh_, side ) {
1525  // partition
1526  if (init_side_ds.is_local(is)) {
1527  // find (new) proc of the element of the side
1528  loc_part[loc_i++] = el_ds->get_proc(
1530  }
1531  // id array
1532  id_4_old[is++] = mh_dh.side_dof( side );
1533  }
1534  }
1535 
1536  Partitioning::id_maps(mesh_->n_sides(), id_4_old, init_side_ds, loc_part, side_ds,
1538  delete [] loc_part;
1539  delete [] id_4_old;
1540 
1541  //edge_ds->view( std::cout );
1542  //side_ds->view( std::cout );
1543 
1544  /*
1545  DBGPRINT_INT("edge_id_4_loc",edge_ds->lsize,edge_id_4_loc);
1546  DBGPRINT_INT("el_4_loc",el_ds->lsize,el_4_loc);
1547  DBGPRINT_INT("side_id_4_loc",side_ds->lsize,side_id_4_loc);
1548  DBGPRINT_INT("edge_row_4_id",mesh_->n_edges,edge_row_4_id);
1549  DBGPRINT_INT("el_row_4_id",mesh_->max_elm_id+1,el_row_4_id);
1550  DBGPRINT_INT("side_row_4_id",mesh_->max_side_id+1,side_row_4_id);
1551  */
1552  // convert row_4_id arrays from separate numberings to global numbering of rows
1554  //DBGPRINT_INT("edge_row_4_id",mesh_->n_edges,edge_row_4_id);
1555  //DBGPRINT_INT("el_row_4_id",mesh_->max_elm_id+1,el_row_4_id);
1556  //DBGPRINT_INT("side_row_4_id",mesh_->max_side_id+1,side_row_4_id);
1557 
1558  //lsize = side_ds->lsize() + el_ds->lsize() + edge_ds->lsize();
1559 
1560 
1561  // prepare global_row_4_sub_row
1562 
1563 #ifdef HAVE_BDDCML
1564  if (in_rec.type() == LinSys_BDDC::input_type) {
1565  // auxiliary
1566  Element *el;
1567  int side_row, edge_row;
1568 
1569  //xprintf(Msg,"Compute mapping of local subdomain rows to global rows.\n");
1570 
1571  global_row_4_sub_row = boost::make_shared<LocalToGlobalMap>(rows_ds);
1572 
1573  //
1574  // ordering of dofs
1575  // for each subdomain:
1576  // | velocities (at sides) | pressures (at elements) | L. mult. (at edges) |
1577  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
1578  el = mesh_->element(el_4_loc[i_loc]);
1579  int el_row = row_4_el[el_4_loc[i_loc]];
1580 
1581  global_row_4_sub_row->insert( el_row );
1582 
1583  unsigned int nsides = el->n_sides();
1584  for (unsigned int i = 0; i < nsides; i++) {
1585  int side_row = side_row_4_id[ mh_dh.side_dof( el->side(i) ) ];
1586  int edge_row = row_4_edge[el->side(i)->edge_idx()];
1587 
1588  global_row_4_sub_row->insert( side_row );
1589  global_row_4_sub_row->insert( edge_row );
1590 
1591  // edge neighbouring overlap
1592  //if (edg->neigh_vb != NULL) {
1593  // int neigh_el_row=row_4_el[mesh_->element.index(edg->neigh_vb->element[0])];
1594  //}
1595  }
1596 
1597  for (unsigned int i_neigh = 0; i_neigh < el->n_neighs_vb; i_neigh++) {
1598  // mark this edge
1599  int edge_row = row_4_edge[el->neigh_vb[i_neigh]->edge_idx() ];
1600  global_row_4_sub_row->insert( edge_row );
1601  }
1602  }
1603  global_row_4_sub_row->finalize();
1604  }
1605 #endif // HAVE_BDDCML
1606 
1607  // common to both solvers - create renumbering of unknowns
1608  solver_indices_.reserve(size);
1609  FOR_ELEMENTS(mesh_, ele) {
1610  FOR_ELEMENT_SIDES(ele,si) {
1611  solver_indices_.push_back( side_row_4_id[ mh_dh.side_dof( ele->side(si) ) ] );
1612  }
1613  }
1614  FOR_ELEMENTS(mesh_, ele) {
1615  solver_indices_.push_back( row_4_el[ele.index()] );
1616  }
1617 
1618  unsigned int i_edg=0;
1619  FOR_EDGES(mesh_, edg) {
1620  solver_indices_.push_back( row_4_edge[i_edg++] );
1621  }
1622  ASSERT( solver_indices_.size() == size, "Size of array does not match number of fills.\n" );
1623  //std::cout << "Solve rindices:" << std::endl;
1624  //std::copy( solver_indices_.begin(), solver_indices_.end(), std::ostream_iterator<int>( std::cout, " " ) );
1625 }
1626 
1627 
1628 
1629 void mat_count_off_proc_values(Mat m, Vec v) {
1630  int n, first, last;
1631  const PetscInt *cols;
1632  Distribution distr(v);
1633 
1634  int n_off = 0;
1635  int n_on = 0;
1636  int n_off_rows = 0;
1637  MatGetOwnershipRange(m, &first, &last);
1638  for (int row = first; row < last; row++) {
1639  MatGetRow(m, row, &n, &cols, PETSC_NULL);
1640  bool exists_off = false;
1641  for (int i = 0; i < n; i++)
1642  if (distr.get_proc(cols[i]) != distr.myp())
1643  n_off++, exists_off = true;
1644  else
1645  n_on++;
1646  if (exists_off)
1647  n_off_rows++;
1648  MatRestoreRow(m, row, &n, &cols, PETSC_NULL);
1649  }
1650  //printf("[%d] rows: %d off_rows: %d on: %d off: %d\n",distr.myp(),last-first,n_off_rows,n_on,n_off);
1651 }
1652 
1653 
1654 
1655 
1656 
1657 
1658 
1659 
1660 
1661 
1662 
1663 
1664 // ========================
1665 // unsteady
1666 
1668  : DarcyFlowMH_Steady(mesh_in, in_rec, false)
1669 {
1670  time_ = new TimeGovernor(in_rec.val<Input::Record>("time"));
1671  data_.mark_input_times(this->mark_type());
1672  data_.set_limit_side(LimitSide::right);
1673  data_.set_time(*time_);
1674 
1675  output_object = new DarcyFlowMHOutput(this, in_rec.val<Input::Record>("output"));
1676 
1679 
1680  VecDuplicate(schur0->get_solution(), &previous_solution);
1681  VecCreateMPI(PETSC_COMM_WORLD,rows_ds->lsize(),PETSC_DETERMINE,&(steady_diagonal));
1682  VecDuplicate(steady_diagonal,& new_diagonal);
1683  VecZeroEntries(new_diagonal);
1684  VecDuplicate(schur0->get_rhs(), &steady_rhs);
1685 
1688 
1689 
1690 /*
1691  VecDuplicate(schur0->get_rhs(), &time_term);
1692  */
1693  //setup_time_term();
1694  output_data();
1695 }
1696 
1698 {
1699 
1700  // read inital condition
1701  VecZeroEntries(schur0->get_solution());
1702 
1703  double *local_sol = schur0->get_solution_array();
1704 
1705  // cycle over local element rows
1707 
1708  DBGMSG("Setup with dt: %f\n",time_->dt());
1709  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1710  ele = mesh_->element(el_4_loc[i_loc_el]);
1711  int i_loc_row = i_loc_el + side_ds->lsize();
1712 
1713  // set initial condition
1714  local_sol[i_loc_row] = data_.init_pressure.value(ele->centre(),ele->element_accessor());
1715  }
1716 
1718 
1719 }
1720 
1722  // save diagonal of steady matrix
1723  MatGetDiagonal(schur0->get_matrix(), steady_diagonal);
1724  // save RHS
1725  VecCopy(schur0->get_rhs(), steady_rhs);
1726 
1727 
1728  PetscScalar *local_diagonal;
1729  VecGetArray(new_diagonal,& local_diagonal);
1730 
1732  DBGMSG("Setup with dt: %f\n",time_->dt());
1733  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1734  ele = mesh_->element(el_4_loc[i_loc_el]);
1735  int i_loc_row = i_loc_el + side_ds->lsize();
1736 
1737  // set new diagonal
1738  local_diagonal[i_loc_row]= - data_.storativity.value(ele->centre(), ele->element_accessor()) *
1739  ele->measure() / time_->dt();
1740  }
1741  VecRestoreArray(new_diagonal,& local_diagonal);
1742  MatDiagonalSet(schur0->get_matrix(), new_diagonal, ADD_VALUES);
1743 
1746 }
1747 
1749  START_TIMER("modify system");
1750  if (time_->is_changed_dt() && !schur0->is_matrix_changed()) {
1751  // if time step has changed and setup_time_term not called
1752  MatDiagonalSet(schur0->get_matrix(),steady_diagonal, INSERT_VALUES);
1753 
1754  VecScale(new_diagonal, time_->last_dt()/time_->dt());
1755  MatDiagonalSet(schur0->get_matrix(),new_diagonal, ADD_VALUES);
1757  }
1758 
1759  // modify RHS - add previous solution
1760  VecPointwiseMult(schur0->get_rhs(), new_diagonal, schur0->get_solution());
1761  VecAXPY(schur0->get_rhs(), 1.0, steady_rhs);
1763 
1764  // swap solutions
1765  VecSwap(previous_solution, schur0->get_solution());
1766 }
1767 
1768 // ========================
1769 // unsteady
1770 
1772  : DarcyFlowMH_Steady(mesh_in, in_rec,false)
1773 {
1774  time_ = new TimeGovernor(in_rec.val<Input::Record>("time"));
1775  data_.mark_input_times(this->mark_type());
1776 
1777 
1778  data_.set_limit_side(LimitSide::right);
1779  data_.set_time(*time_);
1780 
1781  output_object = new DarcyFlowMHOutput(this, in_rec.val<Input::Record>("output"));
1782 
1785  VecDuplicate(schur0->get_solution(), &previous_solution);
1786  VecCreateMPI(PETSC_COMM_WORLD,rows_ds->lsize(),PETSC_DETERMINE,&(steady_diagonal));
1787  VecDuplicate(steady_diagonal,& new_diagonal);
1788  VecDuplicate(schur0->get_rhs(), &steady_rhs);
1789 
1792  output_data();
1793 }
1794 
1796 {
1797  VecZeroEntries(schur0->get_solution());
1798 
1799  // apply initial condition
1800  // cycle over local element rows
1801 
1803  double init_value;
1804 
1805  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1806  ele = mesh_->element(el_4_loc[i_loc_el]);
1807 
1808  init_value = data_.init_pressure.value(ele->centre(), ele->element_accessor());
1809 
1810  FOR_ELEMENT_SIDES(ele,i) {
1811  int edge_row = row_4_edge[ele->side(i)->edge_idx()];
1812  VecSetValue(schur0->get_solution(),edge_row,init_value/ele->n_sides(),ADD_VALUES);
1813  }
1814  }
1815  VecAssemblyBegin(schur0->get_solution());
1816  VecAssemblyEnd(schur0->get_solution());
1817 
1819 }
1820 
1821 
1822 
1824 {
1825  // save diagonal of steady matrix
1826  MatGetDiagonal(schur0->get_matrix(), steady_diagonal);
1827  // save RHS
1828  VecCopy(schur0->get_rhs(),steady_rhs);
1829 
1830  VecZeroEntries(new_diagonal);
1831 
1832  // modify matrix diagonal
1833  // cycle over local element rows
1835  double init_value;
1836 
1837  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1838  ele = mesh_->element(el_4_loc[i_loc_el]);
1839 
1840  init_value = data_.init_pressure.value(ele->centre(), ele->element_accessor());
1841 
1842  FOR_ELEMENT_SIDES(ele,i) {
1843  int edge_row = row_4_edge[ele->side(i)->edge_idx()];
1844  // set new diagonal
1845  VecSetValue(new_diagonal,edge_row, - ele->measure() *
1846  data_.storativity.value(ele->centre(), ele->element_accessor()) *
1847  data_.cross_section.value(ele->centre(), ele->element_accessor()) /
1848  time_->dt() / ele->n_sides(),ADD_VALUES);
1849  }
1850  }
1851  VecAssemblyBegin(new_diagonal);
1852  VecAssemblyEnd(new_diagonal);
1853 
1854  MatDiagonalSet(schur0->get_matrix(),new_diagonal, ADD_VALUES);
1855 
1858 }
1859 
1861  START_TIMER("modify system");
1862  if (time_->is_changed_dt() && !schur0->is_matrix_changed()) {
1863  // if time step has changed and setup_time_term not called
1864 
1865  MatDiagonalSet(schur0->get_matrix(),steady_diagonal, INSERT_VALUES);
1866  VecScale(new_diagonal, time_->last_dt()/time_->dt());
1867  MatDiagonalSet(schur0->get_matrix(),new_diagonal, ADD_VALUES);
1869  }
1870 
1871  // modify RHS - add previous solution
1872  VecPointwiseMult(schur0->get_rhs(), new_diagonal, schur0->get_solution());
1873  VecAXPY(schur0->get_rhs(), 1.0, steady_rhs);
1875 
1876  // swap solutions
1877  VecSwap(previous_solution, schur0->get_solution());
1878 }
1879 
1880 
1881 
1883  int i_loc, side_row, loc_edge_row, i;
1884  Edge* edg;
1885  ElementIter ele;
1886  double new_pressure, old_pressure, time_coef;
1887 
1888  PetscScalar *loc_prev_sol;
1889  VecGetArray(previous_solution, &loc_prev_sol);
1890 
1891  // modify side fluxes in parallel
1892  // for every local edge take time term on diagonal and add it to the corresponding flux
1893  for (i_loc = 0; i_loc < edge_ds->lsize(); i_loc++) {
1894 
1895  edg = &( mesh_->edges[ edge_4_loc[i_loc] ] );
1896  loc_edge_row = side_ds->lsize() + el_ds->lsize() + i_loc;
1897 
1898  new_pressure = (schur0->get_solution_array())[loc_edge_row];
1899  old_pressure = loc_prev_sol[loc_edge_row];
1900  FOR_EDGE_SIDES(edg,i) {
1901  ele = edg->side(i)->element();
1902  side_row = side_row_4_id[ mh_dh.side_dof( edg->side(i) ) ];
1903  time_coef = - ele->measure() *
1904  data_.cross_section.value(ele->centre(), ele->element_accessor()) *
1905  data_.storativity.value(ele->centre(), ele->element_accessor()) /
1906  time_->dt() / ele->n_sides();
1907  VecSetValue(schur0->get_solution(), side_row, time_coef * (new_pressure - old_pressure), ADD_VALUES);
1908  }
1909  }
1910  VecRestoreArray(previous_solution, &loc_prev_sol);
1911 
1912  VecAssemblyBegin(schur0->get_solution());
1913  VecAssemblyEnd(schur0->get_solution());
1914 
1915  //DarcyFlowMH_Steady::postprocess();
1916 
1917  int side_rows[4];
1918  double values[4];
1919  //ElementFullIter ele = ELEMENT_FULL_ITER(mesh_, NULL);
1920 
1921  // modify side fluxes in parallel
1922  // for every local edge take time term on digonal and add it to the corresponding flux
1923 
1924  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
1925  ele = mesh_->element(el_4_loc[i_loc]);
1926  FOR_ELEMENT_SIDES(ele,i) {
1927  side_rows[i] = side_row_4_id[ mh_dh.side_dof( ele->side(i) ) ];
1928  values[i] = -1.0 * ele->measure() *
1929  data_.cross_section.value(ele->centre(), ele->element_accessor()) *
1931  ele->n_sides();
1932  }
1933  VecSetValues(schur0->get_solution(), ele->n_sides(), side_rows, values, ADD_VALUES);
1934  }
1935  VecAssemblyBegin(schur0->get_solution());
1936  VecAssemblyEnd(schur0->get_solution());
1937 }
1938 
1939 //-----------------------------------------------------------------------------
1940 // vim: set cindent: