Flow123d
darcy_flow_mh.cc
Go to the documentation of this file.
1 /*!
2  *
3  * Copyright (C) 2007 Technical University of Liberec. All rights reserved.
4  *
5  * Please make a following refer to Flow123d on your project site if you use the program for any purpose,
6  * especially for academic research:
7  * Flow123d, Research Centre: Advanced Remedial Technologies, Technical University of Liberec, Czech Republic
8  *
9  * This program is free software; you can redistribute it and/or modify it under the terms
10  * of the GNU General Public License version 3 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
13  * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14  * See the GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License along with this program; if not,
17  * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 021110-1307, USA.
18  *
19  *
20  * $Id$
21  * $Revision$
22  * $LastChangedBy$
23  * $LastChangedDate$
24  *
25  *
26  * @file
27  * @ingroup flow
28  * @brief Setup and solve linear system of mixed-hybrid discretization of the linear
29  * porous media flow with possible preferential flow in fractures and channels.
30  *
31  */
32 
33 #include "petscmat.h"
34 #include "petscviewer.h"
35 #include "petscerror.h"
36 #include <armadillo>
37 #include <boost/foreach.hpp>
38 
39 
40 #include "system/system.hh"
41 
42 #include "mesh/mesh.h"
43 #include "mesh/intersection.hh"
44 #include "mesh/partitioning.hh"
45 #include "la/distribution.hh"
46 #include "la/linsys.hh"
47 #include "la/linsys_PETSC.hh"
48 #include "la/linsys_BDDC.hh"
49 //#include "la/solve.h"
50 #include "la/schur.hh"
51 #include "la/sparse_graph.hh"
53 
54 #include "system/file_path.hh"
55 #include "flow/mh_fe_values.hh"
56 #include "flow/darcy_flow_mh.hh"
57 
59 
60 #include "fem/mapping_p1.hh"
61 #include "fem/fe_p.hh"
62 #include "fem/fe_values.hh"
64 
65 #include <limits>
66 #include <set>
67 #include <vector>
68 #include <iostream>
69 #include <iterator>
70 #include <algorithm>
71 
73 
74 #include "fields/field_base.hh"
75 #include "fields/field.hh"
76 #include "fields/field_values.hh"
77 #include "system/sys_profiler.hh"
78 
79 
80 
81 
82 namespace it = Input::Type;
83 
85  = it::Selection("MH_MortarMethod")
86  .add_value(NoMortar, "None", "Mortar space: P0 on elements of lower dimension.")
87  .add_value(MortarP0, "P0", "Mortar space: P0 on elements of lower dimension.")
88  .add_value(MortarP1, "P1", "Mortar space: P1 on intersections, using non-conforming pressures.");
89 
90 
92  it::Selection("EqData_bc_Type")
93  .add_value(none, "none", "Homogeneous Neoumann BC.")
94  .add_value(dirichlet, "dirichlet")
95  .add_value(neumann, "neumann")
96  .add_value(robin, "robin")
97  .add_value(total_flux, "total_flux");
98 
99 //new input type with FIELDS
101  it::AbstractRecord("DarcyFlowMH", "Mixed-Hybrid solver for saturated Darcy flow.")
102  .declare_key("n_schurs", it::Integer(0,2), it::Default("2"),
103  "Number of Schur complements to perform when solving MH sytem.")
105  "Linear solver for MH problem.")
107  "Parameters of output form MH module.")
108  .declare_key("mortar_method", mh_mortar_selection, it::Default("None"),
109  "Method for coupling Darcy flow between dimensions." );
110 
111 
113  = it::Record("Steady_MH", "Mixed-Hybrid solver for STEADY saturated Darcy flow.")
114  .derive_from(DarcyFlowMH::input_type)
115  .declare_key("input_fields", it::Array(
116  DarcyFlowMH_Steady::EqData().make_field_descriptor_type("DarcyFlowMH")
117  .declare_key("bc_piezo_head", FieldBase< 3, FieldValue<3>::Scalar >::get_input_type(), "Boundary condition for pressure as piezometric head." )
118  .declare_key("init_piezo_head", FieldBase< 3, FieldValue<3>::Scalar >::get_input_type(), "Initial condition for pressure as piezometric head." )
119  .declare_key(OldBcdInput::flow_old_bcd_file_key(), it::FileName::input(), "File with mesh dependent boundary conditions (obsolete).")
120  ), it::Default::obligatory(), "" );
121 
123  = it::Record("Unsteady_MH", "Mixed-Hybrid solver for unsteady saturated Darcy flow.")
124  .derive_from(DarcyFlowMH::input_type)
126  "Time governor setting for the unsteady Darcy flow model.")
127  .copy_keys(DarcyFlowMH_Steady::input_type);
128 
129 
131  = it::Record("Unsteady_LMH", "Lumped Mixed-Hybrid solver for unsteady saturated Darcy flow.")
132  .derive_from(DarcyFlowMH::input_type)
134  "Time governor setting for the unsteady Darcy flow model.")
135  .copy_keys(DarcyFlowMH_Steady::input_type);
136 
137 
138 
139 
140 // gravity vector + constant shift of values
// NOTE(review): a 4-component vector — components 0-2 are the gravity direction
// (0,0,-1); the 4th component is the constant shift mentioned above.
141 arma::vec4 DarcyFlowMH::EqData::gravity_=arma::vec4("0 0 -1 0");
142 
143 
// NOTE(review): doxygen-listing artifact — the constructor signature line
// (presumably DarcyFlowMH::EqData::EqData(), listing line 144) is missing from
// this extract. The body registers all input fields of the Darcy MH equation
// and groups them into subsets used to detect what must be reassembled.
145 {
146  gravity_ = arma::vec4("0 0 -1 0");
147 
 // Bulk fields with default values.
148  ADD_FIELD(anisotropy, "Anisotropy of the conductivity tensor.", "1.0" );
149  ADD_FIELD(cross_section, "Complement dimension parameter (cross section for 1D, thickness for 2D).", "1.0" );
150  ADD_FIELD(conductivity, "Isotropic conductivity scalar.", "1.0" );
151  ADD_FIELD(sigma, "Transition coefficient between dimensions.", "1.0");
152  ADD_FIELD(water_source_density, "Water source density.", "0.0");
153 
 // Boundary fields; lines 155-156 of the original (dropped by the extraction)
 // presumably attached the bc_type selection — TODO confirm against repository.
154  ADD_FIELD(bc_type,"Boundary condition type, possible values:", "\"none\"" );
157 
158  ADD_FIELD(bc_pressure,"Dirichlet BC condition value for pressure.");
161 
162 
163  ADD_FIELD(bc_flux,"Flux in Neumman or Robin boundary condition.");
166 
167  ADD_FIELD(bc_robin_sigma,"Conductivity coefficient in Robin boundary condition.");
170 
171  //these are for unsteady
172  ADD_FIELD(init_pressure, "Initial condition as pressure", "0.0" );
173  ADD_FIELD(storativity,"Storativity.", "1.0" );
174 
 // Field subsets: which fields feed the time term, the main matrix and the RHS.
175  time_term_fields = this->subset({"storativity"});
176  main_matrix_fields = this->subset({"anisotropy", "conductivity", "cross_section", "sigma", "bc_type", "bc_robin_sigma"});
177  rhs_fields = this->subset({"water_source_density", "bc_pressure", "bc_flux"});
178 
179 }
180 
181 
182 
183 
184 //=============================================================================
185 // CREATE AND FILL GLOBAL MH MATRIX OF THE WATER MODEL
186 // - do it in parallel:
187 // - initial distribution of elements, edges
188 //
189 /*! @brief CREATE AND FILL GLOBAL MH MATRIX OF THE WATER MODEL
190  *
191  * Parameters {Solver,NSchurs} number of performed Schur
192  * complements (0,1,2) for water flow MH-system
193  *
194  */
195 //=============================================================================
/// Constructor of the steady MH Darcy flow solver.
/// @param mesh_in  computational mesh the equation lives on
/// @param in_rec   input record with solver configuration
/// @param make_tg  when true, a (steady) TimeGovernor and the output object are created here
196 DarcyFlowMH_Steady::DarcyFlowMH_Steady(Mesh &mesh_in, const Input::Record in_rec, bool make_tg )
197 : DarcyFlowMH(mesh_in, in_rec)
198 
199 {
200  using namespace Input;
201  START_TIMER("Darcy constructor");
202 
203  this->eq_data_ = &data_;
204 
205  //connecting data fields with mesh
206  START_TIMER("data init");
207  data_.set_mesh(mesh_in);
208  data_.set_input_list( in_rec.val<Input::Array>("input_fields") );
209 
210 
211 
212 
213  END_TIMER("data init");
214 
215 
 // total number of dofs of the MH system: element + side + edge pressures/fluxes
216  size = mesh_->n_elements() + mesh_->n_sides() + mesh_->n_edges();
217  n_schur_compls = in_rec.val<int>("n_schurs");
218 
219  solution = NULL;
220  schur0 = NULL;
221 
222 
223  mortar_method_= in_rec.val<MortarMethod>("mortar_method");
224  if (mortar_method_ != NoMortar) {
 // NOTE(review): listing line 225 was dropped by the extraction — the body of
 // this branch (presumably intersection setup for the mortar coupling) is
 // missing here; confirm against the repository.
226  }
227 
228  mh_dh.reinit(mesh_);
229 
230  prepare_parallel(in_rec.val<AbstractRecord>("solver"));
231 
232  //side_ds->view( std::cout );
233  //el_ds->view( std::cout );
234  //edge_ds->view( std::cout );
235  //rows_ds->view( std::cout );
236 
237 
238 
239 
240  if (make_tg) {
241  // steady time governor
242  time_ = new TimeGovernor();
244  data_.set_limit_side(LimitSide::right);
245  data_.set_time(*time_);
246 
247  output_object = new DarcyFlowMHOutput(this, in_rec.val<Input::Record>("output"));
249 
250  //make_serial_scatter();
251  }
252 
253 
254 }
255 
256 
257 
258 
259 //=============================================================================
260 // COMPOSE and SOLVE WATER MH System possibly through Schur complements
261 //=============================================================================
// NOTE(review): doxygen-listing artifact — the enclosing function's signature
// (listing line 262) is missing from this extract; per the banner above, this
// is the routine that composes and solves the MH system (presumably
// DarcyFlowMH_Steady::update() — TODO confirm). It advances time (unless
// steady), solves the linear system, post-processes and writes output.
263  START_TIMER("Solving MH system");
264  F_ENTRY;
265 
266 
267 
268  if (time_->is_end()) return;
269 
 // unsteady case: advance time before solving this step
270  if (! time_->is_steady()) time_->next_time();
271 
272 
273 
275  int convergedReason = schur0->solve();
276 
277  xprintf(MsgLog, "Linear solver ended with reason: %d \n", convergedReason );
278  ASSERT( convergedReason >= 0, "Linear solver failed to converge. Convergence reason %d \n", convergedReason );
279 
280  this -> postprocess();
281 
283 
284  output_data();
285 
 // steady case: mark the (single) step as done after solving
286  if (time_->is_steady()) time_->next_time();
287 }
288 
// NOTE(review): signature line (listing 289) missing from this extract; the
// timer tag identifies this as DarcyFlowMH_Steady::postprocess(). The whole
// source-term correction of side fluxes is currently commented out, so the
// function is effectively a no-op apart from the timer.
290 {
291  START_TIMER("postprocess");
292  int side_rows[4];
293  double values[4];
295 
296  // modify side fluxes in parallel
297  // for every local edge take time term on diagonal and add it to the corresponding flux
298  /*
299  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
300  ele = mesh_->element(el_4_loc[i_loc]);
301  FOR_ELEMENT_SIDES(ele,i) {
302  side_rows[i] = side_row_4_id[ mh_dh.side_dof( ele->side(i) ) ];
303  values[i] = -1.0 * ele->measure() *
304  data.cross_section.value(ele->centre(), ele->element_accessor()) *
305  data.water_source_density.value(ele->centre(), ele->element_accessor()) /
306  ele->n_sides();
307  }
308  VecSetValues(schur0->get_solution(), ele->n_sides(), side_rows, values, ADD_VALUES);
309  }
310  VecAssemblyBegin(schur0->get_solution());
311  VecAssemblyEnd(schur0->get_solution());
312  */
313 }
314 
315 
// NOTE(review): signature line (listing 316) missing — presumably
// DarcyFlowMH_Steady::output_data() (it is called under that name from the
// solve routine above). Prints time governor info and delegates to the
// output object.
317  time_->view("DARCY"); //time governor information output
318  this->output_object->output();
319 }
320 
// NOTE(review): signature line (listing 321) missing — a getter that forwards
// the solution precision of the top-level linear system (schur0).
322 {
323  return schur0->get_solution_precision();
324 }
325 
326 
/// Returns a pointer to (and size of) the solution reordered into application
/// (mesh) ordering. The reordered copy is cached in solution_.
/// @param vec       out: pointer to the first element of the cached solution
/// @param vec_size  out: number of entries in the cached solution
327 void DarcyFlowMH_Steady::get_solution_vector(double * &vec, unsigned int &vec_size)
328 {
329  // TODO: make class for vectors (wrapper for PETSC or other) derived from LazyDependency
330  // and use its mechanism to manage dependency between vectors
332  // // scatter solution to all procs
333  // VecScatterBegin(par_to_all, schur0->get_solution(), sol_vec,
334  // INSERT_VALUES, SCATTER_FORWARD);
335  // VecScatterEnd(par_to_all, schur0->get_solution(), sol_vec,
336  // INSERT_VALUES, SCATTER_FORWARD);
337  // solution_changed_for_scatter=false;
338 
339  std::vector<double> sol_disordered(this->size);
340  schur0 -> get_whole_solution( sol_disordered );
341 
342  // reorder solution to application ordering
343  if ( solution_.empty() ) solution_.resize( this->size, 0. );
344  for ( int i = 0; i < this->size; i++ ) {
345  solution_[i] = sol_disordered[solver_indices_[i]];
346  }
 // NOTE(review): orphan closing brace — the matching opening line (listing 331,
 // presumably an `if (solution_changed_for_scatter) {` guard) was dropped by
 // the doxygen extraction; confirm against the repository.
347  }
348 
349  vec=&(solution_[0]);
350  vec_size = solution_.size();
351  ASSERT(vec != NULL, "Requested solution is not allocated!\n");
352 }
353 
/// Intended to expose the element partitioning vector; the whole body is
/// currently commented out, so this is a no-op stub — the out-parameters are
/// left untouched.
354 void DarcyFlowMH_Steady::get_partitioning_vector(int * &elem_part, unsigned &lelem_part)
355 {
356 
357 // elem_part=&(element_part[0]);
358 // lelem_part = element_part.size();
359 // ASSERT(elem_part != NULL, "Requested vector is not allocated!\n");
360 }
361 
// NOTE(review): signature line (listing 362) missing — by the body this is the
// getter returning the parallel (PETSc Vec) solution of the top-level system.
363 {
364  vec=schur0->get_solution();
365  ASSERT(vec != NULL, "Requested solution is not allocated!\n");
366 }
367 
368 
369 
370 // ===========================================================================================
371 //
372 // MATRIX ASSEMBLY - we use abstract assembly routine, where LS Mat/Vec SetValues
373 // are in fact pointers to allocating or filling functions - this is governed by Linsystem roitunes
374 //
375 // =======================================================================================
376 
377 
378 // ******************************************
379 // ABSTRACT ASSEMBLY OF MH matrix
380 // TODO: the matrix should be assembled separately for each dimension (volume, fractures, fracture intersections);
381 // connections should be assembled by a loop over connections, and parallelized according to
382 // the distribution of the lower-dimension elements;
383 // for that we first need to merge with the JK version, to know what is going on in transport, and
384 // to rework mesh and neighbouring
385 // *****************************************
// NOTE(review): signature line (listing 386) missing from this extract —
// presumably DarcyFlowMH_Steady::assembly_steady_mh_matrix(); that name is
// called from the linear-system creation routine below. Assembles (or, when
// the system is not yet preallocated, only allocates the sparsity of) the
// saddle-point MH matrix: blocks A (side-side), B (element-side), C
// (side-edge), compatible dimension-coupling terms, boundary conditions, and
// optional zero fill-in for Schur complement allocation.
387  LinSys *ls = schur0;
389  MHFEValues fe_values;
390 
391  // We use FESideValues for calculating normal vectors.
392  // For initialization of FESideValues some auxiliary objects are needed.
393  MappingP1<1,3> map1;
394  MappingP1<2,3> map2;
395  MappingP1<3,3> map3;
396  QGauss<0> q0(1);
397  QGauss<1> q1(1);
398  QGauss<2> q2(1);
399  FE_P_disc<1,1,3> fe1;
400  FE_P_disc<0,2,3> fe2;
401  FE_P_disc<0,3,3> fe3;
402  FESideValues<1,3> fe_side_values1(map1, q0, fe1, update_normal_vectors);
403  FESideValues<2,3> fe_side_values2(map2, q1, fe2, update_normal_vectors);
404  FESideValues<3,3> fe_side_values3(map3, q2, fe3, update_normal_vectors);
405 
406  class Boundary *bcd;
407  class Neighbour *ngh;
408 
 // first pass (before preallocation) only allocates; second pass fills values
409  bool fill_matrix = schur0->is_preallocated();
410  //DBGMSG("fill_matrix: %d\n", fill_matrix);
411  int el_row, side_row, edge_row;
412  int tmp_rows[100];
413  //int nsides;
414  int side_rows[4], edge_rows[4]; // rows for sides and edges of one element
415  double local_vb[4]; // 2x2 matrix
416  double zeros[1000]; // to make space for second schur complement, max. 10 neighbour edges of one el.
417  double minus_ones[4] = { -1.0, -1.0, -1.0, -1.0 };
418  double loc_side_rhs[4];
419  std::map<int,double> subdomain_diagonal_map;
420  F_ENTRY;
421 
422  //DBGPRINT_INT("side_row_4_id",mesh->max_side_id+1,side_row_4_id);
423  //DBGPRINT_INT("el_row_4_id",mesh->max_elm_id+1,el_row_4_id);
424  //DBGPRINT_INT("edge_row_4_id",mesh->max_edg_id+1,edge_row_4_id);
425  //DBGPRINT_INT("el_id_4_loc",el_ds->lsize(),el_id_4_loc);
426 
427  SET_ARRAY_ZERO(zeros,1000);
 // loop over locally owned elements
428  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
429 
430  ele = mesh_->element(el_4_loc[i_loc]);
431  el_row = row_4_el[el_4_loc[i_loc]];
432  unsigned int nsides = ele->n_sides();
433  if (fill_matrix) fe_values.update(ele, data_.anisotropy, data_.cross_section, data_.conductivity);
434  double cross_section = data_.cross_section.value(ele->centre(), ele->element_accessor());
435 
436  for (unsigned int i = 0; i < nsides; i++) {
437  side_row = side_rows[i] = side_row_4_id[ mh_dh.side_dof( ele->side(i) ) ];
438  edge_row = edge_rows[i] = row_4_edge[ele->side(i)->edge_idx()];
439  bcd=ele->side(i)->cond();
440 
441  // gravity term on RHS
442  loc_side_rhs[i] = (ele->centre()[ 2 ] - ele->side(i)->centre()[ 2 ]);
443 
444  // set block C and C': side-edge, edge-side
445  double c_val = 1.0;
446 
447  if (bcd) {
448  ElementAccessor<3> b_ele = bcd->element_accessor();
449  EqData::BC_Type type = (EqData::BC_Type)data_.bc_type.value(b_ele.centre(), b_ele);
450  //DBGMSG("BCD id: %d sidx: %d type: %d\n", ele->id(), i, type);
451  if ( type == EqData::none) {
452  // homogeneous neumann
453  } else if ( type == EqData::dirichlet ) {
454  c_val = 0.0;
455  double bc_pressure = data_.bc_pressure.value(b_ele.centre(), b_ele);
456  loc_side_rhs[i] -= bc_pressure;
457  ls->rhs_set_value(edge_row, -bc_pressure);
458  ls->mat_set_value(edge_row, edge_row, -1.0);
459 
460  } else if ( type == EqData::neumann) {
461  double bc_flux = data_.bc_flux.value(b_ele.centre(), b_ele);
462  ls->rhs_set_value(edge_row, bc_flux * bcd->element()->measure() * cross_section);
463  //DBGMSG("neumann edge_row, ele_index,el_idx: %d \t %d \t %d\n", edge_row, ele->index(), ele->side(i)->el_idx());
464 
465  } else if ( type == EqData::robin) {
466  double bc_pressure = data_.bc_pressure.value(b_ele.centre(), b_ele);
467  double bc_sigma = data_.bc_robin_sigma.value(b_ele.centre(), b_ele);
468  ls->rhs_set_value(edge_row, -bcd->element()->measure() * bc_sigma * bc_pressure * cross_section );
469  ls->mat_set_value(edge_row, edge_row, -bcd->element()->measure() * bc_sigma * cross_section );
470 
471  } else {
472  xprintf(UsrErr, "BC type not supported.\n");
473  }
474  }
475  ls->mat_set_value(side_row, edge_row, c_val);
476  ls->mat_set_value(edge_row, side_row, c_val);
477 
478  // update matrix for weights in BDDCML
479  if ( typeid(*ls) == typeid(LinSys_BDDC) ) {
480  double val_side = (fe_values.local_matrix())[i*nsides+i];
481  double val_edge = -1./ (fe_values.local_matrix())[i*nsides+i];
482  subdomain_diagonal_map.insert( std::make_pair( side_row, val_side ) );
483  subdomain_diagonal_map.insert( std::make_pair( edge_row, val_edge ) );
484  }
485  }
486 
487  ls->rhs_set_values(nsides, side_rows, loc_side_rhs);
488 
489 
490  // set block A: side-side on one element - block diagonal matrix
491 
492  //std::cout << "subMat in flow" << std::endl;
493  //for ( unsigned i = 0; i < nsides; i++) {
494  // for ( unsigned j = 0; j < nsides; j++) {
495  // std::cout << fe_values.local_matrix()[i*nsides+j] << " ";
496  // }
497  // std::cout << std::endl;
498  //}
499 
500  ls->mat_set_values(nsides, side_rows, nsides, side_rows, fe_values.local_matrix() );
501  // set block B, B': element-side, side-element
502  ls->mat_set_values(1, &el_row, nsides, side_rows, minus_ones);
503  ls->mat_set_values(nsides, side_rows, 1, &el_row, minus_ones);
504 
505 
506  // set sources
507  ls->rhs_set_value(el_row, -1.0 * ele->measure() *
508  data_.cross_section.value(ele->centre(), ele->element_accessor()) *
509  data_.water_source_density.value(ele->centre(), ele->element_accessor()) );
510 
511 
512  // D block: non-compatible connections and diagonal: element-element
513 
514  ls->mat_set_value(el_row, el_row, 0.0); // maybe this should be in virtual block for schur preallocation
515 
516  // D, E',E block: compatible connections: element-edge
517 
518  for (unsigned int i = 0; i < ele->n_neighs_vb; i++) {
519  // every compatible connection adds a 2x2 matrix involving
520  // current element pressure and a connected edge pressure
521  ngh= ele->neigh_vb[i];
522  tmp_rows[0]=el_row;
523  tmp_rows[1]=row_4_edge[ ngh->edge_idx() ];
524 
525  // compute normal vector to side
526  arma::vec3 nv;
527  ElementFullIter ele_higher = mesh_->element.full_iter(ngh->side()->element());
528  switch (ele_higher->dim()) {
529  case 1:
530  fe_side_values1.reinit(ele_higher, ngh->side()->el_idx());
531  nv = fe_side_values1.normal_vector(0);
532  break;
533  case 2:
534  fe_side_values2.reinit(ele_higher, ngh->side()->el_idx());
535  nv = fe_side_values2.normal_vector(0);
536  break;
537  case 3:
538  fe_side_values3.reinit(ele_higher, ngh->side()->el_idx());
539  nv = fe_side_values3.normal_vector(0);
540  break;
541  }
542 
 // transition coefficient between dimensions: sigma * 2k * (K n . n) * c2^2 / c1 * |side|
543  double value = data_.sigma.value( ele->centre(), ele->element_accessor()) *
544  2*data_.conductivity.value( ele->centre(), ele->element_accessor()) *
545  arma::dot(data_.anisotropy.value( ele->centre(), ele->element_accessor())*nv, nv) *
546  data_.cross_section.value( ngh->side()->centre(), ele_higher->element_accessor() ) * // cross-section of higher dim. (2d)
547  data_.cross_section.value( ngh->side()->centre(), ele_higher->element_accessor() ) /
548  data_.cross_section.value( ele->centre(), ele->element_accessor() ) * // crossection of lower dim.
549  ngh->side()->measure();
550 
551 
552  local_vb[0] = -value; local_vb[1] = value;
553  local_vb[2] = value; local_vb[3] = -value;
554 
555  ls->mat_set_values(2, tmp_rows, 2, tmp_rows, local_vb);
556 
557  // update matrix for weights in BDDCML
558  if ( typeid(*ls) == typeid(LinSys_BDDC) ) {
559  int ind = tmp_rows[1];
560  double new_val = value;
561  std::map<int,double>::iterator it = subdomain_diagonal_map.find( ind );
562  if ( it != subdomain_diagonal_map.end() ) {
563  new_val = new_val + it->second;
564  }
565  subdomain_diagonal_map.insert( std::make_pair( ind, new_val ) );
566  }
567 
568  if (n_schur_compls == 2) {
569  // for 2. Schur: N dim edge is connected with N dim element =>
570  // there are nz between N dim edge and N-1 dim edges of the element
571 
572  ls->mat_set_values(nsides, edge_rows, 1, tmp_rows+1, zeros);
573  ls->mat_set_values(1, tmp_rows+1, nsides, edge_rows, zeros);
574 
575  // save all global edge indices to higher positions
576  tmp_rows[2+i] = tmp_rows[1];
577  }
578  }
579 
580 
581  // add virtual values for schur complement allocation
582  switch (n_schur_compls) {
583  case 2:
584  // Connections between edges of N+1 dim. elements neighboring with actual N dim element 'ele'
585  ASSERT(ele->n_neighs_vb*ele->n_neighs_vb<1000, "Too many values in E block.");
586  ls->mat_set_values(ele->n_neighs_vb, tmp_rows+2,
587  ele->n_neighs_vb, tmp_rows+2, zeros);
588 
589  // ls->mat_set_values(nsides, edge_rows, ele->e_row_count, tmp_rows, zeros);
590  // ls->mat_set_values(ele->e_row_count, tmp_rows, nsides, edge_rows, zeros);
 // intentional fallthrough: case 1 allocation is also needed for case 2
591  case 1: // included also for case 2
592  // -(C')*(A-)*B block and its transpose conect edge with its elements
593  ls->mat_set_values(1, &el_row, nsides, edge_rows, zeros);
594  ls->mat_set_values(nsides, edge_rows, 1, &el_row, zeros);
595  // -(C')*(A-)*C block conect all edges of every element
596  ls->mat_set_values(nsides, edge_rows, nsides, edge_rows, zeros);
597  }
598  }
599 
600  if ( typeid(*ls) == typeid(LinSys_BDDC) ) {
601  ls->load_diagonal( subdomain_diagonal_map );
602  }
603 
604  //if (! mtx->ins_mod == ALLOCATE ) {
605  // MatAssemblyBegin(mtx->A,MAT_FINAL_ASSEMBLY);
606  // MatAssemblyEnd(mtx->A,MAT_FINAL_ASSEMBLY);
607  // }
608  // set block F - diagonal: edge-edge from Newton BC
609  // also Dirichlet BC
610  /*
611  for (i_loc = 0; i_loc < edge_ds->lsize(); i_loc++) {
612  edge_row = row_4_edge[edge_4_loc[i_loc]];
613  EdgeFullIter edg = mesh_->edge(edge_4_loc[i_loc]);
614 
615  //xprintf(Msg,"F: %d %f\n",old_4_new[edge_row],edg->f_val);
616  //ls->mat_set_value(edge_row, edge_row, edg->f_val);
617  //ls->rhs_set_value(edge_row, edg->f_rhs);
618  }*/
619 
620 
 // dimension-coupling via mortar spaces, if requested in input
621  if (mortar_method_ == MortarP0) {
622  P0_CouplingAssembler(*this).assembly(*ls);
623  } else if (mortar_method_ == MortarP1) {
624  P1_CouplingAssembler(*this).assembly(*ls);
625  }
626 }
627 
// NOTE(review): the first half of this signature (listing line 628, presumably
// `void P0_CouplingAssembler::pressure_diff(unsigned int i_ele,`) was dropped
// by the doxygen extraction. For element i_ele of the current master list the
// function fills: the edge dofs of the element, its type (0 = master,
// 1 = slave), the signed measure `delta`, and Dirichlet values (negated dof
// index marks a Dirichlet-constrained entry).
629  vector<int> &dofs, unsigned int &ele_type, double &delta, arma::vec &dirichlet) {
630 
631  const Element *ele;
632 
633  if (i_ele == ml_it_->size() ) { // master element .. 1D
634  ele_type = 0;
635  delta = -delta_0;
636  ele=master_;
637  } else {
638  ele_type = 1;
639  const Intersection &isect=intersections_[ (*ml_it_)[i_ele] ];
640  delta = isect.intersection_true_size();
641  ele = isect.slave_iter();
642  }
643 
644  dofs.resize(ele->n_sides());
645 
646  for(unsigned int i_side=0; i_side < ele->n_sides(); i_side++ ) {
647  dofs[i_side]=darcy_.row_4_edge[ele->side(i_side)->edge_idx()];
648  Boundary * bcd = ele->side(i_side)->cond();
649  if (bcd) {
650  dirichlet.resize(ele->n_sides());
651  ElementAccessor<3> b_ele = bcd->element_accessor();
652  DarcyFlowMH::EqData::BC_Type type = (DarcyFlowMH::EqData::BC_Type)darcy_.data_.bc_type.value(b_ele.centre(), b_ele);
653  //DBGMSG("bcd id: %d sidx: %d type: %d\n", ele->id(), i_side, type);
654  if (type == DarcyFlowMH::EqData::dirichlet) {
655  //DBGMSG("Dirichlet: %d\n", ele->index());
 // negative dof index flags a Dirichlet-constrained entry for LinSys::set_values
656  dofs[i_side] = -dofs[i_side];
657  double bc_pressure = darcy_.data_.bc_pressure.value(b_ele.centre(), b_ele);
658  dirichlet[i_side] = bc_pressure;
659  }
660  }
661  }
662 
663 }
664 
665 /**
666  * Works well but there is large error next to the boundary.
667  */
// NOTE(review): signature line (listing 668, presumably
// `void P0_CouplingAssembler::assembly(LinSys &ls)`) missing from this
// extract. Assembles the P0 mortar coupling: for every master element it adds
// the pairwise coupling contributions between master and slave trace dofs,
// scaled by -sigma * delta_i * delta_j / delta_0.
669  double delta_i, delta_j;
670  arma::mat product;
671  arma::vec dirichlet_i, dirichlet_j;
672  unsigned int ele_type_i, ele_type_j;
673 
674  unsigned int i,j;
675  vector<int> dofs_i,dofs_j;
676 
677  for(ml_it_ = master_list_.begin(); ml_it_ != master_list_.end(); ++ml_it_) {
678 
679  if (ml_it_->size() == 0) continue; // skip empty masters
680 
681 
682  // on the intersection element we consider
683  // * intersection dofs for master and slave
684  // those are dofs of the space into which we interpolate
685  // base functions from individual master and slave elements
686  // For the master dofs both are usually equivalent.
687  // * original dofs - for master same as intersection dofs, for slave
688  // all dofs of slave elements
689 
690  // form list of intersection dofs, in this case pressures in barycenters
691  // but we do not use those form MH system in order to allow second schur complement. We rather map these
692  // dofs to pressure traces, i.e. we use averages of traces as barycentric values.
693 
694 
695  master_ = intersections_[ml_it_->front()].master_iter();
696  delta_0 = master_->measure();
697 
698  double master_sigma=darcy_.data_.sigma.value( master_->centre(), master_->element_accessor());
699 
700  // rows
 // index == ml_it_->size() denotes the master element itself (see pressure_diff)
701  for(i = 0; i <= ml_it_->size(); ++i) {
702  //cout << "Intersection:" << master_->index() << ", "
703  // << intersections_[ (*ml_it_)[i] ].slave_iter()->index();
704  pressure_diff(i, dofs_i, ele_type_i, delta_i, dirichlet_i);
705  //columns
706  for (j = 0; j <= ml_it_->size(); ++j) {
707  pressure_diff(j, dofs_j, ele_type_j, delta_j, dirichlet_j);
708 
709  double scale = -master_sigma * delta_i * delta_j / delta_0;
710  product = scale * tensor_average[ele_type_i][ele_type_j];
711 
712 
713  //cout << product << endl;
714 
715 
716  arma::vec rhs(dofs_i.size());
717  rhs.zeros();
718  ls.set_values( dofs_i, dofs_j, product, rhs, dirichlet_i, dirichlet_j);
719  }
720  }
721  } // loop over master elements
722 }
723 
724 
725 
 /// Fills dofs[shift .. shift+n_sides) with the edge-row indices of `ele`'s
 /// sides. For a side with a Dirichlet boundary condition the dof index is
 /// negated (flagging the constraint for LinSys::set_values) and the BC
 /// pressure is stored at the corresponding position of `dirichlet`.
 726  void P1_CouplingAssembler::add_sides(const Element * ele, unsigned int shift, vector<int> &dofs, vector<double> &dirichlet)
 727  {
 728 
 729  for(unsigned int i_side=0; i_side < ele->n_sides(); i_side++ ) {
 730  dofs[shift+i_side] = darcy_.row_4_edge[ele->side(i_side)->edge_idx()];
 731  Boundary * bcd = ele->side(i_side)->cond();
 732 
 733  if (bcd) {
 734  ElementAccessor<3> b_ele = bcd->element_accessor();
 735  DarcyFlowMH::EqData::BC_Type type = (DarcyFlowMH::EqData::BC_Type)darcy_.data_.bc_type.value(b_ele.centre(), b_ele);
 736  //DBGMSG("bcd id: %d sidx: %d type: %d\n", ele->id(), i_side, type);
 737  if (type == DarcyFlowMH::EqData::dirichlet) {
 738  //DBGMSG("Dirichlet: %d\n", ele->index());
 739  dofs[shift + i_side] = -dofs[shift + i_side];
 740  double bc_pressure = darcy_.data_.bc_pressure.value(b_ele.centre(), b_ele);
 741  dirichlet[shift + i_side] = bc_pressure;
 742  }
 743  }
 744  }
 745  }
746 
747 
748 /**
749  * P1 coonection of different dimensions
750  * - demonstrated convergence, but still major open questions:
751  * ? in all test cases the error on the fracture is less on the left and greater on the right
752  * with incresing trend
753  * tried:
754  * - changed order of 1d elements (no change)
755  * - higher precision of ngh output and linear solver (no change)
756  * ? seems that there should be some factor 6.0 in the communication term
757  * ? in the case of infinite k2 -> simplest 1d-constant communication, the biggest difference on borders,
758  * numerical solution greater then analytical
759  *
760  * TODO:
761  * * full implementation of Dirichlet BC ( also for 2d sides)
762  */
763 
765 
// NOTE(review): signature line (listing 764, presumably
// `void P1_CouplingAssembler::assembly(LinSys &ls)`) and the declarations of
// `dofs`, `dirichlet` and `rhs` used below are missing from this extract —
// presumably class members; confirm against the repository. Assembles the P1
// mortar coupling: for each 1D-2D intersection it builds a 5x5 local matrix
// (3 slave sides + 2 master sides) from the difference of P1 base functions
// evaluated at the intersection endpoints X and Y.
766  for (const Intersection &intersec : intersections_) {
767  const Element * master = intersec.master_iter();
768  const Element * slave = intersec.slave_iter();
769 
770  add_sides(master, 0, dofs, dirichlet);
771  add_sides(slave, 2, dofs, dirichlet);
772  double master_sigma=darcy_.data_.sigma.value( master->centre(), master->element_accessor());
773 
774 /*
775  * Local coordinates on 1D
776  * t0
777  * node 0: 0.0
778  * node 1: 1.0
779  *
780  * base fce points
781  * t0 = 0.0 on side 0 node 0
782  * t0 = 1.0 on side 1 node 1
783  *
784  * Local coordinates on 2D
785  * t0 t1
786  * node 0: 0.0 0.0
787  * node 1: 1.0 0.0
788  * node 2: 0.0 1.0
789  *
790  * base fce points
791  * t0=0.5, t1=0.0 on side 0 nodes (0,1)
792  * t0=0.5, t1=0.5 on side 1 nodes (1,2)
793  * t0=0.0, t1=0.5 on side 2 nodes (2,0)
794  */
795 
796 
797 
798  arma::vec point_Y(1);
799  point_Y.fill(1.0);
800  arma::vec point_2D_Y(intersec.map_to_slave(point_Y)); // local coordinates of Y on slave
801  arma::vec point_1D_Y(intersec.map_to_master(point_Y)); // local coordinates of Y on master
802 
803  arma::vec point_X(1);
804  point_X.fill(0.0);
805  arma::vec point_2D_X(intersec.map_to_slave(point_X)); // local coordinates of X on slave
806  arma::vec point_1D_X(intersec.map_to_master(point_X)); // local coordinates of X on master
807 
808  arma::mat base_2D(3, 3);
809  // base fce = a0 + a1*t0 + a2*t1
810  // a0 a1 a2
811  base_2D << 1.0 << 0.0 << -2.0 << arma::endr //point on side 0
812  << -1.0 << 2.0 << 2.0 << arma::endr // point on side 1
813  << 1.0 << -2.0 << 0.0 << arma::endr;// point on side 2
814 
815  arma::mat base_1D(2, 2);
816  // base fce = a0 + a1 * t0
817  // a0 a1
818  base_1D << 1.0 << -1.0 << arma::endr // point on side 0,
819  << 0.0 << 1.0 << arma::endr; // point on side 1,
820 
821 
822  //base_1D.print("base_1D: ");
823  //base_2D.print("base_2D: ");
824  //point_2D_X.print("point_2D_X: ");
825  //point_2D_Y.print("point_2D_Y: ");
826 
 // pressure differences (slave minus master) at the two intersection endpoints
827  arma::vec difference_in_Y(5);
828  arma::vec difference_in_X(5);
829 
830  // slave sides 0,1,2
831  difference_in_Y.subvec(0, 2) = -base_2D * point_2D_Y;
832  difference_in_X.subvec(0, 2) = -base_2D * point_2D_X;
833  // master sides 3,4
834  difference_in_Y.subvec(3, 4) = base_1D * point_1D_Y;
835  difference_in_X.subvec(3, 4) = base_1D * point_1D_X;
836 
837  //difference_in_X.print("difference_in_X: ");
838  //difference_in_Y.print("difference_in_Y: ");
839 
840  // entries of the matrix A[i,j]
841  arma::mat A(5, 5);
842  for (int i = 0; i < 5; ++i) {
843  for (int j = 0; j < 5; ++j) {
 // exact integral of the product of two linear functions over the intersection
844  A(i, j) = -master_sigma * intersec.intersection_true_size() *
845  ( difference_in_Y[i] * difference_in_Y[j]
846  + difference_in_Y[i] * difference_in_X[j]/2
847  + difference_in_X[i] * difference_in_Y[j]/2
848  + difference_in_X[i] * difference_in_X[j]
849  ) * (1.0 / 3.0);
850 
851  }
852  }
853  //for(int i=0;i<5;i++) DBGMSG("idx %d: %d\n",i, dofs[i]);
854  //A.print("A:");
855 
856 
857  ls.set_values( dofs, dofs, A, rhs, dirichlet, dirichlet);
858 
859  }
860 }
861 
862 
863 
864 /*******************************************************************************
865  * COMPOSE WATER MH MATRIX WITHOUT SCHUR COMPLEMENT
866  ******************************************************************************/
867 
869 
// NOTE(review): signature line (listing 868) missing from this extract — per
// the banner this is the routine that creates the linear system (composes the
// MH matrix without the Schur complement); presumably
// DarcyFlowMH_Steady::create_linear_system() — TODO confirm. It selects
// BDDCML or PETSc (with 0, 1 or 2 nested Schur complements) according to the
// input record and runs the preallocation assembly pass.
870  START_TIMER("preallocation");
871  int i_loc, el_row;
872  Element *ele;
873  Vec aux;
874  PetscErrorCode err;
875 
876  F_ENTRY;
877 
878  //xprintf(Msg,"****************** problem statistics \n");
879  //xprintf(Msg,"edges: %d \n",mesh_->n_edges());
880  //xprintf(Msg,"sides: %d \n",mesh_->n_sides());
881  //xprintf(Msg,"elements: %d \n",mesh_->n_elements());
882  //xprintf(Msg,"************************************* \n");
883  //xprintf(Msg,"problem size: %d \n",this->size);
884  //xprintf(Msg,"****************** problem statistics \n");
885 
886  auto in_rec = this->input_record_.val<Input::AbstractRecord>("solver");
887 
888  if (schur0 == NULL) { // create Linear System for MH matrix
889 
890  if (in_rec.type() == LinSys_BDDC::input_type) {
891  xprintf(Warn, "BDDC is using no Schur complements.");
892  n_schur_compls = 0;
893  } else if (n_schur_compls > 2) {
894  xprintf(Warn, "Invalid number of Schur Complements. Using 2.");
895  n_schur_compls = 2;
896  }
 // BDDC branch only makes sense with more than one process (see PETSc fallback below)
897  if (in_rec.type() == LinSys_BDDC::input_type && rows_ds->np() > 1) {
898 #ifdef HAVE_BDDCML
899  LinSys_BDDC *ls = new LinSys_BDDC(global_row_4_sub_row->size(), &(*rows_ds),
900  3, // 3 == la::BddcmlWrapper::SPD_VIA_SYMMETRICGENERAL
901  1, // 1 == number of subdomains per process
902  true); // swap signs of matrix and rhs to make the matrix SPD
903  ls->set_from_input(in_rec);
904  ls->set_solution( NULL );
905  // possible initialization particular to BDDC
906  START_TIMER("BDDC set mesh data");
 // NOTE(review): listing line 907 dropped by the extraction (presumably the
 // call that passes mesh data to BDDC); confirm against the repository.
908  schur0=ls;
909  END_TIMER("BDDC set mesh data");
910 #else
911  xprintf(Err, "Flow123d was not build with BDDCML support.\n");
912 #endif
913  }
914 
915 
916  // use PETSC for serial case even when user want BDDC
917  if (in_rec.type() == LinSys_PETSC::input_type || schur0==NULL) {
918  LinSys_PETSC *schur1, *schur2;
919 
920  if (n_schur_compls == 0) {
921  LinSys_PETSC *ls = new LinSys_PETSC( &(*rows_ds) );
922 
923  // temporary solution; we have to set precision also for sequantial case of BDDC
924  // final solution should be probably call of direct solver for oneproc case
925  if (in_rec.type() != LinSys_BDDC::input_type) ls->set_from_input(in_rec);
926  else {
927  ls->LinSys::set_from_input(in_rec); // get only common options
928  }
929 
930  ls->set_solution( NULL );
931  schur0=ls;
932  } else {
933  IS is;
934  err = ISCreateStride(PETSC_COMM_WORLD, side_ds->lsize(), rows_ds->begin(), 1, &is);
935  ASSERT(err == 0,"Error in ISCreateStride.");
936 
937  SchurComplement *ls = new SchurComplement(is, &(*rows_ds));
938  ls->set_from_input(in_rec);
939  ls->set_solution( NULL );
940  ls->set_positive_definite();
941 
942  // make schur1
 // NOTE(review): listing line 943 dropped by the extraction — presumably the
 // declaration/assignment of `ds` (the complement distribution) used below.
944  if (n_schur_compls==1) {
945  schur1 = new LinSys_PETSC(ds);
946  } else {
947  IS is;
948  err = ISCreateStride(PETSC_COMM_WORLD, el_ds->lsize(), ls->get_distribution()->begin(), 1, &is);
949  ASSERT(err == 0,"Error in ISCreateStride.");
950  SchurComplement *ls1 = new SchurComplement(is, ds); // is is deallocated by SchurComplement
951  ls1->set_negative_definite();
952 
953  // make schur2
954  schur2 = new LinSys_PETSC( ls1->make_complement_distribution() );
955  ls1->set_complement( schur2 );
956  schur1 = ls1;
957  }
958  ls->set_complement( schur1 );
959  schur0=ls;
960  }
961 
962  START_TIMER("PETSC PREALLOCATION");
 // NOTE(review): listing lines 963-964 dropped by the extraction (presumably
 // schur0 symmetry/preallocation setup calls); confirm against the repository.
965  assembly_steady_mh_matrix(); // preallocation
966  VecZeroEntries(schur0->get_solution());
967  END_TIMER("PETSC PREALLOCATION");
968  }
969 
970  if (schur0==NULL) {
971  xprintf(Err, "Unknown solver type. Internal error.\n");
972  }
973  }
974 
975 
976  END_TIMER("preallocation");
977 }
978 
979 
980 
981 
983 
984  data_.set_time(*time_);
985  DBGMSG("Assembly linear system\n");
986  if (data_.changed()) {
987  DBGMSG(" Data changed\n");
988  // currently we have no optimization for cases when just time term data or RHS data are changed
989  START_TIMER("full assembly");
990  if (typeid(*schur0) != typeid(LinSys_BDDC)) {
991  schur0->start_add_assembly(); // finish allocation and create matrix
992  }
995  assembly_steady_mh_matrix(); // fill matrix
998 
999  if (!time_->is_steady()) {
1000  DBGMSG(" setup time term\n");
1001  // assembly time term and rhs
1002  setup_time_term();
1003  modify_system();
1004  }
1005  END_TIMER("full assembly");
1006  } else {
1007  START_TIMER("modify system");
1008  if (!time_->is_steady()) {
1009  modify_system();
1010  } else {
1011  xprintf(PrgErr, "Planned computation time for steady solver, but data are not changed.\n");
1012  }
1013  END_TIMER("modiffy system");
1014  }
1015 
1016 
1017  //schur0->view_local_matrix();
1018  //PetscViewer myViewer;
1019  //PetscViewerASCIIOpen(PETSC_COMM_WORLD,"matis.m",&myViewer);
1020  //PetscViewerSetFormat(myViewer,PETSC_VIEWER_ASCII_MATLAB);
1021 
1022  //MatView( schur0->get_matrix(),PETSC_VIEWER_STDOUT_WORLD );
1023  //DBGMSG("RHS\n");
1024  //VecView(schur0->get_rhs(), PETSC_VIEWER_STDOUT_WORLD);
1025  //VecView(schur0->get_solution(), PETSC_VIEWER_STDOUT_WORLD);
1026 
1027  //PetscViewerDestroy(myViewer);
1028 
1029 }
1030 
1031 
1032 
1034  // prepare mesh for BDDCML
1035  // initialize arrays
1036  std::map<int,arma::vec3> localDofMap;
1037  std::vector<int> inet;
1038  std::vector<int> nnet;
1039  std::vector<int> isegn;
1040 
1041  std::vector<double> element_permeability;
1042 
1043  // maximal and minimal dimension of elements
1044  int elDimMax = 1;
1045  int elDimMin = 3;
1046  for ( int i_loc = 0; i_loc < el_ds->lsize(); i_loc++ ) {
1047  // for each element, create local numbering of dofs as fluxes (sides), pressure (element centre), Lagrange multipliers (edges), compatible connections
1048  ElementFullIter el = mesh_->element(el_4_loc[i_loc]);
1049  int e_idx = el.index();
1050 
1051  int elDim = el->dim();
1052  elDimMax = std::max( elDimMax, elDim );
1053  elDimMin = std::min( elDimMin, elDim );
1054 
1055  isegn.push_back( e_idx );
1056  int nne = 0;
1057 
1058  FOR_ELEMENT_SIDES(el,si) {
1059  // insert local side dof
1060  int side_row = side_row_4_id[ mh_dh.side_dof( el->side(si) ) ];
1061  arma::vec3 coord = el->side(si)->centre();
1062 
1063  localDofMap.insert( std::make_pair( side_row, coord ) );
1064  inet.push_back( side_row );
1065  nne++;
1066  }
1067 
1068  // insert local pressure dof
1069  int el_row = row_4_el[ el_4_loc[i_loc] ];
1070  arma::vec3 coord = el->centre();
1071  localDofMap.insert( std::make_pair( el_row, coord ) );
1072  inet.push_back( el_row );
1073  nne++;
1074 
1075  FOR_ELEMENT_SIDES(el,si) {
1076  Edge *edg=el->side(si)->edge();
1077 
1078  // insert local edge dof
1079  int edge_row = row_4_edge[ el->side(si)->edge_idx() ];
1080  arma::vec3 coord = el->side(si)->centre();
1081 
1082  localDofMap.insert( std::make_pair( edge_row, coord ) );
1083  inet.push_back( edge_row );
1084  nne++;
1085  }
1086 
1087  // insert dofs related to compatible connections
1088  for ( int i_neigh = 0; i_neigh < el->n_neighs_vb; i_neigh++) {
1089  int edge_row = row_4_edge[ el->neigh_vb[i_neigh]->edge_idx() ];
1090  arma::vec3 coord = el->neigh_vb[i_neigh]->edge()->side(0)->centre();
1091 
1092  localDofMap.insert( std::make_pair( edge_row, coord ) );
1093  inet.push_back( edge_row );
1094  nne++;
1095  }
1096 
1097  nnet.push_back( nne );
1098 
1099  // version for rho scaling
1100  // trace computation
1101  arma::vec3 centre = el->centre();
1102  double conduct = data_.conductivity.value( centre , el->element_accessor() );
1103  double cs = data_.cross_section.value( centre, el->element_accessor() );
1104  arma::mat33 aniso = data_.anisotropy.value( centre, el->element_accessor() );
1105 
1106  // compute mean on the diagonal
1107  double coef = 0.;
1108  for ( int i = 0; i < 3; i++) {
1109  coef = coef + aniso.at(i,i);
1110  }
1111  // Maybe divide by cs
1112  coef = conduct*coef / 3;
1113 
1114  ASSERT( coef > 0.,
1115  "Zero coefficient of hydrodynamic resistance %f . \n ", coef );
1116  element_permeability.push_back( 1. / coef );
1117  }
1118  //convert set of dofs to vectors
1119  int numNodeSub = localDofMap.size();
1120  ASSERT_EQUAL( numNodeSub, global_row_4_sub_row->size() );
1121  std::vector<int> isngn( numNodeSub );
1122  std::vector<double> xyz( numNodeSub * 3 ) ;
1123  int ind = 0;
1124  std::map<int,arma::vec3>::iterator itB = localDofMap.begin();
1125  for ( ; itB != localDofMap.end(); ++itB ) {
1126  isngn[ind] = itB -> first;
1127 
1128  arma::vec3 coord = itB -> second;
1129  for ( int j = 0; j < 3; j++ ) {
1130  xyz[ j*numNodeSub + ind ] = coord[j];
1131  }
1132 
1133  ind++;
1134  }
1135  localDofMap.clear();
1136 
1137  // nndf is trivially one
1138  std::vector<int> nndf( numNodeSub, 1 );
1139 
1140  // prepare auxiliary map for renumbering nodes
1141  typedef std::map<int,int> Global2LocalMap_; //! type for storage of global to local map
1142  Global2LocalMap_ global2LocalNodeMap;
1143  for ( unsigned ind = 0; ind < isngn.size(); ++ind ) {
1144  global2LocalNodeMap.insert( std::make_pair( static_cast<unsigned>( isngn[ind] ), ind ) );
1145  }
1146 
1147  //std::cout << "INET: \n";
1148  //std::copy( inet.begin(), inet.end(), std::ostream_iterator<int>( std::cout, " " ) );
1149  //std::cout << std::endl;
1150  //std::cout << "ISNGN: \n";
1151  //std::copy( isngn.begin(), isngn.end(), std::ostream_iterator<int>( std::cout, " " ) );
1152  //std::cout << std::endl << std::flush;
1153  //std::cout << "ISEGN: \n";
1154  //std::copy( isegn.begin(), isegn.end(), std::ostream_iterator<int>( std::cout, " " ) );
1155  //std::cout << std::endl << std::flush;
1156  //MPI_Barrier( PETSC_COMM_WORLD );
1157 
1158  // renumber nodes in the inet array to locals
1159  int indInet = 0;
1160  for ( int iEle = 0; iEle < isegn.size(); iEle++ ) {
1161  int nne = nnet[ iEle ];
1162  for ( unsigned ien = 0; ien < nne; ien++ ) {
1163 
1164  int indGlob = inet[indInet];
1165  // map it to local node
1166  Global2LocalMap_::iterator pos = global2LocalNodeMap.find( indGlob );
1167  ASSERT( pos != global2LocalNodeMap.end(),
1168  "Cannot remap node index %d to local indices. \n ", indGlob );
1169  int indLoc = static_cast<int> ( pos -> second );
1170 
1171  // store the node
1172  inet[ indInet++ ] = indLoc;
1173  }
1174  }
1175 
1176  int numNodes = size;
1177  int numDofsInt = size;
1178  int spaceDim = 3; // TODO: what is the proper value here?
1179  int meshDim = elDimMax;
1180  //std::cout << "I have identified following dimensions: max " << elDimMax << ", min " << elDimMin << std::endl;
1181 
1182  bddc_ls -> load_mesh( spaceDim, numNodes, numDofsInt, inet, nnet, nndf, isegn, isngn, isngn, xyz, element_permeability, meshDim );
1183 }
1184 
1185 
1186 
1187 
1188 //=============================================================================
1189 // DESTROY WATER MH SYSTEM STRUCTURE
1190 //=============================================================================
1192  //if (schur2 != NULL) {
1193  // delete schur2;
1194  //ISDestroy(&IS2);
1195  //}
1196  //if (schur1 != NULL) {
1197  // delete schur1;
1198  //ISDestroy(&IS1);
1199  //}
1200  if (schur0 != NULL) delete schur0;
1201 
1202  delete edge_ds;
1203  delete el_ds;
1204  delete side_ds;
1205 
1206  xfree(el_4_loc);
1207  xfree(row_4_el);
1210  xfree(edge_4_loc);
1211  xfree(row_4_edge);
1212 
1213  if (solution != NULL) {
1214  VecDestroy(&sol_vec);
1215  xfree(solution);
1216  }
1217 
1218  delete output_object;
1219 
1220  //VecScatterDestroy(&par_to_all);
1221 
1222 }
1223 
1224 
 1225 // ================================================
 1226 // PARALLEL PART
 1227 //
1228 
 1229 /**
 1230  * Make connectivity graph of the second Schur complement and compute optimal partitioning.
 1231  * This version assigns 1D and 2D edges to one processor, represents them as
 1232  * weighted vertices, and 2D-3D neighbourings as weighted edges. 3D edges are
 1233  * then distributed over all processors.
 1234  */
1235 /*
1236 void make_edge_conection_graph(Mesh *mesh, SparseGraph * &graph) {
1237 
1238  Distribution edistr = graph->get_distr();
1239  //Edge *edg;
1240  Element *ele;
 1241  int li, eid, i_edg;
1242  unsigned int si, i_neigh;
1243  int e_weight;
1244 
1245  int edge_dim_weights[3] = { 100, 10, 1 };
1246  F_ENTRY;
1247 
1248  i_edg=0;
1249  FOR_EDGES(mesh, edg) {
1250 
1251  // skip non-local edges
1252  if (!edistr.is_local(i_edg)) continue;
1253 
1254  e_weight = edge_dim_weights[edg->side(0)->element()->dim() - 1];
1255  // for all connected elements
1256  FOR_EDGE_SIDES( edg, li ) {
1257  ASSERT( edg->side(li)->valid(),"NULL side of edge.");
1258  ele = edg->side(li)->element();
1259  ASSERT(NONULL(ele),"NULL element of side.");
1260 
1261  // for sides of connected element, excluding edge itself
1262  for(si=0; si<ele->n_sides(); si++) {
1263  eid = ele->side(si)->edge_idx();
1264  if (eid != i_edg)
1265  graph->set_edge(i_edg, eid, e_weight);
1266  }
1267 
1268  // include connections from lower dim. edge
1269  // to the higher dimension
1270  for (i_neigh = 0; i_neigh < ele->n_neighs_vb; i_neigh++) {
1271  eid = ele->neigh_vb[i_neigh]->edge_idx();
1272  graph->set_edge(i_edg, eid, e_weight);
1273  graph->set_edge(eid, i_edg, e_weight);
1274  }
1275  }
1276  i_edg++;
1277  }
1278 
1279  graph->finalize();
1280 }
1281 */
1282 /**
1283  * Make connectivity graph of elements of mesh - dual graph: elements vertices of graph.
1284  */
1285 /*
1286 void make_element_connection_graph(Mesh *mesh, SparseGraph * &graph, bool neigh_on) {
1287 
1288  Distribution edistr = graph->get_distr();
1289 
1290  Edge *edg;
1291  int li, e_idx, i_neigh;
1292  int i_s, n_s;
1293  F_ENTRY;
1294 
1295  //int elDimMax = 1;
1296  //int elDimMin = 3;
1297  //FOR_ELEMENTS(mesh, ele) {
1298  // //xprintf(Msg,"Element id %d , its index %d.\n",ele.id(), i_ele);
1299  // int elDim = ele->dim();
1300  // elDimMax = std::max( elDimMax, elDim );
1301  // elDimMin = std::min( elDimMin, elDim );
1302  //}
1303  //std::cout << "max and min element dimensions: " << elDimMax << " " << elDimMin << std::endl;
1304 
1305  FOR_ELEMENTS(mesh, ele) {
1306  //xprintf(Msg,"Element id %d , its index %d.\n",ele.id(), i_ele);
1307 
1308  // skip non-local elements
1309  if (!edistr.is_local(ele.index()))
1310  continue;
1311 
1312  // for all connected elements
1313  FOR_ELEMENT_SIDES( ele, si ) {
1314  edg = ele->side(si)->edge();
1315 
1316  FOR_EDGE_SIDES( edg, li ) {
1317  ASSERT(edg->side(li)->valid(),"NULL side of edge.");
1318  e_idx = ELEMENT_FULL_ITER(mesh, edg->side(li)->element()).index();
1319 
1320  // for elements of connected elements, excluding element itself
1321  if (e_idx != ele.index()) {
1322  graph->set_edge(ele.index(), e_idx);
1323  }
1324  }
1325  }
1326 
1327  // include connections from lower dim. edge
1328  // to the higher dimension
1329  if (neigh_on) {
1330  for (i_neigh = 0; i_neigh < ele->n_neighs_vb; i_neigh++) {
1331  n_s = ele->neigh_vb[i_neigh]->edge()->n_sides;
1332  for (i_s = 0; i_s < n_s; i_s++) {
1333  e_idx=ELEMENT_FULL_ITER(mesh, ele->neigh_vb[i_neigh]->edge()->side(i_s)->element()).index();
1334  graph->set_edge(ele.index(), e_idx);
1335  graph->set_edge(e_idx, ele.index());
1336  }
1337  }
1338  }
1339  }
1340  graph->finalize();
1341 }*/
1342 
1343 
1344 // ========================================================================
1345 // to finish row_4_id arrays we have to convert individual numberings of
1346 // sides/els/edges to whole numbering of rows. To this end we count shifts
1347 // for sides/els/edges on each proc and then we apply them on row_4_id
1348 // arrays.
1349 // we employ macros to avoid code redundancy
1350 // =======================================================================
1352  int i, shift;
1353  int np = edge_ds->np();
1354  int edge_shift[np], el_shift[np], side_shift[np];
1355  unsigned int rows_starts[np];
1356 
1357  int edge_n_id = mesh_->n_edges(),
1358  el_n_id = mesh_->element.size(),
1359  side_n_id = mesh_->n_sides();
1360 
1361  // compute shifts on every proc
1362  shift = 0; // in this var. we count new starts of arrays chunks
1363  for (i = 0; i < np; i++) {
1364  side_shift[i] = shift - (side_ds->begin(i)); // subtract actual start of the chunk
1365  shift += side_ds->lsize(i);
1366  el_shift[i] = shift - (el_ds->begin(i));
1367  shift += el_ds->lsize(i);
1368  edge_shift[i] = shift - (edge_ds->begin(i));
1369  shift += edge_ds->lsize(i);
1370  rows_starts[i] = shift;
1371  }
1372  //DBGPRINT_INT("side_shift",np,side_shift);
1373  //DBGPRINT_INT("el_shift",np,el_shift);
1374  //DBGPRINT_INT("edge_shift",np,edge_shift);
1375  // apply shifts
1376  for (i = 0; i < side_n_id; i++) {
1377  int &what = side_row_4_id[i];
1378  if (what >= 0)
1379  what += side_shift[side_ds->get_proc(what)];
1380  }
1381  for (i = 0; i < el_n_id; i++) {
1382  int &what = row_4_el[i];
1383  if (what >= 0)
1384  what += el_shift[el_ds->get_proc(what)];
1385 
1386  }
1387  for (i = 0; i < edge_n_id; i++) {
1388  int &what = row_4_edge[i];
1389  if (what >= 0)
1390  what += edge_shift[edge_ds->get_proc(what)];
1391  }
1392  // make distribution of rows
1393  for (i = np - 1; i > 0; i--)
1394  rows_starts[i] -= rows_starts[i - 1];
1395 
1396  rows_ds = boost::make_shared<Distribution>(&(rows_starts[0]), PETSC_COMM_WORLD);
1397 }
1398 
1400  START_TIMER("prepare scatter");
1401  // prepare Scatter form parallel to sequantial in original numbering
1402  {
1403  IS is_loc;
1404  int i, *loc_idx; //, si;
1405 
1406  // create local solution vector
1407  solution = (double *) xmalloc(size * sizeof(double));
1408  VecCreateSeqWithArray(PETSC_COMM_SELF,1, size, solution,
1409  &(sol_vec));
1410 
1411  // create seq. IS to scatter par solutin to seq. vec. in original order
1412  // use essentialy row_4_id arrays
1413  loc_idx = (int *) xmalloc(size * sizeof(int));
1414  i = 0;
1415  FOR_ELEMENTS(mesh_, ele) {
1416  FOR_ELEMENT_SIDES(ele,si) {
1417  loc_idx[i++] = side_row_4_id[ mh_dh.side_dof( ele->side(si) ) ];
1418  }
1419  }
1420  FOR_ELEMENTS(mesh_, ele) {
1421  loc_idx[i++] = row_4_el[ele.index()];
1422  }
1423  for(unsigned int i_edg=0; i_edg < mesh_->n_edges(); i_edg++) {
1424  loc_idx[i++] = row_4_edge[i_edg];
1425  }
1426  ASSERT( i==size,"Size of array does not match number of fills.\n");
1427  //DBGPRINT_INT("loc_idx",size,loc_idx);
1428  ISCreateGeneral(PETSC_COMM_SELF, size, loc_idx, PETSC_COPY_VALUES, &(is_loc));
1429  xfree(loc_idx);
1430  VecScatterCreate(schur0->get_solution(), is_loc, sol_vec,
1431  PETSC_NULL, &par_to_all);
1432  ISDestroy(&(is_loc));
1433  }
1435 
1436  END_TIMER("prepare scatter");
1437 
1438 }
1439 
1440 // ====================================================================================
1441 // - compute optimal edge partitioning
1442 // - compute appropriate partitioning of elements and sides
1443 // - make arrays: *_id_4_loc and *_row_4_id to allow parallel assembly of the MH matrix
1444 // ====================================================================================
1446 
1447  START_TIMER("prepare parallel");
1448 
1449  int *loc_part; // optimal (edge,el) partitioning (local chunk)
1450  int *id_4_old; // map from old idx to ids (edge,el)
1451  int i, loc_i;
1452 
1453  int e_idx;
1454 
1455 
1456  F_ENTRY;
1457 
1458  //ierr = MPI_Barrier(PETSC_COMM_WORLD);
1459  //ASSERT(ierr == 0, "Error in MPI_Barrier.");
1460 
1461  id_4_old = new int[mesh_->n_elements()];
1462  i = 0;
1463  FOR_ELEMENTS(mesh_, el) id_4_old[i++] = el.index();
1464 
1465  mesh_->get_part()->id_maps(mesh_->element.size(), id_4_old, el_ds, el_4_loc, row_4_el);
1466  //DBGPRINT_INT("el_4_loc",el_ds->lsize(),el_4_loc);
1467  //xprintf(Msg,"Number of elements in subdomain %d \n",el_ds->lsize());
1468  delete[] id_4_old;
1469 
1470  //el_ds->view( std::cout );
1471  //
1472  //DBGMSG("Compute appropriate edge partitioning ...\n");
1473  //optimal element part; loc. els. id-> new el. numbering
1474  Distribution init_edge_ds(DistributionLocalized(), mesh_->n_edges(), PETSC_COMM_WORLD);
1475  // partitioning of edges, edge belongs to the proc of his first element
1476  // this is not optimal but simple
1477  loc_part = new int[init_edge_ds.lsize()];
1478  id_4_old = new int[mesh_->n_edges()];
1479  {
1480  loc_i = 0;
1481  FOR_EDGES(mesh_, edg ) {
1482  unsigned int i_edg = edg - mesh_->edges.begin();
1483  // partition
1484  e_idx = mesh_->element.index(edg->side(0)->element());
1485  //xprintf(Msg,"Index of edge: %d first element: %d \n",edgid,e_idx);
1486  if (init_edge_ds.is_local(i_edg)) {
1487  // find (new) proc of the first element of the edge
1488  loc_part[loc_i++] = el_ds->get_proc(row_4_el[e_idx]);
1489  }
1490  // id array
1491  id_4_old[i_edg] = i_edg;
1492  }
1493  }
1494 
1495  Partitioning::id_maps(mesh_->n_edges(), id_4_old, init_edge_ds, loc_part, edge_ds, edge_4_loc, row_4_edge);
1496  delete[] loc_part;
1497  delete[] id_4_old;
1498 
1499  //optimal side part; loc. sides; id-> new side numbering
1500  Distribution init_side_ds(DistributionBlock(), mesh_->n_sides(), PETSC_COMM_WORLD);
1501  // partitioning of sides follows elements
1502  loc_part = new int[init_side_ds.lsize()];
1503  id_4_old = new int[mesh_->n_sides()];
1504  {
1505  int is = 0;
1506  loc_i = 0;
1507  FOR_SIDES(mesh_, side ) {
1508  // partition
1509  if (init_side_ds.is_local(is)) {
1510  // find (new) proc of the element of the side
1511  loc_part[loc_i++] = el_ds->get_proc(
1513  }
1514  // id array
1515  id_4_old[is++] = mh_dh.side_dof( side );
1516  }
1517  }
1518 
1519  Partitioning::id_maps(mesh_->n_sides(), id_4_old, init_side_ds, loc_part, side_ds,
1521  delete [] loc_part;
1522  delete [] id_4_old;
1523 
1524  //edge_ds->view( std::cout );
1525  //side_ds->view( std::cout );
1526 
1527  /*
1528  DBGPRINT_INT("edge_id_4_loc",edge_ds->lsize,edge_id_4_loc);
1529  DBGPRINT_INT("el_4_loc",el_ds->lsize,el_4_loc);
1530  DBGPRINT_INT("side_id_4_loc",side_ds->lsize,side_id_4_loc);
1531  DBGPRINT_INT("edge_row_4_id",mesh_->n_edges,edge_row_4_id);
1532  DBGPRINT_INT("el_row_4_id",mesh_->max_elm_id+1,el_row_4_id);
1533  DBGPRINT_INT("side_row_4_id",mesh_->max_side_id+1,side_row_4_id);
1534  */
1535  // convert row_4_id arrays from separate numberings to global numbering of rows
1537  //DBGPRINT_INT("edge_row_4_id",mesh_->n_edges,edge_row_4_id);
1538  //DBGPRINT_INT("el_row_4_id",mesh_->max_elm_id+1,el_row_4_id);
1539  //DBGPRINT_INT("side_row_4_id",mesh_->max_side_id+1,side_row_4_id);
1540 
1541  //lsize = side_ds->lsize() + el_ds->lsize() + edge_ds->lsize();
1542 
1543 
1544  // prepare global_row_4_sub_row
1545 
1546 #ifdef HAVE_BDDCML
1547  if (in_rec.type() == LinSys_BDDC::input_type) {
1548  // auxiliary
1549  Element *el;
1550  int side_row, edge_row;
1551 
1552  //xprintf(Msg,"Compute mapping of local subdomain rows to global rows.\n");
1553 
1554  global_row_4_sub_row = boost::make_shared<LocalToGlobalMap>(rows_ds);
1555 
1556  //
1557  // ordering of dofs
1558  // for each subdomain:
1559  // | velocities (at sides) | pressures (at elements) | L. mult. (at edges) |
1560  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
1561  el = mesh_->element(el_4_loc[i_loc]);
1562  int el_row = row_4_el[el_4_loc[i_loc]];
1563 
1564  global_row_4_sub_row->insert( el_row );
1565 
1566  unsigned int nsides = el->n_sides();
1567  for (unsigned int i = 0; i < nsides; i++) {
1568  int side_row = side_row_4_id[ mh_dh.side_dof( el->side(i) ) ];
1569  int edge_row = row_4_edge[el->side(i)->edge_idx()];
1570 
1571  global_row_4_sub_row->insert( side_row );
1572  global_row_4_sub_row->insert( edge_row );
1573 
1574  // edge neighbouring overlap
1575  //if (edg->neigh_vb != NULL) {
1576  // int neigh_el_row=row_4_el[mesh_->element.index(edg->neigh_vb->element[0])];
1577  //}
1578  }
1579 
1580  for (unsigned int i_neigh = 0; i_neigh < el->n_neighs_vb; i_neigh++) {
1581  // mark this edge
1582  int edge_row = row_4_edge[el->neigh_vb[i_neigh]->edge_idx() ];
1583  global_row_4_sub_row->insert( edge_row );
1584  }
1585  }
1586  global_row_4_sub_row->finalize();
1587  }
1588 #endif // HAVE_BDDCML
1589 
1590  // common to both solvers - create renumbering of unknowns
1591  solver_indices_.reserve(size);
1592  FOR_ELEMENTS(mesh_, ele) {
1593  FOR_ELEMENT_SIDES(ele,si) {
1594  solver_indices_.push_back( side_row_4_id[ mh_dh.side_dof( ele->side(si) ) ] );
1595  }
1596  }
1597  FOR_ELEMENTS(mesh_, ele) {
1598  solver_indices_.push_back( row_4_el[ele.index()] );
1599  }
1600 
1601  unsigned int i_edg=0;
1602  FOR_EDGES(mesh_, edg) {
1603  solver_indices_.push_back( row_4_edge[i_edg++] );
1604  }
1605  ASSERT( solver_indices_.size() == size, "Size of array does not match number of fills.\n" );
1606  //std::cout << "Solve rindices:" << std::endl;
1607  //std::copy( solver_indices_.begin(), solver_indices_.end(), std::ostream_iterator<int>( std::cout, " " ) );
1608 }
1609 
1610 
1611 
1612 void mat_count_off_proc_values(Mat m, Vec v) {
1613  int n, first, last;
1614  const PetscInt *cols;
1615  Distribution distr(v);
1616 
1617  int n_off = 0;
1618  int n_on = 0;
1619  int n_off_rows = 0;
1620  MatGetOwnershipRange(m, &first, &last);
1621  for (int row = first; row < last; row++) {
1622  MatGetRow(m, row, &n, &cols, PETSC_NULL);
1623  bool exists_off = false;
1624  for (int i = 0; i < n; i++)
1625  if (distr.get_proc(cols[i]) != distr.myp())
1626  n_off++, exists_off = true;
1627  else
1628  n_on++;
1629  if (exists_off)
1630  n_off_rows++;
1631  MatRestoreRow(m, row, &n, &cols, PETSC_NULL);
1632  }
1633  //printf("[%d] rows: %d off_rows: %d on: %d off: %d\n",distr.myp(),last-first,n_off_rows,n_on,n_off);
1634 }
1635 
1636 
1637 
1638 
1639 
1640 
1641 
1642 
1643 
1644 
1645 
1646 
1647 // ========================
1648 // unsteady
1649 
1651  : DarcyFlowMH_Steady(mesh_in, in_rec, false)
1652 {
1653  time_ = new TimeGovernor(in_rec.val<Input::Record>("time"));
1654  data_.mark_input_times(this->mark_type());
1655  data_.set_limit_side(LimitSide::right);
1656  data_.set_time(*time_);
1657 
1658  output_object = new DarcyFlowMHOutput(this, in_rec.val<Input::Record>("output"));
1659 
1662 
1663  VecDuplicate(schur0->get_solution(), &previous_solution);
1664  VecCreateMPI(PETSC_COMM_WORLD,rows_ds->lsize(),PETSC_DETERMINE,&(steady_diagonal));
1665  VecDuplicate(steady_diagonal,& new_diagonal);
1666  VecZeroEntries(new_diagonal);
1667  VecDuplicate(schur0->get_rhs(), &steady_rhs);
1668 
1671 
1672 
1673 /*
1674  VecDuplicate(schur0->get_rhs(), &time_term);
1675  */
1676  //setup_time_term();
1677  output_data();
1678 }
1679 
1681 {
1682 
1683  // read inital condition
1684  VecZeroEntries(schur0->get_solution());
1685 
1686  double *local_sol = schur0->get_solution_array();
1687 
1688  // cycle over local element rows
1690 
1691  DBGMSG("Setup with dt: %f\n",time_->dt());
1692  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1693  ele = mesh_->element(el_4_loc[i_loc_el]);
1694  int i_loc_row = i_loc_el + side_ds->lsize();
1695 
1696  // set initial condition
1697  local_sol[i_loc_row] = data_.init_pressure.value(ele->centre(),ele->element_accessor());
1698  }
1699 
1701 
1702 }
1703 
1705  // save diagonal of steady matrix
1706  MatGetDiagonal(schur0->get_matrix(), steady_diagonal);
1707  // save RHS
1708  VecCopy(schur0->get_rhs(), steady_rhs);
1709 
1710 
1711  PetscScalar *local_diagonal;
1712  VecGetArray(new_diagonal,& local_diagonal);
1713 
1715  DBGMSG("Setup with dt: %f\n",time_->dt());
1716  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1717  ele = mesh_->element(el_4_loc[i_loc_el]);
1718  int i_loc_row = i_loc_el + side_ds->lsize();
1719 
1720  // set new diagonal
1721  local_diagonal[i_loc_row]= - data_.storativity.value(ele->centre(), ele->element_accessor()) *
1722  ele->measure() / time_->dt();
1723  }
1724  VecRestoreArray(new_diagonal,& local_diagonal);
1725  MatDiagonalSet(schur0->get_matrix(), new_diagonal, ADD_VALUES);
1726 
1729 }
1730 
1732  START_TIMER("modify system");
1733  if (time_->is_changed_dt() && !schur0->is_matrix_changed()) {
1734  // if time step has changed and setup_time_term not called
1735  MatDiagonalSet(schur0->get_matrix(),steady_diagonal, INSERT_VALUES);
1736 
1737  VecScale(new_diagonal, time_->last_dt()/time_->dt());
1738  MatDiagonalSet(schur0->get_matrix(),new_diagonal, ADD_VALUES);
1740  }
1741 
1742  // modify RHS - add previous solution
1743  VecPointwiseMult(schur0->get_rhs(), new_diagonal, schur0->get_solution());
1744  VecAXPY(schur0->get_rhs(), 1.0, steady_rhs);
1746 
1747  // swap solutions
1748  VecSwap(previous_solution, schur0->get_solution());
1749 }
1750 
1751 // ========================
1752 // unsteady
1753 
1755  : DarcyFlowMH_Steady(mesh_in, in_rec,false)
1756 {
1757  time_ = new TimeGovernor(in_rec.val<Input::Record>("time"));
1758  data_.mark_input_times(this->mark_type());
1759 
1760 
1761  data_.set_limit_side(LimitSide::right);
1762  data_.set_time(*time_);
1763 
1764  output_object = new DarcyFlowMHOutput(this, in_rec.val<Input::Record>("output"));
1765 
1768  VecDuplicate(schur0->get_solution(), &previous_solution);
1769  VecCreateMPI(PETSC_COMM_WORLD,rows_ds->lsize(),PETSC_DETERMINE,&(steady_diagonal));
1770  VecDuplicate(steady_diagonal,& new_diagonal);
1771  VecDuplicate(schur0->get_rhs(), &steady_rhs);
1772 
1775  output_data();
1776 }
1777 
1779 {
1780  VecZeroEntries(schur0->get_solution());
1781 
1782  // apply initial condition
1783  // cycle over local element rows
1784 
1786  double init_value;
1787 
1788  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1789  ele = mesh_->element(el_4_loc[i_loc_el]);
1790 
1791  init_value = data_.init_pressure.value(ele->centre(), ele->element_accessor());
1792 
1793  FOR_ELEMENT_SIDES(ele,i) {
1794  int edge_row = row_4_edge[ele->side(i)->edge_idx()];
1795  VecSetValue(schur0->get_solution(),edge_row,init_value/ele->n_sides(),ADD_VALUES);
1796  }
1797  }
1798  VecAssemblyBegin(schur0->get_solution());
1799  VecAssemblyEnd(schur0->get_solution());
1800 
1802 }
1803 
1804 
1805 
1807 {
1808  // save diagonal of steady matrix
1809  MatGetDiagonal(schur0->get_matrix(), steady_diagonal);
1810  // save RHS
1811  VecCopy(schur0->get_rhs(),steady_rhs);
1812 
1813  VecZeroEntries(new_diagonal);
1814 
1815  // modify matrix diagonal
1816  // cycle over local element rows
1818  double init_value;
1819 
1820  for (unsigned int i_loc_el = 0; i_loc_el < el_ds->lsize(); i_loc_el++) {
1821  ele = mesh_->element(el_4_loc[i_loc_el]);
1822 
1823  init_value = data_.init_pressure.value(ele->centre(), ele->element_accessor());
1824 
1825  FOR_ELEMENT_SIDES(ele,i) {
1826  int edge_row = row_4_edge[ele->side(i)->edge_idx()];
1827  // set new diagonal
1828  VecSetValue(new_diagonal,edge_row, - ele->measure() *
1829  data_.storativity.value(ele->centre(), ele->element_accessor()) *
1830  data_.cross_section.value(ele->centre(), ele->element_accessor()) /
1831  time_->dt() / ele->n_sides(),ADD_VALUES);
1832  }
1833  }
1834  VecAssemblyBegin(new_diagonal);
1835  VecAssemblyEnd(new_diagonal);
1836 
1837  MatDiagonalSet(schur0->get_matrix(),new_diagonal, ADD_VALUES);
1838 
1841 }
1842 
1844  START_TIMER("modify system");
1845  if (time_->is_changed_dt() && !schur0->is_matrix_changed()) {
1846  // if time step has changed and setup_time_term not called
1847 
1848  MatDiagonalSet(schur0->get_matrix(),steady_diagonal, INSERT_VALUES);
1849  VecScale(new_diagonal, time_->last_dt()/time_->dt());
1850  MatDiagonalSet(schur0->get_matrix(),new_diagonal, ADD_VALUES);
1852  }
1853 
1854  // modify RHS - add previous solution
1855  VecPointwiseMult(schur0->get_rhs(), new_diagonal, schur0->get_solution());
1856  VecAXPY(schur0->get_rhs(), 1.0, steady_rhs);
1858 
1859  // swap solutions
1860  VecSwap(previous_solution, schur0->get_solution());
1861 }
1862 
1863 
1864 
1866  int i_loc, side_row, loc_edge_row, i;
1867  Edge* edg;
1868  ElementIter ele;
1869  double new_pressure, old_pressure, time_coef;
1870 
1871  PetscScalar *loc_prev_sol;
1872  VecGetArray(previous_solution, &loc_prev_sol);
1873 
1874  // modify side fluxes in parallel
1875  // for every local edge take time term on diagonal and add it to the corresponding flux
1876  for (i_loc = 0; i_loc < edge_ds->lsize(); i_loc++) {
1877 
1878  edg = &( mesh_->edges[ edge_4_loc[i_loc] ] );
1879  loc_edge_row = side_ds->lsize() + el_ds->lsize() + i_loc;
1880 
1881  new_pressure = (schur0->get_solution_array())[loc_edge_row];
1882  old_pressure = loc_prev_sol[loc_edge_row];
1883  FOR_EDGE_SIDES(edg,i) {
1884  ele = edg->side(i)->element();
1885  side_row = side_row_4_id[ mh_dh.side_dof( edg->side(i) ) ];
1886  time_coef = - ele->measure() *
1887  data_.cross_section.value(ele->centre(), ele->element_accessor()) *
1888  data_.storativity.value(ele->centre(), ele->element_accessor()) /
1889  time_->dt() / ele->n_sides();
1890  VecSetValue(schur0->get_solution(), side_row, time_coef * (new_pressure - old_pressure), ADD_VALUES);
1891  }
1892  }
1893  VecRestoreArray(previous_solution, &loc_prev_sol);
1894 
1895  VecAssemblyBegin(schur0->get_solution());
1896  VecAssemblyEnd(schur0->get_solution());
1897 
1898  //DarcyFlowMH_Steady::postprocess();
1899 
1900  int side_rows[4];
1901  double values[4];
1902  //ElementFullIter ele = ELEMENT_FULL_ITER(mesh_, NULL);
1903 
1904  // modify side fluxes in parallel
1905  // for every local edge take time term on digonal and add it to the corresponding flux
1906 
1907  for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
1908  ele = mesh_->element(el_4_loc[i_loc]);
1909  FOR_ELEMENT_SIDES(ele,i) {
1910  side_rows[i] = side_row_4_id[ mh_dh.side_dof( ele->side(i) ) ];
1911  values[i] = -1.0 * ele->measure() *
1912  data_.cross_section.value(ele->centre(), ele->element_accessor()) *
1914  ele->n_sides();
1915  }
1916  VecSetValues(schur0->get_solution(), ele->n_sides(), side_rows, values, ADD_VALUES);
1917  }
1918  VecAssemblyBegin(schur0->get_solution());
1919  VecAssemblyEnd(schur0->get_solution());
1920 }
1921 
1922 //-----------------------------------------------------------------------------
1923 // vim: set cindent: