Flow123d JS_before_hm-984-g3a19f2f
bddcml_wrapper.cc
//! @author Jakub Sistek
//! @date 7/2012

#include "la/bddcml_wrapper.hh"

//------------------------------------------------------------------------------
/** Constructor for the BDDCML solver object
 */
la::BddcmlWrapper::BddcmlWrapper( const unsigned numDofs,
                                  const unsigned numDofsSub,
                                  const MatrixType matrixType,
                                  const MPI_Comm comm,
                                  int numSubLoc )
  : numDofs_( static_cast<int>( numDofs ) ),
    numDofsSub_( static_cast<int>( numDofsSub ) ),
    matrixType_( matrixType ),
    comm_( comm ),
    numSubLoc_( numSubLoc )
{

    // Orient in the communicator
    MPI_Comm_rank( comm_, &rank_ );
    MPI_Comm_size( comm_, &nProc_ );

    // prepare RHS vector
    rhsVec_.resize( numDofsSub_, 0. );

    // prepare arrays to mark boundary conditions
    ifix_.resize( numDofsSub_, 0 );
    fixv_.resize( numDofsSub_, 0. );

    // initialize subdomain solution vector
    sol_.resize( numDofsSub_, 0. );
    normSol_ = 0.;
    numIter_ = 0;
    convergedReason_ = 0;
    condNumber_ = 0.;

    // initialize state of mesh and matrix
    meshLoaded_     = false;
    isMatAssembled_ = false;
}
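
// Usage sketch (an illustration, not part of the original source; assumes the
// matrix-type enumerators such as SPD are visible in namespace la and that
// MPI_Init() has already been called):
//
//   unsigned nDofsGlobal = 1000;   // hypothetical global number of dofs
//   unsigned nDofsLocal  = 250;    // hypothetical number of dofs on this process
//   la::BddcmlWrapper bddc( nDofsGlobal, nDofsLocal, la::SPD, MPI_COMM_WORLD, 1 );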
//------------------------------------------------------------------------------
/** Destructor frees the BDDC solver structures
 */
la::BddcmlWrapper::~BddcmlWrapper()
{
    // clear RHS
    rhsVec_.clear();

    // clear BC
    ifix_.clear();
    fixv_.clear();

    // clear solution
    sol_.clear();

    // clear mesh data
    inet_.clear();
    nnet_.clear();
    nndf_.clear();
    xyz_.clear();
    isngn_.clear();
    isvgvn_.clear();
    isegn_.clear();

    // clear global2local map
    global2LocalDofMap_.clear();

    // clear triplet
    coo_.clear();

}

//------------------------------------------------------------------------------
/** Routine for loading raw mesh data into the solver. For nonstandard meshes
 *  that are not Mesh or Grid objects, the user can create their own raw
 *  description to exploit the flexibility of the mesh format underlying BDDCML.
 */
void la::BddcmlWrapper::loadRawMesh( const int nDim, const int numNodes,
                                     const std::vector<int> & inet,
                                     const std::vector<int> & nnet,
                                     const std::vector<int> & nndf,
                                     const std::vector<int> & isegn,
                                     const std::vector<int> & isngn,
                                     const std::vector<int> & isvgvn,
                                     const std::vector<double> & xyz,
                                     const std::vector<double> & element_data,
                                     const int meshDim )
{
    // space dimension
    nDim_ = nDim;

    // set topological dimension of mesh
    if ( meshDim == 0 ) { // if not specified, use space dimension
        meshDim_ = nDim_;
    }
    else {
        meshDim_ = static_cast<int>( meshDim ); // if specified, use it
    }

    // fill internal variables about mesh
    numElemSub_  = isegn.size( );
    numNodesSub_ = isngn.size( );

    // global number of nodes
    numNodes_ = numNodes;

    std::vector<int> procElemStarts( nProc_ + 1 );
    int numElemSubInt = static_cast<int>( numElemSub_ );
    MPI_Allgather( &numElemSubInt, 1, MPI_INT,
                   &(procElemStarts[0]), 1, MPI_INT, comm_ );
    // now procElemStarts contains counts of local elements at each process
    // change it to starts
    for ( unsigned i = 1; i < static_cast<unsigned>( nProc_ ); i++ )
        procElemStarts[i] = procElemStarts[i-1] + procElemStarts[i];
    // shift it one back
    for ( unsigned i = nProc_; i > 0; i-- )
        procElemStarts[i] = procElemStarts[i-1];
    // start from zero
    procElemStarts[0] = 0;
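
    // Worked example of the transform above (added comment, not in the original
    // source): with 3 processes and local element counts {2, 3, 4}, the gathered
    // array is {2, 3, 4, ?}; the partial sums give {2, 5, 9, ?}, the backward
    // shift gives {2, 2, 5, 9}, and zeroing the first entry yields the starts
    // {0, 2, 5, 9}, so procElemStarts[nProc_] = 9 is the global element count.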

    // find global number of elements - assumes nonoverlapping division
    numElem_ = procElemStarts[ nProc_ ];

    // check sizes of arrays
    OLD_ASSERT( std::accumulate( nnet.begin(), nnet.end(), 0 ) == (int)(inet.size()),
                "array inet size mismatch \n " );
    OLD_ASSERT( std::accumulate( nndf.begin(), nndf.end(), 0 ) == numDofsSub_,
                "array nndf content mismatch: %d %d \n ", std::accumulate( nndf.begin(), nndf.end(), 0 ), numDofsSub_ );
    OLD_ASSERT( static_cast<int>(isvgvn.size()) == numDofsSub_,
                "array isvgvn size mismatch \n " );
    OLD_ASSERT( static_cast<int>(nnet.size()) == numElemSub_,
                "array nnet size mismatch \n " );
    OLD_ASSERT( static_cast<int>(nndf.size()) == numNodesSub_,
                "array nndf size mismatch \n " );
    OLD_ASSERT( static_cast<int>(xyz.size()) == numNodesSub_ * nDim_,
                "array xyz size mismatch: %d %d \n ", static_cast<int>(xyz.size()), numNodesSub_ * nDim_ );

    // Simply copy input data into the private object equivalents
    inet_.resize( inet.size() );
    std::copy( inet.begin(), inet.end(), inet_.begin() );
    nnet_.resize( nnet.size() );
    std::copy( nnet.begin(), nnet.end(), nnet_.begin() );
    nndf_.resize( nndf.size() );
    std::copy( nndf.begin(), nndf.end(), nndf_.begin() );
    isegn_.resize( isegn.size() );
    std::copy( isegn.begin(), isegn.end(), isegn_.begin() );
    isngn_.resize( isngn.size() );
    std::copy( isngn.begin(), isngn.end(), isngn_.begin() );
    isvgvn_.resize( isvgvn.size() );
    std::copy( isvgvn.begin(), isvgvn.end(), isvgvn_.begin() );
    xyz_.resize( xyz.size() );
    std::copy( xyz.begin(), xyz.end(), xyz_.begin() );
    element_data_.resize( element_data.size() );
    std::copy( element_data.begin(), element_data.end(), element_data_.begin() );

    // prepare auxiliary map for renumbering dofs in the global array
    for ( unsigned ind = 0; ind < isvgvn_.size(); ++ind ) {
        global2LocalDofMap_.insert( std::make_pair( static_cast<unsigned>( isvgvn_[ind] ), ind ) );
    }

    // change the state
    meshLoaded_ = true;

    return;
}
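
// A hedged usage sketch (not in the original source): a single triangular
// element with 3 nodes, 1 dof per node, on one process, 0-based numbering.
// The layout of 'xyz' (per-component blocks vs. per-node blocks) is an
// assumption here - it must match what BDDCML expects for an lxyz1 x lxyz2 array.
//
//   std::vector<int>    inet   = { 0, 1, 2 };   // element connectivity
//   std::vector<int>    nnet   = { 3 };         // number of nodes per element
//   std::vector<int>    nndf   = { 1, 1, 1 };   // number of dofs per node
//   std::vector<int>    isegn  = { 0 };         // global element numbers
//   std::vector<int>    isngn  = { 0, 1, 2 };   // global node numbers
//   std::vector<int>    isvgvn = { 0, 1, 2 };   // global dof numbers
//   std::vector<double> xyz    = { 0., 1., 0.,  // x-coordinates of the 3 nodes
//                                  0., 0., 1. };// y-coordinates of the 3 nodes
//   std::vector<double> data   = { 1. };        // per-element data (e.g. permeability)
//   bddc.loadRawMesh( 2, 3, inet, nnet, nndf, isegn, isngn, isvgvn, xyz, data, 2 );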

//------------------------------------------------------------------------------
/** Routine for preparing MATRIX assembly. It reserves memory in the triplet.
 */
void la::BddcmlWrapper::prepareMatAssembly( unsigned numElements, unsigned elMatSize )
{
    unsigned length = numElements * elMatSize;
    coo_.prepareAssembly( length );

    return;
}
//------------------------------------------------------------------------------
/** Insert element stiffness matrix into global system matrix
 */
void la::BddcmlWrapper::insertToMatrix( const SubMat_  & subMat,
                                        const VecUint_ & rowIndices,
                                        const VecUint_ & colIndices )
{
    // copy dense matrix into triplet
    for ( unsigned i = 0; i < rowIndices.size(); i++) {
        for ( unsigned j = 0; j < colIndices.size(); j++) {

            // insert value
            if (subMat(i,j) != 0.0)
                coo_.insert( rowIndices[i], colIndices[j], subMat(i,j) );
        }
    }

    // set flag
    isMatAssembled_ = false;

    return;
}
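
// Sketch of a local assembly step (not in the original source; SubMat_ is the
// class's ublas::matrix<double> typedef and VecUint_ is assumed to be a vector
// of unsigned global dof indices):
//
//   la::BddcmlWrapper::SubMat_ elMat(2,2);
//   elMat(0,0) =  1.; elMat(0,1) = -1.;       // toy 1D Laplacian element matrix
//   elMat(1,0) = -1.; elMat(1,1) =  1.;
//   std::vector<unsigned> dofs = { 3, 7 };    // hypothetical global dof indices
//   bddc.insertToMatrix( elMat, dofs, dofs );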

//------------------------------------------------------------------------------
/** Routine for triplet finalization.
 */
void la::BddcmlWrapper::finishMatAssembly( )
{
    // do nothing if already called
    if ( isMatAssembled_ ) return;

    // finish assembly of triplet
    coo_.finishAssembly( );

    // set flag
    isMatAssembled_ = true;

    return;
}
//------------------------------------------------------------------------------
/** Insert subvector into the system's RHS vector
 */
void la::BddcmlWrapper::insertToRhs( const SubVec_  & subVec,
                                     const VecUint_ & dofIndices )
{
    for ( unsigned i = 0; i < dofIndices.size( ); i ++ ) {

        // map global dof index to subdomain dof
        Global2LocalMap_::iterator pos = global2LocalDofMap_.find( dofIndices[i] );
        /*
        OLD_ASSERT( pos != global2LocalDofMap_.end(),
                    "Cannot remap index %d to local indices in right-hand side,"
                    " perhaps missing call to loadMesh() member function. \n ", dofIndices[i] );
        */
        if (pos == global2LocalDofMap_.end()) {
            DebugOut().every_proc() << "RHS, Missing map to local for idx: " << dofIndices[i];
        }
        const unsigned indLoc = pos -> second;

        rhsVec_[ indLoc ] += subVec[i];
    }
    return;
}
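
// Matching RHS contribution for the element sketch above (not in the original
// source; SubVec_ is the class's ublas::vector<double> typedef):
//
//   la::BddcmlWrapper::SubVec_ elRhs(2);
//   elRhs(0) = 0.5; elRhs(1) = 0.5;       // hypothetical element load vector
//   bddc.insertToRhs( elRhs, dofs );      // 'dofs' as in the matrix sketch above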

//------------------------------------------------------------------------------
/** Apply boundary conditions.
 *  Since BDDCML handles constraints, just fill the proper BC arrays.
 *  Scalar is not used; it is kept for compatibility with constraint functors.
 */
void la::BddcmlWrapper::applyConstraints( ConstraintVec_ & constraints,
                                          const double factor, FMT_UNUSED const double scalar )
{
    // Constraint iterators
    ConstraintVec_::const_iterator cIter = constraints.begin( );
    ConstraintVec_::const_iterator cEnd  = constraints.end( );
    // collect global dof indices and the corresponding values
    for ( ; cIter != cEnd; ++cIter ) {
        unsigned globalDof = cIter -> first;
        double   value     = (cIter -> second );

        // map global dof index to subdomain dof
        Global2LocalMap_::iterator pos = global2LocalDofMap_.find( globalDof );
        if ( pos != global2LocalDofMap_.end() ) {
            // this subdomain contains the dof
            unsigned indLoc = pos -> second;

            ifix_[ indLoc ] = 1;
            fixv_[ indLoc ] = factor * value;
        }
    }

    return;
}
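
// Hedged example (not in the original source; ConstraintVec_ is assumed to be
// a vector of (global dof, value) pairs, matching its use above): fix global
// dof 7 to the value 42.
//
//   la::BddcmlWrapper::ConstraintVec_ bc;
//   bc.push_back( std::make_pair( 7u, 42. ) );
//   bddc.applyConstraints( bc, 1., 1. );   // factor 1., scalar unused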
//------------------------------------------------------------------------------
/** Simply set the dof with index 'index' to zero
 */
void la::BddcmlWrapper::fixDOF( const unsigned index, FMT_UNUSED const double scalar )
{
    ConstraintVec_ thisConstraint;
    thisConstraint.push_back( std::make_pair( index, 0. ) );
    this -> applyConstraints( thisConstraint, 1., scalar );
    thisConstraint.clear();
    return;
}

//------------------------------------------------------------------------------
/** Insert elements on diagonal for weights.
 */
void la::BddcmlWrapper::insertToDiagonalWeights( const int    & index,
                                                 const double & value )
{
    // insert value
    diagWeightsCoo_.insert( index, index, value );

    // set flag
    isDiagAssembled_ = false;

    return;
}

//------------------------------------------------------------------------------
/** Routine for diagonal finalization.
 */
void la::BddcmlWrapper::finishDiagAssembly( )
{
    // do nothing if already called
    if ( isDiagAssembled_ ) return;

    // finish assembly of triplet
    diagWeightsCoo_.finishAssembly( );

    // set flag
    isDiagAssembled_ = true;

    return;
}

//------------------------------------------------------------------------------
/** Solve the system by BDDCML
 */
void la::BddcmlWrapper::solveSystem( double tol, int numLevels, std::vector<int> * numSubAtLevels,
                                     int verboseLevel, int maxIt, int ndecrMax, bool use_adaptive )
{

    // check that mesh was loaded
    OLD_ASSERT( meshLoaded_,
                "Subdomain mesh was not loaded, perhaps missing call to loadMesh() member function. \n " );

    //============= initialize BDDC
    int numSub;  //!< number of subdomains on first level
    // Number of subdomains at first level - if not given, deduce it from the MPI communicator
    MPI_Allreduce ( &numSubLoc_, &numSub, 1, MPI_INT, MPI_SUM, comm_ );

    // Number of subdomains at levels
    std::vector<int> numSubLev( numLevels ); //!< numbers of subdomains on levels ( should be monotonically decreasing, e.g. for 3 levels 1024, 32, 1 )
    if ( numSubAtLevels != NULL ) {
        // numbers are given

        OLD_ASSERT( numLevels == static_cast<int>((*numSubAtLevels).size()),
                    "Inconsistent size of numbers of subdomains at levels: %d %d \n",
                    numLevels, static_cast<int>((*numSubAtLevels).size()) );
        OLD_ASSERT( (*numSubAtLevels)[0] == numSub,
                    "Inconsistent number of subdomains at first level: %d %d \n",
                    (*numSubAtLevels)[0], numSub );

        std::copy( (*numSubAtLevels).begin(), (*numSubAtLevels).end(),
                   numSubLev.begin() );
    }
    else {
        // assume 2-level BDDC
        // generate default vector for two levels
        if ( numLevels == 2 ) {
            numSubLev[0] = numSub;
            numSubLev[1] = 1;
        }
        else if ( numLevels > 2 ) {
            double coarsening = pow( numSub, 1. / double(numLevels - 1) );
            // prescribe number of subdomains on levels so that coarsening is fixed between levels
            numSubLev[0] = numSub;
            for (int i = 1; i <= numLevels-2; i++) {
                int ir = numLevels - i - 1;
                numSubLev[i] = (int) pow( coarsening, ir );
                if ( numSubLev[i]%2 != 0 ) {
                    numSubLev[i] = numSubLev[i] + 1;
                }
            }
            numSubLev[numLevels-1] = 1;
        }
        else {
            OLD_ASSERT( false, "Missing numbers of subdomains at levels. \n" );
        }
    }
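
    // Worked example of the default coarsening above (added comment, not in the
    // original source): numSub = 1024 and numLevels = 3 give
    // coarsening = 1024^(1/2) = 32, hence numSubLev = {1024, 32, 1};
    // an odd intermediate count would be rounded up to the next even number.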
    //std::cout << "numSubLev ";
    //std::copy( numSubLev.begin(), numSubLev.end(), std::ostream_iterator<int>( std::cout, " " ) );
    //std::cout << std::endl;

    int lnumSubLev   = numSubLev.size();
    int commInt      = MPI_Comm_c2f( comm_ ); // translate C MPI communicator handle to Fortran integer
    int numBase      = 0;                     // indexing starts with 0 for C++
    int justDirectSolve = 0;

    bddcml_init_c( &numLevels, &(numSubLev[0]), &lnumSubLev, &numSubLoc_, &commInt, &verboseLevel, &numBase, &justDirectSolve );


    //============= uploading data for subdomain
    // at the moment, number of subdomains equals number of processors
    int iSub = rank_;

    int linet   = inet_.size();
    int lnnet   = nnet_.size();
    int lnndf   = nndf_.size();
    int lisngn  = isngn_.size();
    int lisvgvn = isvgvn_.size();
    int lisegn  = isegn_.size();

    int lxyz1   = numNodesSub_;
    int lxyz2   = nDim_;

    int lifix   = ifix_.size();
    int lfixv   = fixv_.size();

    int lrhsVec = rhsVec_.size();
    int isRhsCompleteInt = 0; // since only local subassembly of the right-hand side was performed,
                              // it does not contain repeated entries

    std::vector<int>    i_sparse;
    std::vector<int>    j_sparse;
    std::vector<double> a_sparse;

    // specify if matrix is symmetric; if yes, use only the upper triangle
    bool onlyUpperTriangle;
    if ( matrixType_ == GENERAL ) {
        onlyUpperTriangle = false;
    }
    else {
        onlyUpperTriangle = true;
    }

    // copy out arrays from triplet
    this -> finishMatAssembly();
    coo_.extractArrays( i_sparse, j_sparse, a_sparse, onlyUpperTriangle );
    coo_.clear();

    // map global row and column indices to local subdomain indices
    int indRow    = -1;
    int indCol    = -1;
    int indRowLoc = -1;
    int indColLoc = -1;
    for ( unsigned inz = 0; inz < a_sparse.size(); inz++ ) {
        if ( i_sparse[inz] != indRow ) {
            indRow = i_sparse[inz];
            Global2LocalMap_::iterator pos = global2LocalDofMap_.find( static_cast<unsigned> ( indRow ) );
            OLD_ASSERT( pos != global2LocalDofMap_.end(),
                        "Cannot remap index %d to local indices. \n ", indRow );
            indRowLoc = static_cast<int> ( pos -> second );
        }
        if ( j_sparse[inz] != indCol ) {
            indCol = j_sparse[inz];
            Global2LocalMap_::iterator pos = global2LocalDofMap_.find( static_cast<unsigned> ( indCol ) );
            OLD_ASSERT( pos != global2LocalDofMap_.end(),
                        "Cannot remap index %d to local indices. \n ", indCol );
            indColLoc = static_cast<int> ( pos -> second );
        }

        i_sparse[ inz ] = indRowLoc;
        j_sparse[ inz ] = indColLoc;

        //std::cout << " row: " << i_sparse[ inz ] << " col: " << j_sparse[ inz ] << std::endl;
    }
    // due to the remap, it is not guaranteed that the triplet is still correctly ordered
    int isMatAssembledInt = 0;

    int la = a_sparse.size();

    // diagonal weights for BDDC loaded by user
    OLD_ASSERT( diagWeightsCoo_.nnz() > 0,
                "It appears that diagonal weights for BDDC are not loaded. This is currently mandatory. \n " );

    std::vector<int>    i_diag_sparse;
    std::vector<int>    j_diag_sparse;
    std::vector<double> diag_sparse;

    // copy out arrays from triplet
    this -> finishDiagAssembly();
    diagWeightsCoo_.extractArrays( i_diag_sparse, j_diag_sparse, diag_sparse );
    diagWeightsCoo_.clear();

    // map global row indices to local subdomain indices
    indRow    = -1;
    indRowLoc = -1;
    for ( unsigned inz = 0; inz < diag_sparse.size(); inz++ ) {
        indRow = i_diag_sparse[inz];
        Global2LocalMap_::iterator pos = global2LocalDofMap_.find( static_cast<unsigned> ( indRow ) );
        OLD_ASSERT( pos != global2LocalDofMap_.end(),
                    "Cannot remap index %d to local indices. \n ", indRow );
        indRowLoc = static_cast<int> ( pos -> second );

        i_diag_sparse[ inz ] = indRowLoc;
    }

    int lsub_diagonal = numDofsSub_;
    OLD_ASSERT( lsub_diagonal == static_cast<int>(diag_sparse.size()),
                "Array length mismatch: %d %d . \n ", lsub_diagonal, static_cast<int>(diag_sparse.size()) );

    std::vector<double> sub_diagonal( lsub_diagonal, -1. );
    // permute the vector according to subdomain indexing
    for ( int i = 0; i < lsub_diagonal; i++ ){
        indRowLoc    = i_diag_sparse[i];
        double value = diag_sparse[i];
        sub_diagonal[indRowLoc] = value;
    }
    OLD_ASSERT( std::find(sub_diagonal.begin(), sub_diagonal.end(), -1. ) == sub_diagonal.end(),
                "There are missing entries in the diagonal of weights. \n " );

    // remove const attribute
    int numDofsInt    = static_cast<int> ( numDofs_ );
    int numDofsSubInt = static_cast<int> ( numDofsSub_ );

    int matrixTypeInt;
    if      ( matrixType_ == GENERAL )                  matrixTypeInt = 0 ;
    else if ( matrixType_ == SPD )                      matrixTypeInt = 1 ;
    else if ( matrixType_ == SYMMETRICGENERAL )         matrixTypeInt = 2 ;
    else if ( matrixType_ == SPD_VIA_SYMMETRICGENERAL ) matrixTypeInt = 2 ;
    else { OLD_ASSERT( false, "Illegal matrixType \n " ); }

    // download local solution
    int lsol = sol_.size();

    // user constraints are not used
    std::vector<double> userConstraints(1,0.);
    int lUserConstraints1 = 0;
    int lUserConstraints2 = 0;

    int lelement_data1 = 1;
    int lelement_data2 = element_data_.size();

    // if > 0, BDDCML will check if the mesh is composed of several components and, if
    // yes, BDDCML will handle them as independent subdomains for the selection of
    // coarse degrees of freedom
    int find_components_int = 1;

    // upload local data to the solver
    bddcml_upload_subdomain_data_c( &numElem_, &numNodes_, &numDofsInt, &nDim_, &meshDim_,
                                    &iSub, &numElemSub_, &numNodesSub_, &numDofsSubInt,
                                    &(inet_[0]),&linet, &(nnet_[0]),&lnnet, &(nndf_[0]),&lnndf,
                                    &(isngn_[0]),&lisngn, &(isvgvn_[0]),&lisvgvn, &(isegn_[0]),&lisegn,
                                    &(xyz_[0]),&lxyz1,&lxyz2,
                                    &(ifix_[0]),&lifix, &(fixv_[0]),&lfixv,
                                    &(rhsVec_[0]),&lrhsVec, &isRhsCompleteInt,
                                    &(sol_[0]), &lsol,
                                    &matrixTypeInt, &(i_sparse[0]), &(j_sparse[0]), &(a_sparse[0]), &la, &isMatAssembledInt,
                                    &(userConstraints[0]),&lUserConstraints1,&lUserConstraints2,
                                    &(element_data_[0]),&lelement_data1,&lelement_data2,
                                    &(sub_diagonal[0]), &lsub_diagonal,
                                    &find_components_int);
    i_sparse.clear();
    j_sparse.clear();
    a_sparse.clear();


    //============= setup of BDDC preconditioner

    int use_defaults_int      = 0;
    int parallel_division_int = 0;
    int use_arithmetic_int    = 1;
    int use_adaptive_int;
    if ( use_adaptive ) {
        use_adaptive_int = 1;
    }
    else {
        use_adaptive_int = 0;
    }
    int use_user_constraints_int = 0;
    int weights_type             = 3;

    // if > 0, BDDCML will use constraints on connectivity at corners
    int use_corner_constraints_int = 1;

    // setup multilevel BDDC preconditioner
    START_TIMER("BDDC preconditioner setup");
    bddcml_setup_preconditioner_c( & matrixTypeInt, & use_defaults_int,
                                   & parallel_division_int, & use_arithmetic_int,
                                   & use_corner_constraints_int,
                                   & use_adaptive_int, & use_user_constraints_int,
                                   & weights_type );
    END_TIMER("BDDC preconditioner setup");

    //============= compute the norm on arrays with overlapping entries - apply weights
    double normSquaredLoc = 0.;
    if (nProc_ > 1 ) {
        bddcml_dotprod_subdomain_c( &iSub, &(rhsVec_[0]), &lrhsVec, &(rhsVec_[0]), &lrhsVec, &normSquaredLoc );
    } else {
        for ( double elem: rhsVec_ ) normSquaredLoc += elem*elem;
    }

    // sum over processes
    double normSquared = 0.;
    MPI_Allreduce ( &normSquaredLoc, &normSquared, 1, MPI_DOUBLE, MPI_SUM, comm_ );
    normRhs_ = std::sqrt( normSquared );

    //============= solution of the problem
    int method;
    if      ( matrixType_ == SPD )
        method = 0; // PCG - set for SPD matrix
    else if ( matrixType_ == SPD_VIA_SYMMETRICGENERAL )
        method = 0; // PCG - set for benign matrix
    else
        method = 1; // BICGSTAB - set for any other matrix

    int recycling_int = 0;
    int max_number_of_stored_vectors = 100;

    // call iterative solver
    START_TIMER("BDDC solve");
    bddcml_solve_c( & commInt, & method, & tol, & maxIt, & ndecrMax,
                    & recycling_int, & max_number_of_stored_vectors,
                    & numIter_, & convergedReason_, & condNumber_ );
    END_TIMER("BDDC solve");

    //============= downloading solution from BDDCML solver

    // download local solution
    lsol = sol_.size();

    bddcml_download_local_solution_c( &iSub, &(sol_[0]), &lsol );

    //============= compute the norm on arrays with overlapping entries - apply weights
    normSquaredLoc = 0.;
    if (nProc_ > 1 ) {
        bddcml_dotprod_subdomain_c( &iSub, &(sol_[0]), &lsol, &(sol_[0]), &lsol, &normSquaredLoc );
    } else {
        for ( double elem: sol_ ) normSquaredLoc += elem*elem;
    }

    // sum over processes
    normSquared = 0.;
    MPI_Allreduce ( &normSquaredLoc, &normSquared, 1, MPI_DOUBLE, MPI_SUM, comm_ );
    normSol_ = std::sqrt( normSquared );

    //============= erase memory used by BDDCML solver
    bddcml_finalize_c();

    return;
}
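
// Typical call sequence, as a hedged sketch (not part of the original source;
// the local variable names are illustrative only):
//
//   la::BddcmlWrapper bddc( nDofs, nDofsLocal, la::SPD, MPI_COMM_WORLD );
//   bddc.loadRawMesh( 3, nNodes, inet, nnet, nndf, isegn, isngn, isvgvn, xyz, data );
//   bddc.prepareMatAssembly( nElemLocal, elMatSize );
//   // ... insertToMatrix() / insertToRhs() for every local element ...
//   bddc.finishMatAssembly();
//   bddc.applyConstraints( bc, 1., 1. );
//   for ( int i = 0; i < nDofsLocal; i++ )
//       bddc.insertToDiagonalWeights( globalDof[i], diagValue[i] );
//   bddc.finishDiagAssembly();
//   bddc.solveSystem( 1.e-7, 2 );   // two-level BDDC, tolerance 1e-7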