Flow123d  3.9.0-960b954f9
bddcml_wrapper.cc
//! @author Jakub Sistek
//! @date 7/2012

#include "la/bddcml_wrapper.hh"

#ifdef FLOW123D_HAVE_BDDCML

//------------------------------------------------------------------------------
/** Constructor for the BDDCML solver object
 */
la::BddcmlWrapper::BddcmlWrapper( const unsigned numDofs,
                                  const unsigned numDofsSub,
                                  const MatrixType matrixType,
                                  const MPI_Comm comm,
                                  int numSubLoc )
  : numDofs_( static_cast<int>( numDofs ) ),
    numDofsSub_( static_cast<int>( numDofsSub ) ),
    matrixType_( matrixType ),
    comm_( comm ),
    numSubLoc_( numSubLoc )
{

    // Orient in the communicator
    MPI_Comm_size( comm_, &nProc_ );
    MPI_Comm_rank( comm_, &rank_ );

    // prepare RHS vector
    rhsVec_.resize( numDofsSub_, 0. );

    // prepare arrays to mark boundary conditions
    ifix_.resize( numDofsSub_, 0 );
    fixv_.resize( numDofsSub_, 0. );

    // initialize subdomain solution vector
    sol_.resize( numDofsSub_, 0. );
    normSol_         = 0.;
    numIter_         = 0;
    convergedReason_ = 0;
    condNumber_      = 0.;

    // initialize state of mesh and matrix
    meshLoaded_     = false;
    isMatAssembled_ = false;
}
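//------------------------------------------------------------------------------
/** Usage sketch (editor's illustration, not part of the original source):
 *  constructing the wrapper for a symmetric positive definite system. The dof
 *  counts are hypothetical; MatrixType is the enum declared in
 *  bddcml_wrapper.hh, assumed here to be accessible as la::BddcmlWrapper::SPD.
 *  \code
 *  const unsigned numDofsGlobal = 1000;  // dofs of the whole problem
 *  const unsigned numDofsLocal  =  260;  // dofs of this subdomain (interface dofs counted by every owner)
 *  la::BddcmlWrapper bddc( numDofsGlobal, numDofsLocal,
 *                          la::BddcmlWrapper::SPD, MPI_COMM_WORLD, 1 );
 *  \endcode
 */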
//------------------------------------------------------------------------------
/** Destructor frees the BDDCML solver structures
 */
la::BddcmlWrapper::~BddcmlWrapper()
{
    // clear RHS
    rhsVec_.clear();

    // clear BC
    ifix_.clear();
    fixv_.clear();

    // clear solution
    sol_.clear();

    // clear mesh data
    inet_.clear();
    nnet_.clear();
    nndf_.clear();
    xyz_.clear();
    isngn_.clear();
    isvgvn_.clear();
    isegn_.clear();

    // clear global2local map
    global2LocalDofMap_.clear();

    // clear triplet
    coo_.clear();

}

//------------------------------------------------------------------------------
/** Routine for loading raw mesh data into the solver - intended for nonstandard
 *  meshes that are not Mesh or Grid objects; the user can provide their own raw
 *  description to exploit the flexibility of the mesh format underlying BDDCML.
 */
void la::BddcmlWrapper::loadRawMesh( const int nDim, const int numNodes,
                                     const std::vector<int> & inet,
                                     const std::vector<int> & nnet,
                                     const std::vector<int> & nndf,
                                     const std::vector<int> & isegn,
                                     const std::vector<int> & isngn,
                                     const std::vector<int> & isvgvn,
                                     const std::vector<double> & xyz,
                                     const std::vector<double> & element_data,
                                     const int meshDim )
{
    // space dimension
    nDim_ = nDim;

    // set topological dimension of mesh
    if ( meshDim == 0 ) { // if not specified, use space dimension
        meshDim_ = nDim_;
    }
    else {
        meshDim_ = static_cast<int>( meshDim ); // if specified, use it
    }

    // fill internal variables about mesh
    numElemSub_  = isegn.size( );
    numNodesSub_ = isngn.size( );

    // global number of nodes
    numNodes_ = numNodes;

    std::vector<int> procElemStarts( nProc_ + 1 );
    int numElemSubInt = static_cast<int>( numElemSub_ );
    MPI_Allgather( &numElemSubInt,       1, MPI_INT,
                   &(procElemStarts[0]), 1, MPI_INT, comm_ );
    // now procElemStarts contains the count of local elements at each process;
    // change the counts to starts
    for ( unsigned i = 1; i < static_cast<unsigned>( nProc_ ); i++ )
        procElemStarts[i] = procElemStarts[i-1] + procElemStarts[i];
    // shift it one back
    for ( unsigned i = nProc_; i > 0; i-- )
        procElemStarts[i] = procElemStarts[i-1];
    // start from zero
    procElemStarts[0] = 0;

    // find global number of elements - assumes nonoverlapping division
    numElem_ = procElemStarts[ nProc_ ];

    // check sizes of arrays
    ASSERT_PERMANENT_EQ( std::accumulate( nnet.begin(), nnet.end(), 0 ), (int)(inet.size()) )
        .error("array inet size mismatch \n");
    ASSERT_PERMANENT_EQ( std::accumulate( nndf.begin(), nndf.end(), 0 ), numDofsSub_ )
        .error("array nndf content mismatch \n");
    ASSERT_PERMANENT_EQ( static_cast<int>(isvgvn.size()), numDofsSub_ )
        .error("array isvgvn size mismatch \n");
    ASSERT_PERMANENT_EQ( static_cast<int>(nnet.size()), numElemSub_ )
        .error("array nnet size mismatch \n");
    ASSERT_PERMANENT_EQ( static_cast<int>(nndf.size()), numNodesSub_ )
        .error("array nndf size mismatch \n");
    ASSERT_PERMANENT_EQ( static_cast<int>(xyz.size()), (numNodesSub_ * nDim_) )
        .error("array xyz size mismatch \n");

    // Simply copy input data into the private object equivalents
    inet_.resize( inet.size() );
    std::copy( inet.begin(), inet.end(), inet_.begin() );
    nnet_.resize( nnet.size() );
    std::copy( nnet.begin(), nnet.end(), nnet_.begin() );
    nndf_.resize( nndf.size() );
    std::copy( nndf.begin(), nndf.end(), nndf_.begin() );
    isegn_.resize( isegn.size() );
    std::copy( isegn.begin(), isegn.end(), isegn_.begin() );
    isngn_.resize( isngn.size() );
    std::copy( isngn.begin(), isngn.end(), isngn_.begin() );
    isvgvn_.resize( isvgvn.size() );
    std::copy( isvgvn.begin(), isvgvn.end(), isvgvn_.begin() );
    xyz_.resize( xyz.size() );
    std::copy( xyz.begin(), xyz.end(), xyz_.begin() );
    element_data_.resize( element_data.size() );
    std::copy( element_data.begin(), element_data.end(), element_data_.begin() );

    // prepare auxiliary map for renumbering dofs in the global array
    for ( unsigned ind = 0; ind < isvgvn_.size(); ++ind ) {
        global2LocalDofMap_.insert( std::make_pair( static_cast<unsigned>( isvgvn_[ind] ), ind ) );
    }

    // change the state
    meshLoaded_ = true;

    return;
}
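//------------------------------------------------------------------------------
/** Input-format sketch (editor's illustration, not part of the original source):
 *  raw arrays for a minimal subdomain holding one 2D triangle with three nodes
 *  and one dof per node. The coordinate layout (all x-coordinates first, then
 *  all y) mirrors the Fortran column-major xyz(lxyz1,lxyz2) array of BDDCML and
 *  is an assumption to verify against bddcml_wrapper.hh.
 *  \code
 *  std::vector<int>    inet   = { 0, 1, 2 };    // element connectivity (local node indices)
 *  std::vector<int>    nnet   = { 3 };          // number of nodes of each element
 *  std::vector<int>    nndf   = { 1, 1, 1 };    // number of dofs at each node
 *  std::vector<int>    isegn  = { 0 };          // global indices of subdomain elements
 *  std::vector<int>    isngn  = { 0, 1, 2 };    // global indices of subdomain nodes
 *  std::vector<int>    isvgvn = { 0, 1, 2 };    // global indices of subdomain dofs
 *  std::vector<double> xyz    = { 0., 1., 0.,   // x-coordinates of the three nodes
 *                                 0., 0., 1. }; // y-coordinates
 *  std::vector<double> elData = { 1. };         // one data value per element
 *  bddc.loadRawMesh( 2, 3, inet, nnet, nndf, isegn, isngn, isvgvn, xyz, elData );
 *  \endcode
 */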

//------------------------------------------------------------------------------
/** Routine for preparing MATRIX assembly. It reserves memory in triplet.
 */
void la::BddcmlWrapper::prepareMatAssembly( unsigned numElements, unsigned elMatSize )
{
    unsigned length = numElements * elMatSize;
    coo_.prepareAssembly( length );

    return;
}
//------------------------------------------------------------------------------
/** Insert element stiffness matrix into global system matrix
 */
void la::BddcmlWrapper::insertToMatrix( const SubMat_ & subMat,
                                        const VecUint_ & rowIndices,
                                        const VecUint_ & colIndices )
{
    // copy dense matrix into triplet
    for ( unsigned i = 0; i < rowIndices.size(); i++) {
        for ( unsigned j = 0; j < colIndices.size(); j++) {

            // insert value
            if (subMat(i,j) != 0.0)
                coo_.insert( rowIndices[i], colIndices[j], subMat(i,j) );
        }
    }

    // set flag
    isMatAssembled_ = false;

    return;
}
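//------------------------------------------------------------------------------
/** Assembly sketch (editor's illustration, not part of the original source):
 *  reserving space and inserting one 2x2 element matrix under global dof
 *  indices. SubMat_ and VecUint_ are the dense-matrix and index-vector
 *  typedefs from bddcml_wrapper.hh; the values are hypothetical.
 *  \code
 *  la::BddcmlWrapper::SubMat_ elMat( 2, 2 );
 *  elMat(0,0) =  2.; elMat(0,1) = -1.;
 *  elMat(1,0) = -1.; elMat(1,1) =  2.;
 *  la::BddcmlWrapper::VecUint_ dofs = { 0, 1 };  // global dof indices of the element
 *
 *  bddc.prepareMatAssembly( 1, 2*2 );            // one element with 2x2 entries
 *  bddc.insertToMatrix( elMat, dofs, dofs );
 *  bddc.finishMatAssembly();
 *  \endcode
 */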

//------------------------------------------------------------------------------
/** Routine for triplet finalization.
 */
void la::BddcmlWrapper::finishMatAssembly( )
{
    // do nothing if already called
    if ( isMatAssembled_ ) return;

    // finish assembly of triplet
    coo_.finishAssembly( );

    // set flag
    isMatAssembled_ = true;

    return;
}
//------------------------------------------------------------------------------
/** Insert subvector to the system's RHS vector
 */
void la::BddcmlWrapper::insertToRhs( const SubVec_ & subVec,
                                     const VecUint_ & dofIndices )
{
    for ( unsigned i = 0; i < dofIndices.size( ); i++ ) {

        // map global dof index to subdomain dof
        Global2LocalMap_::iterator pos = global2LocalDofMap_.find( dofIndices[i] );
        /*
        ASSERT_PERMANENT( pos != global2LocalDofMap_.end() )(dofIndices[i]).error(
            "Cannot remap index to local indices in right-hand side, perhaps missing call to loadMesh() member function. \n");
        */
        if (pos == global2LocalDofMap_.end()) {
            DebugOut().every_proc() << "RHS, Missing map to local for idx: " << dofIndices[i];
        }
        const unsigned indLoc = pos -> second;

        rhsVec_[ indLoc ] += subVec[i];
    }
    return;
}
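//------------------------------------------------------------------------------
/** RHS sketch (editor's illustration, not part of the original source):
 *  adding the element load vector under the same global dof indices as the
 *  element matrix above. SubVec_ is the dense-vector typedef from
 *  bddcml_wrapper.hh; the values are hypothetical.
 *  \code
 *  la::BddcmlWrapper::SubVec_ elRhs( 2 );
 *  elRhs(0) = 1.;
 *  elRhs(1) = 0.;
 *  bddc.insertToRhs( elRhs, dofs );  // contributions at shared dofs are summed
 *  \endcode
 */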

//------------------------------------------------------------------------------
/** Apply boundary conditions
 *  Since BDDCML handles constraints, just fill proper BC arrays.
 *  Scalar is not used; it is kept for compatibility with constraint functors.
 */
void la::BddcmlWrapper::applyConstraints( ConstraintVec_ & constraints,
                                          const double factor, FMT_UNUSED const double scalar )
{
    // Constraint iterators
    ConstraintVec_::const_iterator cIter = constraints.begin( );
    ConstraintVec_::const_iterator cEnd  = constraints.end( );
    // collect global dof indices and the corresponding values
    for ( ; cIter != cEnd; ++cIter ) {
        unsigned globalDof = cIter -> first;
        double   value     = cIter -> second;

        // map global dof index to subdomain dof
        Global2LocalMap_::iterator pos = global2LocalDofMap_.find( globalDof );
        if ( pos != global2LocalDofMap_.end() ) {
            // this subdomain contains the dof
            unsigned indLoc = pos -> second;

            ifix_[ indLoc ] = 1;
            fixv_[ indLoc ] = factor * value;
        }
    }

    return;
}
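//------------------------------------------------------------------------------
/** Constraint sketch (editor's illustration, not part of the original source):
 *  fixing two global dofs to Dirichlet values, assuming ConstraintVec_ is the
 *  vector of (global dof, value) pairs declared in bddcml_wrapper.hh. Dofs not
 *  present in this subdomain's map are silently skipped.
 *  \code
 *  la::BddcmlWrapper::ConstraintVec_ bc = { { 0u, 0.0 }, { 7u, 1.5 } };
 *  bddc.applyConstraints( bc, 1.0, 1.0 );  // factor 1.0; scalar is unused
 *  \endcode
 */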
//------------------------------------------------------------------------------
/** Simply set the dof with index 'index' to zero
 */
void la::BddcmlWrapper::fixDOF( const unsigned index, FMT_UNUSED const double scalar )
{
    ConstraintVec_ thisConstraint;
    thisConstraint.push_back( std::make_pair( index, 0. ) );
    this -> applyConstraints( thisConstraint, 1., scalar );
    thisConstraint.clear();
    return;
}

//------------------------------------------------------------------------------
/** Insert elements on diagonal for weights.
 */
void la::BddcmlWrapper::insertToDiagonalWeights( const int & index,
                                                 const double & value )
{
    // insert value
    diagWeightsCoo_.insert( index, index, value );

    // set flag
    isDiagAssembled_ = false;

    return;
}
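//------------------------------------------------------------------------------
/** Weights sketch (editor's illustration, not part of the original source):
 *  a common choice is to load the diagonal of the assembled subdomain matrix
 *  as the interface weights; indices are global dof indices, remapped to
 *  subdomain numbering inside solveSystem(). Names reuse the hypothetical
 *  objects from the sketches above.
 *  \code
 *  for ( unsigned i = 0; i < dofs.size(); ++i )
 *      bddc.insertToDiagonalWeights( static_cast<int>( dofs[i] ), elMat(i,i) );
 *  \endcode
 */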

//------------------------------------------------------------------------------
/** Routine for diagonal finalization.
 */
void la::BddcmlWrapper::finishDiagAssembly( )
{
    // do nothing if already called
    if ( isDiagAssembled_ ) return;

    // finish assembly of triplet
    diagWeightsCoo_.finishAssembly( );

    // set flag
    isDiagAssembled_ = true;

    return;
}

//------------------------------------------------------------------------------
/** Solve the system by BDDCML
 */
void la::BddcmlWrapper::solveSystem( double tol, int numLevels, std::vector<int> * numSubAtLevels,
                                     int verboseLevel, int maxIt, int ndecrMax, bool use_adaptive )
{

    // check that mesh was loaded
    ASSERT_PERMANENT(meshLoaded_).error("Subdomain mesh was not loaded, perhaps missing call to loadMesh() member function. \n ");

    //============= initialize BDDC
    int numSub;  //!< number of subdomains on first level
    // Number of subdomains at first level - if not given, deduce it from the MPI communicator
    MPI_Allreduce ( &numSubLoc_, &numSub, 1, MPI_INT, MPI_SUM, comm_ );

    // Number of subdomains at levels
    std::vector<int> numSubLev( numLevels );  //!< numbers of subdomains on levels ( should be monotonically decreasing, e.g. for 3 levels 1024, 32, 1 )
    if ( numSubAtLevels != NULL ) {
        // numbers are given

        ASSERT_PERMANENT_EQ( numLevels, static_cast<int>((*numSubAtLevels).size()) )
            .error("Inconsistent size of numbers of subdomains at given levels\n");
        ASSERT_PERMANENT_EQ( (*numSubAtLevels)[0], numSub )
            .error("Inconsistent number of subdomains at first level\n");

        std::copy( (*numSubAtLevels).begin(), (*numSubAtLevels).end(),
                   numSubLev.begin() );
    }
    else {
        // assume 2-level BDDC
        // generate default vector for two levels
        if ( numLevels == 2 ) {
            numSubLev[0] = numSub;
            numSubLev[1] = 1;
        }
        else if ( numLevels > 2 ) {
            double coarsening = pow( numSub, 1. / double(numLevels - 1) );
            // prescribe number of subdomains on levels so that coarsening is fixed between levels
            numSubLev[0] = numSub;
            for (int i = 1; i <= numLevels-2; i++) {
                int ir = numLevels - i - 1;
                numSubLev[i] = (int) pow( coarsening, ir );
                if ( numSubLev[i]%2 != 0 ) {
                    numSubLev[i] = numSubLev[i] + 1;
                }
            }
            numSubLev[numLevels-1] = 1;
        }
        else {
            ASSERT_PERMANENT( false ).error("Missing numbers of subdomains at levels. \n" );
        }
    }
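    // worked example of the default branch above (editor's note, not from the
    // original source): for numSub = 1024 and numLevels = 3, coarsening =
    // 1024^(1/2) = 32, so numSubLev becomes { 1024, 32, 1 }; an odd
    // intermediate count would be rounded up to the next even number.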
    //std::cout << "numSubLev ";
    //std::copy( numSubLev.begin(), numSubLev.end(), std::ostream_iterator<int>( std::cout, " " ) );
    //std::cout << std::endl;

    int lnumSubLev      = numSubLev.size();
    int commInt         = MPI_Comm_c2f( comm_ ); // translate C MPI communicator handle to Fortran integer
    int numBase         = 0;                     // indexing starts with 0 for C++
    int justDirectSolve = 0;

    bddcml_init_c( &numLevels, &(numSubLev[0]), &lnumSubLev, &numSubLoc_, &commInt, &verboseLevel, &numBase, &justDirectSolve );


    //============= uploading data for subdomain
    // at the moment, number of subdomains equals number of processors
    int iSub = rank_;

    int linet   = inet_.size();
    int lnnet   = nnet_.size();
    int lnndf   = nndf_.size();
    int lisngn  = isngn_.size();
    int lisvgvn = isvgvn_.size();
    int lisegn  = isegn_.size();

    int lxyz1   = numNodesSub_;
    int lxyz2   = nDim_;

    int lifix   = ifix_.size();
    int lfixv   = fixv_.size();

    int lrhsVec = rhsVec_.size();
    int isRhsCompleteInt = 0;  // only local subassembly of the right-hand side was performed,
                               // so entries at shared dofs are not yet summed across subdomains

    std::vector<int>    i_sparse;
    std::vector<int>    j_sparse;
    std::vector<double> a_sparse;

    // specify whether the matrix is symmetric; if yes, use only the upper triangle
    bool onlyUpperTriangle;
    if ( matrixType_ == GENERAL ) {
        onlyUpperTriangle = false;
    }
    else {
        onlyUpperTriangle = true;
    }

    // copy out arrays from triplet
    this -> finishMatAssembly();
    coo_.extractArrays( i_sparse, j_sparse, a_sparse, onlyUpperTriangle );
    coo_.clear();

    // map global row and column indices to local subdomain indices
    int indRow    = -1;
    int indCol    = -1;
    int indRowLoc = -1;
    int indColLoc = -1;
    for ( unsigned inz = 0; inz < a_sparse.size(); inz++ ) {
        if ( i_sparse[inz] != indRow ) {
            indRow = i_sparse[inz];
            Global2LocalMap_::iterator pos = global2LocalDofMap_.find( static_cast<unsigned> ( indRow ) );
            ASSERT_PERMANENT( pos != global2LocalDofMap_.end() )(indRow)
                .error("Cannot remap 'indRow' to local indices. \n");
            indRowLoc = static_cast<int> ( pos -> second );
        }
        if ( j_sparse[inz] != indCol ) {
            indCol = j_sparse[inz];
            Global2LocalMap_::iterator pos = global2LocalDofMap_.find( static_cast<unsigned> ( indCol ) );
            ASSERT_PERMANENT( pos != global2LocalDofMap_.end() )(indCol)
                .error("Cannot remap 'indCol' to local indices. \n");
            indColLoc = static_cast<int> ( pos -> second );
        }

        i_sparse[ inz ] = indRowLoc;
        j_sparse[ inz ] = indColLoc;

        //std::cout << " row: " << i_sparse[ inz ] << " col: " << j_sparse[ inz ] << std::endl;
    }
    // due to the remap, it is not guaranteed that the triplet is still correctly ordered
    int isMatAssembledInt = 0;

    int la = a_sparse.size();

    // diagonal weights for BDDC loaded by user
    ASSERT_PERMANENT_GT( diagWeightsCoo_.nnz(), 0 )
        .error("It appears that diagonal weights for BDDC are not loaded. This is currently mandatory. \n");

    std::vector<int>    i_diag_sparse;
    std::vector<int>    j_diag_sparse;
    std::vector<double> diag_sparse;

    // copy out arrays from triplet
    this -> finishDiagAssembly();
    diagWeightsCoo_.extractArrays( i_diag_sparse, j_diag_sparse, diag_sparse );
    diagWeightsCoo_.clear();

    // map global row indices to local subdomain indices
    indRow    = -1;
    indRowLoc = -1;
    for ( unsigned inz = 0; inz < diag_sparse.size(); inz++ ) {
        indRow = i_diag_sparse[inz];
        Global2LocalMap_::iterator pos = global2LocalDofMap_.find( static_cast<unsigned> ( indRow ) );
        ASSERT_PERMANENT( pos != global2LocalDofMap_.end() )(indRow)
            .error("Cannot remap 'indRow' to local indices. \n");
        indRowLoc = static_cast<int> ( pos -> second );

        i_diag_sparse[ inz ] = indRowLoc;
    }

    int lsub_diagonal = numDofsSub_;
    ASSERT_PERMANENT_EQ( lsub_diagonal, static_cast<int>(diag_sparse.size()) )
        .error("Array length mismatch.\n");

    std::vector<double> sub_diagonal( lsub_diagonal, -1. );
    // permute the vector according to subdomain indexing
    for ( int i = 0; i < lsub_diagonal; i++ ) {
        indRowLoc    = i_diag_sparse[i];
        double value = diag_sparse[i];
        sub_diagonal[indRowLoc] = value;
    }
    ASSERT_PERMANENT( std::find(sub_diagonal.begin(), sub_diagonal.end(), -1. ) == sub_diagonal.end() )
        .error("There are missing entries in the diagonal of weights. \n");

    // remove const attribute
    int numDofsInt    = static_cast<int> ( numDofs_ );
    int numDofsSubInt = static_cast<int> ( numDofsSub_ );

    int matrixTypeInt;
    if      ( matrixType_ == GENERAL )                  matrixTypeInt = 0;
    else if ( matrixType_ == SPD )                      matrixTypeInt = 1;
    else if ( matrixType_ == SYMMETRICGENERAL )         matrixTypeInt = 2;
    else if ( matrixType_ == SPD_VIA_SYMMETRICGENERAL ) matrixTypeInt = 2;
    else { ASSERT_PERMANENT( false ).error("Illegal matrixType \n" ); }

    // download local solution
    int lsol = sol_.size();

    // user constraints are not used
    std::vector<double> userConstraints(1,0.);
    int lUserConstraints1 = 0;
    int lUserConstraints2 = 0;

    int lelement_data1 = 1;
    int lelement_data2 = element_data_.size();

    // if > 0, BDDCML checks whether the mesh is composed of several components; if it is,
    // BDDCML handles them as independent subdomains for the selection of coarse
    // degrees of freedom
    int find_components_int = 1;
    int use_dual_mesh_graph_int = 0;
    int neighbouring = 0;  // number of common nodes two elements must share to form an edge
                           // of the dual graph; only used if use_dual_mesh_graph_int == 1

    // upload local data to the solver
    bddcml_upload_subdomain_data_c( &numElem_, &numNodes_, &numDofsInt, &nDim_, &meshDim_,
                                    &iSub, &numElemSub_, &numNodesSub_, &numDofsSubInt,
                                    &(inet_[0]),&linet, &(nnet_[0]),&lnnet, &(nndf_[0]),&lnndf,
                                    &(isngn_[0]),&lisngn, &(isvgvn_[0]),&lisvgvn, &(isegn_[0]),&lisegn,
                                    &(xyz_[0]),&lxyz1,&lxyz2,
                                    &(ifix_[0]),&lifix, &(fixv_[0]),&lfixv,
                                    &(rhsVec_[0]),&lrhsVec, &isRhsCompleteInt,
                                    &(sol_[0]), &lsol,
                                    &matrixTypeInt, &(i_sparse[0]), &(j_sparse[0]), &(a_sparse[0]), &la, &isMatAssembledInt,
                                    &(userConstraints[0]),&lUserConstraints1,&lUserConstraints2,
                                    &(element_data_[0]),&lelement_data1,&lelement_data2,
                                    &(sub_diagonal[0]), &lsub_diagonal,
                                    &find_components_int, &use_dual_mesh_graph_int, &neighbouring );
    i_sparse.clear();
    j_sparse.clear();
    a_sparse.clear();


    //============= setup of BDDC preconditioner

    int use_defaults_int      = 0;
    int parallel_division_int = 0;
    int use_arithmetic_int    = 1;
    int use_adaptive_int;
    if ( use_adaptive ) {
        use_adaptive_int = 1;
    }
    else {
        use_adaptive_int = 0;
    }
    int use_user_constraints_int = 0;
    int weights_type             = 3;

    // if > 0, BDDCML will use constraints on connectivity at corners
    int use_corner_constraints_int = 1;

    // setup multilevel BDDC preconditioner
    START_TIMER("BDDC preconditioner setup");
    bddcml_setup_preconditioner_c( & matrixTypeInt, & use_defaults_int,
                                   & parallel_division_int, & use_arithmetic_int,
                                   & use_corner_constraints_int,
                                   & use_adaptive_int, & use_user_constraints_int,
                                   & weights_type );
    END_TIMER("BDDC preconditioner setup");

    //============= compute the norm on arrays with overlapping entries - apply weights
    double normSquaredLoc = 0.;
    if (nProc_ > 1 ) {
        bddcml_dotprod_subdomain_c( &iSub, &(rhsVec_[0]), &lrhsVec, &(rhsVec_[0]), &lrhsVec, &normSquaredLoc );
    } else {
        for ( double elem: rhsVec_ ) normSquaredLoc += elem*elem;
    }

    // sum over processes
    double normSquared = 0.;
    MPI_Allreduce ( &normSquaredLoc, &normSquared, 1, MPI_DOUBLE, MPI_SUM, comm_ );
    normRhs_ = std::sqrt( normSquared );

    //============= solution of the problem
    int method;
    if ( matrixType_ == SPD )
        method = 0;  // PCG - set for SPD matrix
    else if ( matrixType_ == SPD_VIA_SYMMETRICGENERAL )
        method = 0;  // PCG - set for benign matrix
    else
        method = 1;  // BICGSTAB - set for any other matrix

    int recycling_int = 0;
    int max_number_of_stored_vectors = 100;

    // call iterative solver
    START_TIMER("BDDC solve");
    bddcml_solve_c( & commInt, & method, & tol, & maxIt, & ndecrMax,
                    & recycling_int, & max_number_of_stored_vectors,
                    & numIter_, & convergedReason_, & condNumber_ );
    END_TIMER("BDDC solve");


    //============= downloading solution from BDDCML solver

    // download local solution
    lsol = sol_.size();

    bddcml_download_local_solution_c( &iSub, &(sol_[0]), &lsol );

    //============= compute the norm on arrays with overlapping entries - apply weights
    normSquaredLoc = 0.;
    if (nProc_ > 1 ) {
        bddcml_dotprod_subdomain_c( &iSub, &(sol_[0]), &lsol, &(sol_[0]), &lsol, &normSquaredLoc );
    } else {
        for ( double elem: sol_ ) normSquaredLoc += elem*elem;
    }

    // sum over processes
    normSquared = 0.;
    MPI_Allreduce ( &normSquaredLoc, &normSquared, 1, MPI_DOUBLE, MPI_SUM, comm_ );
    normSol_ = std::sqrt( normSquared );

    //============= erase memory used by BDDCML solver
    bddcml_finalize_c();

    return;
}
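//------------------------------------------------------------------------------
/** End-to-end sketch (editor's illustration, not part of the original source):
 *  the call sequence expected by the wrapper, reusing the hypothetical objects
 *  from the sketches above. Diagonal weights must be loaded before solving
 *  (see the assert in solveSystem()).
 *  \code
 *  bddc.loadRawMesh( 2, 3, inet, nnet, nndf, isegn, isngn, isvgvn, xyz, elData );
 *  bddc.prepareMatAssembly( 1, 2*2 );
 *  bddc.insertToMatrix( elMat, dofs, dofs );
 *  bddc.insertToRhs( elRhs, dofs );
 *  bddc.applyConstraints( bc, 1.0, 1.0 );
 *  for ( unsigned i = 0; i < dofs.size(); ++i )
 *      bddc.insertToDiagonalWeights( static_cast<int>( dofs[i] ), elMat(i,i) );
 *  bddc.solveSystem( 1.e-7, 2, NULL, 0, 1000, 30, false );
 *  \endcode
 */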

#endif