Flow123d  JS_before_hm-2277-gb15131e45
dofhandler.cc
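The listing below defines DOFHandlerMultiDim, which numbers and distributes degrees of freedom over a (possibly parallel) mesh, and SubDOFHandlerMultiDim, which extracts one component of an FESystem-based handler. A minimal driving sketch of the main API, assuming an already constructed Mesh object `mesh` and a MixedPtr<FiniteElement> `fe` (both names are illustrative assumptions; the EqualOrderDiscreteSpace construction mirrors the call used inside SubDOFHandlerMultiDim further down):

    // Sketch only: build a discrete space, distribute dofs, query cell dofs.
    auto ds = std::make_shared<EqualOrderDiscreteSpace>(&mesh, fe);
    auto dh = std::make_shared<DOFHandlerMultiDim>(mesh);
    dh->distribute_dofs(ds);                     // assign global dof numbers (collective over MPI)

    std::vector<LongIdx> dof_indices;
    for (auto cell : dh->own_range())            // locally owned cells only
        dh->get_dof_indices(cell, dof_indices);  // global dofs of this cell (resizes the vector)

    VectorMPI solution = dh->create_vector();    // vector matching the dof distribution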
1 /*!
2  *
3  * Copyright (C) 2015 Technical University of Liberec. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify it under
6  * the terms of the GNU General Public License version 3 as published by the
7  * Free Software Foundation. (http://www.gnu.org/licenses/gpl-3.0.en.html)
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
11  * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
12  *
13  *
14  * @file dofhandler.cc
15  * @brief Declaration of class which handles the ordering of degrees of freedom (dof) and mappings between local and global dofs.
16  * @author Jan Stebel
17  */
18 
19 #include "system/index_types.hh"
20 #include "fem/dofhandler.hh"
21 #include "fem/finite_element.hh"
22 #include "fem/fe_system.hh"
23 #include "fem/dh_cell_accessor.hh"
24 #include "mesh/mesh.h"
25 #include "mesh/duplicate_nodes.h"
26 #include "mesh/partitioning.hh"
27 #include "mesh/accessors.hh"
28 #include "mesh/range_wrapper.hh"
29 #include "mesh/neighbours.h"
30 #include "la/distribution.hh"
31 
32 
36 const int DOFHandlerMultiDim::INVALID_DOF = -1;
37 
38 
39 
40 
41 DOFHandlerBase::~DOFHandlerBase()
42 {}
43 
44 
45 
46 
47 
48 
49 DOFHandlerMultiDim::DOFHandlerMultiDim(MeshBase &_mesh, bool make_elem_part)
50  : DOFHandlerBase(_mesh),
51  ds_(nullptr),
52  is_parallel_(true),
53  dh_seq_(nullptr),
54  scatter_to_seq_(nullptr),
55  el_ds_(nullptr)
56 {
57  // Set up flag that ensures that edges are allocated, so that dofs can be distributed on edges.
58  // Currently this works only for Mesh objects, not for BCMesh.
59  if (dynamic_cast<Mesh*>(mesh_) != nullptr) {
60  distribute_edge_dofs = true;
61  } else {
62  distribute_edge_dofs = false;
63  }
64 
65  if (make_elem_part) make_elem_partitioning();
66 }
67 
68 
69 std::shared_ptr<DOFHandlerMultiDim> DOFHandlerMultiDim::sequential()
70 {
71  create_sequential();
72  return dh_seq_;
73 }
74 
75 
76 std::shared_ptr<VecScatter> DOFHandlerMultiDim::sequential_scatter()
77 {
78  create_sequential();
79  return scatter_to_seq_;
80 }
81 
82 
83 void DOFHandlerMultiDim::init_cell_starts()
84 {
85  // get number of dofs per element and then set up cell_starts
86  cell_starts = std::vector<LongIdx>(el_ds_->lsize()+ghost_4_loc.size()+1, 0);
87  for (auto cell : this->local_range())
88  {
89  cell_starts[cell.local_idx()+1] = cell.n_dofs();
90  max_elem_dofs_ = max( (int)max_elem_dofs_, (int)cell.n_dofs() );
91  }
92  for (unsigned int i=0; i<cell_starts.size()-1; ++i)
93  cell_starts[i+1] += cell_starts[i];
94 }
95 
96 
97 void DOFHandlerMultiDim::init_dof_starts(
98  std::vector<LongIdx> &node_dof_starts,
99  std::vector<LongIdx> &edge_dof_starts)
100 {
101  // initialize dofs on nodes
102  // We must separate dofs for dimensions because FE functions
103  // may be discontinuous on nodes shared by different
104  // dimensions.
105  unsigned int n_node_dofs = 0;
106  for (unsigned int nid=0; nid<mesh_->duplicate_nodes()->n_nodes(); nid++)
107  {
108  node_dof_starts.push_back(n_node_dofs);
109  n_node_dofs += ds_->n_node_dofs(nid);
110  }
111  node_dof_starts.push_back(n_node_dofs);
112 
113  unsigned int n_edge_dofs = 0;
114  for (auto edge : mesh_->edge_range())
115  {
116  edge_dof_starts.push_back(n_edge_dofs);
117  n_edge_dofs += ds_->n_edge_dofs(edge);
118  }
119  edge_dof_starts.push_back(n_edge_dofs);
120 }
121 
122 
123 void DOFHandlerMultiDim::init_status(
124  std::vector<short int> &node_status,
125  std::vector<short int> &edge_status)
126 {
127  // mark local dofs
128  for (auto cell : this->own_range())
129  {
130  for (unsigned int n=0; n<cell.dim()+1; n++)
131  {
132  unsigned int nid = mesh_->duplicate_nodes()->objects(cell.dim())[mesh_->duplicate_nodes()->obj_4_el()[cell.elm_idx()]].nodes[n];
133  node_status[nid] = VALID_NFACE;
134  }
135  }
136 
137  // unmark dofs on ghost cells from lower procs
138  for (auto cell : this->ghost_range())
139  {
140  if (cell.elm().proc() < el_ds_->myp())
141  {
142  for (unsigned int n=0; n<cell.dim()+1; n++)
143  {
144  unsigned int nid = mesh_->duplicate_nodes()->objects(cell.dim())[mesh_->duplicate_nodes()->obj_4_el()[cell.elm_idx()]].nodes[n];
145  node_status[nid] = INVALID_NFACE;
146  }
147  }
148  }
149 
150  if (distribute_edge_dofs)
151  {
152  // mark local edges
153  for (auto eid : edg_4_loc)
154  edge_status[eid] = VALID_NFACE;
155 
156  // unmark dofs on ghost cells from lower procs
157  for (auto cell : this->ghost_range())
158  {
159  if (cell.elm().proc() < el_ds_->myp())
160  {
161  for (unsigned int n=0; n<cell.dim()+1; n++)
162  {
163  unsigned int eid = cell.elm().side(n)->edge_idx();
164  edge_status[eid] = INVALID_NFACE;
165  }
166  }
167  }
168  }
169  else
170  {
171  for (auto eid : edg_4_loc)
172  edge_status[eid] = INVALID_NFACE;
173  }
174 
175 }
176 
177 
178 void DOFHandlerMultiDim::receive_ghost_dofs(unsigned int proc, std::vector<LongIdx> &dofs)
179 {
180  // send number of elements required from the other processor
181  unsigned int n_elems = ghost_proc_el[proc].size();
182  MPI_Send(&n_elems, 1, MPI_UNSIGNED, proc, 0, MPI_COMM_WORLD);
183 
184  // send indices of elements required
185  MPI_Send(&(ghost_proc_el[proc][0]), n_elems, MPI_LONG_IDX, proc, 1, MPI_COMM_WORLD);
186 
187  // receive numbers of dofs on required elements
188  vector<unsigned int> n_dofs(n_elems);
189  MPI_Recv(&(n_dofs[0]), n_elems, MPI_UNSIGNED, proc, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
190 
191  // receive dofs on required elements
192  unsigned int n_dofs_sum = 0;
193  for (auto nd : n_dofs) n_dofs_sum += nd;
194  dofs.resize(n_dofs_sum);
195  MPI_Recv(&(dofs[0]), n_dofs_sum, MPI_LONG_IDX, proc, 3, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
196 }
197 
198 
199 void DOFHandlerMultiDim::send_ghost_dofs(unsigned int proc)
200 {
201  // receive number of elements required by the other processor
202  unsigned int n_elems;
203  MPI_Recv(&n_elems, 1, MPI_UNSIGNED, proc, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
204 
205  // receive indices of elements required
206  vector<LongIdx> elems(n_elems);
207  MPI_Recv(&(elems[0]), n_elems, MPI_LONG_IDX, proc, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
208 
209  // send numbers of dofs on required elements
210  vector<unsigned int> n_dofs;
211  for (LongIdx el : elems)
212  {
213  auto cell = this->cell_accessor_from_element(el);
214  n_dofs.push_back(cell_starts[cell.local_idx()+1] - cell_starts[cell.local_idx()]);
215  }
216  MPI_Send(&(n_dofs[0]), n_elems, MPI_UNSIGNED, proc, 2, MPI_COMM_WORLD);
217 
218  // send dofs on the required elements
219  vector<LongIdx> dofs;
220  for (LongIdx el : elems)
221  {
222  auto cell = this->cell_accessor_from_element(el);
223  for (LongIdx i=cell_starts[cell.local_idx()]; i<cell_starts[cell.local_idx()+1]; i++)
224  dofs.push_back(local_to_global_dof_idx_[dof_indices[i]]);
225  }
226  MPI_Send(&(dofs[0]), dofs.size(), MPI_LONG_IDX, proc, 3, MPI_COMM_WORLD);
227 }
228 
229 
230 void DOFHandlerMultiDim::update_local_dofs(unsigned int proc,
231  const std::vector<bool> &update_cells,
232  const std::vector<LongIdx> &dofs,
233  const std::vector<LongIdx> &node_dof_starts,
234  std::vector<LongIdx> &node_dofs,
235  const std::vector<LongIdx> &edge_dof_starts,
236  std::vector<LongIdx> &edge_dofs)
237 {
238  // update dof_indices on ghost cells
239  unsigned int dof_offset=0;
240  for (unsigned int gid=0; gid<ghost_proc_el[proc].size(); gid++)
241  {
242  DHCellAccessor dh_cell = this->cell_accessor_from_element( ghost_proc_el[proc][gid] );
243 
244  vector<unsigned int> loc_node_dof_count(dh_cell.elm()->n_nodes(), 0);
245  vector<unsigned int> loc_edge_dof_count(dh_cell.elm()->n_sides(), 0);
246  for (unsigned int idof = 0; idof<dh_cell.n_dofs(); ++idof)
247  {
248  if (dh_cell.cell_dof(idof).dim == 0)
249  { // update nodal dof
250  unsigned int dof_nface_idx = dh_cell.cell_dof(idof).n_face_idx;
251  unsigned int nid = mesh_->duplicate_nodes()->objects(dh_cell.dim())[mesh_->duplicate_nodes()->obj_4_el()[dh_cell.elm_idx()]].nodes[dof_nface_idx];
252  unsigned int node_dof_idx = node_dof_starts[nid]+loc_node_dof_count[dof_nface_idx];
253 
254  if (node_dofs[node_dof_idx] == INVALID_DOF)
255  {
256  node_dofs[node_dof_idx] = local_to_global_dof_idx_.size();
257  local_to_global_dof_idx_.push_back(dofs[dof_offset+idof]);
258  }
259  dof_indices[cell_starts[dh_cell.local_idx()]+idof] = node_dofs[node_dof_idx];
260 
261  loc_node_dof_count[dof_nface_idx]++;
262  }
263  else if (dh_cell.cell_dof(idof).dim == dh_cell.dim()-1)
264  { // update edge dof
265  if (!distribute_edge_dofs) break;
266 
267  unsigned int dof_nface_idx = dh_cell.cell_dof(idof).n_face_idx;
268  unsigned int eid = dh_cell.elm().side(dof_nface_idx)->edge_idx();
269  unsigned int edge_dof_idx = edge_dof_starts[eid]+loc_edge_dof_count[dof_nface_idx];
270 
271  if (edge_dofs[edge_dof_idx] == INVALID_DOF)
272  {
273  edge_dofs[edge_dof_idx] = local_to_global_dof_idx_.size();
274  local_to_global_dof_idx_.push_back(dofs[dof_offset+idof]);
275  }
276  dof_indices[cell_starts[dh_cell.local_idx()]+idof] = edge_dofs[edge_dof_idx];
277 
278  loc_edge_dof_count[dof_nface_idx]++;
279  } else if (dh_cell.cell_dof(idof).dim == dh_cell.dim())
280  {
281  dof_indices[cell_starts[dh_cell.local_idx()]+idof] = local_to_global_dof_idx_.size();
282  local_to_global_dof_idx_.push_back(dofs[dof_offset+idof]);
283  }
284  }
285 
286  dof_offset += dh_cell.n_dofs();
287  }
288 
289  // update dof_indices on local elements
290  for (auto cell : this->own_range())
291  {
292  if (!update_cells[cell.local_idx()]) continue;
293 
294  // loop over element dofs
295  vector<unsigned int> loc_node_dof_count(cell.elm()->n_nodes(), 0);
296  vector<unsigned int> loc_edge_dof_count(cell.elm()->n_sides(), 0);
297  for (unsigned int idof = 0; idof<cell.n_dofs(); ++idof)
298  {
299  unsigned int dof_nface_idx = cell.cell_dof(idof).n_face_idx;
300  if (cell.cell_dof(idof).dim == 0)
301  {
302  if (dof_indices[cell_starts[cell.local_idx()]+idof] == INVALID_DOF)
303  { // update nodal dof
304  unsigned int nid = mesh_->duplicate_nodes()->objects(cell.dim())[mesh_->duplicate_nodes()->obj_4_el()[cell.elm_idx()]].nodes[dof_nface_idx];
305  dof_indices[cell_starts[cell.local_idx()]+idof] = node_dofs[node_dof_starts[nid]+loc_node_dof_count[dof_nface_idx]];
306  }
307  loc_node_dof_count[dof_nface_idx]++;
308  } else if (cell.cell_dof(idof).dim == cell.dim()-1)
309  {
310  if (!distribute_edge_dofs) break;
311 
312  if (dof_indices[cell_starts[cell.local_idx()]+idof] == INVALID_DOF)
313  { // update edge dof
314  unsigned int eid = cell.elm().side(dof_nface_idx)->edge_idx();
315  dof_indices[cell_starts[cell.local_idx()]+idof] = edge_dofs[edge_dof_starts[eid]+loc_edge_dof_count[dof_nface_idx]];
316  }
317  loc_edge_dof_count[dof_nface_idx]++;
318  }
319  }
320  }
321 }
322 
323 
324 void DOFHandlerMultiDim::distribute_dofs(std::shared_ptr<DiscreteSpace> ds)
325 {
326  // First check if dofs are already distributed.
327  ASSERT_PERMANENT(ds_ == nullptr).error("Attempt to distribute DOFs multiple times!");
328 
329  ds_ = ds;
330 
331  std::vector<LongIdx> node_dofs, node_dof_starts, edge_dofs, edge_dof_starts;
332  std::vector<short int> node_status(mesh_->duplicate_nodes()->n_nodes(), INVALID_NFACE),
333  edge_status(mesh_->n_edges(), INVALID_NFACE);
334  std::vector<bool> update_cells(el_ds_->lsize(), false);
335  unsigned int next_free_dof = 0;
336 
337  init_cell_starts();
338  init_dof_starts(node_dof_starts, edge_dof_starts);
339  node_dofs.resize(node_dof_starts[node_dof_starts.size()-1], (LongIdx)INVALID_DOF);
340  edge_dofs.resize(edge_dof_starts[edge_dof_starts.size()-1], (LongIdx)INVALID_DOF);
341  init_status(node_status, edge_status);
342 
343  // Distribute dofs on local elements.
344  dof_indices.resize(cell_starts[cell_starts.size()-1]);
345  local_to_global_dof_idx_.reserve(dof_indices.size());
346  for (auto cell : this->own_range())
347  {
348 
349  // loop over element dofs
350  vector<unsigned int> loc_node_dof_count(cell.elm()->n_nodes(), 0);
351  vector<unsigned int> loc_edge_dof_count(cell.elm()->dim()+1, 0);
352  for (unsigned int idof = 0; idof<cell.n_dofs(); ++idof)
353  {
354  unsigned int dof_dim = cell.cell_dof(idof).dim;
355  unsigned int dof_nface_idx = cell.cell_dof(idof).n_face_idx;
356 
357  if (dof_dim == 0)
358  { // add dofs shared by nodes
359  unsigned int nid = mesh_->duplicate_nodes()->objects(cell.dim())[mesh_->duplicate_nodes()->obj_4_el()[cell.elm_idx()]].nodes[dof_nface_idx];
360  unsigned int node_dof_idx = node_dof_starts[nid]+loc_node_dof_count[dof_nface_idx];
361 
362  switch (node_status[nid])
363  {
364  case VALID_NFACE:
365  for (int i=0; i<node_dof_starts[nid+1] - node_dof_starts[nid]; i++)
366  {
367  local_to_global_dof_idx_.push_back(next_free_dof);
368  node_dofs[node_dof_starts[nid]+i] = next_free_dof++;
369  }
370  node_status[nid] = ASSIGNED_NFACE;
371  break;
372  case INVALID_NFACE:
373  node_dofs[node_dof_idx] = INVALID_DOF;
374  update_cells[cell.local_idx()] = true;
375  break;
376  }
377  dof_indices[cell_starts[cell.local_idx()]+idof] = node_dofs[node_dof_idx];
378  loc_node_dof_count[dof_nface_idx]++;
379  }
380  else if (dof_dim == cell.dim()-1)
381  { // add dofs shared by edges
382  if (!distribute_edge_dofs) break;
383 
384  unsigned int eid = cell.elm().side(dof_nface_idx)->edge_idx();
385  unsigned int edge_dof_idx = edge_dof_starts[eid]+loc_edge_dof_count[dof_nface_idx];
386  switch (edge_status[eid])
387  {
388  case VALID_NFACE:
389  for (int i=0; i<edge_dof_starts[eid+1] - edge_dof_starts[eid]; i++)
390  {
391  local_to_global_dof_idx_.push_back(next_free_dof);
392  edge_dofs[edge_dof_starts[eid]+i] = next_free_dof++;
393  }
394  edge_status[eid] = ASSIGNED_NFACE;
395  break;
396  case INVALID_NFACE:
397  edge_dofs[edge_dof_idx] = INVALID_DOF;
398  update_cells[cell.local_idx()] = true;
399  break;
400  }
401  dof_indices[cell_starts[cell.local_idx()]+idof] = edge_dofs[edge_dof_idx];
402  loc_edge_dof_count[dof_nface_idx]++;
403  }
404  else if (dof_dim == cell.dim())
405  { // add dofs owned only by the element
406  local_to_global_dof_idx_.push_back(next_free_dof);
407  dof_indices[cell_starts[cell.local_idx()]+idof] = next_free_dof++;
408  }
409  else
410  ASSERT_PERMANENT(false).error("Unsupported dof n_face.");
411  }
412  }
413  node_status.clear();
414  edge_status.clear();
415 
416  lsize_ = next_free_dof;
417 
418  // communicate n_dofs across all processes
419  dof_ds_ = std::make_shared<Distribution>(lsize_, PETSC_COMM_WORLD);
420  n_global_dofs_ = dof_ds_->size();
421 
422  // shift dof indices
423  loffset_ = dof_ds_->get_starts_array()[dof_ds_->myp()];
424  if (loffset_ > 0)
425  {
426  for (auto &i : local_to_global_dof_idx_)
427  i += loffset_;
428  }
429 
430  // communicate dofs from ghost cells
431  // first propagate from lower procs to higher procs and then vice versa
432  for (unsigned int from_higher = 0; from_higher < 2; from_higher++)
433  {
434  for (unsigned int proc : ghost_proc)
435  {
436  if ((proc > el_ds_->myp()) == from_higher)
437  { // receive dofs from master processor
438  vector<LongIdx> dofs;
439  receive_ghost_dofs(proc, dofs);
440 
441  // update dof_indices and node_dofs on ghost elements
442  update_local_dofs(proc,
443  update_cells,
444  dofs,
445  node_dof_starts,
446  node_dofs,
447  edge_dof_starts,
448  edge_dofs
449  );
450 
451  }
452  else
453  send_ghost_dofs(proc);
454  }
455  }
456  update_cells.clear();
457  node_dofs.clear();
458  node_dof_starts.clear();
459  edge_dofs.clear();
460  edge_dof_starts.clear();
461 }
462 
463 
464 void DOFHandlerMultiDim::create_sequential()
465 {
466  if (dh_seq_ != nullptr) return;
467 
468  if ( !is_parallel_ )
469  {
470  dh_seq_ = std::make_shared<DOFHandlerMultiDim>(*this);
471  return;
472  }
473 
474  dh_seq_ = std::make_shared<DOFHandlerMultiDim>(*mesh_);
475 
476  dh_seq_->n_global_dofs_ = n_global_dofs_;
477  dh_seq_->lsize_ = n_global_dofs_;
478  dh_seq_->loffset_ = 0;
479  dh_seq_->mesh_ = mesh_;
480  dh_seq_->dof_ds_ = dof_ds_; // should be sequential distribution
481  dh_seq_->ds_ = ds_;
482  dh_seq_->is_parallel_ = false;
483  for (unsigned int i=0; i<n_global_dofs_; i++) dh_seq_->local_to_global_dof_idx_.push_back(i);
484 
486  MPI_Allreduce( &max_elem_dofs_,
487  &dh_seq_->max_elem_dofs_,
488  1,
489  MPI_UNSIGNED,
490  MPI_MAX,
491  MPI_COMM_WORLD );
492 
493  for (unsigned int i=0; i<mesh_->n_elements(); i++) dh_seq_->global_to_local_el_idx_[i] = mesh_->get_row_4_el()[i];
494 
495  // Auxiliary vectors cell_starts_loc and dof_indices_loc contain
496  // only local element data (without ghost elements).
497  // Then it is possible to create sequential vectors by simple reduce/gather operation.
498  vector<LongIdx> cell_starts_loc(mesh_->n_elements()+1, 0);
499  vector<LongIdx> dof_indices_loc;
500 
501  // construct cell_starts_loc
502  for (auto cell : this->own_range())
503  {
504  cell_starts_loc[mesh_->get_row_4_el()[cell.elm_idx()]+1] = cell.n_dofs();
505  }
506  for (unsigned int i=0; i<mesh_->n_elements(); ++i)
507  cell_starts_loc[i+1] += cell_starts_loc[i];
508 
509  // construct dof_indices_loc
510  dof_indices_loc.resize(cell_starts_loc[mesh_->n_elements()]);
511  for (auto cell : this->own_range())
512  {
513  for (unsigned int idof=0; idof<cell.n_dofs(); idof++)
514  dof_indices_loc[cell_starts_loc[mesh_->get_row_4_el()[cell.elm_idx()]]+idof] = local_to_global_dof_idx_[dof_indices[cell_starts[cell.local_idx()]+idof]];
515  }
516 
517  Distribution distr(dof_indices_loc.size(), PETSC_COMM_WORLD);
518  dh_seq_->cell_starts.resize(mesh_->n_elements()+1);
519  dh_seq_->dof_indices.resize(distr.size());
520 
521  MPI_Allreduce( cell_starts_loc.data(),
522  dh_seq_->cell_starts.data(),
523  cell_starts_loc.size(),
524  MPI_LONG_IDX,
525  MPI_SUM,
526  MPI_COMM_WORLD );
527 
528  MPI_Allgatherv( dof_indices_loc.data(),
529  dof_indices_loc.size(),
530  MPI_LONG_IDX,
531  dh_seq_->dof_indices.data(),
532  (const int *)distr.get_lsizes_array(),
533  (const int *)distr.get_starts_array(),
534  MPI_LONG_IDX,
535  MPI_COMM_WORLD );
536 
537  // create scatter from parallel to sequential vector
538  Vec v_from;
539  VecCreateMPI(PETSC_COMM_WORLD, lsize_, PETSC_DETERMINE, &v_from);
540  scatter_to_seq_ = std::make_shared<VecScatter>();
541  VecScatterCreateToAll(v_from, scatter_to_seq_.get(), NULL);
542  VecDestroy(&v_from);
543 
544  // create scatter for sequential dof handler (Warning: not tested)
545  Vec v_seq;
546  VecCreateSeq(PETSC_COMM_SELF, n_global_dofs_, &v_seq);
547  dh_seq_->scatter_to_seq_ = std::make_shared<VecScatter>();
548  VecScatterCreateToAll(v_seq, dh_seq_->scatter_to_seq_.get(), NULL);
549  VecDestroy(&v_seq);
550 }
551 
552 
553 VectorMPI DOFHandlerMultiDim::create_vector()
554 {
555  if (is_parallel_ && el_ds_->np() > 1)
556  { // for parallel DH create vector with ghost values
557  std::vector<LongIdx> ghost_dofs(local_to_global_dof_idx_.begin()+lsize_, local_to_global_dof_idx_.end());
558  VectorMPI vec(lsize_, ghost_dofs);
559  return vec;
560  } else {
561  VectorMPI vec(lsize_, PETSC_COMM_SELF);
562  return vec;
563  }
564 }
565 
566 
567 unsigned int DOFHandlerMultiDim::get_dof_indices(const DHCellAccessor &cell, std::vector<LongIdx> &indices) const
568 {
569  unsigned int ndofs = 0;
570  ndofs = cell_starts[cell.local_idx()+1]-cell_starts[cell.local_idx()];
571  indices.resize(ndofs);
572  for (unsigned int k=0; k<ndofs; k++)
573  indices[k] = local_to_global_dof_idx_[dof_indices[cell_starts[cell.local_idx()]+k]];
574 
575  return ndofs;
576 }
577 
578 
579 DOFHandlerMultiDim::~DOFHandlerMultiDim()
580 {}
581 
582 
583 
584 void DOFHandlerMultiDim::make_elem_partitioning()
585 {
586  // create local arrays of elements
587  el_ds_ = mesh_->get_el_ds();
588 
589  // create local array of edges
590  for (auto edge : mesh_->edge_range())
591  {
592  bool is_edge_local = false;
593  for (uint sid=0; sid<edge.n_sides(); sid++)
594  if ( el_is_local(edge.side(sid)->element().idx()) )
595  {
596  is_edge_local = true;
597  break;
598  }
599  if (is_edge_local)
600  edg_4_loc.push_back(edge.idx());
601  }
602 
603  // create local array of neighbours
604  for (unsigned int inb=0; inb<mesh_->n_vb_neighbours(); inb++)
605  {
606  const Neighbour *nb = &mesh_->vb_neighbour(inb);
607  if ( el_is_local(nb->element().idx())
608  || el_is_local(nb->side()->element().idx()) )
609  nb_4_loc.push_back(inb);
610  }
611 
612  // init global to local element map with locally owned elements (later add ghost elements)
613  for ( unsigned int iel = 0; iel < el_ds_->lsize(); iel++ )
614  global_to_local_el_idx_[ mesh_->get_el_4_loc()[iel] ] = iel;
615 
616  // create array of local nodes
617  std::vector<bool> node_is_local(mesh_->duplicate_nodes()->n_nodes(), false);
618  for (auto cell : this->own_range())
619  {
620  unsigned int obj_idx = mesh_->duplicate_nodes()->obj_4_el()[cell.elm_idx()];
621  for (unsigned int nid=0; nid<cell.elm()->n_nodes(); nid++)
622  node_is_local[mesh_->duplicate_nodes()->objects(cell.dim())[obj_idx].nodes[nid]] = true;
623  }
624 
625  // create array of local ghost cells
626  for ( auto cell : mesh_->elements_range() )
627  {
628  if (cell.proc() != el_ds_->myp())
629  {
630  bool has_local_node = false;
631  unsigned int obj_idx = mesh_->duplicate_nodes()->obj_4_el()[cell.idx()];
632  for (unsigned int nid=0; nid<cell->n_nodes(); nid++)
633  if (node_is_local[mesh_->duplicate_nodes()->objects(cell->dim())[obj_idx].nodes[nid]])
634  {
635  has_local_node = true;
636  break;
637  }
638  if (has_local_node)
639  {
640  ghost_4_loc.push_back(cell.idx());
641  ghost_proc.insert(cell.proc());
642  ghost_proc_el[cell.proc()].push_back(cell.idx());
643  global_to_local_el_idx_[cell.idx()] = el_ds_->lsize() - 1 + ghost_4_loc.size();
644  }
645  }
646  }
647  for (auto nb : nb_4_loc)
648  {
649  auto cell = mesh_->vb_neighbour(nb).element();
650  if (!el_is_local(cell.idx()) && find(ghost_4_loc.begin(), ghost_4_loc.end(), cell.idx()) == ghost_4_loc.end())
651  {
652  ghost_4_loc.push_back(cell.idx());
653  ghost_proc.insert(cell.proc());
654  ghost_proc_el[cell.proc()].push_back(cell.idx());
655  global_to_local_el_idx_[cell.idx()] = el_ds_->lsize() - 1 + ghost_4_loc.size();
656  }
657  cell = mesh_->vb_neighbour(nb).side()->element();
658  if (!el_is_local(cell.idx()) && find(ghost_4_loc.begin(), ghost_4_loc.end(), cell.idx()) == ghost_4_loc.end())
659  {
660  ghost_4_loc.push_back(cell.idx());
661  ghost_proc.insert(cell.proc());
662  ghost_proc_el[cell.proc()].push_back(cell.idx());
663  global_to_local_el_idx_[cell.idx()] = el_ds_->lsize() - 1 + ghost_4_loc.size();
664  }
665  }
666 }
667 
668 
669 bool DOFHandlerMultiDim::el_is_local(int index) const
670 {
671  return el_ds_->is_local(mesh_->get_row_4_el()[index]);
672 }
673 
674 
675 std::size_t DOFHandlerMultiDim::hash() const {
676  return this->n_global_dofs_;
677 }
678 
679 
680 Range<DHCellAccessor> DOFHandlerMultiDim::own_range() const {
681  auto bgn_it = make_iter<DHCellAccessor>( DHCellAccessor(this, 0) );
682  auto end_it = make_iter<DHCellAccessor>( DHCellAccessor(this, el_ds_->lsize()) );
683  return Range<DHCellAccessor>(bgn_it, end_it);
684 }
685 
686 
687 Range<DHCellAccessor> DOFHandlerMultiDim::local_range() const {
688  auto bgn_it = make_iter<DHCellAccessor>( DHCellAccessor(this, 0) );
689  auto end_it = make_iter<DHCellAccessor>( DHCellAccessor(this, el_ds_->lsize()+ghost_4_loc.size()) );
690  return Range<DHCellAccessor>(bgn_it, end_it);
691 }
692 
693 
694 Range<DHCellAccessor> DOFHandlerMultiDim::ghost_range() const {
695  auto bgn_it = make_iter<DHCellAccessor>( DHCellAccessor(this, el_ds_->lsize()) );
696  auto end_it = make_iter<DHCellAccessor>( DHCellAccessor(this, el_ds_->lsize()+ghost_4_loc.size()) );
697  return Range<DHCellAccessor>(bgn_it, end_it);
698 }
699 
700 
701 const DHCellAccessor DOFHandlerMultiDim::cell_accessor_from_element(unsigned int elm_idx) const {
702  auto map_it = global_to_local_el_idx_.find((LongIdx)elm_idx); // find in global to local map
703  ASSERT( map_it != global_to_local_el_idx_.end() )(elm_idx).error("DH accessor can be created only for own or ghost elements!\n");
704  return DHCellAccessor(this, map_it->second);
705 }
706 
707 
708 void DOFHandlerMultiDim::print() const {
709  stringstream s;
710  vector<LongIdx> dofs(max_elem_dofs_);
711 
712  s << "DOFHandlerMultiDim structure:" << endl;
713  s << "- is parallel: " << (is_parallel_?"true":"false") << endl;
714  s << "- proc id: " << el_ds_->myp() << endl;
715  s << "- global number of dofs: " << n_global_dofs_ << endl;
716  s << "- number of locally owned cells: " << el_ds_->lsize() << endl;
717  s << "- number of ghost cells: " << ghost_4_loc.size() << endl;
718  s << "- dofs on locally owned cells:" << endl;
719 
720  for (auto cell : own_range())
721  {
722  auto ndofs = cell.get_dof_indices(dofs);
723  s << "-- cell " << cell.elm().idx() << ": ";
724  for (unsigned int idof=0; idof<ndofs; idof++) s << dofs[idof] << " "; s << endl;
725  }
726  s << "- dofs on ghost cells:" << endl;
727  for (auto cell : ghost_range())
728  {
729  auto ndofs = cell.get_dof_indices(dofs);
730  s << "-- cell " << cell.elm().idx() << ": ";
731  for (unsigned int idof=0; idof<ndofs; idof++) s << dofs[idof] << " "; s << endl;
732  }
733  s << "- locally owned dofs (" << lsize_ << "): ";
734  for (unsigned int i=0; i<lsize_; i++) s << local_to_global_dof_idx_[i] << " "; s << endl;
735  s << "- ghost dofs (" << local_to_global_dof_idx_.size() - lsize_ << "): ";
736  for (unsigned int i=lsize_; i<local_to_global_dof_idx_.size(); i++) s << local_to_global_dof_idx_[i] << " "; s << endl;
737  s << "- global-to-local-cell map:" << endl;
738  for (auto cell : global_to_local_el_idx_) s << "-- " << cell.first << " -> " << cell.second << " " << endl;
739  s << endl;
740 
741  printf("%s", s.str().c_str());
742 }
743 
744 
745 
746 
747 
748 
749 
750 
751 
752 SubDOFHandlerMultiDim::SubDOFHandlerMultiDim(std::shared_ptr<DOFHandlerMultiDim> dh, unsigned int component_idx)
753 : DOFHandlerMultiDim(*dh->mesh()),
754  parent_(dh),
755  fe_idx_(component_idx)
756 {
757  // create discrete space, we assume equal type of FE on each cell.
758  ASSERT( dynamic_cast<EqualOrderDiscreteSpace *>(dh->ds().get()) != nullptr )
759  .error("sub_handler can be used only with dof handler using EqualOrderDiscreteSpace!");
760  FESystem<0> *fe_sys0 = dynamic_cast<FESystem<0>*>( dh->ds()->fe()[0_d].get() );
761  FESystem<1> *fe_sys1 = dynamic_cast<FESystem<1>*>( dh->ds()->fe()[1_d].get() );
762  FESystem<2> *fe_sys2 = dynamic_cast<FESystem<2>*>( dh->ds()->fe()[2_d].get() );
763  FESystem<3> *fe_sys3 = dynamic_cast<FESystem<3>*>( dh->ds()->fe()[3_d].get() );
764  ASSERT( fe_sys0 != nullptr ).error("sub_handler assumes that dof handler uses FESystem<0>!");
765  ASSERT( fe_sys1 != nullptr ).error("sub_handler assumes that dof handler uses FESystem<1>!");
766  ASSERT( fe_sys2 != nullptr ).error("sub_handler assumes that dof handler uses FESystem<2>!");
767  ASSERT( fe_sys3 != nullptr ).error("sub_handler assumes that dof handler uses FESystem<3>!");
768  MixedPtr<FiniteElement> sub_mixed(fe_sys0->fe()[component_idx],
769  fe_sys1->fe()[component_idx],
770  fe_sys2->fe()[component_idx],
771  fe_sys3->fe()[component_idx]);
772  ds_ = std::make_shared<EqualOrderDiscreteSpace>( mesh_, sub_mixed);
773 
774  is_parallel_ = dh->is_parallel_;
775 
776  // create list of dofs for sub_dh on one cell
777  FESystemFunctionSpace *fs[4] = {
778  dynamic_cast<FESystemFunctionSpace*>( fe_sys0->function_space_.get() ),
779  dynamic_cast<FESystemFunctionSpace*>( fe_sys1->function_space_.get() ),
780  dynamic_cast<FESystemFunctionSpace*>( fe_sys2->function_space_.get() ),
781  dynamic_cast<FESystemFunctionSpace*>( fe_sys3->function_space_.get() ) };
782  for (unsigned int d=0; d<=3; d++)
783  ASSERT( fs[d] != nullptr ).error("Function space must be of type FESystemFunctionSpace!" );
784  vector<unsigned int> sub_fe_dofs[4];
785  for (unsigned int i=0; i<fe_sys0->n_dofs(); i++)
786  if (fs[0]->dof_indices()[i].fe_index == component_idx) sub_fe_dofs[0].push_back(i);
787  for (unsigned int i=0; i<fe_sys1->n_dofs(); i++)
788  if (fs[1]->dof_indices()[i].fe_index == component_idx) sub_fe_dofs[1].push_back(i);
789  for (unsigned int i=0; i<fe_sys2->n_dofs(); i++)
790  if (fs[2]->dof_indices()[i].fe_index == component_idx) sub_fe_dofs[2].push_back(i);
791  for (unsigned int i=0; i<fe_sys3->n_dofs(); i++)
792  if (fs[3]->dof_indices()[i].fe_index == component_idx) sub_fe_dofs[3].push_back(i);
793 
794  init_cell_starts();
795  dof_indices.resize(cell_starts[cell_starts.size()-1]);
796  // sub_local_indices maps local dofs of parent handler to local dofs of sub-handler
797  vector<LongIdx> sub_local_indices(dh->local_to_global_dof_idx_.size(), INVALID_DOF);
798  map<LongIdx,LongIdx> global_to_local_dof_idx;
799  // first add owned dofs to local_to_global_dof_idx_ and sub_local_indices
800  for (auto cell : dh->local_range())
801  {
802  LocDofVec cell_dof_indices = cell.get_loc_dof_indices();
803  for (auto sub_dof : sub_fe_dofs[cell.dim()])
804  {
805  if (cell_dof_indices[sub_dof] < static_cast<int>(dh->lsize_) &&
806  sub_local_indices[cell_dof_indices[sub_dof]] == INVALID_DOF)
807  {
808  sub_local_indices[cell_dof_indices[sub_dof]] = local_to_global_dof_idx_.size();
809  parent_dof_idx_.push_back(cell_dof_indices[sub_dof]);
810  global_to_local_dof_idx[parent_->local_to_global_dof_idx_[cell_dof_indices[sub_dof]]] = local_to_global_dof_idx_.size();
811  local_to_global_dof_idx_.push_back(lsize_++);
812  }
813  }
814  }
816  // then do the same for ghost dofs and set dof_indices
817  for (auto cell : dh->local_range())
818  {
819  LocDofVec cell_dof_indices = cell.get_loc_dof_indices();
820  unsigned int idof = 0;
821  for (auto sub_dof : sub_fe_dofs[cell.dim()])
822  {
823  if (sub_local_indices[cell_dof_indices[sub_dof]] == INVALID_DOF)
824  {
825  sub_local_indices[cell_dof_indices[sub_dof]] = local_to_global_dof_idx_.size();
826  parent_dof_idx_.push_back(cell_dof_indices[sub_dof]);
827  // temporarily we keep the global dof idx of parent dh, we replace it later from the owning processor
828  local_to_global_dof_idx_.push_back(parent_->local_to_global_dof_idx_[cell_dof_indices[sub_dof]]);
829  }
830  dof_indices[cell_starts[cell.local_idx()]+idof++] = sub_local_indices[cell_dof_indices[sub_dof]];
831  }
832  }
833 
834  dof_ds_ = std::make_shared<Distribution>(lsize_, PETSC_COMM_WORLD);
835  n_global_dofs_ = dof_ds_->size();
836  loffset_ = dof_ds_->get_starts_array()[dof_ds_->myp()];
837 
838  // shift dof indices
839  if (loffset_ > 0)
840  for (unsigned int i=0; i<lsize_; i++)
841  local_to_global_dof_idx_[i] += loffset_;
842 
843  // communicate ghost values
844  // first propagate from lower procs to higher procs and then vice versa
845  for (unsigned int from_higher = 0; from_higher < 2; from_higher++)
846  {
847  for (unsigned int proc : ghost_proc)
848  {
849  if ((proc > el_ds_->myp()) == from_higher)
850  { // receive dofs from master processor
851  vector<LongIdx> dofs;
852  receive_sub_ghost_dofs(proc, dofs);
853  }
854  else
855  send_sub_ghost_dofs(proc, global_to_local_dof_idx);
856  }
857  }
858 }
859 
860 
861 void SubDOFHandlerMultiDim::receive_sub_ghost_dofs(unsigned int proc, vector<LongIdx> &dofs)
862 {
863  // send number of ghost dofs required from the other processor
864  vector<LongIdx> dof_indices;
865  for (unsigned int i=lsize_; i<local_to_global_dof_idx_.size(); i++)
866  if (parent_->dof_ds_->get_proc(parent_->local_to_global_dof_idx_[parent_dof_idx_[i]]) == proc)
867  dof_indices.push_back(parent_->local_to_global_dof_idx_[parent_dof_idx_[i]]);
868  unsigned int n_ghosts = dof_indices.size();
869  MPI_Send(&n_ghosts, 1, MPI_UNSIGNED, proc, 0, MPI_COMM_WORLD);
870 
871  // send indices of dofs required
872  MPI_Send(dof_indices.data(), n_ghosts, MPI_LONG_IDX, proc, 1, MPI_COMM_WORLD);
873 
874  // receive dofs
875  dofs.resize(n_ghosts);
876  MPI_Recv(dofs.data(), n_ghosts, MPI_LONG_IDX, proc, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
877 
878  // update ghost dofs
879  unsigned int idof = 0;
880  for (unsigned int i=lsize_; i<local_to_global_dof_idx_.size(); i++)
881  if (parent_->dof_ds_->get_proc(parent_->local_to_global_dof_idx_[parent_dof_idx_[i]]) == proc)
882  local_to_global_dof_idx_[i] = dofs[idof++];
883 }
884 
885 
886 void SubDOFHandlerMultiDim::send_sub_ghost_dofs(unsigned int proc, const map<LongIdx,LongIdx> &global_to_local_dof_idx)
887 {
888  // receive number of dofs required by the other processor
889  unsigned int n_ghosts;
890  MPI_Recv(&n_ghosts, 1, MPI_UNSIGNED, proc, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
891 
892  // receive global indices of dofs required
893  vector<LongIdx> dof_indices(n_ghosts);
894  MPI_Recv(dof_indices.data(), n_ghosts, MPI_LONG_IDX, proc, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
895 
896  // send global dof indices relative to the sub-handler
897  vector<LongIdx> dofs;
898  for (auto global_dof : dof_indices)
899  dofs.push_back(global_to_local_dof_idx.at(global_dof) + dof_ds_->begin());
900  MPI_Send(dofs.data(), dofs.size(), MPI_LONG_IDX, proc, 2, MPI_COMM_WORLD);
901 }
902 
903 
904 void SubDOFHandlerMultiDim::update_subvector(const VectorMPI &vec, VectorMPI &subvec)
905 {
906  ASSERT( vec.size() == parent_->local_to_global_dof_idx_.size() ).error("Incompatible parent vector in update_subvector()!");
907  ASSERT( subvec.size() == local_to_global_dof_idx_.size() ).error("Incompatible subvector in update_subvector()!");
908 
909  for (unsigned int i=0; i<parent_dof_idx_.size(); i++)
910  subvec.set( i, vec.get(parent_dof_idx_[i]) );
911 }
912 
913 
914 void SubDOFHandlerMultiDim::update_parent_vector(VectorMPI &vec, const VectorMPI &subvec)
915 {
916  ASSERT( vec.size() == parent_->local_to_global_dof_idx_.size() ).error("Incompatible parent vector in update_parent_vector()!");
917  ASSERT( subvec.size() == local_to_global_dof_idx_.size() ).error("Incompatible subvector in update_parent_vector()!");
918 
919  for (unsigned int i=0; i<parent_dof_idx_.size(); i++)
920  vec.set( parent_dof_idx_[i], subvec.get(i) );
921 }
922 
923 
924 
925 
926 
927 
928 
929 
930 
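Typical use of SubDOFHandlerMultiDim defined above is to pull one component out of a vector-valued (FESystem) handler and move data between the parent and the component vectors. A minimal sketch, assuming `dh` and `solution` from the example at the top of this page and that `dh` was built over FESystem finite elements:

    // Sketch only: component 0 of the FESystem on every cell.
    auto sub_dh = std::make_shared<SubDOFHandlerMultiDim>(dh, 0);
    VectorMPI sub_vec = sub_dh->create_vector();
    sub_dh->update_subvector(solution, sub_vec);     // copy component values out of the parent vector
    // ... modify sub_vec ...
    sub_dh->update_parent_vector(solution, sub_vec); // write the component back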