Flow123d  release_3.0.0-1008-gca43bb7
dofhandler.cc
1 /*!
2  *
3  * Copyright (C) 2015 Technical University of Liberec. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify it under
6  * the terms of the GNU General Public License version 3 as published by the
7  * Free Software Foundation. (http://www.gnu.org/licenses/gpl-3.0.en.html)
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
11  * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
12  *
13  *
14  * @file dofhandler.cc
15  * @brief Declaration of class which handles the ordering of degrees of freedom (dof) and mappings between local and global dofs.
16  * @author Jan Stebel
17  */
18 
19 #include "fem/dofhandler.hh"
20 #include "fem/finite_element.hh"
21 #include "fem/dh_cell_accessor.hh"
22 #include "mesh/mesh.h"
23 #include "mesh/duplicate_nodes.h"
24 #include "mesh/partitioning.hh"
25 #include "mesh/long_idx.hh"
26 #include "mesh/accessors.hh"
27 #include "mesh/range_wrapper.hh"
28 #include "mesh/neighbours.h"
29 #include "la/distribution.hh"
30 
31 
32 
33 
34 DOFHandlerBase::~DOFHandlerBase()
35 {}
36 
37 
38 
39 
40 
41 
42 DOFHandlerMultiDim::DOFHandlerMultiDim(Mesh& _mesh, bool make_elem_part)
43  : DOFHandlerBase(_mesh),
44  ds_(nullptr),
45  is_parallel_(true),
46  dh_seq_(nullptr),
47  scatter_to_seq_(nullptr),
48  el_ds_(nullptr)
49 {
50  if (make_elem_part) make_elem_partitioning();
51 }
52 
53 
54 std::shared_ptr<DOFHandlerMultiDim> DOFHandlerMultiDim::sequential()
55 {
56  create_sequential();
57  return dh_seq_;
58 }
59 
60 
61 std::shared_ptr<VecScatter> DOFHandlerMultiDim::sequential_scatter()
62 {
63  create_sequential();
64  return scatter_to_seq_;
65 }
66 
67 
68 void DOFHandlerMultiDim::init_cell_starts()
69 {
70  // get number of dofs per element and then set up cell_starts
71  cell_starts = std::vector<LongIdx>(el_ds_->lsize()+ghost_4_loc.size()+1, 0);
72  for (auto cell : this->local_range())
73  {
74  cell_starts[cell.local_idx()+1] = cell.n_dofs();
75  max_elem_dofs_ = max( (int)max_elem_dofs_, (int)cell.n_dofs() );
76  }
77  for (unsigned int i=0; i<cell_starts.size()-1; ++i)
78  cell_starts[i+1] += cell_starts[i];
79 }
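
The loop above stores each cell's dof count at position local_idx()+1 and then turns the counts into cumulative offsets, so that the dofs of cell i later occupy the slots [cell_starts[i], cell_starts[i+1]) of dof_indices. A small stand-alone sketch of this counts-to-offsets pattern (the cell sizes are invented for illustration):

#include <cstdio>
#include <vector>

int main()
{
    std::vector<long> n_dofs_per_cell = {4, 4, 10, 4};         // hypothetical cells
    std::vector<long> starts(n_dofs_per_cell.size() + 1, 0);   // one extra entry
    for (std::size_t i = 0; i < n_dofs_per_cell.size(); ++i)
        starts[i + 1] = starts[i] + n_dofs_per_cell[i];        // exclusive prefix sum
    for (std::size_t i = 0; i < n_dofs_per_cell.size(); ++i)   // cell i -> [starts[i], starts[i+1])
        std::printf("cell %zu: dof slots [%ld, %ld)\n", i, starts[i], starts[i + 1]);
    return 0;
}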
80 
81 
82 void DOFHandlerMultiDim::init_dof_starts(
83  std::vector<LongIdx> &node_dof_starts,
84  std::vector<LongIdx> &edge_dof_starts)
85 {
86  // initialize dofs on nodes
87  // We must separate dofs for dimensions because FE functions
88  // may be discontinuous on nodes shared by different
89  // dimensions.
90  unsigned int n_node_dofs = 0;
91  for (unsigned int nid=0; nid<mesh_->tree->n_nodes(); nid++)
92  {
93  node_dof_starts.push_back(n_node_dofs);
94  n_node_dofs += ds_->n_node_dofs(nid);
95  }
96  node_dof_starts.push_back(n_node_dofs);
97 
98  unsigned int n_edge_dofs = 0;
99  for (unsigned int i=0; i<mesh_->n_edges(); i++)
100  {
101  edge_dof_starts.push_back(n_edge_dofs);
102  n_edge_dofs += ds_->n_edge_dofs(mesh_->edges[i]);
103  }
104  edge_dof_starts.push_back(n_edge_dofs);
105 }
106 
107 
108 void DOFHandlerMultiDim::init_status(
109  std::vector<short int> &node_status,
110  std::vector<short int> &edge_status)
111 {
112  // mark local dofs
113  for (auto cell : this->own_range())
114  {
115  for (unsigned int n=0; n<cell.dim()+1; n++)
116  {
117  unsigned int nid = mesh_->tree->objects(cell.dim())[mesh_->tree->obj_4_el()[cell.elm_idx()]].nodes[n];
118  node_status[nid] = VALID_NFACE;
119  }
120  }
121 
122  // unmark dofs on ghost cells from lower procs
123  for (auto cell : this->ghost_range())
124  {
125  if (cell.elm().proc() < el_ds_->myp())
126  {
127  for (unsigned int n=0; n<cell.dim()+1; n++)
128  {
129  unsigned int nid = mesh_->tree->objects(cell.dim())[mesh_->tree->obj_4_el()[cell.elm_idx()]].nodes[n];
130  node_status[nid] = INVALID_NFACE;
131  }
132  }
133  }
134 
135  // mark local edges
136  for (auto eid : edg_4_loc)
137  edge_status[eid] = VALID_NFACE;
138 
139  // unmark edge dofs on ghost cells from lower procs
140  for (auto cell : this->ghost_range())
141  {
142  if (cell.elm().proc() < el_ds_->myp())
143  {
144  for (unsigned int n=0; n<cell.dim()+1; n++)
145  {
146  unsigned int eid = cell.elm().side(n)->edge_idx();
147  edge_status[eid] = INVALID_NFACE;
148  }
149  }
150  }
151 }
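
The rule implemented above is a rank-based tie break: an n-face (node or edge) that is also seen from a ghost cell of a lower-ranked process is not numbered locally, so a shared n-face ends up owned by the lowest rank whose cells touch it. A stand-alone sketch of the same rule (the helper name is invented):

#include <cstdio>
#include <vector>

// Hypothetical helper: does `my_rank` keep an n-face also seen by `other_ranks`?
bool owns_nface(int my_rank, const std::vector<int> &other_ranks)
{
    for (int r : other_ranks)
        if (r < my_rank) return false;   // a lower rank takes precedence
    return true;
}

int main()
{
    std::printf("%d\n", owns_nface(1, {2, 3}));  // prints 1: rank 1 keeps the n-face
    std::printf("%d\n", owns_nface(4, {2, 3}));  // prints 0: rank 2 owns it instead
    return 0;
}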
152 
153 
154 void DOFHandlerMultiDim::receive_ghost_dofs(unsigned int proc, std::vector<LongIdx> &dofs)
155 {
156  // send number of elements required from the other processor
157  unsigned int n_elems = ghost_proc_el[proc].size();
158  MPI_Send(&n_elems, 1, MPI_UNSIGNED, proc, 0, MPI_COMM_WORLD);
159 
160  // send indices of elements required
161  MPI_Send(&(ghost_proc_el[proc][0]), n_elems, MPI_LONG_IDX, proc, 1, MPI_COMM_WORLD);
162 
163  // receive numbers of dofs on required elements
164  vector<unsigned int> n_dofs(n_elems);
165  MPI_Recv(&(n_dofs[0]), n_elems, MPI_UNSIGNED, proc, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
166 
167  // receive dofs on required elements
168  unsigned int n_dofs_sum = 0;
169  for (auto nd : n_dofs) n_dofs_sum += nd;
170  dofs.resize(n_dofs_sum);
171  MPI_Recv(&(dofs[0]), n_dofs_sum, MPI_LONG_IDX, proc, 3, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
172 }
173 
174 
175 void DOFHandlerMultiDim::send_ghost_dofs(unsigned int proc)
176 {
177  // receive number of elements required by the other processor
178  unsigned int n_elems;
179  MPI_Recv(&n_elems, 1, MPI_UNSIGNED, proc, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
180 
181  // receive indices of elements required
182  vector<LongIdx> elems(n_elems);
183  MPI_Recv(&(elems[0]), n_elems, MPI_LONG_IDX, proc, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
184 
185  // send numbers of dofs on required elements
186  vector<unsigned int> n_dofs;
187  for (LongIdx el : elems)
188  {
189  auto cell = this->cell_accessor_from_element(el);
190  n_dofs.push_back(cell_starts[cell.local_idx()+1] - cell_starts[cell.local_idx()]);
191  }
192  MPI_Send(&(n_dofs[0]), n_elems, MPI_UNSIGNED, proc, 2, MPI_COMM_WORLD);
193 
194  // send dofs on the required elements
195  vector<LongIdx> dofs;
196  for (LongIdx el : elems)
197  {
198  auto cell = this->cell_accessor_from_element(el);
199  for (unsigned int i=cell_starts[cell.local_idx()]; i<cell_starts[cell.local_idx()+1]; i++)
200  dofs.push_back(local_to_global_dof_idx_[dof_indices[i]]);
201  }
202  MPI_Send(&(dofs[0]), dofs.size(), MPI_LONG_IDX, proc, 3, MPI_COMM_WORLD);
203 }
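
receive_ghost_dofs() and send_ghost_dofs() form a request/response pair: tags 0 and 1 carry the request (element count and element indices), tags 2 and 3 carry the answer (dof counts and dof numbers). A simplified, self-contained two-rank sketch of this tag-matched handshake (the payload is invented and unrelated to dofs):

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        // "receiving" side: ask rank 1 for data on two elements
        std::vector<long> requested = {7, 11};
        unsigned int n = requested.size();
        MPI_Send(&n, 1, MPI_UNSIGNED, 1, 0, MPI_COMM_WORLD);
        MPI_Send(requested.data(), n, MPI_LONG, 1, 1, MPI_COMM_WORLD);
        std::vector<long> answer(n);
        MPI_Recv(answer.data(), n, MPI_LONG, 1, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        for (long v : answer) std::printf("got %ld\n", v);
    } else if (rank == 1) {
        // "sending" side: receive the request and answer it (here: index times 100)
        unsigned int n;
        MPI_Recv(&n, 1, MPI_UNSIGNED, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        std::vector<long> requested(n), answer(n);
        MPI_Recv(requested.data(), n, MPI_LONG, 0, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        for (unsigned int i = 0; i < n; ++i) answer[i] = 100 * requested[i];
        MPI_Send(answer.data(), n, MPI_LONG, 0, 2, MPI_COMM_WORLD);
    }

    MPI_Finalize();
    return 0;
}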
204 
205 
206 void DOFHandlerMultiDim::update_local_dofs(unsigned int proc,
207  const std::vector<bool> &update_cells,
208  const std::vector<LongIdx> &dofs,
209  const std::vector<LongIdx> &node_dof_starts,
210  std::vector<LongIdx> &node_dofs,
211  const std::vector<LongIdx> &edge_dof_starts,
212  std::vector<LongIdx> &edge_dofs)
213 {
214  // update dof_indices on ghost cells
215  unsigned int dof_offset=0;
216  for (unsigned int gid=0; gid<ghost_proc_el[proc].size(); gid++)
217  {
218  DHCellAccessor dh_cell = this->cell_accessor_from_element( ghost_proc_el[proc][gid] );
219 
220  vector<unsigned int> loc_node_dof_count(dh_cell.elm()->n_nodes(), 0);
221  vector<unsigned int> loc_edge_dof_count(dh_cell.elm()->n_sides(), 0);
222  for (unsigned int idof = 0; idof<dh_cell.n_dofs(); ++idof)
223  {
224  if (dh_cell.cell_dof(idof).dim == 0)
225  { // update nodal dof
226  unsigned int dof_nface_idx = dh_cell.cell_dof(idof).n_face_idx;
227  unsigned int nid = mesh_->tree->objects(dh_cell.dim())[mesh_->tree->obj_4_el()[dh_cell.elm_idx()]].nodes[dof_nface_idx];
228  unsigned int node_dof_idx = node_dof_starts[nid]+loc_node_dof_count[dof_nface_idx];
229 
230  if (node_dofs[node_dof_idx] == INVALID_DOF)
231  {
232  node_dofs[node_dof_idx] = local_to_global_dof_idx_.size();
233  local_to_global_dof_idx_.push_back(dofs[dof_offset+idof]);
234  }
235  dof_indices[cell_starts[dh_cell.local_idx()]+idof] = node_dofs[node_dof_idx];
236 
237  loc_node_dof_count[dof_nface_idx]++;
238  }
239  else if (dh_cell.cell_dof(idof).dim == dh_cell.dim()-1)
240  { // update edge dof
241  unsigned int dof_nface_idx = dh_cell.cell_dof(idof).n_face_idx;
242  unsigned int eid = dh_cell.elm().side(dof_nface_idx)->edge_idx();
243  unsigned int edge_dof_idx = edge_dof_starts[eid]+loc_edge_dof_count[dof_nface_idx];
244 
245  if (edge_dofs[edge_dof_idx] == INVALID_DOF)
246  {
247  edge_dofs[edge_dof_idx] = local_to_global_dof_idx_.size();
248  local_to_global_dof_idx_.push_back(dofs[dof_offset+idof]);
249  }
250  dof_indices[cell_starts[dh_cell.local_idx()]+idof] = edge_dofs[edge_dof_idx];
251 
252  loc_edge_dof_count[dof_nface_idx]++;
253  } else if (dh_cell.cell_dof(idof).dim == dh_cell.dim())
254  {
255  dof_indices[cell_starts[dh_cell.local_idx()]+idof] = local_to_global_dof_idx_.size();
256  local_to_global_dof_idx_.push_back(dofs[dof_offset+idof]);
257  }
258  }
259 
260  dof_offset += dh_cell.n_dofs();
261  }
262 
263  // update dof_indices on local elements
264  for (auto cell : this->own_range())
265  {
266  if (!update_cells[cell.local_idx()]) continue;
267 
268  // loop over element dofs
269  vector<unsigned int> loc_node_dof_count(cell.elm()->n_nodes(), 0);
270  vector<unsigned int> loc_edge_dof_count(cell.elm()->n_sides(), 0);
271  for (unsigned int idof = 0; idof<cell.n_dofs(); ++idof)
272  {
273  unsigned int dof_nface_idx = cell.cell_dof(idof).n_face_idx;
274  if (cell.cell_dof(idof).dim == 0)
275  {
276  if (dof_indices[cell_starts[cell.local_idx()]+idof] == INVALID_DOF)
277  { // update nodal dof
278  unsigned int nid = mesh_->tree->objects(cell.dim())[mesh_->tree->obj_4_el()[cell.elm_idx()]].nodes[dof_nface_idx];
279  dof_indices[cell_starts[cell.local_idx()]+idof] = node_dofs[node_dof_starts[nid]+loc_node_dof_count[dof_nface_idx]];
280  }
281  loc_node_dof_count[dof_nface_idx]++;
282  } else if (cell.cell_dof(idof).dim == cell.dim()-1)
283  {
284  if (dof_indices[cell_starts[cell.local_idx()]+idof] == INVALID_DOF)
285  { // update edge dof
286  unsigned int eid = cell.elm().side(dof_nface_idx)->edge_idx();
287  dof_indices[cell_starts[cell.local_idx()]+idof] = edge_dofs[edge_dof_starts[eid]+loc_edge_dof_count[dof_nface_idx]];
288  }
289  loc_edge_dof_count[dof_nface_idx]++;
290  }
291  }
292  }
293 }
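
Both loops above rely on a placeholder-and-patch idiom: during the local numbering pass, dofs owned by another process are recorded as INVALID_DOF, and once the owner's global numbers have arrived they are substituted in the same traversal order. A stand-alone sketch of that idiom (data invented):

#include <cstdio>
#include <vector>

static const long INVALID = -1;

int main()
{
    std::vector<long> dofs = {0, INVALID, 1, INVALID};   // local numbering with gaps
    std::vector<long> received = {42, 43};               // indices sent by the owner
    std::size_t next = 0;
    for (auto &d : dofs)
        if (d == INVALID) d = received[next++];          // patch sentinels in order
    for (long d : dofs) std::printf("%ld ", d);
    std::printf("\n");
    return 0;
}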
294 
295 
296 void DOFHandlerMultiDim::distribute_dofs(std::shared_ptr<DiscreteSpace> ds)
297 {
298  // First check if dofs are already distributed.
299  OLD_ASSERT(ds_ == nullptr, "Attempt to distribute DOFs multiple times!");
300 
301  ds_ = ds;
302 
303  std::vector<LongIdx> node_dofs, node_dof_starts, edge_dofs, edge_dof_starts;
304  std::vector<short int> node_status(mesh_->tree->n_nodes(), INVALID_NFACE),
305  edge_status(mesh_->n_edges(), INVALID_NFACE);
306  std::vector<bool> update_cells(el_ds_->lsize(), false);
307  unsigned int next_free_dof = 0;
308 
309  init_cell_starts();
310  init_dof_starts(node_dof_starts, edge_dof_starts);
311  node_dofs.resize(node_dof_starts[node_dof_starts.size()-1], (LongIdx)INVALID_DOF);
312  edge_dofs.resize(edge_dof_starts[edge_dof_starts.size()-1], (LongIdx)INVALID_DOF);
313  init_status(node_status, edge_status);
314 
315  // Distribute dofs on local elements.
316  dof_indices.resize(cell_starts[cell_starts.size()-1]);
317  local_to_global_dof_idx_.reserve(dof_indices.size());
318  for (auto cell : this->own_range())
319  {
320 
321  // loop over element dofs
322  vector<unsigned int> loc_node_dof_count(cell.elm()->n_nodes(), 0);
323  vector<unsigned int> loc_edge_dof_count(cell.elm()->dim()+1, 0);
324  for (unsigned int idof = 0; idof<cell.n_dofs(); ++idof)
325  {
326  unsigned int dof_dim = cell.cell_dof(idof).dim;
327  unsigned int dof_nface_idx = cell.cell_dof(idof).n_face_idx;
328 
329  if (dof_dim == 0)
330  { // add dofs shared by nodes
331  unsigned int nid = mesh_->tree->objects(cell.dim())[mesh_->tree->obj_4_el()[cell.elm_idx()]].nodes[dof_nface_idx];
332  unsigned int node_dof_idx = node_dof_starts[nid]+loc_node_dof_count[dof_nface_idx];
333 
334  switch (node_status[nid])
335  {
336  case VALID_NFACE:
337  for (int i=0; i<node_dof_starts[nid+1] - node_dof_starts[nid]; i++)
338  {
339  local_to_global_dof_idx_.push_back(next_free_dof);
340  node_dofs[node_dof_starts[nid]+i] = next_free_dof++;
341  }
342  node_status[nid] = ASSIGNED_NFACE;
343  break;
344  case INVALID_NFACE:
345  node_dofs[node_dof_idx] = INVALID_DOF;
346  update_cells[cell.local_idx()] = true;
347  break;
348  }
349  dof_indices[cell_starts[cell.local_idx()]+idof] = node_dofs[node_dof_idx];
350  loc_node_dof_count[dof_nface_idx]++;
351  }
352  else if (dof_dim == cell.dim()-1)
353  { // add dofs shared by edges
354  unsigned int eid = cell.elm().side(dof_nface_idx)->edge_idx();
355  unsigned int edge_dof_idx = edge_dof_starts[eid]+loc_edge_dof_count[dof_nface_idx];
356  switch (edge_status[eid])
357  {
358  case VALID_NFACE:
359  for (int i=0; i<edge_dof_starts[eid+1] - edge_dof_starts[eid]; i++)
360  {
361  local_to_global_dof_idx_.push_back(next_free_dof);
362  edge_dofs[edge_dof_starts[eid]+i] = next_free_dof++;
363  }
364  edge_status[eid] = ASSIGNED_NFACE;
365  break;
366  case INVALID_NFACE:
367  edge_dofs[edge_dof_idx] = INVALID_DOF;
368  update_cells[cell.local_idx()] = true;
369  break;
370  }
371  dof_indices[cell_starts[cell.local_idx()]+idof] = edge_dofs[edge_dof_idx];
372  loc_edge_dof_count[dof_nface_idx]++;
373  }
374  else if (dof_dim == cell.dim())
375  { // add dofs owned only by the element
376  local_to_global_dof_idx_.push_back(next_free_dof);
377  dof_indices[cell_starts[cell.local_idx()]+idof] = next_free_dof++;
378  }
379  else
380  ASSERT(false).error("Unsupported dof n_face.");
381  }
382  }
383  node_status.clear();
384  edge_status.clear();
385 
386  lsize_ = next_free_dof;
387 
388  // communicate n_dofs across all processes
389  dof_ds_ = std::make_shared<Distribution>(lsize_, PETSC_COMM_WORLD);
390  n_global_dofs_ = dof_ds_->size();
391 
392  // shift dof indices
393  loffset_ = dof_ds_->get_starts_array()[dof_ds_->myp()];
394  if (loffset_ > 0)
395  {
396  for (auto &i : local_to_global_dof_idx_)
397  i += loffset_;
398  }
399 
400  // communicate dofs from ghost cells
401  // first propagate from lower procs to higher procs and then vice versa
402  for (unsigned int from_higher = 0; from_higher < 2; from_higher++)
403  {
404  for (unsigned int proc : ghost_proc)
405  {
406  if ((proc > el_ds_->myp()) == from_higher)
407  { // receive dofs from master processor
408  vector<LongIdx> dofs;
409  receive_ghost_dofs(proc, dofs);
410 
411  // update dof_indices and node_dofs on ghost elements
412  update_local_dofs(proc,
413  update_cells,
414  dofs,
415  node_dof_starts,
416  node_dofs,
417  edge_dof_starts,
418  edge_dofs
419  );
420 
421  }
422  else
423  send_ghost_dofs(proc);
424  }
425  }
426  update_cells.clear();
427  node_dofs.clear();
428  node_dof_starts.clear();
429  edge_dofs.clear();
430  edge_dof_starts.clear();
431 }
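
A hedged usage sketch of the whole sequence (it assumes the includes at the top of this file and a Mesh plus DiscreteSpace prepared elsewhere; the helper name is illustrative, not part of Flow123d):

void setup_dof_handler(Mesh &mesh, std::shared_ptr<DiscreteSpace> ds)
{
    // The constructor builds the element partitioning (ghost cells, edges, neighbours);
    // distribute_dofs() then numbers the dofs and exchanges ghost dof numbers via MPI.
    DOFHandlerMultiDim dh(mesh);
    dh.distribute_dofs(ds);
    dh.print();   // per-process dump of the resulting dof structure
}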
432 
433 
434 void DOFHandlerMultiDim::create_sequential()
435 {
436  if (dh_seq_ != nullptr) return;
437 
438  if ( !is_parallel_ )
439  {
440  dh_seq_ = std::make_shared<DOFHandlerMultiDim>(*this);
441  return;
442  }
443 
444  dh_seq_ = std::make_shared<DOFHandlerMultiDim>(*mesh_);
445 
446  dh_seq_->n_global_dofs_ = n_global_dofs_;
447  dh_seq_->lsize_ = n_global_dofs_;
448  dh_seq_->loffset_ = 0;
449  dh_seq_->mesh_ = mesh_;
450  dh_seq_->dof_ds_ = dof_ds_; // should be sequential distribution
451  dh_seq_->ds_ = ds_;
452  dh_seq_->is_parallel_ = false;
453  for (unsigned int i=0; i<n_global_dofs_; i++) dh_seq_->local_to_global_dof_idx_.push_back(i);
454 
456  MPI_Allreduce( &max_elem_dofs_,
457  &dh_seq_->max_elem_dofs_,
458  1,
459  MPI_UNSIGNED,
460  MPI_MAX,
461  MPI_COMM_WORLD );
462 
463  for (unsigned int i=0; i<mesh_->n_elements(); i++) dh_seq_->global_to_local_el_idx_[i] = mesh_->get_row_4_el()[i];
464 
465  // Auxiliary vectors cell_starts_loc and dof_indices_loc contain
466  // only local element data (without ghost elements).
467  // Then it is possible to create sequential vectors by simple reduce/gather operation.
468  vector<LongIdx> cell_starts_loc(mesh_->n_elements()+1, 0);
469  vector<LongIdx> dof_indices_loc;
470 
471  // construct cell_starts_loc
472  for (auto cell : this->own_range())
473  {
474  cell_starts_loc[mesh_->get_row_4_el()[cell.elm_idx()]+1] = cell.n_dofs();
475  }
476  for (unsigned int i=0; i<mesh_->n_elements(); ++i)
477  cell_starts_loc[i+1] += cell_starts_loc[i];
478 
479  // construct dof_indices_loc
480  dof_indices_loc.resize(cell_starts_loc[mesh_->n_elements()]);
481  for (auto cell : this->own_range())
482  {
483  for (unsigned int idof=0; idof<cell.n_dofs(); idof++)
484  dof_indices_loc[cell_starts_loc[mesh_->get_row_4_el()[cell.elm_idx()]]+idof] = local_to_global_dof_idx_[dof_indices[cell_starts[cell.local_idx()]+idof]];
485  }
486 
487  Distribution distr(dof_indices_loc.size(), PETSC_COMM_WORLD);
488  dh_seq_->cell_starts.resize(mesh_->n_elements()+1);
489  dh_seq_->dof_indices.resize(distr.size());
490 
491  MPI_Allreduce( cell_starts_loc.data(),
492  dh_seq_->cell_starts.data(),
493  cell_starts_loc.size(),
494  MPI_LONG_IDX,
495  MPI_SUM,
496  MPI_COMM_WORLD );
497 
498  MPI_Allgatherv( dof_indices_loc.data(),
499  dof_indices_loc.size(),
500  MPI_LONG_IDX,
501  dh_seq_->dof_indices.data(),
502  (const int *)distr.get_lsizes_array(),
503  (const int *)distr.get_starts_array(),
504  MPI_LONG_IDX,
505  MPI_COMM_WORLD );
506 
507  // create scatter from parallel to sequential vector
508  Vec v_from;
509  VecCreateMPI(PETSC_COMM_WORLD, lsize_, PETSC_DETERMINE, &v_from);
510  scatter_to_seq_ = std::make_shared<VecScatter>();
511  VecScatterCreateToAll(v_from, scatter_to_seq_.get(), NULL);
512  VecDestroy(&v_from);
513 
514  // create scatter for sequential dof handler (Warning: not tested)
515  Vec v_seq;
516  VecCreateSeq(PETSC_COMM_SELF, n_global_dofs_, &v_seq);
517  dh_seq_->scatter_to_seq_ = std::make_shared<VecScatter>();
518  VecScatterCreateToAll(v_seq, dh_seq_->scatter_to_seq_.get(), NULL);
519  VecDestroy(&v_seq);
520 }
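
The scatter built above uses PETSc's VecScatterCreateToAll to replicate a distributed vector on every process. A stand-alone PETSc sketch of that mechanism, independent of the dof handler (vector size and values are invented, error checking omitted):

#include <petscvec.h>

int main(int argc, char **argv)
{
    PetscInitialize(&argc, &argv, NULL, NULL);

    Vec par, seq;
    VecScatter ctx;
    VecCreateMPI(PETSC_COMM_WORLD, PETSC_DECIDE, 100, &par);  // 100 global entries
    VecSet(par, 1.0);

    VecScatterCreateToAll(par, &ctx, &seq);                   // seq holds all 100 entries
    VecScatterBegin(ctx, par, seq, INSERT_VALUES, SCATTER_FORWARD);
    VecScatterEnd(ctx, par, seq, INSERT_VALUES, SCATTER_FORWARD);

    VecScatterDestroy(&ctx);
    VecDestroy(&par);
    VecDestroy(&seq);
    PetscFinalize();
    return 0;
}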
521 
522 
523 VectorMPI DOFHandlerMultiDim::create_vector()
524 {
525  if (is_parallel_ && el_ds_->np() > 1)
526  { // for parallel DH create vector with ghost values
527  std::vector<LongIdx> ghost_dofs(local_to_global_dof_idx_.begin()+lsize_, local_to_global_dof_idx_.end());
528  VectorMPI vec(lsize_, ghost_dofs);
529  return vec;
530  } else {
531  VectorMPI vec(lsize_, PETSC_COMM_SELF);
532  return vec;
533  }
534 }
535 
536 
537 unsigned int DOFHandlerMultiDim::get_dof_indices(const DHCellAccessor &cell, std::vector<int> &indices) const
538 {
539  unsigned int ndofs = 0;
540  ndofs = cell_starts[cell.local_idx()+1]-cell_starts[cell.local_idx()];
541  for (unsigned int k=0; k<ndofs; k++)
542  indices[k] = local_to_global_dof_idx_[dof_indices[cell_starts[cell.local_idx()]+k]];
543 
544  return ndofs;
545 }
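
A hedged sketch of how this method is typically consumed, e.g. when inserting a local element contribution into a global matrix (it assumes the includes at the top of this file; the function name is illustrative):

unsigned int count_owned_cell_dofs(const DOFHandlerMultiDim &dh)
{
    std::vector<int> dof_idx;
    unsigned int total = 0;
    for (auto cell : dh.own_range())
    {
        dof_idx.resize(cell.n_dofs());                        // must be large enough
        unsigned int nd = dh.get_dof_indices(cell, dof_idx);  // fills global dof numbers
        // a real assembly would insert the local contribution at rows/cols dof_idx[0..nd)
        total += nd;
    }
    return total;
}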
546 
547 
548 
549 unsigned int DOFHandlerMultiDim::get_loc_dof_indices(const DHCellAccessor &cell, std::vector<LongIdx> &indices) const
550 {
551  unsigned int ndofs = 0;
552  ndofs = cell_starts[cell.local_idx()+1]-cell_starts[cell.local_idx()];
553  for (unsigned int k=0; k<ndofs; k++)
554  indices[k] = dof_indices[cell_starts[cell.local_idx()]+k];
555 
556  return ndofs;
557 }
558 
559 
560 DOFHandlerMultiDim::~DOFHandlerMultiDim()
561 {}
562 
563 
564 
565 void DOFHandlerMultiDim::make_elem_partitioning()
566 {
567  // create local arrays of elements
568  el_ds_ = mesh_->get_el_ds();
569 
570  // create local array of edges
571  for (unsigned int iedg=0; iedg<mesh_->n_edges(); iedg++)
572  {
573  bool is_edge_local = false;
574  Edge *edg = &mesh_->edges[iedg];
575  for (int sid=0; sid<edg->n_sides; sid++)
576  if ( el_is_local(edg->side(sid)->element().idx()) )
577  {
578  is_edge_local = true;
579  break;
580  }
581  if (is_edge_local)
582  edg_4_loc.push_back(iedg);
583  }
584 
585  // create local array of neighbours
586  for (unsigned int inb=0; inb<mesh_->n_vb_neighbours(); inb++)
587  {
588  Neighbour *nb = &mesh_->vb_neighbours_[inb];
589  if ( el_is_local(nb->element().idx())
590  || el_is_local(nb->side()->element().idx()) )
591  nb_4_loc.push_back(inb);
592  }
593 
594  // init global to local element map with locally owned elements (later add ghost elements)
595  for ( unsigned int iel = 0; iel < el_ds_->lsize(); iel++ )
596  global_to_local_el_idx_[ mesh_->get_el_4_loc()[iel] ] = iel;
597 
598  // create array of local nodes
599  std::vector<bool> node_is_local(mesh_->tree->n_nodes(), false);
600  for (auto cell : this->own_range())
601  {
602  unsigned int obj_idx = mesh_->tree->obj_4_el()[cell.elm_idx()];
603  for (unsigned int nid=0; nid<cell.elm()->n_nodes(); nid++)
604  node_is_local[mesh_->tree->objects(cell.dim())[obj_idx].nodes[nid]] = true;
605  }
606 
607  // create array of local ghost cells
608  for ( auto cell : mesh_->elements_range() )
609  {
610  if (cell.proc() != el_ds_->myp())
611  {
612  bool has_local_node = false;
613  unsigned int obj_idx = mesh_->tree->obj_4_el()[cell.idx()];
614  for (unsigned int nid=0; nid<cell->n_nodes(); nid++)
615  if (node_is_local[mesh_->tree->objects(cell->dim())[obj_idx].nodes[nid]])
616  {
617  has_local_node = true;
618  break;
619  }
620  if (has_local_node)
621  {
622  ghost_4_loc.push_back(cell.idx());
623  ghost_proc.insert(cell.proc());
624  ghost_proc_el[cell.proc()].push_back(cell.idx());
625  global_to_local_el_idx_[cell.idx()] = el_ds_->lsize() - 1 + ghost_4_loc.size();
626  }
627  }
628  }
629  for (auto nb : nb_4_loc)
630  {
631  auto cell = mesh_->vb_neighbours_[nb].element();
632  if (!el_is_local(cell.idx()) && find(ghost_4_loc.begin(), ghost_4_loc.end(), cell.idx()) == ghost_4_loc.end())
633  {
634  ghost_4_loc.push_back(cell.idx());
635  ghost_proc.insert(cell.proc());
636  ghost_proc_el[cell.proc()].push_back(cell.idx());
637  global_to_local_el_idx_[cell.idx()] = el_ds_->lsize() - 1 + ghost_4_loc.size();
638  }
639  cell = mesh_->vb_neighbours_[nb].side()->element();
640  if (!el_is_local(cell.idx()) && find(ghost_4_loc.begin(), ghost_4_loc.end(), cell.idx()) == ghost_4_loc.end())
641  {
642  ghost_4_loc.push_back(cell.idx());
643  ghost_proc.insert(cell.proc());
644  ghost_proc_el[cell.proc()].push_back(cell.idx());
645  global_to_local_el_idx_[cell.idx()] = el_ds_->lsize() - 1 + ghost_4_loc.size();
646  }
647  }
648 }
649 
650 
651 bool DOFHandlerMultiDim::el_is_local(int index) const
652 {
653  return el_ds_->is_local(mesh_->get_row_4_el()[index]);
654 }
655 
656 
657 std::size_t DOFHandlerMultiDim::hash() const {
658  return this->n_global_dofs_;
659 }
660 
661 
662 Range<DHCellAccessor> DOFHandlerMultiDim::own_range() const {
663  auto bgn_it = make_iter<DHCellAccessor>( DHCellAccessor(this, 0) );
664  auto end_it = make_iter<DHCellAccessor>( DHCellAccessor(this, el_ds_->lsize()) );
665  return Range<DHCellAccessor>(bgn_it, end_it);
666 }
667 
668 
669 Range<DHCellAccessor> DOFHandlerMultiDim::local_range() const {
670  auto bgn_it = make_iter<DHCellAccessor>( DHCellAccessor(this, 0) );
671  auto end_it = make_iter<DHCellAccessor>( DHCellAccessor(this, el_ds_->lsize()+ghost_4_loc.size()) );
672  return Range<DHCellAccessor>(bgn_it, end_it);
673 }
674 
675 
676 Range<DHCellAccessor> DOFHandlerMultiDim::ghost_range() const {
677  auto bgn_it = make_iter<DHCellAccessor>( DHCellAccessor(this, el_ds_->lsize()) );
678  auto end_it = make_iter<DHCellAccessor>( DHCellAccessor(this, el_ds_->lsize()+ghost_4_loc.size()) );
679  return Range<DHCellAccessor>(bgn_it, end_it);
680 }
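
The three ranges are nested by construction: own cells get local indices [0, el_ds_->lsize()), ghost cells follow them, and local_range() spans both. A hedged counting sketch (it assumes the includes at the top of this file; the function name is illustrative):

bool check_dh_ranges(const DOFHandlerMultiDim &dh)
{
    unsigned int n_own = 0, n_ghost = 0, n_local = 0;
    for (auto cell : dh.own_range())   { (void)cell; ++n_own; }    // locally owned cells
    for (auto cell : dh.ghost_range()) { (void)cell; ++n_ghost; }  // ghosts of other processes
    for (auto cell : dh.local_range()) { (void)cell; ++n_local; }  // own cells first, then ghosts
    return n_local == n_own + n_ghost;                             // holds on every process
}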
681 
682 
683 const DHCellAccessor DOFHandlerMultiDim::cell_accessor_from_element(unsigned int elm_idx) const {
684  auto map_it = global_to_local_el_idx_.find((LongIdx)elm_idx); // find in global to local map
685  ASSERT( map_it != global_to_local_el_idx_.end() )(elm_idx).error("DH accessor can be created only for own or ghost elements!\n");
686  return DHCellAccessor(this, map_it->second);
687 }
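
A hedged sketch of going from a global element index back to its dof data; the call is only valid for elements that are locally owned or ghost on the calling process (it assumes the includes at the top of this file; the function name is illustrative):

unsigned int n_dofs_of_element(const DOFHandlerMultiDim &dh, unsigned int global_elm_idx)
{
    // Fails the assertion above if the element is neither owned nor ghost on this process.
    DHCellAccessor cell = dh.cell_accessor_from_element(global_elm_idx);
    return cell.n_dofs();
}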
688 
689 
690 void DOFHandlerMultiDim::print() const {
691  stringstream s;
693 
694  s << "DOFHandlerMultiDim structure:" << endl;
695  s << "- is parallel: " << (is_parallel_?"true":"false") << endl;
696  s << "- proc id: " << el_ds_->myp() << endl;
697  s << "- global number of dofs: " << n_global_dofs_ << endl;
698  s << "- number of locally owned cells: " << el_ds_->lsize() << endl;
699  s << "- number of ghost cells: " << ghost_4_loc.size() << endl;
700  s << "- dofs on locally owned cells:" << endl;
701 
702  for (auto cell : own_range())
703  {
704  auto ndofs = cell.get_dof_indices(dofs);
705  s << "-- cell " << cell.elm().index() << ": ";
706  for (unsigned int idof=0; idof<ndofs; idof++) s << dofs[idof] << " "; s << endl;
707  }
708  s << "- dofs on ghost cells:" << endl;
709  for (auto cell : ghost_range())
710  {
711  auto ndofs = cell.get_dof_indices(dofs);
712  s << "-- cell " << cell.elm().index() << ": ";
713  for (unsigned int idof=0; idof<ndofs; idof++) s << dofs[idof] << " "; s << endl;
714  }
715  s << "- locally owned dofs (" << lsize_ << "): ";
716  for (unsigned int i=0; i<lsize_; i++) s << local_to_global_dof_idx_[i] << " "; s << endl;
717  s << "- ghost dofs (" << local_to_global_dof_idx_.size() - lsize_ << "): ";
718  for (unsigned int i=lsize_; i<local_to_global_dof_idx_.size(); i++) s << local_to_global_dof_idx_[i] << " "; s << endl;
719  s << "- global-to-local-cell map:" << endl;
720  for (auto cell : global_to_local_el_idx_) s << "-- " << cell.first << " -> " << cell.second << " " << endl;
721  s << endl;
722 
723  printf("%s", s.str().c_str());
724 }
725 
726 
727 
728 
729 
730 
731 
732 
733 
734 
735 
736 
737 
738 
739 
740 
741 