scatter_to_seq_(nullptr),
// init_node_dof_starts(): accumulate per-node dof counts into starting indices.
unsigned int n_node_dofs = 0;
node_dof_starts.push_back(n_node_dofs);
n_node_dofs += ds_->n_node_dofs(nid);
node_dof_starts.push_back(n_node_dofs);
for (unsigned int n=0; n<cell.dim()+1; n++)
for (unsigned int n=0; n<cell.dim()+1; n++)
unsigned int n_dofs_sum = 0;
for (auto nd : n_dofs) n_dofs_sum += nd;
dofs.resize(n_dofs_sum);
unsigned int n_elems;
// update_local_dofs(): update dofs on local elements from ghost element dofs received from processor 'proc'.
unsigned int dof_offset=0;
for (unsigned int gid=0; gid<ghost_proc_el[proc].size(); gid++)
for (unsigned dof=0; dof<dh_cell.n_dofs(); dof++)
for (unsigned int idof = 0; idof<dh_cell.n_dofs(); ++idof)
if (node_dofs[node_dof_starts[nid]+loc_node_dof_count[dof_nface_idx]] == INVALID_DOF)
    node_dofs[node_dof_starts[nid]+loc_node_dof_count[dof_nface_idx]] = dofs[dof_offset+idof];
loc_node_dof_count[dof_nface_idx]++;
dof_offset += dh_cell.n_dofs();
// ... then refresh dof_indices on local cells that were marked for update.
if (!update_cells[cell.local_idx()]) continue;
for (unsigned int idof = 0; idof<cell.n_dofs(); ++idof)
if (cell.cell_dof(idof).dim == 0)
unsigned int dof_nface_idx = cell.cell_dof(idof).n_face_idx;
dof_indices[cell_starts[cell.local_idx()]+idof] = node_dofs[node_dof_starts[nid]+loc_node_dof_count[dof_nface_idx]];
loc_node_dof_count[dof_nface_idx]++;
// distribute_dofs(): dofs may be distributed only once.
OLD_ASSERT( ds_ == nullptr, "Attempt to distribute DOFs multiple times!");
unsigned int next_free_dof = 0;
for (unsigned int idof = 0; idof<cell.n_dofs(); ++idof)
unsigned int dof_dim = cell.cell_dof(idof).dim;
unsigned int dof_nface_idx = cell.cell_dof(idof).n_face_idx;
unsigned int node_dof_idx = node_dof_starts[nid]+loc_node_dof_count[dof_nface_idx];
switch (node_status[nid])
for (int i=0; i<node_dof_starts[nid+1] - node_dof_starts[nid]; i++)
    node_dofs[node_dof_starts[nid]+i] = next_free_dof++;
update_cells[cell.local_idx()] = true;
loc_node_dof_count[dof_nface_idx]++;
else if (dof_dim == cell.dim())
ASSERT(false).error("Unsupported dof n_face.");
dof_ds_ = std::make_shared<Distribution>(lsize_, PETSC_COMM_WORLD);
// Exchange ghost dof numbers with neighbouring processors (lower ranks first, then higher).
for (unsigned int from_higher = 0; from_higher < 2; from_higher++)
if ((proc > el_ds_->myp()) == from_higher)
update_cells.clear();
node_dof_starts.clear();
// create_sequential(): gather cell_starts and dof_indices from all processes and build the sequential handler.
if (dh_seq_ != nullptr) return;
dh_seq_ = std::make_shared<DOFHandlerMultiDim>(*this);
dh_seq_ = std::make_shared<DOFHandlerMultiDim>(*mesh_);
for (unsigned int i=0; i<mesh_->n_elements(); i++) dh_seq_->global_to_local_el_idx_[i] = mesh_->get_row_4_el()[i];
cell_starts_loc[mesh_->get_row_4_el()[cell.elm_idx()]+1] = cell.n_dofs();
for (unsigned int i=0; i<mesh_->n_elements(); ++i)
    cell_starts_loc[i+1] += cell_starts_loc[i];
dof_indices_loc.resize(cell_starts_loc[mesh_->n_elements()]);
for (unsigned int idof=0; idof<cell.n_dofs(); idof++)
    dof_indices_loc[cell_starts_loc[mesh_->get_row_4_el()[cell.elm_idx()]]+idof] = dof_indices[cell_starts[cell.local_idx()]+idof];
dh_seq_->cell_starts.resize(mesh_->n_elements()+1);
// Arguments of the MPI_Allgatherv calls that gather the cell starts and dof indices:
cell_starts_loc.size(),
dof_indices_loc.size(),
(const int *) distr.get_lsizes_array(),
(const int *) distr.get_starts_array(),
VecCreateMPI(PETSC_COMM_WORLD, lsize_, PETSC_DETERMINE, &v_from);
dh_seq_->scatter_to_seq_ = std::make_shared<VecScatter>();
VecScatterCreateToAll(v_seq, dh_seq_->scatter_to_seq_.get(), NULL);
unsigned int ndofs = 0;
for (unsigned int k=0; k<ndofs; k++)
unsigned int ndofs = 0;
for (unsigned int k=0; k<ndofs; k++)
    indices[k] = cell_starts[cell_idx]+k;
for (unsigned int iedg=0; iedg<mesh_->n_edges(); iedg++)
bool is_edge_local = false;
for (int sid=0; sid<edg->n_sides; sid++)
is_edge_local = true;
for (unsigned int nid=0; nid<cell.elm()->n_nodes(); nid++)
for (unsigned int iel = 0; iel < el_ds_->lsize(); iel++ )
bool has_local_node = false;
for (unsigned int nid=0; nid<cell->n_nodes(); nid++)
has_local_node = true;
auto bgn_it = make_iter<DHCellAccessor>( DHCellAccessor(this, 0) );
auto bgn_it = make_iter<DHCellAccessor>( DHCellAccessor(this, 0) );
// print(): assemble and output a summary of the dof handler structure.
s << "DOFHandlerMultiDim structure:" << endl;
s << "- is parallel: " << (is_parallel_?"true":"false") << endl;
s << "- proc id: " << el_ds_->myp() << endl;
s << "- number of locally owned cells: " << el_ds_->lsize() << endl;
s << "- number of ghost cells: " << ghost_4_loc.size() << endl;
s << "- dofs on locally owned cells:" << endl;
auto ndofs = cell.get_dof_indices(dofs);
s << "-- cell " << cell.elm().index() << ": ";
for (unsigned int idof=0; idof<ndofs; idof++) s << dofs[idof] << " "; s << endl;
s << "- dofs on ghost cells:" << endl;
auto ndofs = cell.get_dof_indices(dofs);
s << "-- cell " << cell.elm().index() << ": ";
for (unsigned int idof=0; idof<ndofs; idof++) s << dofs[idof] << " "; s << endl;
s << "- locally owned dofs (" << lsize_ << "): ";
s << "- global-to-local-cell map:" << endl;
printf("%s", s.str().c_str());
#define MPI_Recv(buf, count, datatype, source, tag, comm, status)
void receive_ghost_dofs(unsigned int proc, std::vector< LongIdx > &dofs)
Obtain dof numbers on ghost elements from another processor.
int LongIdx
Define type that represents indices of large arrays (elements, nodes, dofs etc.)
std::shared_ptr< DOFHandlerMultiDim > sequential()
Returns sequential version of the current dof handler.
const Dof & cell_dof(unsigned int idof) const
Return dof on a given cell.
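As a usage illustration, the following sketch classifies the local dofs of each owned cell by the n-face they are attached to. It assumes a handler dh whose dofs have already been distributed; the two counters exist only for this example.
unsigned int n_nodal = 0, n_cellwise = 0;
for (DHCellAccessor cell : dh->own_range())
    for (unsigned int idof = 0; idof < cell.n_dofs(); ++idof) {
        const Dof &dof = cell.cell_dof(idof);          // dof description: dimension and index of its n-face
        if (dof.dim == 0) n_nodal++;                   // dof sits on node dof.n_face_idx of the cell
        else if (dof.dim == cell.dim()) n_cellwise++;  // dof associated to the cell itself
    }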
virtual ~DOFHandlerBase()
Destructor.
void create_sequential()
Communicate local dof indices to all processors and create a new sequential dof handler.
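A minimal usage sketch, assuming dh is a parallel DOFHandlerMultiDim whose dofs have already been distributed:
dh->create_sequential();                                        // collective call: gathers cell_starts and dof_indices on every process
std::shared_ptr<DOFHandlerMultiDim> dh_seq = dh->sequential();  // sequential handler covering the whole mesh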
void init_cell_starts()
Initialize vector of starting indices for elements.
Declaration of class which handles the ordering of degrees of freedom (dof) and mappings between local and global dofs.
unsigned int n_nodes() const
unsigned int elm_idx() const
Return serial idx to element of loc_ele_idx_.
unsigned int get_loc_dof_indices(const ElementAccessor< 3 > &cell, std::vector< LongIdx > &indices) const override
Returns the indices of dofs associated to the cell on the local process.
const DHCellAccessor cell_accessor_from_element(unsigned int elm_idx) const
Return DHCellAccessor appropriate to ElementAccessor of given idx.
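A small sketch of mapping a mesh element index to its DH cell accessor and reading its dofs; dh and elm_idx are assumed to exist and be valid:
DHCellAccessor dh_cell = dh->cell_accessor_from_element(elm_idx);
std::vector<LongIdx> dofs(dh_cell.n_dofs());
dh_cell.get_dof_indices(dofs);   // fills the global dof indices of the element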
LongIdx * get_row_4_el() const
const std::vector< MeshObject > & objects(unsigned int dim) const
Distribution * el_ds_
Distribution of elements.
unsigned int dim() const
Return dimension of element appropriate to cell.
unsigned int dim
Association to n-face of given dimension (point, line, triangle, tetrahedron).
unsigned int local_idx() const
Return local index to element (index of DOF handler).
void distribute_dofs(std::shared_ptr< DiscreteSpace > ds)
Distributes degrees of freedom on the mesh needed for the given discrete space.
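A hedged setup sketch; make_discrete_space is a placeholder for whatever builds the DiscreteSpace (e.g. from finite elements) and is not part of this API:
Mesh *mesh = /* ... obtain mesh ... */;
std::shared_ptr<DiscreteSpace> ds = make_discrete_space(mesh);        // hypothetical factory
std::shared_ptr<DOFHandlerMultiDim> dh = std::make_shared<DOFHandlerMultiDim>(*mesh);
dh->distribute_dofs(ds);   // assigns global dof numbers and builds the dof distribution (dof_ds_)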
std::shared_ptr< VecScatter > sequential_scatter()
Returns scatter context from parallel to sequential vectors.
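A sketch of the intended use with PETSc, assuming the sequential handler has been created and v_par, v_seq are PETSc Vecs sized by the parallel and sequential dof handlers respectively:
std::shared_ptr<VecScatter> scatter = dh->sequential_scatter();
VecScatterBegin(*scatter, v_par, v_seq, INSERT_VALUES, SCATTER_FORWARD);  // parallel -> sequential copy
VecScatterEnd(*scatter, v_par, v_seq, INSERT_VALUES, SCATTER_FORWARD);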
std::vector< LongIdx > local_to_global_dof_idx_
Maps local and ghost dof indices to global ones.
#define MPI_Send(buf, count, datatype, dest, tag, comm)
#define ASSERT(expr)
Allow use of shorter versions of macro names if these names are not used by an external library...
std::shared_ptr< Distribution > dof_ds_
Distribution of dofs associated to local process.
#define MPI_Allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm)
void update_local_dofs(unsigned int proc, const std::vector< bool > &update_cells, const std::vector< LongIdx > &dofs, const std::vector< LongIdx > &node_dof_starts, std::vector< LongIdx > &node_dofs)
Update dofs on local elements from ghost element dofs.
unsigned int n_dofs() const
Return number of dofs on given cell.
ElementAccessor< 3 > element() const
std::shared_ptr< DOFHandlerMultiDim > dh_seq_
Sequential dof handler associated to the current (parallel) one.
unsigned int lsize_
Number of dofs associated to local process.
unsigned int n_face_idx
Index of n-face to which the dof is associated.
map< unsigned int, vector< LongIdx > > ghost_proc_el
Arrays of ghost cells for each neighbouring processor.
void send_ghost_dofs(unsigned int proc)
Send dof numbers to another processor.
void init_node_status(std::vector< short int > &node_status)
Initialize node_status.
std::vector< LongIdx > cell_starts
Starting indices for element dofs.
unsigned int n_vb_neighbours() const
ElementAccessor< 3 > element()
static const int VALID_NODE
const ElementAccessor< 3 > elm() const
Return ElementAccessor to element of loc_ele_idx_.
unsigned int get_dof_indices(const ElementAccessor< 3 > &cell, std::vector< LongIdx > &indices) const override
Returns the global indices of dofs associated to the cell.
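Mirroring the print() code above, a sketch that collects global dof indices cell by cell; dh is an assumed handler with distributed dofs and total is only for illustration:
std::vector<LongIdx> dofs;
unsigned int total = 0;
for (DHCellAccessor cell : dh->own_range()) {
    dofs.resize(cell.n_dofs());
    total += cell.get_dof_indices(dofs);   // fills the global dof indices of the cell
}
for (DHCellAccessor cell : dh->ghost_range()) {
    dofs.resize(cell.n_dofs());
    total += cell.get_dof_indices(dofs);   // ghost cells carry valid dof numbers as well
}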
bool is_local(unsigned int idx) const
Identify local index.
Mesh * mesh_
Pointer to the mesh to which the dof handler is associated.
std::vector< LongIdx > dof_indices
Dof numbers on local and ghost elements.
Range< DHCellAccessor > ghost_range() const
Returns range over ghost DOF handler cells.
vector< LongIdx > nb_4_loc
Local neighbour index -> global neighbour index.
unsigned int max_elem_dofs_
Max. number of dofs per element.
vector< LongIdx > ghost_4_loc
Indices of ghost cells (neighbouring local elements).
std::unordered_map< LongIdx, LongIdx > global_to_local_el_idx_
Maps global element index into local/ghost index (obsolete).
virtual Range< ElementAccessor< 3 > > elements_range() const
Returns range of bulk elements.
unsigned int n_nodes() const
void init_node_dof_starts(std::vector< LongIdx > &node_dof_starts)
Initialize auxiliary vector of starting indices of nodal dofs.
unsigned int np() const
Get the number of processors.
static const int ASSIGNED_NODE
Distribution * get_el_ds() const
void make_elem_partitioning()
Prepare parallel distribution of elements, edges and neighbours.
static const int INVALID_NODE
void print() const
Output structure of dof handler.
std::shared_ptr< Distribution > distr() const
unsigned int myp() const
Get the rank of this processor.
Support classes for parallel programming.
#define MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm)
vector< Neighbour > vb_neighbours_
#define MPI_STATUS_IGNORE
vector< LongIdx > edg_4_loc
Local edge index -> global edge index.
Range< DHCellAccessor > own_range() const
Returns range of own DOF handler cells (without ghost cells).
unsigned int loffset_
Index of the first dof on the local process.
static const int INVALID_DOF
set< unsigned int > ghost_proc
Processors of ghost elements.
~DOFHandlerMultiDim() override
Destructor.
std::vector< Edge > edges
Vector of MH edges; this should not be part of the geometrical mesh.
std::shared_ptr< DiscreteSpace > ds_
Pointer to the discrete space for which the handler distributes dofs.
Abstract class for description of finite elements.
VectorMPI create_vector()
Allocates PETSc vector according to the dof distribution.
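A one-line sketch; the sizing comment is an assumption based on the lsize_ and n_global_dofs_ members described here:
VectorMPI v = dh->create_vector();   // local part presumably sized by lsize_, global size by n_global_dofs_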
unsigned int n_global_dofs_
Number of global dofs assigned by the handler.
unsigned int n_edges() const
bool el_is_local(int index) const
DOFHandlerMultiDim(Mesh &_mesh, bool make_elem_part=true)
Constructor.
Range< DHCellAccessor > local_range() const
Returns range over own and ghost cells of DOF handler.
const std::vector< unsigned int > & obj_4_el() const
bool is_parallel_
Indicator for parallel/sequential dof handler.
unsigned int idx() const
Return local idx of element in boundary / bulk part of element vector.
std::shared_ptr< VecScatter > scatter_to_seq_
Scatter context for parallel to sequential vectors.
LongIdx * get_el_4_loc() const
SideIter side(const unsigned int i) const
Implementation of range helper class.
friend class DHCellAccessor
std::size_t hash() const override
void printf(BasicWriter< Char > &w, BasicCStringRef< Char > format, ArgList args)
std::shared_ptr< DiscreteSpace > ds() const
Return pointer to discrete space for which the handler distributes dofs.
unsigned int lsize(int proc) const
Get local size for the given processor.