/*!
 *
 * Copyright (C) 2015 Technical University of Liberec. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 3 as published by the
 * Free Software Foundation. (http://www.gnu.org/licenses/gpl-3.0.en.html)
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 *
 * @file mh_dofhandler.cc
 * @brief
 */

#include "flow/mh_dofhandler.hh"
#include "mesh/mesh.h"
#include "mesh/side_impl.hh"
#include "system/sys_profiler.hh"

MH_DofHandler::MH_DofHandler()
:
el_4_loc(nullptr),
row_4_el(nullptr),
side_id_4_loc(nullptr),
side_row_4_id(nullptr),
edge_4_loc(nullptr),
row_4_edge(nullptr),
edge_ds(nullptr),
el_ds(nullptr),
side_ds(nullptr)
{}

MH_DofHandler::~MH_DofHandler()
{
    delete edge_ds;
    // delete el_ds;
    delete side_ds;

    // xfree(el_4_loc);
    delete [] row_4_el;
    delete [] side_id_4_loc;
    delete [] side_row_4_id;
    delete [] edge_4_loc;
    delete [] row_4_edge;
}


void MH_DofHandler::reinit(Mesh *mesh) {
    mesh_ = mesh;
    elem_side_to_global.resize(mesh->n_elements() );
    FOR_ELEMENTS(mesh, ele) elem_side_to_global[ele.index()].resize(ele->n_sides());

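    // Assign consecutive global indices to all element sides; side_dof() below
    // returns these indices and prepare_parallel() uses them as side ids.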
    unsigned int i_side_global=0;
    FOR_ELEMENTS(mesh, ele) {
        for(unsigned int i_lside=0; i_lside < ele->n_sides(); i_lside++)
            elem_side_to_global[ele.index()][i_lside] = i_side_global++;
    }

    prepare_parallel();

}



// ====================================================================================
// - compute optimal edge partitioning
// - compute appropriate partitioning of elements and sides
// - make arrays: *_id_4_loc and *_row_4_id to allow parallel assembly of the MH matrix
// ====================================================================================
void MH_DofHandler::prepare_parallel() {

    START_TIMER("prepare parallel");

    int *loc_part;   // optimal (edge,el) partitioning (local chunk)
    int *id_4_old;   // map from old idx to ids (edge,el)
    int loc_i;

    int e_idx;


    //ierr = MPI_Barrier(PETSC_COMM_WORLD);
    //OLD_ASSERT(ierr == 0, "Error in MPI_Barrier.");

    // row_4_el will be modified so we make a copy of the array from mesh
    row_4_el = new int[mesh_->n_elements()];
    std::copy(mesh_->get_row_4_el(), mesh_->get_row_4_el() + mesh_->n_elements(), row_4_el);
    el_4_loc = mesh_->get_el_4_loc();
    el_ds = mesh_->get_el_ds();

    //optimal element part; loc. els. id-> new el. numbering
    Distribution init_edge_ds(DistributionLocalized(), mesh_->n_edges(), PETSC_COMM_WORLD);
    // partitioning of edges, edge belongs to the proc of its first element
    // this is not optimal but simple
    loc_part = new int[init_edge_ds.lsize()];
    id_4_old = new int[mesh_->n_edges()];
    {
        loc_i = 0;
        FOR_EDGES(mesh_, edg ) {
            unsigned int i_edg = edg - mesh_->edges.begin();
            // partition
            e_idx = mesh_->element.index(edg->side(0)->element());
            if (init_edge_ds.is_local(i_edg)) {
                // find (new) proc of the first element of the edge
                loc_part[loc_i++] = el_ds->get_proc(row_4_el[e_idx]);
            }
            // id array
            id_4_old[i_edg] = i_edg;
        }
    }
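    // Partitioning::id_maps() redistributes edges according to loc_part and builds
    // edge_ds (the new edge distribution), edge_4_loc (ids of edges kept on this
    // proc) and row_4_edge (the new numbering for every edge id).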
    Partitioning::id_maps(mesh_->n_edges(), id_4_old, init_edge_ds, loc_part, edge_ds, edge_4_loc, row_4_edge);
    delete[] loc_part;
    delete[] id_4_old;

    // create map from mesh global edge id to new local edge id
    unsigned int loc_edge_idx=0;
    for (unsigned int i_el_loc = 0; i_el_loc < el_ds->lsize(); i_el_loc++) {
        auto ele = mesh_->element(el_4_loc[i_el_loc]);
        for (unsigned int i = 0; i < ele->n_sides(); i++) {
            unsigned int mesh_edge_idx = ele->side(i)->edge_idx();
            if ( edge_new_local_4_mesh_idx_.count(mesh_edge_idx) == 0 )
                // new local edge
                edge_new_local_4_mesh_idx_[mesh_edge_idx] = loc_edge_idx++;
        }
    }

    //optimal side part; loc. sides; id-> new side numbering
    Distribution init_side_ds(DistributionBlock(), mesh_->n_sides(), PETSC_COMM_WORLD);
    // partitioning of sides follows elements
    loc_part = new int[init_side_ds.lsize()];
    id_4_old = new int[mesh_->n_sides()];
    {
        int is = 0;
        loc_i = 0;
        FOR_SIDES(mesh_, side ) {
            // partition
            if (init_side_ds.is_local(is)) {
                // find (new) proc of the element of the side
                loc_part[loc_i++] = el_ds->get_proc(
                        row_4_el[mesh_->element.index(side->element())]);
            }
            // id array
            id_4_old[is++] = side_dof( side );
        }
    }

    Partitioning::id_maps(mesh_->n_sides(), id_4_old, init_side_ds, loc_part, side_ds,
            side_id_4_loc, side_row_4_id);
    delete [] loc_part;
    delete [] id_4_old;

    // convert row_4_id arrays from separate numberings to global numbering of rows
    make_row_numberings();

}

// ========================================================================
// to finish the row_4_id arrays we have to convert the individual numberings of
// sides/els/edges to a single numbering of rows. To this end we count shifts
// for sides/els/edges on each proc and then apply them to the row_4_id
// arrays.
// ========================================================================
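// For illustration, with two processors the final row numbering is
//   | sides_0 | els_0 | edges_0 | sides_1 | els_1 | edges_1 |
// i.e. every proc owns one contiguous block of rows; the *_shift values computed
// below move each of the three separate numberings into this common numbering.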
void MH_DofHandler::make_row_numberings() {
    int i, shift;
    int np = edge_ds->np();
    int edge_shift[np], el_shift[np], side_shift[np];
    unsigned int rows_starts[np];

    int edge_n_id = mesh_->n_edges(),
        el_n_id = mesh_->element.size(),
        side_n_id = mesh_->n_sides();

    // compute shifts on every proc
    shift = 0; // in this var. we count new starts of arrays chunks
    for (i = 0; i < np; i++) {
        side_shift[i] = shift - (side_ds->begin(i)); // subtract actual start of the chunk
        shift += side_ds->lsize(i);
        el_shift[i] = shift - (el_ds->begin(i));
        shift += el_ds->lsize(i);
        edge_shift[i] = shift - (edge_ds->begin(i));
        shift += edge_ds->lsize(i);
        rows_starts[i] = shift;
    }
    // apply shifts
    for (i = 0; i < side_n_id; i++) {
        int &what = side_row_4_id[i];
        if (what >= 0)
            what += side_shift[side_ds->get_proc(what)];
    }
    for (i = 0; i < el_n_id; i++) {
        int &what = row_4_el[i];
        if (what >= 0)
            what += el_shift[el_ds->get_proc(what)];

    }
    for (i = 0; i < edge_n_id; i++) {
        int &what = row_4_edge[i];
        if (what >= 0)
            what += edge_shift[edge_ds->get_proc(what)];
    }
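    // rows_starts[i] now holds the end of the i-th proc's block in the global
    // numbering; the loop below turns it into the local block size of each proc.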
    // make distribution of rows
    for (i = np - 1; i > 0; i--)
        rows_starts[i] -= rows_starts[i - 1];

    rows_ds = std::make_shared<Distribution>(&(rows_starts[0]), PETSC_COMM_WORLD);
}


void MH_DofHandler::prepare_parallel_bddc() {
#ifdef FLOW123D_HAVE_BDDCML
    // auxiliary
    Element *el;
    int side_row, edge_row;

    global_row_4_sub_row = std::make_shared<LocalToGlobalMap>(rows_ds);

    //
    // ordering of dofs
    // for each subdomain:
    // | velocities (at sides) | pressures (at elements) | L. mult. (at edges) |
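    // global_row_4_sub_row collects, for every element owned by this proc, the
    // global row indices of its pressure, side and edge dofs (including edges of
    // vb neighbours); this is the local-to-global map required by the BDDC solver.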
    for (unsigned int i_loc = 0; i_loc < el_ds->lsize(); i_loc++) {
        el = mesh_->element(el_4_loc[i_loc]);
        int el_row = row_4_el[el_4_loc[i_loc]];

        global_row_4_sub_row->insert( el_row );

        unsigned int nsides = el->n_sides();
        for (unsigned int i = 0; i < nsides; i++) {
            side_row = side_row_4_id[ side_dof( el->side(i) ) ];
            edge_row = row_4_edge[el->side(i)->edge_idx()];

            global_row_4_sub_row->insert( side_row );
            global_row_4_sub_row->insert( edge_row );
        }

        for (unsigned int i_neigh = 0; i_neigh < el->n_neighs_vb; i_neigh++) {
            // mark this edge
            edge_row = row_4_edge[el->neigh_vb[i_neigh]->edge_idx() ];
            global_row_4_sub_row->insert( edge_row );
        }
    }
    global_row_4_sub_row->finalize();
#endif // FLOW123D_HAVE_BDDCML
}



unsigned int MH_DofHandler::side_dof(const SideIter side) const {
    return elem_side_to_global[ side->element().index() ][ side->el_idx() ];
}

void MH_DofHandler::set_solution( double time, double * solution, double precision) {
    OLD_ASSERT( solution != NULL, "Empty solution.\n");
    mh_solution = solution;
    solution_precision = precision;
    time_ = time;
}

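// The accessors below rely on the layout of the mh_solution vector:
// [ side fluxes (n_sides) | element pressures (n_elements) | edge pressures ].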
/// temporary replacement for DofHandler accessor, flux through given side
double MH_DofHandler::side_flux(const Side &side) const {
    return mh_solution[ elem_side_to_global[ side.element().index() ][ side.el_idx() ] ];
}

/// temporary replacement for DofHandler accessor, scalar (pressure) on edge of the side
double MH_DofHandler::side_scalar(const Side &side) const {
    unsigned int i_edg = side.edge_idx();
    return mh_solution[ side.mesh()->n_sides() + side.mesh()->n_elements() + i_edg ];
}


/// temporary replacement for DofHandler accessor, scalar (pressure) on element
double MH_DofHandler::element_scalar( ElementFullIter &ele ) const {
    return mh_solution[ ele->mesh_->n_sides() + ele.index() ];
}


LocalElementAccessorBase<3> MH_DofHandler::accessor(uint local_ele_idx) {
    return LocalElementAccessorBase<3>(this, local_ele_idx);
}