partitioning.cc
/*
 * partitioning.cc
 *
 *  Created on: May 3, 2013
 *      Author: jb
 */

#include "mesh/partitioning.hh"
#include "mesh/mesh.h"
#include "la/sparse_graph.hh"
#include "la/distribution.hh"

#include "petscao.h"


namespace IT = Input::Type;
IT::Selection Partitioning::graph_type_sel
    = IT::Selection("GraphType",
        "Different algorithms to make the sparse graph with weighted edges\n"
        "from the multidimensional mesh. The main difference is in dealing with\n"
        "neighborings of elements of different dimension.")
    .add_value(any_neighboring, "any_neighboring", "Add an edge for any pair of neighboring elements.")
    .add_value(any_weight_lower_dim_cuts, "any_wight_lower_dim_cuts", "Same as before, but assign a higher weight to cuts of lower dimension in order to make them stick to one face.")
    .add_value(same_dimension_neighboring, "same_dimension_neghboring", "Add an edge for any pair of neighboring elements of the same dimension (bad for matrix multiply).")
    .close();

IT::Selection Partitioning::tool_sel
    = IT::Selection("PartTool", "Select the partitioning tool to use.")
    .add_value(PETSc, "PETSc", "Use the PETSc interface to various partitioning tools.")
    .add_value(METIS, "METIS", "Use the direct interface to METIS.")
    .close();

IT::Record Partitioning::input_type
    = IT::Record("Partition", "Settings for various types of mesh partitioning.")
    .declare_key("tool", Partitioning::tool_sel, IT::Default("METIS"), "Software package used for partitioning. See the corresponding selection.")
    .declare_key("graph_type", Partitioning::graph_type_sel, IT::Default("any_neighboring"), "Algorithm for generating the graph and its weights from a multidimensional mesh.")
    .allow_auto_conversion("graph_type") // mainly in order to allow a Default value for the whole Partition record
    .close();


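// A hypothetical input fragment matching the "Partition" record above (the key
// names come from the declarations; the surrounding syntax is illustrative only):
//
//     partitioning = {
//         tool = "METIS",
//         graph_type = "any_neighboring"
//     }
//
// Thanks to allow_auto_conversion("graph_type"), a bare selection value such as
// "any_neighboring" should also be accepted in place of the whole record.
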
Partitioning::Partitioning(Mesh *mesh, Input::Record in)
: mesh_(mesh), in_(in), graph_(NULL), loc_part_(NULL), init_el_ds_(NULL)
{
    make_partition();
}


Partitioning::~Partitioning() {
    if (loc_part_)    delete [] loc_part_;
    loc_part_ = NULL;
    if (init_el_ds_)  delete init_el_ds_;
    init_el_ds_ = NULL;
    if (graph_)       delete graph_;
    graph_ = NULL;
}

const Distribution * Partitioning::get_init_distr() const {
    ASSERT(init_el_ds_, "NULL initial distribution.");
    return init_el_ds_;
}


const int * Partitioning::get_loc_part() const {
    ASSERT(loc_part_, "NULL local partitioning.");
    return loc_part_;
}


void Partitioning::make_element_connection_graph() {

    Distribution edistr = graph_->get_distr();

    Edge *edg;
    int li, e_idx;
    unsigned int i_neigh;
    int i_s, n_s;

    // Add neighboring edges only for the "any_*" graph types.
    bool neigh_on = ( in_.val<PartitionGraphType>("graph_type") != same_dimension_neighboring );

    FOR_ELEMENTS( mesh_, ele) {
        // skip non-local elements
        if (!edistr.is_local(ele.index()))
            continue;

        // for all connected elements
        FOR_ELEMENT_SIDES( ele, si ) {
            edg = ele->side(si)->edge();

            FOR_EDGE_SIDES( edg, li ) {
                ASSERT(edg->side(li)->valid(), "NULL side of edge.");
                e_idx = ELEMENT_FULL_ITER(mesh_, edg->side(li)->element()).index();

                // for elements of connected elements, excluding the element itself
                if (e_idx != ele.index()) {
                    graph_->set_edge(ele.index(), e_idx);
                }
            }
        }

        // include connections from a lower-dimensional edge
        // to the higher dimension
        if (neigh_on) {
            for (i_neigh = 0; i_neigh < ele->n_neighs_vb; i_neigh++) {
                n_s = ele->neigh_vb[i_neigh]->edge()->n_sides;
                for (i_s = 0; i_s < n_s; i_s++) {
                    e_idx = ELEMENT_FULL_ITER(mesh_, ele->neigh_vb[i_neigh]->edge()->side(i_s)->element()).index();
                    graph_->set_edge(ele.index(), e_idx);
                    graph_->set_edge(e_idx, ele.index());
                }
            }
        }
    }
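
    // At this point the distributed dual graph is assembled: one vertex per
    // element, an edge for each pair of elements sharing a side and, for the
    // "any_*" graph types, symmetric edges across lower-dim/higher-dim
    // neighborings. finalize() compresses the edge set into the sparse-row
    // structures (rows, adj) used by the partitioners.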
    graph_->finalize();
}


void Partitioning::make_partition() {
    // prepare dual graph
    switch ( in_.val<PartitionTool>("tool") ) {
    case PETSc:
        init_el_ds_ = new Distribution(DistributionBlock(), mesh_->n_elements(), mesh_->get_comm() );      // initial distr.
        graph_ = new SparseGraphPETSC(*init_el_ds_);
        break;
    case METIS:
        init_el_ds_ = new Distribution(DistributionLocalized(), mesh_->n_elements(), mesh_->get_comm() );  // initial distr.
        graph_ = new SparseGraphMETIS(*init_el_ds_);
        break;
    }
    make_element_connection_graph();

    // compute partitioning
    loc_part_ = new int[init_el_ds_->lsize()];
    graph_->partition(loc_part_);
    delete graph_; graph_ = NULL;
}



/**
 * Old, UGLY, PETSc-dependent method for getting a new numbering after partitioning.
 *
 * n_ids    - given maximal ID used in id_4_old
 * id_4_old - given array of size init_el_ds_.size() - assigns an ID to an old index
 *
 * new_ds   - new distribution of elements according to the current distributed partitioning loc_part_
 * id_4_loc - IDs of local elements in the new distribution, has size new_ds->lsize()
 * new_4_id - for a given ID, the new index; -1 for unknown IDs
 */
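/*
 * A tiny worked example (illustration only, not from the original source):
 * two processes, four elements, IDs id_4_old = {10, 20, 30, 40}, n_ids = 40,
 * and a loc_part that sends old elements 0 and 2 to process 0 and old
 * elements 1 and 3 to process 1. With PETSc's numbering convention this gives:
 *
 *   old_4_new = {0, 2, 1, 3}             // new index -> old index
 *   new_ds    = (2, 2)                   // two elements per process
 *   id_4_loc  = {10, 30} on process 0,   // IDs of local elements, new order
 *               {20, 40} on process 1
 *   new_4_id[10], new_4_id[30] = 0, 1    // ID -> new index; -1 elsewhere
 *   new_4_id[20], new_4_id[40] = 2, 3
 */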
void Partitioning::id_maps(int n_ids, int *id_4_old,
        const Distribution &old_ds, int *loc_part,
        Distribution * &new_ds, int * &id_4_loc, int * &new_4_id) {

    IS part, new_numbering;
    unsigned int size = old_ds.size();   // whole size of distr. array
    int new_counts[old_ds.np()];
    AO new_old_ao;
    int *old_4_new;
    int i_loc;

    // make distribution and numbering
    ISCreateGeneral(PETSC_COMM_WORLD, old_ds.lsize(), loc_part, PETSC_COPY_VALUES, &part); // global IS of the partitioning
    ISPartitioningCount(part, old_ds.np(), new_counts);      // new size of each proc

    new_ds = new Distribution((unsigned int *) new_counts, PETSC_COMM_WORLD);  // new distribution
    ISPartitioningToNumbering(part, &new_numbering);         // new numbering

    old_4_new = (int *) xmalloc(size * sizeof(int));
    id_4_loc = (int *) xmalloc(new_ds->lsize() * sizeof(int));
    new_4_id = (int *) xmalloc((n_ids + 1) * sizeof(int));

    // create the whole new->old mapping on each proc
    AOCreateBasicIS(new_numbering, PETSC_NULL, &new_old_ao); // app ordering = new; petsc ordering = old
    for (unsigned int i = 0; i < size; i++)
        old_4_new[i] = i;
    AOApplicationToPetsc(new_old_ao, size, old_4_new);
    AODestroy(&(new_old_ao));

    // compute id_4_loc
    i_loc = 0;

    for (unsigned int i_new = new_ds->begin(); i_new < new_ds->end(); i_new++) {
        id_4_loc[i_loc++] = id_4_old[old_4_new[i_new]];
    }
    // compute new_4_id
    for (i_loc = 0; i_loc <= n_ids; i_loc++)
        new_4_id[i_loc] = -1;    // ensure that all ids are initialized
    for (unsigned int i_new = 0; i_new < size; i_new++)
        new_4_id[id_4_old[old_4_new[i_new]]] = i_new;
    xfree(old_4_new);
}


void Partitioning::id_maps(int n_ids, int *id_4_old, Distribution * &new_ds, int * &id_4_loc, int * &new_4_id) {
    Partitioning::id_maps(n_ids, id_4_old, *init_el_ds_, loc_part_, new_ds, id_4_loc, new_4_id);
}



shared_ptr< vector<int> > Partitioning::subdomain_id_field_data() {
    ASSERT(loc_part_, "Partition is not yet computed.\n");
    if (!seq_part_) {
        unsigned int seq_size = (init_el_ds_->myp() == 0) ? init_el_ds_->size() : 1;
        //seq_part_.resize(seq_size);
        seq_part_ = make_shared< vector<int> >(seq_size);
        std::vector<int> &vec = *( seq_part_.get() );

        MPI_Gatherv( loc_part_, init_el_ds_->lsize(), MPI_INT,
                     &vec[0],
                     (int *)(init_el_ds_->get_lsizes_array()),
                     (int *)(init_el_ds_->get_starts_array()),
                     MPI_INT, 0, init_el_ds_->get_comm() );

    }
    return seq_part_;

}
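

// Usage sketch (hypothetical caller, not part of this file): partition a mesh
// and collect the element -> subdomain map on rank 0, e.g.
//
//     Partitioning part(&mesh, input_record);           // runs make_partition()
//     auto subdomains = part.subdomain_id_field_data();
//     // on rank 0, (*subdomains)[i] is the partition number of element i
//     // (indexed in the initial element distribution order)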