sparse_graph.cc
/*!
 *
 * Copyright (C) 2015 Technical University of Liberec. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 3 as published by the
 * Free Software Foundation. (http://www.gnu.org/licenses/gpl-3.0.en.html)
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 *
 * @file sparse_graph.cc
 * @ingroup la
 * @brief Construction and partitioning of a sparse graph
 */

#include "system/asserts.hh"
#include "system/system.hh"
#include "la/distribution.hh"
#include "sparse_graph.hh"

#include "boost/lambda/lambda.hpp"
#include <algorithm>

extern "C" {
#include <metis.h>
}
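// METIS is a plain C library; the extern "C" block above forces C linkage in
// case the installed metis.h does not already provide its own C++ guards.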


/*****************************************************************************************
 SPARSE GRAPH
 *****************************************************************************************/


SparseGraph::SparseGraph(const Distribution &distr)
    : vtx_distr(distr),
      rows(NULL),
      adj(NULL),
      adj_weights(NULL),
      part_to_check(NULL),
      adj_of_proc( vtx_distr.np() )
{
    // positions only of local vertices; 3 coordinates are stored per vertex
    vtx_XYZ = new float[3*vtx_distr.lsize()+1];
    vtx_weights = new int[vtx_distr.lsize()+1];
    for(unsigned int i=0; i<vtx_distr.lsize(); i++) vtx_weights[i]=1;
}


SparseGraph::SparseGraph(int loc_size, MPI_Comm comm)
    : vtx_distr(loc_size, comm),
      rows(NULL),
      adj(NULL),
      adj_weights(NULL),
      adj_of_proc( vtx_distr.np() )
{
    // positions only of local vertices; 3 coordinates are stored per vertex
    vtx_XYZ = new float[3*vtx_distr.lsize()+1];
    vtx_weights = new int[vtx_distr.lsize()+1];
    for(unsigned int i=0; i<vtx_distr.lsize(); i++) vtx_weights[i]=1;
}
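
/*
 * Typical lifecycle (an illustrative sketch, not part of the original file;
 * the concrete graph type and constructor arguments are assumptions):
 *
 *     SparseGraph *graph = new SparseGraphMETIS(n_local_vtx, MPI_COMM_WORLD);
 *     graph->set_edge(u, v);        // insert edges (both directions for symmetry)
 *     graph->set_edge(v, u);
 *     graph->finalize();            // exchange edges, build the CSR arrays
 *     graph->partition(loc_part);   // fill loc_part[i] with the target processor
 *     delete graph;
 */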
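/**
 * Insert an edge. It is only buffered in the stack of the processor that owns
 * vertex 'a'; the actual exchange happens later, in finalize().
 */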
void SparseGraph::set_edge(const int a, const int b, int weight)
{
    Edge e={a, b, weight};

    adj_of_proc[vtx_distr.get_proc(a)].push(e);
}



void SparseGraph::set_vtx_position(const int vtx, const float xyz[3], int weight)
{
    ASSERT(vtx_distr.is_local(vtx))(vtx).error("Can not set vertex position for nonlocal vertex.\n");
    int loc_index=vtx-vtx_distr.begin();
    memcpy(vtx_XYZ+3*loc_index, xyz, 3*sizeof(float));
    vtx_weights[loc_index]=weight;
}


/**
 * Edge comparison. For lexicographic sorting of local edges.
 */
bool operator <(const SparseGraph::Edge &a, const SparseGraph::Edge &b)
{
    return (a.from < b.from ||
            ( a.from == b.from && a.to < b.to));
}


// TODO: split into smaller functions
// check trivial case : Localized distribution
void SparseGraph::finalize()
{
    ASSERT( adj==NULL ).error("Graph is already finalized\n");

    unsigned int proc;
    int total_size;
    vector< stack<Edge> >::iterator s;
    unsigned int edge_size=3; // 3 = number of integers in Edge to send

    /////////////////////////////////////
    // communicate inserted edges

    // unroll data in adj_of_proc into send buffer
    int * sdispls = new int [vtx_distr.np()];
    int * scounts = new int [vtx_distr.np()];

    total_size=0;
    for(proc=0, s=adj_of_proc.begin(); s!=adj_of_proc.end(); ++s, ++proc)
    {
        sdispls[proc] = total_size;
        scounts[proc] = edge_size * s->size();
        total_size += edge_size * s->size();
    }
    int * sendbuf = new int [total_size+1];

    Edge edge;
    int buf_pos=0;
    for(proc=0, s = adj_of_proc.begin(); s!=adj_of_proc.end(); ++s, ++proc)
    {
        ASSERT_EQ(sdispls[proc], buf_pos).error(
                "Mismatch between displacement and buffer position.\n");
        while ( ! s->empty() ) {
            edge=s->top();
            sendbuf[buf_pos++]=edge.from;
            sendbuf[buf_pos++]=edge.to;
            sendbuf[buf_pos++]=edge.weight;
            s->pop();
        }
    }

    /**
     * Using MPICH, valgrind reports a condition on an uninitialized value
     * here. Nevertheless we cannot get rid of it even after explicitly setting
     * all allocated space to zero.
     */
    // communicate send sizes
    int * rcounts = new int [vtx_distr.np()];
    int * rdispls = new int [vtx_distr.np()];
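    // First exchange only the sizes: every process tells every other one how
    // many ints of edge data to expect; rcounts then drives the MPI_Alltoallv below.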
    MPI_Alltoall( scounts, 1, MPI_INT, rcounts, 1, MPI_INT, vtx_distr.get_comm());

    // prepare receiving buffers
    total_size=0;
    for(proc=0; proc<vtx_distr.np(); proc++)
    {
        rdispls[proc] = total_size;
        total_size += rcounts[proc];
    }

    int * recvbuf = new int [total_size+1];

    MPI_Alltoallv (
        sendbuf, scounts, sdispls, MPI_INT,
        recvbuf, rcounts, rdispls, MPI_INT,
        vtx_distr.get_comm() );

    delete [] sendbuf;
    delete [] scounts;
    delete [] sdispls;
    delete [] rcounts;
    delete [] rdispls;

    /////////////////////////////////////
    // construct local adj and rows arrays
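    // The result is a CSR-like structure: the neighbours of local vertex v are
    // adj[rows[v]] .. adj[rows[v+1]-1]. For example, two local vertices with
    // edges (0,1) and (1,0) yield rows = {0,1,2} and adj = {1,0}.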
    Edge *edges= (Edge *) recvbuf;
    int size=total_size/edge_size;
    std::sort(edges, edges + size);

    allocate_sparse_graph(vtx_distr.lsize() + 1, size+1);
    rows[0]=0;

    if (size != 0) {

        int i;
        int row=0;
        int i_adj=0;
        int loc_from;
        Edge unknown_edge={-1,-1,0};
        Edge *last_edge=&unknown_edge;

        for(i=0;i<size;i++) {
            if (! (*last_edge < edges[i]) ) continue; // skip equivalent edges
            last_edge=edges+i;

            ASSERT(vtx_distr.is_local(edges[i].from))(edges[i].from)(edges[i].to)(i).error(
                    "Received non-local edge at position 'i'\n");

            loc_from=edges[i].from-vtx_distr.begin();
            ASSERT( row <= loc_from )(i).error("Decrease in sorted edges at 'i'\n");

            // row offsets must index the deduplicated adj array, hence i_adj
            while ( row < loc_from ) rows[++row]=i_adj;
            adj[i_adj]=edges[i].to;
            adj_weights[i_adj]=edges[i].weight;
            i_adj++;
        }
        rows[++row]=i_adj; // i_adj == size of the deduplicated adj array

    }

    delete [] recvbuf;
}


/**
 * Check if the subgraphs of the partitions are connected.
 */
bool SparseGraph::check_subgraph_connectivity(int *part)
{
    ASSERT_EQ( vtx_distr.lsize(0), vtx_distr.size() ).error("Check of graph continuity not yet implemented for the parallel case.\n");
    if (vtx_distr.myp()!=0) return(true);

    part_to_check=part;
    checked_vtx.resize(vtx_distr.size(), 0);
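    // checked_vtx acts as DFS coloring: 0 = unvisited, 1 = discovered, 2 = fully processed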
    std::vector<bool> checked_proc(vtx_distr.np(), false);

    unsigned int n_proc=0;
    for(unsigned int vtx=0; n_proc<vtx_distr.np() && vtx<vtx_distr.size(); vtx++) {
        if (checked_vtx[vtx] != 2) {
            proc_to_check=part_to_check[vtx];
            // check if the processor is still unvisited
            if ( checked_proc[proc_to_check] )
                WarningOut().fmt("Disconnected subgraph {} detected at vertex {}.\n", part_to_check[vtx], vtx);
            // DFS unvisited vertex
            checked_vtx[vtx]=1;
            DFS(vtx);
            checked_proc[proc_to_check]=true;
            n_proc++;
        }
    }
    checked_vtx.clear();

    DebugOut() << "Connectivity of subgraphs is OK.\n";
    return (true);
}

/**
 * Recursive part of the depth-first search used by the connectivity check.
 */
void SparseGraph::DFS(int vtx)
{
    ASSERT( vtx>=0 && vtx< (int) vtx_distr.size() )(vtx).error("Invalid entry vertex in DFS.\n");
    int neighbour;
    for(int i_neigh=rows[vtx]; i_neigh< rows[vtx+1]; i_neigh++) {
        neighbour = adj[i_neigh];
        if (part_to_check[neighbour] == proc_to_check && checked_vtx[neighbour] == 0) {
            checked_vtx[neighbour]=1;
            DFS(neighbour);
        }
    }
    checked_vtx[vtx]=2;
}



void SparseGraph::view()
{
    ASSERT( adj ).error("Can not view non finalized graph.\n");
    int row,col;
    MessageOut() << "SparseGraph\n";
    for(row=0; row < (int) vtx_distr.lsize(); row++) {
        MessageOut().fmt("edges from this vertex: {}\n", rows[row+1]-rows[row]);
        for(col=rows[row]; col<rows[row+1]; col++) {
            MessageOut().fmt("edge (v1, v2): {} {}\n", row+vtx_distr.begin(), adj[col]);
        }
    }
}


bool SparseGraph::is_symmetric()
{
    ASSERT( rows && adj ).error("Graph is not yet finalized.");

    int loc_row, row, row_pos;
    int col_pos,col,loc_col;

    for(loc_row=0; loc_row< (int) vtx_distr.lsize(); loc_row++) {
        row=loc_row+vtx_distr.begin();
        for(row_pos=rows[loc_row]; row_pos<rows[loc_row+1]; row_pos++) {
            col=adj[row_pos];
            if (vtx_distr.is_local(col)) {
                // find the local column
                loc_col=col-vtx_distr.begin();
                for(col_pos=rows[loc_col]; col_pos<rows[loc_col+1]; col_pos++)
                    if (adj[col_pos]==row) break;
                if (adj[col_pos]!=row) return false;
            }
        }
    }

    return true;
}


/**
 * Free all non-null pointers.
 */
SparseGraph::~SparseGraph()
{
    delete[] vtx_XYZ;
    delete[] vtx_weights;
}

/*!
 * @brief Output a sparse graph.
 * @return Changed ostream.
 */
ostream & operator <<(ostream & out, const SparseGraph &)
{
    // TODO
    ASSERT_PERMANENT(0).error("Not implemented!");
    return out;
}


/*****************************************************************************************
 SPARSE GRAPH PETSC
 *****************************************************************************************/

void SparseGraphPETSC::allocate_sparse_graph(int lsize_vtxs, int lsize_adj)
{
    PetscMalloc(lsize_vtxs*sizeof(int), &rows);
    PetscMalloc(lsize_adj*sizeof(int), &adj);
    PetscMalloc(lsize_adj*sizeof(int), &adj_weights);
}


void SparseGraphPETSC::partition(int *loc_part)
{
    ASSERT( adj && rows ).error("Can not make partition of non finalized graph.\n");

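    // Wrap the CSR arrays in a PETSc adjacency matrix and let a MatPartitioning
    // object compute the partition; the concrete algorithm is selected at
    // runtime through PETSc options (see MatPartitioningSetFromOptions below).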
    MatCreateMPIAdj(vtx_distr.get_comm(), vtx_distr.lsize(), vtx_distr.size(),
            rows, adj, adj_weights, &petsc_adj_mat);
    MatPartitioningCreate(vtx_distr.get_comm(), &petsc_part);
    MatPartitioningSetAdjacency(petsc_part, petsc_adj_mat);
    MatPartitioningSetFromOptions(petsc_part);
    MatPartitioningApply(petsc_part, &part_IS);

    // copy the partition array
    const PetscInt *is_array;
    ISGetIndices(part_IS, &is_array);
    memcpy(loc_part, is_array, vtx_distr.lsize()*sizeof(int));
    ISRestoreIndices(part_IS, &is_array);
}
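
// Note: the concrete partitioner can be chosen on the command line, e.g.
// '-mat_partitioning_type parmetis' (assuming PETSc was configured with ParMETIS).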


SparseGraphPETSC::~SparseGraphPETSC()
{
    chkerr(ISDestroy(&part_IS));
    chkerr(MatPartitioningDestroy(&petsc_part));
    chkerr(MatDestroy(&petsc_adj_mat));

    if (adj) PetscFree(adj);
    if (rows) PetscFree(rows);
    if (adj_weights) PetscFree(adj_weights);

    // automatically calls the base class destructor
}


/*****************************************************************************************
 SPARSE GRAPH METIS
 *****************************************************************************************/


void SparseGraphMETIS::allocate_sparse_graph(int lsize_vtxs, int lsize_adj)
{
    rows = new int[lsize_vtxs];
    adj = new int[lsize_adj];
    adj_weights = new int[lsize_adj];
}


void SparseGraphMETIS::partition(int *part)
{
    ASSERT_EQ( vtx_distr.lsize(0), vtx_distr.size() )
        .error("METIS could be used only with localized distribution.\n");
    if (vtx_distr.np()==1) {
        for(unsigned int i=0; i<vtx_distr.size(); i++) part[i]=0;
        return;
    } else {
        if (vtx_distr.myp()==0) {
            int n_vtx=vtx_distr.size();
            int n_proc=vtx_distr.np();
            int num_flag=0; // indexing style (0-C, 1-Fortran)
            int edgecut;

/***********************************************************************************
 * SETTING OPTIONS
 */
#if (METIS_VER_MAJOR >= 5)
            if ( sizeof(idx_t) != sizeof(int) ) {
                printf("ERROR in GRAPH_DIVIDE_C: Wrong type of integers for METIS.\n");
                abort();
            }
            int ncon = 1;  // number of balancing constraints (one weight per vertex)
            // Imbalance tolerance vector. Multi-criteria balance is available in METIS 5.x.
            real_t ubvec[1];
            ubvec[0] = 1.001; // This is in fact the default value used by METIS.
            int options[METIS_NOPTIONS];

            // -1 makes METIS use its default for any option not set explicitly below
            for (unsigned int i = 0; i < METIS_NOPTIONS; i++) options[i] = -1;

            options[METIS_OPTION_OBJTYPE]   = METIS_OBJTYPE_CUT;
            options[METIS_OPTION_CTYPE]     = METIS_CTYPE_RM;
            options[METIS_OPTION_IPTYPE]    = METIS_IPTYPE_GROW;
            options[METIS_OPTION_RTYPE]     = METIS_RTYPE_GREEDY;
            options[METIS_OPTION_NCUTS]     = 1;
            options[METIS_OPTION_NSEPS]     = 1;
            options[METIS_OPTION_NUMBERING] = num_flag;
            options[METIS_OPTION_NITER]     = 10;
            options[METIS_OPTION_SEED]      = 12345;
            options[METIS_OPTION_MINCONN]   = 1;
            options[METIS_OPTION_CONTIG]    = 1; // enforce contiguous (connected) subdomains
            options[METIS_OPTION_COMPRESS]  = 0;
            options[METIS_OPTION_CCORDER]   = 0;
            options[METIS_OPTION_UFACTOR]   = 30;
            //options[METIS_OPTION_DBGLVL]  = METIS_DBG_INFO | METIS_DBG_TIME;
            options[METIS_OPTION_DBGLVL]    = 0;
#else
            /* weights */
            int wgtflag=3;
            int options[5];
            for (unsigned int i = 0; i < 5; i++ ) options[i] = 0;

#endif


            /* Initialize parts */
            for (unsigned int i = 0; i < vtx_distr.lsize(); i++ ) part[i] = num_flag;

/***********************************************************************************
 * CALL METIS using the optimal algorithm depending on the number of partitions
 */

            if (n_proc <= 8) {

#if (METIS_VER_MAJOR >= 5)
                options[METIS_OPTION_PTYPE]   = METIS_PTYPE_RB;
                options[METIS_OPTION_UFACTOR] = 1;
                METIS_PartGraphRecursive(&n_vtx, &ncon, rows, adj,
                        vtx_weights, NULL, adj_weights, &n_proc, NULL,
                        ubvec, options, &edgecut, part);
#else
                METIS_PartGraphRecursive(&n_vtx, rows, adj,
                        vtx_weights, adj_weights, &wgtflag, &num_flag,
                        &n_proc, options, &edgecut, part);
#endif
            } else {

#if (METIS_VER_MAJOR >= 5)
                options[METIS_OPTION_PTYPE]   = METIS_PTYPE_KWAY;
                options[METIS_OPTION_UFACTOR] = 30;
                METIS_PartGraphKway(&n_vtx, &ncon, rows, adj,
                        vtx_weights, NULL, adj_weights, &n_proc, NULL,
                        ubvec, options, &edgecut, part);
#else
                METIS_PartGraphKway(&n_vtx, rows, adj,          // vtx distr, local vtx begins, edges of local vtxs
                        vtx_weights, adj_weights, &wgtflag, &num_flag, // vertex, edge weights, ...
                        &n_proc, options, &edgecut, part);
#endif
            }
        }
    }
}
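
/*
 * Minimal standalone sketch of the METIS 5 call used above, for a 4-vertex
 * cycle graph split into two parts (illustrative only; the NULL arguments
 * select METIS defaults for weights, tolerances and options):
 *
 *     idx_t n = 4, ncon = 1, nparts = 2, objval;
 *     idx_t xadj[5]   = {0, 2, 4, 6, 8};        // CSR row offsets
 *     idx_t adjncy[8] = {1,3, 0,2, 1,3, 0,2};   // neighbours of vertices 0..3
 *     idx_t part[4];
 *     METIS_PartGraphKway(&n, &ncon, xadj, adjncy, NULL, NULL, NULL,
 *                         &nparts, NULL, NULL, NULL, &objval, part);
 */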

SparseGraphMETIS::~SparseGraphMETIS()
{
    if (adj) delete[](adj);
    if (rows) delete[](rows);
    if (adj_weights) delete[](adj_weights);

    // automatically calls the base class destructor
}