sparse_graph.cc
/*!
 *
 * Copyright (C) 2007 Technical University of Liberec. All rights reserved.
 *
 * Please make the following reference to Flow123d on your project site if you use the program for any purpose,
 * especially for academic research:
 * Flow123d, Research Centre: Advanced Remedial Technologies, Technical University of Liberec, Czech Republic
 *
 * This program is free software; you can redistribute it and/or modify it under the terms
 * of the GNU General Public License version 3 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with this program; if not,
 * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *
 * $Id$
 * $Revision$
 * $LastChangedBy$
 * $LastChangedDate$
 *
 *
 * @file
 * @ingroup la
 * @brief Construction and partitioning of a sparse graph
 *
 */

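/*
 * Usage sketch (illustrative only, not part of the original source; 'my_distr', 'v1', 'v2'
 * and 'part_array' are hypothetical names for an existing Distribution, two global vertex
 * indices and a preallocated partition array):
 *
 *   SparseGraphMETIS graph(my_distr);   // or SparseGraphPETSC
 *   graph.set_edge(v1, v2);             // insert edges, optionally with a weight
 *   graph.finalize();                   // exchange edges and build the rows/adj arrays
 *   graph.partition(part_array);        // compute the partition (see the backend's partition())
 */
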
#include "system/global_defs.h"
#include "system/system.hh"
#include "la/distribution.hh"
#include "sparse_graph.hh"

#include "boost/lambda/lambda.hpp"
#include <algorithm>

extern "C" {
#include <metis.h>
}

/*****************************************************************************************
 SPARSE GRAPH
 *****************************************************************************************/


SparseGraph::SparseGraph(const Distribution &distr)
     : vtx_distr(distr),
       rows(NULL),
       adj(NULL),
       adj_weights(NULL),
       part_to_check(NULL),
       adj_of_proc( vtx_distr.np() )
{
    // positions only of local vertexes (three coordinates per vertex)
    vtx_XYZ= new float[3*vtx_distr.lsize()+1];
    vtx_weights= new int[vtx_distr.lsize()+1];
    for(unsigned int i=0; i<vtx_distr.lsize(); i++) vtx_weights[i]=1;
}



SparseGraph::SparseGraph(int loc_size, MPI_Comm comm)
     : vtx_distr(loc_size, comm),
       rows(NULL),
       adj(NULL),
       adj_weights(NULL),
       adj_of_proc( vtx_distr.np() )
{
    // positions only of local vertexes (three coordinates per vertex)
    vtx_XYZ= new float[3*vtx_distr.lsize()+1];
    vtx_weights= new int[vtx_distr.lsize()+1];
    for(unsigned int i=0; i<vtx_distr.lsize(); i++) vtx_weights[i]=1;
}

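// Each inserted edge is stored on the stack of the processor that owns its initial
// vertex 'a'; finalize() later sends it to that processor.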
void SparseGraph::set_edge(const int a, const int b,int weight)
{
    Edge e={a,b, weight};

    adj_of_proc[vtx_distr.get_proc(a)].push(e);
}




void SparseGraph::set_vtx_position(const int vtx, const float xyz[3],int weight)
{
    ASSERT(vtx_distr.is_local(vtx),"Can not set vertex position for nonlocal vertex %d.\n",vtx);
    int loc_index=vtx-vtx_distr.begin();
    memcpy(vtx_XYZ+3*loc_index, xyz, 3*sizeof(float));
    vtx_weights[loc_index]=weight;
}

/**
 * Edge comparison. For lexical sorting of local edges.
 */
bool operator <(const SparseGraph::Edge &a, const SparseGraph::Edge &b)
{
    return (a.from < b.from ||
            ( a.from == b.from && a.to < b.to));
}


// TODO: split into smaller functions
// check trivial case : Localized distribution
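// Finalize the graph: (1) unroll the locally inserted edges into per-processor send buffers,
// (2) exchange them with MPI_Alltoall/MPI_Alltoallv so that every processor receives the edges
// of its own vertices, (3) sort the received edges and build the rows/adj/adj_weights arrays.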
void SparseGraph::finalize()
{
    ASSERT( adj==NULL, "Graph is already finalized\n");

    unsigned int proc;
    int total_size;
    vector< stack<Edge> >::iterator s;
    unsigned int edge_size=3;   // 3 = number of integers in Edge to send

    /////////////////////////////////////
    // communicate inserted edges

    // unroll data in adj_of_proc into send buffer
    int * sdispls = (int *) xmalloc( vtx_distr.np() * sizeof(int) );
    int * scounts = (int *) xmalloc( vtx_distr.np() * sizeof(int) );

    total_size=0;
    for(proc=0, s=adj_of_proc.begin(); s!=adj_of_proc.end();++s, ++proc)
    {
        sdispls[proc] = total_size;
        scounts[proc] = edge_size * (s)->size();
        total_size += edge_size * (s)->size();
    }
    int * sendbuf = (int *) xmalloc( (total_size+1) * sizeof(int) );

    Edge edge;
    int buf_pos=0;
    for(proc=0, s = adj_of_proc.begin(); s!=adj_of_proc.end(); ++s, ++proc)
    {
        ASSERT(sdispls[proc] == buf_pos,
               "Mismatch between displacement %d and buffer position %d. \n", sdispls[proc], buf_pos );
        while ( ! (s)->empty() ) {
            edge=(s)->top();
            sendbuf[buf_pos++]=edge.from;
            sendbuf[buf_pos++]=edge.to;
            sendbuf[buf_pos++]=edge.weight;
            (s)->pop();
        }
    }

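    // At this point 'sendbuf' holds, for every target processor, a contiguous block of
    // (from, to, weight) triples starting at sdispls[proc] and of length scounts[proc].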
    // communicate send sizes
    int * rcounts = (int *) xmalloc( vtx_distr.np() * sizeof(int) );
    int * rdispls = (int *) xmalloc( vtx_distr.np() * sizeof(int) );
    MPI_Alltoall( scounts, 1, MPI_INT, rcounts, 1, MPI_INT, vtx_distr.get_comm());

    // prepare receiving buffers
    total_size=0;
    for(proc=0; proc<vtx_distr.np(); proc++)
    {
        rdispls[proc] = total_size;
        total_size += rcounts[proc];
    }

    int * recvbuf = (int *) xmalloc( (total_size+1) * sizeof(int) );

    MPI_Alltoallv (
        sendbuf, scounts, sdispls, MPI_INT,
        recvbuf, rcounts, rdispls, MPI_INT,
        vtx_distr.get_comm() );

    xfree(sendbuf);
    xfree(scounts);
    xfree(sdispls);
    xfree(rcounts);
    xfree(rdispls);

    /////////////////////////////////////
    // construct local adj and rows arrays
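    // The received edges are sorted lexicographically and stored in a CSR-like layout:
    // the neighbours of local vertex v are adj[rows[v]] .. adj[rows[v+1]-1], with matching
    // entries in adj_weights.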
    Edge *edges= (Edge *) recvbuf;
    int size=total_size/edge_size;
    std::sort(edges, edges + size);

    allocate_sparse_graph(vtx_distr.lsize() + 1, size+1);
    rows[0]=0;

    if (size != 0) {

        int i;
        int row=0;
        int i_adj=0;
        int loc_from;
        Edge unknown_edge={-1,-1,0};
        Edge *last_edge=&unknown_edge;

        for(i=0;i<size;i++) {
            if (! (*last_edge < edges[i]) ) continue; // skip equivalent edges
            last_edge=edges+i;

            ASSERT(vtx_distr.is_local(edges[i].from),
                   "Received non-local edge: %d %d at position %d\n",edges[i].from, edges[i].to,i);

            loc_from=edges[i].from-vtx_distr.begin();
            ASSERT( row <= loc_from, "Decrease in sorted edges at %d\n",i);

            while ( row < loc_from ) rows[++row]=i_adj;
            adj[i_adj]=edges[i].to;
            adj_weights[i_adj]=edges[i].weight;
            i_adj++;
        }
        rows[++row]=i_adj; // i_adj == number of stored (unique) adjacencies

    }

    xfree(recvbuf);
}


/**
 * Check if the subgraphs of the partitions are connected.
 */
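// The check runs on processor 0 only: one DFS is started from the first unvisited vertex
// of each partition; if a partition is encountered again at a later vertex, its subgraph
// has more than one connected component and a warning is issued.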
bool SparseGraph::check_subgraph_connectivity(int *part)
{
    ASSERT( vtx_distr.lsize(0)==vtx_distr.size() , "Check of graph continuity not yet implemented for parallel case.\n");
    if (vtx_distr.myp()!=0) return(true);

    part_to_check=part;
    checked_vtx.resize(vtx_distr.size(), 0);
    std::vector<bool> checked_proc(vtx_distr.np(), false);

    unsigned int n_proc=0;
    for(unsigned int vtx=0; n_proc<vtx_distr.np() && vtx<vtx_distr.size(); vtx++) {
        if (checked_vtx[vtx] != 2) {
            proc_to_check=part_to_check[vtx];
            // check if the processor is still unvisited
            if ( checked_proc[proc_to_check] )
                xprintf(Warn, "Disconnected subgraph %d detected at vertex %d.\n",part_to_check[vtx],vtx);
            // DFS unvisited vertex
            checked_vtx[vtx]=1;
            DFS(vtx);
            checked_proc[proc_to_check]=true;
            n_proc++;
        }
    }
    checked_vtx.clear();

    DBGMSG("Connectivity of subgraphs is OK.\n");
    return (true);
}

/**
 * Recursive part of the Depth First Search algorithm of the connectivity check.
 */
void SparseGraph::DFS(int vtx)
{
    ASSERT( vtx>=0 && vtx< (int) vtx_distr.size(),"Invalid entry vertex %d in DFS.\n",vtx);
    int neighbour;
    for(int i_neigh=rows[vtx]; i_neigh< rows[vtx+1];i_neigh++) {
        neighbour = adj[i_neigh];
        if (part_to_check[neighbour] == proc_to_check && checked_vtx[neighbour] == 0) {
            checked_vtx[neighbour]=1;
            DFS(neighbour);
        }
    }
    checked_vtx[vtx]=2;
}



void SparseGraph::view()
{
    ASSERT( adj,"Can not view non finalized graph.\n");
    int row,col;
    xprintf(Msg,"SparseGraph\n");
    for(row=0; row < (int) vtx_distr.lsize(); row++) {
        xprintf(Msg,"edges from this vertex: %d\n",rows[row+1]);
        for(col=rows[row]; col<rows[row+1]; col++) {
            xprintf(Msg,"edge (v1, v2): %d %d\n",row+vtx_distr.begin(), adj[col]);
        }
    }
}

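// Symmetry check: for every locally stored edge (row, col) whose target vertex is also
// local, the reverse edge (col, row) must be present as well.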
bool SparseGraph::is_symmetric()
{
    ASSERT( rows && adj, "Graph is not yet finalized.");

    int loc_row, row, row_pos;
    int col_pos,col,loc_col;

    for(loc_row=0;loc_row< (int) vtx_distr.lsize();loc_row++) {
        row=loc_row+vtx_distr.begin();
        for(row_pos=rows[loc_row];row_pos<rows[loc_row+1];row_pos++) {
            col=adj[row_pos];
            if (vtx_distr.is_local(col)) {
                // find the local column
                loc_col=col-vtx_distr.begin();
                bool found=false;
                for(col_pos=rows[loc_col]; col_pos<rows[loc_col+1]; col_pos++)
                    if (adj[col_pos]==row) { found=true; break; }
                if (! found) return false;
            }
        }
    }

    return true;
}


/**
 * Free all non-null pointers.
 */
SparseGraph::~SparseGraph()
{
    delete[] vtx_XYZ;
    delete[] vtx_weights;
}

/*!
 * @brief Output a sparse graph.
 * @return Changed ostream.
 */
ostream & operator <<(ostream & out, const SparseGraph &fcall)
{
    // TODO: output of the graph is not yet implemented
    return out;
}


/*****************************************************************************************
 SPARSE GRAPH PETSC
 *****************************************************************************************/

void SparseGraphPETSC::allocate_sparse_graph(int lsize_vtxs, int lsize_adj)
{
    PetscMalloc(lsize_vtxs*sizeof(int), &rows);
    PetscMalloc(lsize_adj*sizeof(int),&adj);
    PetscMalloc(lsize_adj*sizeof(int),&adj_weights);
}

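// Partitioning via PETSc: the finalized CSR arrays are wrapped into an MPIAdj matrix,
// a MatPartitioning object computes the new ownership, and the resulting index set is
// copied into the caller's local partition array.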
void SparseGraphPETSC::partition(int *loc_part)
{
    ASSERT( adj && rows,"Can not make partition of non finalized graph.\n");

    MatCreateMPIAdj(vtx_distr.get_comm(), vtx_distr.lsize(), vtx_distr.size(),
                    rows, adj, adj_weights, &petsc_adj_mat);
    MatPartitioningCreate(vtx_distr.get_comm(),& petsc_part);
    MatPartitioningSetAdjacency(petsc_part, petsc_adj_mat);
    MatPartitioningSetFromOptions(petsc_part);
    MatPartitioningApply(petsc_part,&part_IS);

    // copy the partition array
    const PetscInt *is_array;
    ISGetIndices(part_IS, &is_array);
    memcpy(loc_part, is_array, vtx_distr.lsize()*sizeof(int));
    ISRestoreIndices(part_IS, &is_array);
}


SparseGraphPETSC::~SparseGraphPETSC()
{
    ISDestroy(&part_IS);
    MatPartitioningDestroy(&petsc_part);
    MatDestroy(&petsc_adj_mat);

    if (adj) PetscFree(adj);
    if (rows) PetscFree(rows);
    if (adj_weights) PetscFree(adj_weights);

    // automatically calls base class destructor
}


/*****************************************************************************************
 SPARSE GRAPH METIS
 *****************************************************************************************/


void SparseGraphMETIS::allocate_sparse_graph(int lsize_vtxs, int lsize_adj)
{
    rows = new int[lsize_vtxs];
    adj = new int[lsize_adj];
    adj_weights = new int [lsize_adj];
}

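// METIS partitioning is serial: the distribution must be localized, and processor 0
// computes the partition of the whole graph while the other processors leave 'part' untouched.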
void SparseGraphMETIS::partition(int *part)
{
    ASSERT( vtx_distr.lsize(0)==vtx_distr.size(),
            "METIS can be used only with localized distribution.\n");

    if (vtx_distr.np()==1) {
        for(unsigned int i=0;i<vtx_distr.size();i++) part[i]=0;
        return;
    } else {
        if (vtx_distr.myp()==0) {
            int n_vtx=vtx_distr.size();
            int n_proc=vtx_distr.np();
            int num_flag=0;     // indexing style (0-C, 1-Fortran)
            int edgecut;

/***********************************************************************************
 * SETTING OPTIONS
 */
#if (METIS_VER_MAJOR >= 5)
            if ( sizeof(idx_t) != sizeof(int) ) {
                printf("ERROR in GRAPH_DIVIDE_C: Wrong type of integers for METIS.\n");
                abort();
            }
            int ncon = 1;
            real_t ubvec[1];
            ubvec[0] = 1.001;
            int options[METIS_NOPTIONS];

            for (unsigned int i = 0;i < METIS_NOPTIONS;i++) options[i] = -1;

            options[METIS_OPTION_OBJTYPE]   = METIS_OBJTYPE_CUT;
            options[METIS_OPTION_CTYPE]     = METIS_CTYPE_RM;
            options[METIS_OPTION_IPTYPE]    = METIS_IPTYPE_GROW;
            options[METIS_OPTION_RTYPE]     = METIS_RTYPE_GREEDY;
            options[METIS_OPTION_NCUTS]     = 1;
            options[METIS_OPTION_NSEPS]     = 1;
            options[METIS_OPTION_NUMBERING] = num_flag;
            options[METIS_OPTION_NITER]     = 10;
            options[METIS_OPTION_SEED]      = 12345;
            options[METIS_OPTION_MINCONN]   = 1;
            options[METIS_OPTION_CONTIG]    = 0;
            options[METIS_OPTION_COMPRESS]  = 0;
            options[METIS_OPTION_CCORDER]   = 0;
            options[METIS_OPTION_UFACTOR]   = 0;
            /*options[METIS_OPTION_DBGLVL]  = METIS_DBG_INFO;*/
            options[METIS_OPTION_DBGLVL]    = 0;
#else
            /* weights */
            int wgtflag=3;
            int options[5];
            for (unsigned int i = 0; i < 5; i++ ) options[i] = 0;

#endif


            /* Initialize parts */
            for (unsigned int i = 0; i < vtx_distr.lsize(); i++ ) part[i] = num_flag;

/***********************************************************************************
 * CALL METIS using optimal algorithm depending on the number of partitions
 */

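            // Recursive bisection is used for up to 8 parts, k-way partitioning for more;
            // with the METIS 5 interface, UFACTOR sets the allowed load imbalance in
            // permilles above 1.0 (e.g. 30 allows roughly 3% imbalance).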
            if (n_proc <= 8) {

#if (METIS_VER_MAJOR >= 5)
                options[METIS_OPTION_PTYPE]   = METIS_PTYPE_RB;
                options[METIS_OPTION_UFACTOR] = 1;
                METIS_PartGraphRecursive(&n_vtx, &ncon, rows, adj,
                                         vtx_weights, NULL, adj_weights, &n_proc, NULL,
                                         ubvec, options, &edgecut, part);
#else
                // has to be checked
                METIS_PartGraphRecursive(&n_vtx, rows, adj,
                                         vtx_weights, adj_weights, &wgtflag, &num_flag,
                                         &n_proc, options, &edgecut, part);
#endif
            } else {

#if (METIS_VER_MAJOR >= 5)
                options[METIS_OPTION_PTYPE]   = METIS_PTYPE_KWAY;
                options[METIS_OPTION_UFACTOR] = 30;
                METIS_PartGraphKway(&n_vtx, &ncon, rows, adj,
                                    vtx_weights, NULL, adj_weights, &n_proc, NULL,
                                    ubvec, options, &edgecut, part);
#else
                METIS_PartGraphKway(&n_vtx, rows, adj,      // vtx distr, local vtx begins, edges of local vtxs
                                    vtx_weights, adj_weights, &wgtflag, &num_flag,  // vertex, edge weights, ...
                                    &n_proc, options, &edgecut, part);
#endif
            }
        }
    }
}

SparseGraphMETIS::~SparseGraphMETIS()
{
    if (adj) delete[](adj);
    if (rows) delete[](rows);
    if (adj_weights) delete[](adj_weights);

    // automatically calls base class destructor
}