Flow123d
sparse_graph.cc
Go to the documentation of this file.
1 /*!
2  *
3  * Copyright (C) 2007 Technical University of Liberec. All rights reserved.
4  *
5  * Please make a following refer to Flow123d on your project site if you use the program for any purpose,
6  * especially for academic research:
7  * Flow123d, Research Centre: Advanced Remedial Technologies, Technical University of Liberec, Czech Republic
8  *
9  * This program is free software; you can redistribute it and/or modify it under the terms
10  * of the GNU General Public License version 3 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
13  * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14  * See the GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License along with this program; if not,
17  * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 021110-1307, USA.
18  *
19  *
20  * $Id$
21  * $Revision$
22  * $LastChangedBy$
23  * $LastChangedDate$
24  *
25  *
26  * @file
27  * @ingroup la
28  * @brief Construction and partitioning of a sparse graph
29  *
30  */
31 
32 #include "system/global_defs.h"
33 #include "system/system.hh"
34 #include "la/distribution.hh"
35 #include "sparse_graph.hh"
36 
37 #include "boost/lambda/lambda.hpp"
38 #include <algorithm>
39 
40 extern "C" {
41 #include <metis.h>
42 }
43 
44 
45 /*****************************************************************************************
46  SPARSE GRAPH
47  *****************************************************************************************/
48 
49 
// NOTE(review): doxygen-listing artifact — source line 50 (the constructor
// signature, presumably SparseGraph::SparseGraph(const Distribution &distr))
// is missing from this capture; only the initializer list and body remain.
// Constructor: takes an existing vertex distribution and prepares empty
// per-processor edge stacks plus per-local-vertex position/weight storage.
51  : vtx_distr(distr),
52  rows(NULL),
53  adj(NULL),
54  adj_weights(NULL),
55  part_to_check(NULL),
56  adj_of_proc( vtx_distr.np() )
57 {
58  F_ENTRY;
59 
60  // positions only of local vertexes
// NOTE(review): set_vtx_position() writes 3 floats per local vertex into
// vtx_XYZ (memcpy of 3*sizeof(float) at offset 3*loc_index), yet only
// lsize()+1 floats are allocated here — looks like a heap overflow as soon
// as there is more than one local vertex; should presumably be
// 3*vtx_distr.lsize(). TODO confirm against the header/callers.
61  vtx_XYZ= new float[vtx_distr.lsize()+1];
// every local vertex starts with unit weight; can be overwritten later
// through set_vtx_position()
62  vtx_weights= new int[vtx_distr.lsize()+1];
63  for(int i=0; i<vtx_distr.lsize(); i++) vtx_weights[i]=1;
64 }
65 
66 
67 
// NOTE(review): doxygen-listing artifact — source line 68 (the constructor
// signature, presumably SparseGraph::SparseGraph(int loc_size, MPI_Comm comm))
// is missing from this capture.
// Constructor: builds its own Distribution from a local size and communicator.
// NOTE(review): unlike the other constructor, part_to_check is NOT set to
// NULL in this initializer list — potentially uninitialized pointer until
// check_subgraph_connectivity() assigns it. TODO confirm.
69  : vtx_distr(loc_size, comm),
70  rows(NULL),
71  adj(NULL),
72  adj_weights(NULL),
73  adj_of_proc( vtx_distr.np() )
74 {
75  F_ENTRY;
76 
77  // positions only of local vertexes
// NOTE(review): same suspected under-allocation as in the other constructor —
// vtx_XYZ holds 3 floats per vertex (see set_vtx_position) but only
// lsize()+1 floats are allocated. TODO confirm.
78  vtx_XYZ= new float[vtx_distr.lsize()+1];
79  vtx_weights= new int[vtx_distr.lsize()+1];
80  for(int i=0; i<vtx_distr.lsize(); i++) vtx_weights[i]=1;
81 }
82 
83 void SparseGraph::set_edge(const int a, const int b,int weight)
84 {
85  F_ENTRY;
86 
87  Edge e={a,b, weight};
88 
89  adj_of_proc[vtx_distr.get_proc(a)].push(e);
90 }
91 
92 
93 
94 
95 void SparseGraph::set_vtx_position(const int vtx, const float xyz[3],int weight)
96 {
97  ASSERT(vtx_distr.is_local(vtx),"Can not set vertex position for nonlocal vertex %d.\n",vtx);
98  int loc_index=vtx-vtx_distr.begin();
99  memcpy(vtx_XYZ+3*loc_index, xyz, 3*sizeof(float));
100  vtx_weights[loc_index]=weight;
101 }
102 
103 
104 /**
105  * Edge comparison. For lexical sorting of local edges.
106  */
107 
// NOTE(review): doxygen-listing artifact — source line 108 (the comparison
// signature, presumably
// bool operator <(const SparseGraph::Edge &a, const SparseGraph::Edge &b))
// is missing from this capture. The body orders edges lexicographically by
// (from, to); the weight field does not participate in the ordering, so
// finalize()'s duplicate skipping treats equal (from,to) pairs as equal
// regardless of weight.
109 {
110  return (a.from < b.from ||
111  ( a.from == b.from && a.to < b.to));
112 }
113 
114 
115 // TODO: split into smaller functions
116 // check trivial case : Localized distribution
// NOTE(review): doxygen-listing artifact — source line 117 (the signature,
// presumably void SparseGraph::finalize()) is missing from this capture.
//
// Overall algorithm:
//  1) unroll the per-processor stacks of locally inserted edges into one
//     contiguous send buffer (edge_size = 3 ints per edge: from, to, weight),
//  2) exchange counts with MPI_Alltoall, then the edges themselves with
//     MPI_Alltoallv, so each processor ends up with exactly the edges whose
//     'from' vertex it owns,
//  3) sort the received edges lexicographically, skip duplicates and build
//     the local CSR arrays rows / adj / adj_weights.
118 {
119  F_ENTRY;
120  ASSERT( adj==NULL, "Graph is already finalized\n");
121 
122  int proc;
123  int total_size;
124  vector< stack<Edge> >::iterator s;
125  unsigned int edge_size=3; // 3 = number of integers in Edge to send
126 
127  /////////////////////////////////////
128  // communicate inserted edges
129 
130  // unroll data in adj_of_proc into send buffer
131  int * sdispls = (int *) xmalloc( vtx_distr.np() * sizeof(int) );
132  int * scounts = (int *) xmalloc( vtx_distr.np() * sizeof(int) );
133 
 // first pass: per-destination counts and displacements, measured in ints
134  total_size=0;
135  for(proc=0, s=adj_of_proc.begin(); s!=adj_of_proc.end();++s, ++proc)
136  {
137  sdispls[proc] = total_size;
138  scounts[proc] = edge_size * (s)->size();
139  total_size += edge_size * ((s)->size)();
140  }
 // +1 so that xmalloc never gets a zero size
141  int * sendbuf = (int *) xmalloc( (total_size+1) * sizeof(int ) );
142 
 // second pass: drain each per-processor stack into the send buffer
143  Edge edge;
144  int buf_pos=0;
145  for(proc=0, s = adj_of_proc.begin(); s!=adj_of_proc.end(); ++s, ++proc)
146  {
147  ASSERT(sdispls[proc] == buf_pos,
148  "Mismatch between displacement %d and buffer position %d. \n", sdispls[proc], buf_pos );
149  while ( ! (s)->empty() ) {
150  edge=(s)->top();
151  sendbuf[buf_pos++]=edge.from;
152  sendbuf[buf_pos++]=edge.to;
153  sendbuf[buf_pos++]=edge.weight;
154  (s)->pop();
155  }
156  }
157 
158  // communicate send sizes
159  int * rcounts = (int *) xmalloc( vtx_distr.np() * sizeof(int) );
160  int * rdispls = (int *) xmalloc( vtx_distr.np() * sizeof(int) );
161  MPI_Alltoall( scounts, 1, MPI_INT, rcounts, 1, MPI_INT, vtx_distr.get_comm());
162 
163  // prepare receiving buffers
164  total_size=0;
165  for(proc=0; proc<vtx_distr.np(); proc++)
166  {
167  rdispls[proc] = total_size;
168  total_size += rcounts[proc];
169  }
170 
171  int * recvbuf = (int *) xmalloc( (total_size+1) * sizeof(int ) );
172 
173  MPI_Alltoallv (
174  sendbuf, scounts, sdispls, MPI_INT,
175  recvbuf, rcounts, rdispls, MPI_INT,
176  vtx_distr.get_comm() );
177 
178  xfree(sendbuf);
179  xfree(scounts);
180  xfree(sdispls);
181  xfree(rcounts);
182  xfree(rdispls);
183 
184  /////////////////////////////////////
185  // construct local adj and rows arrays
186  //DBGMSG("construct adj\n");
 // reinterpret the received int triples as Edge structs and sort them;
 // NOTE(review): assumes Edge is a POD of exactly 3 ints with no padding
187  Edge *edges= (Edge *) recvbuf;
188  int size=total_size/edge_size;
189  std::sort(edges, edges + size);
190 
191  allocate_sparse_graph(vtx_distr.lsize() + 1, size+1);
192 /* adj = (int *) xmalloc( (size+1) * sizeof(int) );
193  rows = (int *) xmalloc( (vtx_distr.lsize() + 1) * sizeof(int) );
194  adj_weights = (int *) xmalloc( (size+1) * sizeof(int) );
195 */
196  rows[0]=0;
197 
// NOTE(review): if size == 0, only rows[0] is initialized; rows[1..lsize()]
// stay uninitialized. Similarly, after the loop below only rows[1..row] are
// filled — local vertices past the last edge-bearing vertex keep garbage
// offsets. TODO confirm whether callers guarantee every vertex has an edge.
198  if (size != 0) {
199 
200  int i;
201  int row=0;
202  int i_adj=0;
203  int loc_from;
 // sentinel smaller than any real edge, so the first edge is never skipped
204  Edge unknown_edge={-1,-1,0};
205  Edge *last_edge=&unknown_edge;
206 
207  for(i=0;i<size;i++) {
208  if (! (*last_edge < edges[i]) ) continue; // skip equivalent edges
209  last_edge=edges+i;
210 
211  ASSERT(vtx_distr.is_local(edges[i].from),
212  "Received non-local edge: %d %d at position %d\n",edges[i].from, edges[i].to,i);
213 
214  loc_from=edges[i].from-vtx_distr.begin();
215  ASSERT( row <= loc_from, "Decrease in sorted edges at %d\n",i);
216 
 // close all rows up to the current one; empty rows get the same offset
// NOTE(review): rows[] is filled with i — an index into the sorted array
// that may still contain duplicates — while adj[]/adj_weights[] are
// compacted with i_adj. If duplicate edges actually occur, the two indices
// diverge and the CSR offsets point past the compacted entries.
// TODO confirm duplicates cannot be inserted, or use i_adj here.
217  while ( row < loc_from ) rows[++row]=i;
218  adj[i_adj]=edges[i].to;
219  adj_weights[i_adj]=edges[i].weight;
220  i_adj++;
221  }
222  rows[++row]=i; // i==size (of adj array)
223 
224  }
225 
226  xfree(recvbuf);
227 }
228 
229 
230 /**
231  * Check if the subgraphs of the partitions are connected.
232  */
// NOTE(review): doxygen-listing artifact — source line 233 (the signature,
// presumably bool SparseGraph::check_subgraph_connectivity(int *part)) is
// missing from this capture. Runs entirely on processor 0 and only supports
// a fully localized distribution (enforced by the ASSERT below).
234 {
235  F_ENTRY;
236 
237  ASSERT( vtx_distr.lsize(0)==vtx_distr.size() , "Check of graph continuity not yet implemented for paralel case.\n");
238  if (vtx_distr.myp()!=0) return(true);
239 
240  part_to_check=part;
 // 0 = unvisited, 1 = discovered, 2 = fully processed (see DFS)
241  checked_vtx.resize(vtx_distr.size(), 0);
242  std::vector<bool> checked_proc(vtx_distr.np(), false);
243 
244  int n_proc=0;
245  for(unsigned int vtx=0; n_proc<vtx_distr.np() && vtx<vtx_distr.size(); vtx++) {
246  if (checked_vtx[vtx] != 2) {
// NOTE(review): source line 247 is missing from this capture — it most
// likely assigns proc_to_check = part_to_check[vtx]; without it the tests
// below would use a stale proc_to_check. TODO confirm against the repo.
248  // check if the processor is still unvisited
249  if ( checked_proc[proc_to_check] )
250  xprintf(Warn, "Disconnected subgraph %d detected at vertex %d.\n",part_to_check[vtx],vtx);
251  // DFS unvisited vertex
252  checked_vtx[vtx]=1;
253  DFS(vtx);
254  checked_proc[proc_to_check]=true;
255  n_proc++;
256  }
257  }
258  checked_vtx.clear();
259 
 // NOTE(review): returns true even when disconnection warnings were issued
260  DBGMSG("Connectivity of subgraphs is OK.\n");
261  return (true);
262 }
263 
264 /**
265  * Recursive part of Deep First Search algorithm of the connectivity check.
266  */
267 void SparseGraph::DFS(int vtx)
268 {
269  ASSERT( vtx>=0 && vtx< (int) vtx_distr.size(),"Invalid entry vertex %d in DFS.\n",vtx);
270  int neighbour;
271  for(int i_neigh=rows[vtx]; i_neigh< rows[vtx+1];i_neigh++) {
272  neighbour = adj[i_neigh];
273  if (part_to_check[neighbour] == proc_to_check && checked_vtx[neighbour] == 0) {
274  checked_vtx[neighbour]=1;
275  DFS(neighbour);
276  }
277  }
278  checked_vtx[vtx]=2;
279 }
280 
281 
282 
// NOTE(review): doxygen-listing artifact — source line 283 (the signature,
// presumably void SparseGraph::view()) is missing from this capture.
// Debug print of the finalized CSR graph: for every local vertex, its
// outgoing edges (global from-index, global to-index).
284 {
285  ASSERT( adj,"Can not view non finalized graph.\n");
286  int row,col;
287  xprintf(Msg,"SparseGraph\n");
288  for(row=0; row < (int) vtx_distr.lsize(); row++) {
// NOTE(review): rows[row+1] is the CSR *end offset* of this row (cumulative
// edge count), not the number of edges of this vertex — the per-vertex count
// would be rows[row+1]-rows[row]. The printed label looks misleading.
289  xprintf(Msg,"edges from this vertex: %d\n",rows[row+1]);
290  for(col=rows[row]; col<rows[row+1]; col++) {
291  xprintf(Msg,"edge (v1, v2): %d %d\n",row+vtx_distr.begin(), adj[col]);
292  }
293  }
294 }
295 
296 
// NOTE(review): doxygen-listing artifact — source line 297 (the signature,
// presumably bool SparseGraph::is_symmetric()) is missing from this capture.
// Checks that for every local edge (row -> col) with a local target, the
// reverse edge (col -> row) is also present. Non-local targets are skipped,
// so on more than one processor this is only a partial check.
298 {
299  ASSERT( rows && adj, "Graph is not yet finalized.");
300 
301  int loc_row, row, row_pos;
302  int col_pos,col,loc_col;
303 
304  for(loc_row=0;loc_row< (int) vtx_distr.lsize();loc_row++) {
305  row=loc_row+vtx_distr.begin();
306  for(row_pos=rows[loc_row];row_pos<rows[loc_row+1];row_pos++) {
307  col=adj[row_pos];
308  if (vtx_distr.is_local(col)) {
309  // find the local column
310  loc_col=col-vtx_distr.begin();
311  for(col_pos=rows[loc_col]; col_pos<rows[loc_col+1]; col_pos++)
312  if (adj[col_pos]==row) break;
// NOTE(review): if the scan loop finishes without break, col_pos equals
// rows[loc_col+1], so adj[col_pos] here reads the FIRST entry of the NEXT
// row (or one past the last adjacency) — if that entry happens to equal
// 'row', a missing reverse edge is silently accepted. A found-flag would be
// safer. TODO confirm.
313  if (adj[col_pos]!=row) return false;
314  }
315  }
316  }
317 
318  return true;
319 }
320 
321 
322 /**
323  * Free all non-null pointers.
324  */
// NOTE(review): doxygen-listing artifact — source line 325 (the destructor
// signature, presumably SparseGraph::~SparseGraph()) is missing from this
// capture. Only the position/weight arrays are released here; the CSR arrays
// rows/adj/adj_weights are freed by the derived-class destructors
// (~SparseGraphPETSC uses PetscFree, ~SparseGraphMETIS uses delete[]),
// matching their respective allocate_sparse_graph() implementations.
326 {
327  delete[] vtx_XYZ;
328  delete[] vtx_weights;
329 }
330 
331 /*!
332  * @brief Output a sparse graph.
333  * @return Changed ostream.
334  */
335 ostream & operator <<(ostream & out, const SparseGraph &fcall)
336 {
337  // TODO
338  return out; //dodelat...
339 }
340 
341 
342 /*****************************************************************************************
343  SPARSE GRAPH PETSC
344  *****************************************************************************************/
345 
346 void SparseGraphPETSC::allocate_sparse_graph(int lsize_vtxs, int lsize_adj)
347 {
348  PetscMalloc(lsize_vtxs*sizeof(int), &rows);
349  PetscMalloc(lsize_adj*sizeof(int),&adj);
350  PetscMalloc(lsize_adj*sizeof(int),&adj_weights);
351 }
352 
353 
// Partition the finalized graph with PETSc's MatPartitioning machinery and
// copy the resulting ranks into the caller-provided array loc_part
// (one int per local vertex).
354 void SparseGraphPETSC::partition(int *loc_part)
355 {
356  ASSERT( adj && rows,"Can not make partition of non finalized graph.\n");
357 
// NOTE(review): doxygen-listing artifact — source line 359 (the remaining
// arguments of MatCreateMPIAdj, presumably the rows/adj/adj_weights arrays
// and &petsc_adj_mat) is missing from this capture; the call below is
// therefore shown truncated. TODO confirm against the repo.
358  MatCreateMPIAdj(vtx_distr.get_comm(), vtx_distr.lsize(),vtx_distr.size(),
360  MatPartitioningCreate(vtx_distr.get_comm(),& petsc_part);
361  MatPartitioningSetAdjacency(petsc_part, petsc_adj_mat);
362  MatPartitioningSetFromOptions(petsc_part);
363  MatPartitioningApply(petsc_part,&part_IS);
364 
365  // copy the partition array
366  const PetscInt *is_array;
367  ISGetIndices(part_IS, &is_array);
 // NOTE(review): assumes sizeof(PetscInt)==sizeof(int) for this memcpy
368  memcpy(loc_part, is_array, vtx_distr.lsize()*sizeof(int));
369  ISRestoreIndices(part_IS, &is_array);
370 }
371 
372 
// NOTE(review): doxygen-listing artifact — source line 373 (the destructor
// signature, presumably SparseGraphPETSC::~SparseGraphPETSC()) is missing
// from this capture. Releases the PETSc partitioning objects and the CSR
// arrays obtained via PetscMalloc in allocate_sparse_graph().
374 {
375  ISDestroy(&part_IS);
376  MatPartitioningDestroy(&petsc_part);
377  MatDestroy(&petsc_adj_mat);
378 
379  if (adj) PetscFree(adj);
380  if (rows) PetscFree(rows);
381  if (adj_weights) PetscFree(adj_weights);
382 
383  // automatically calls base class destructor (frees vtx_XYZ, vtx_weights)
384 }
385 
386 
387 /*****************************************************************************************
388  SPARSE GRAPH METIS
389  *****************************************************************************************/
390 
391 
392 void SparseGraphMETIS::allocate_sparse_graph(int lsize_vtxs, int lsize_adj)
393 {
394  rows = new int[lsize_vtxs];
395  adj = new int[lsize_adj];
396  adj_weights = new int [lsize_adj];
397 }
398 
// NOTE(review): doxygen-listing artifact — source line 399 (the signature,
// presumably void SparseGraphMETIS::partition(int *part)) is missing from
// this capture. Partitions the finalized graph with METIS on processor 0;
// requires a fully localized distribution.
400 {
401  F_ENTRY;
402 
// NOTE(review): source line 403, the opening of this ASSERT (presumably
// ASSERT( vtx_distr.lsize(0)==vtx_distr.size(), ... ), is missing from this
// capture; only the message argument survived. TODO confirm.
404  "METIS could be used only with localized distribution.\n");
405 
 // trivial case: a single processor owns everything -> all in part 0
406  if (vtx_distr.np()==1) {
407  for(unsigned int i=0;i<vtx_distr.size();i++) part[i]=0;
408  return;
409  } else {
 // only the master computes the partition; the result is apparently
 // distributed by the caller (not visible in this file)
410  if (vtx_distr.myp()==0) {
411  int n_vtx=vtx_distr.size();
412  int n_proc=vtx_distr.np();
413  int num_flag=0; // indexing style (0-C, 1-Fortran)
414  int edgecut;
415 
416 /***********************************************************************************
417  * SETTING OPTIONS
418  */
419 #if (METIS_VER_MAJOR >= 5)
 // METIS 5 compiles with a configurable idx_t; the int-based calls below
 // are only valid when idx_t has the width of int
420  if ( sizeof(idx_t) != sizeof(int) ) {
421  printf("ERROR in GRAPH_DIVIDE_C: Wrong type of integers for METIS.\n");
422  abort();
423  }
424  /*printf(" METIS >=5.0 recognized.\n");*/
425  int ncon = 1;
426  real_t ubvec[1];
427  ubvec[0] = 1.001;
428  /*int *options = NULL;*/
429  int options[METIS_NOPTIONS];
430 
 // -1 means "use the METIS default" for any option not set explicitly
431  for (unsigned int i = 0;i < METIS_NOPTIONS;i++) options[i] = -1;
432 
433  options[METIS_OPTION_OBJTYPE] = METIS_OBJTYPE_CUT;
434  options[METIS_OPTION_CTYPE] = METIS_CTYPE_RM;
435  options[METIS_OPTION_IPTYPE] = METIS_IPTYPE_GROW;
436  options[METIS_OPTION_RTYPE] = METIS_RTYPE_GREEDY;
437  options[METIS_OPTION_NCUTS] = 1;
438  options[METIS_OPTION_NSEPS] = 1;
439  options[METIS_OPTION_NUMBERING] = num_flag;
440  options[METIS_OPTION_NITER] = 10;
441  options[METIS_OPTION_SEED] = 12345;
442  options[METIS_OPTION_MINCONN] = 1;
443  options[METIS_OPTION_CONTIG] = 0;
444  options[METIS_OPTION_COMPRESS] = 0;
445  options[METIS_OPTION_CCORDER] = 0;
 // UFACTOR is overwritten below depending on the chosen algorithm
446  options[METIS_OPTION_UFACTOR] = 0;
447  /*options[METIS_OPTION_DBGLVL] = METIS_DBG_INFO;*/
448  options[METIS_OPTION_DBGLVL] = 0;
449 #else
450  /*printf(" METIS < 5.0 recognized.\n");*/
451  /* weights */
 // wgtflag=3: both vertex and edge weights are supplied (METIS 4 API)
452  int wgtflag=3;
453  int options[5];
454  for (unsigned int i = 0; i < 5; i++ ) options[i] = 0;
455 
456  // options[0]=0;
457  // options[4]=255; //dbg_lvl
458 
459 
460 #endif
461 
462 
463  /* Initialize parts */
464  for (unsigned int i = 0; i < vtx_distr.lsize(); i++ ) part[i] = num_flag;
465 
466 /***********************************************************************************
467  * CALL METIS using optimal algorithm depending on the number of partitions
468  */
469 
 // recursive bisection is preferred for few parts, k-way for many
470  if (n_proc <= 8) {
471 
472 #if (METIS_VER_MAJOR >= 5)
473  options[METIS_OPTION_PTYPE] = METIS_PTYPE_RB;
474  options[METIS_OPTION_UFACTOR] = 1;
475  METIS_PartGraphRecursive(&n_vtx, &ncon, rows, adj,
476  vtx_weights, NULL, adj_weights, &n_proc, NULL,
477  ubvec, options, &edgecut,part);
478 #else
479  // has to be checked
480  METIS_PartGraphRecursive(&n_vtx, rows, adj,
481  vtx_weights, adj_weights, &wgtflag, &num_flag,
482  &n_proc, options, &edgecut, part);
483 #endif
484  } else {
485 
486 #if (METIS_VER_MAJOR >= 5)
487  options[METIS_OPTION_PTYPE] = METIS_PTYPE_KWAY;
488  options[METIS_OPTION_UFACTOR] = 30;
489  METIS_PartGraphKway(&n_vtx, &ncon, rows, adj,
490  vtx_weights, NULL, adj_weights, &n_proc, NULL,
491  ubvec, options, &edgecut, part);
492 #else
493  METIS_PartGraphKway(&n_vtx,rows,adj, //vtx distr, local vtx begins, edges of local vtxs
494  vtx_weights,adj_weights,&wgtflag,&num_flag, // vertex, edge weights, ...
495  &n_proc,options,&edgecut,part);
496 #endif
497  }
498  }
499  }
500 }
501 
// NOTE(review): doxygen-listing artifact — source line 502 (the destructor
// signature, presumably SparseGraphMETIS::~SparseGraphMETIS()) is missing
// from this capture. Releases the CSR arrays allocated with new[] in
// SparseGraphMETIS::allocate_sparse_graph().
503 {
504  if (adj) delete[](adj);
505  if (rows) delete[](rows);
506  if (adj_weights) delete[](adj_weights);
507 
508  // automatically calls base class destructor (frees vtx_XYZ, vtx_weights)
509 }