/*!
 *
 * Copyright (C) 2007 Technical University of Liberec. All rights reserved.
 *
 * Please include the following reference to Flow123d on your project site if you use the program for any purpose,
 * especially for academic research:
 * Flow123d, Research Centre: Advanced Remedial Technologies, Technical University of Liberec, Czech Republic
 *
 * This program is free software; you can redistribute it and/or modify it under the terms
 * of the GNU General Public License version 3 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with this program; if not,
 * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *
 * $Id$
 * $Revision$
 * $LastChangedBy$
 * $LastChangedDate$
 *
 * @file
 * @brief Distributed sparse graphs, partitioning.
 *
 *
 */
#ifndef SPARSE_GRAPH_HH_
#define SPARSE_GRAPH_HH_


#include <vector>
#include <stack>
#include <ostream>
#include <petscmat.h>

#include "la/distribution.hh"
using namespace std;




/**
 * @brief Virtual class for construction and partitioning of a distributed sparse graph.
 *
 * An empty graph is created in the constructor (collective). You have to provide either a Distribution of the vertices, or
 * the local number of vertices and a communicator. Then you can add edges with the set_edge method. Added edges are
 * stored in a buffer on the calling processor. Then you have to call the finalize method (collective) to build the sparse graph.
 * Graph partitioning is done either directly through METIS or through PETSc (ParMETIS, and others). There is a derived class for
 * each particular partitioning method.
 *
 * The actual implementation is not fully scalable: row sizes (vertex degrees)
 * are accumulated in global arrays, then communicated to all processors, and finally
 * only the local part is used. There should be only the local part of the final array, with an additional buffer for off-processor
 * values.
 *
 * Another issue is that we have problems using ParMETIS, so actually only METIS can be used, on
 * a parallel graph localized on the first processor.
 *
 * TODO: use Boost parallel graphs and write an interface from them to METIS, ParMETIS, or PETSc.
 *
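 * A minimal usage sketch (illustrative only; the tiny two-vertices-per-process graph below is
 * made up, and the Distribution::begin()/lsize() accessors are assumed):
 * @code
 *   SparseGraphPETSC graph(2, MPI_COMM_WORLD);       // two local vertices per process
 *   int first = graph.get_distr().begin();           // global index of the first local vertex
 *   graph.set_edge(first, first + 1);                // both endpoints are local here
 *   graph.set_edge(first + 1, first);                // insert both directions; the graph should be symmetric
 *   graph.finalize();                                // collective: builds rows/adj
 *   std::vector<int> loc_part( graph.get_distr().lsize() );
 *   graph.partition( loc_part.data() );
 * @endcode
 *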
 */
class SparseGraph {
public:

    /**
     * Construct an empty graph from a given Distribution of vertices.
     */
    SparseGraph(const Distribution &distr);

    /**
     * Construct an empty graph for a given number of local vertices of the graph.
     * Makes its own distribution object.
     */
    SparseGraph(int loc_size, MPI_Comm comm);

    /**
     * Store an edge. We just store all inserted edges and update the support arrays used to
     * communicate the edges to the right processors.
     *
     * @param[in] a - starting vertex of the edge
     * @param[in] b - terminal vertex of the edge
     * @param[in] weight - optional weight of the edge (integer)
     *
     */
    void set_edge(const int a, const int b, int weight=1);

    /**
     * Set the position and optionally the weight of a local vertex. Assumes that vtx is an index of a local vertex.
     * Positions are used for the initial distribution when using ParMETIS.
     *
     * @param[in] vtx - global vertex index (from zero)
     * @param[in] xyz - coordinates of the vertex position
     * @param[in] weight - optional weight of the vertex
     *
     */
    void set_vtx_position(const int vtx, const float xyz[3], int weight=1);

    /**
     * @brief Make sparse graph structures: rows, adj
     *
     * 1) send edges to the owner of the .from vertex
     * 2) sort local edges
     * 3) fill rows, adj; remove duplicities
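     *
     * For illustration (a made-up example with three vertices), the sorted, de-duplicated
     * local edge list {0->1, 0->2, 1->2, 2->0} produces:
     * @verbatim
     *   rows = [ 0, 2, 3, 4 ]    // vertex i owns entries rows[i] .. rows[i+1]-1
     *   adj  = [ 1, 2, 2, 0 ]
     * @endverbatim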
     */
    void finalize();

    /**
     * Fills an array of integers with the partitioning of the vertices.
     * The array size is the number of local vertices according to the distribution of the graph;
     * see the get_distr() method. The array has to be PREALLOCATED by the user to the correct size.
     *
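     * For example (a sketch; assumes Distribution::lsize() returns the number of local vertices):
     * @code
     *   std::vector<int> loc_part( graph.get_distr().lsize() );
     *   graph.partition( loc_part.data() );
     * @endcode
     *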
     */
    virtual void partition(int *loc_part) = 0;

    /**
     * Check if the subgraphs of the given partitioning are connected.
     * Works only for a fully localized graph (on one processor).
     */
    bool check_subgraph_connectivity(int *part);
    void DFS(int vtx);

    /**
     * Returns the distribution of the graph.
     */
    Distribution get_distr() {return vtx_distr;}

    /**
     * Simple graph output. The graph has to be already finalized.
     */
    void view();

    /**
     * Check symmetry of the local part of the graph. The graph has to be already finalized.
     */
    bool is_symmetric();
    virtual ~SparseGraph();

    //friend ostream & operator <<(ostream & out, const SparseGraph &sg);
protected:
    // Auxiliary Edge type.
    typedef struct {
        int from, to, weight;              ///< Edge endpoints and its optional weight, as communicated.
    } Edge;


    Distribution vtx_distr;                ///< distribution of vertices
    int *rows;                             ///< starts of vertex rows in the adj array;
                                           ///< vertex i has edges to vertices adj[rows[i]] .. adj[rows[i+1]-1]
    float *vtx_XYZ;                        ///< optional vertex coordinates (global array)
    int *vtx_weights;                      ///< Vertex weights for computations (optional).
    int *adj;                              ///< sparse adjacency array
    int *adj_weights;                      ///< Edge weights (optional).

    int *part_to_check;                    ///< the partitioning to be checked for connectivity
    int proc_to_check;                     ///< subgraph to check
    std::vector<int> checked_vtx;          ///< coloring of the DFS algorithm

    // temporary arrays
    vector< stack<Edge> > adj_of_proc;     ///< storage for graph edges for individual processors

    /**
     * Use a virtual method for allocation, since for the PETSc interface we have to use PetscMalloc.
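     *
     * A possible override sketch (illustrative only; assumes PETSc's PetscMalloc1 and that
     * rows gets lsize_vtxs entries while adj and adj_weights get lsize_adj entries each):
     * @code
     *   void SparseGraphPETSC::allocate_sparse_graph(int lsize_vtxs, int lsize_adj) {
     *       PetscMalloc1(lsize_vtxs, &rows);        // PETSc-owned, released with PetscFree
     *       PetscMalloc1(lsize_adj,  &adj);
     *       PetscMalloc1(lsize_adj,  &adj_weights);
     *   }
     * @endcode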
     */
    virtual void allocate_sparse_graph(int lsize_vtxs, int lsize_adj) = 0;

    friend bool operator <(const Edge &a, const Edge &b);

};

ostream & operator <<(ostream & out, const SparseGraph &sg);



/**
 * Sparse graph that uses PETSc for partitioning.
 */
class SparseGraphPETSC : public SparseGraph
{

public:
    /**
     * Construct an empty graph only from the number of vertices; use the default initial distribution.
     */
    SparseGraphPETSC(int n_vtxs, MPI_Comm comm)
        : SparseGraph(Distribution(DistributionBlock(), n_vtxs, comm)), petsc_adj_mat(0), petsc_part(0), part_IS(0) {}

    /**
     * Construct an empty graph with a given distribution of vertices.
     */
    SparseGraphPETSC(const Distribution &distr)
        : SparseGraph(distr), petsc_adj_mat(0), petsc_part(0), part_IS(0) {}

    /**
     * Implementation of partitioning using the PETSc interface to various partitioning packages.
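     *
     * An illustrative call sequence (a sketch only; the names lsize and gsize stand for the
     * local and global number of vertices and are not members of this class):
     * @code
     *   MatCreateMPIAdj(PETSC_COMM_WORLD, lsize, gsize, rows, adj, adj_weights, &petsc_adj_mat);
     *   MatPartitioningCreate(PETSC_COMM_WORLD, &petsc_part);
     *   MatPartitioningSetAdjacency(petsc_part, petsc_adj_mat);
     *   MatPartitioningApply(petsc_part, &part_IS);
     * @endcode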
     */
    virtual void partition(int *loc_part);

private:
    Mat petsc_adj_mat;
    MatPartitioning petsc_part;
    IS part_IS;

    virtual void allocate_sparse_graph(int lsize_vtxs, int lsize_adj);
    virtual ~SparseGraphPETSC();

};


/**
 * Sparse graph that uses METIS for partitioning.
 */
class SparseGraphMETIS : public SparseGraph
{
public:
    /**
     * Construct an empty graph only from the number of vertices;
     * use a localized distribution in order to use the sequential METIS library.
     */
    SparseGraphMETIS(int n_vtxs, MPI_Comm comm)
        : SparseGraph(Distribution(DistributionLocalized(), n_vtxs, comm)) {}

    /**
     * Construct an empty graph with a given distribution of vertices.
     */
    SparseGraphMETIS(const Distribution &distr)
        : SparseGraph(distr) {}

    /**
     * Implementation of partitioning using METIS.
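     *
     * An illustrative METIS 5 call (a sketch only; assumes idx_t matches int and that
     * Distribution provides lsize() and np() accessors):
     * @code
     *   idx_t nvtxs = vtx_distr.lsize();   // all vertices local under the localized distribution
     *   idx_t ncon = 1, nparts = vtx_distr.np(), objval;
     *   METIS_PartGraphKway(&nvtxs, &ncon, rows, adj, vtx_weights, NULL, adj_weights,
     *                       &nparts, NULL, NULL, NULL, &objval, loc_part);
     * @endcode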
     */
    virtual void partition(int *loc_part);

private:
    virtual void allocate_sparse_graph(int lsize_vtxs, int lsize_adj);
    virtual ~SparseGraphMETIS();
};


#endif /* SPARSE_GRAPH_HH_ */