Flow123d  DF_profiler_memory_monitor-6d6a629
sparse_graph.hh
/*!
 *
 * Copyright (C) 2015 Technical University of Liberec. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 3 as published by the
 * Free Software Foundation. (http://www.gnu.org/licenses/gpl-3.0.en.html)
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 *
 * @file sparse_graph.hh
 * @brief Distributed sparse graphs, partitioning.
 */

#ifndef SPARSE_GRAPH_HH_
#define SPARSE_GRAPH_HH_


#include <vector>
#include <stack>
#include <ostream>
#include <petscmat.h>
#include "mpi.h"              // for MPI_Comm
#include "petscistypes.h"     // for IS, _p_IS

#include "la/distribution.hh"
using namespace std;



/**
 * @brief Virtual class for construction and partitioning of a distributed sparse graph.
 *
 * An empty graph is created in the constructor (collective). You have to provide either a Distribution
 * of the vertices, or the local number of vertices and a communicator. Then you can add edges with the
 * set_edge method. Added edges are stored in a buffer on the calling processor. Then you have to call
 * the finalize method (collective) to build the sparse graph. Graph partitioning is done either directly
 * through METIS or through PETSC (ParMETIS, and others). There is a derived class for each particular
 * partitioning method.
 *
 * The actual implementation is not fully scalable: row sizes (vertex degrees) are accumulated in global
 * arrays, then communicated to all processors, and finally only the local part is used. There should be
 * only the local part of the final array, with an additional buffer for off-processor values.
 *
 * Another issue is that we have problems using ParMETIS, so actually only METIS can be used, on a
 * parallel graph localized on the first processor.
 *
 * TODO: use Boost parallel graphs and write an interface from them to METIS, ParMETIS or PETSC.
 *
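 * A minimal usage sketch (assuming a SparseGraphMETIS instance; Distribution::lsize() is assumed
 * to return the local number of vertices; error handling omitted):
 * @code
 *     SparseGraphMETIS graph(n_local_vtxs, MPI_COMM_WORLD);   // collective
 *     graph.set_edge(0, 1);             // buffer edges on the calling processor
 *     graph.set_edge(1, 0);             // keep the graph symmetric
 *     graph.finalize();                 // collective: build rows/adj structures
 *     int *loc_part = new int[graph.get_distr().lsize()];     // preallocated by the user
 *     graph.partition(loc_part);        // fill partitioning of local vertices
 *     delete[] loc_part;
 * @endcode
 *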
 */
class SparseGraph {
public:

    /**
     * Construct an empty graph from a given Distribution of vertices.
     */
    SparseGraph(const Distribution &distr);

    /**
     * Construct an empty graph for a given number of local vertices of the graph.
     * Makes its own Distribution object.
     */
    SparseGraph(int loc_size, MPI_Comm comm);

    /**
     * Store an edge. We just store all inserted edges and update the support arrays used
     * to communicate the edges to the right processors.
     *
     * @param[in] a - starting vertex of the edge
     * @param[in] b - terminal vertex of the edge
     * @param[in] weight - optional weight of the edge (integer)
     *
     */
    void set_edge(const int a, const int b, int weight=1);

    /**
     * Set the position and possibly the weight of a local vertex. Assumes that vtx is an index of a local vertex.
     * Positions are used for the initial distribution when using ParMETIS.
     *
     * @param[in] vtx - global vertex index (from zero)
     * @param[in] xyz - coordinates of the vertex position
     * @param[in] weight - optional weight of the vertex
     *
     */
    void set_vtx_position(const int vtx, const float xyz[3], int weight=1);

    /**
     * @brief Make the sparse graph structures: rows, adj.
     *
     * 1) send edges to the owner of the .from vertex
     * 2) sort local edges
     * 3) fill rows, adj; remove duplicates
     */
    void finalize();

    /**
     * Fills an array of integers with the partitioning of the vertices.
     * The array size is the number of local vertices according to the distribution of the graph;
     * see the get_distr() method. The array has to be PREALLOCATED by the user to the correct size.
     *
     */
    virtual void partition(int *loc_part) = 0;

    /**
     * Check if the subgraphs of the given partitioning are connected.
     * Works only for a fully localized graph (on one processor).
     */
    bool check_subgraph_connectivity(int *part);
    void DFS(int vtx);

    /**
     * Returns the distribution of the graph.
     */
    Distribution get_distr() {return vtx_distr;}

    /**
     * Simple graph output. The graph has to be already finalized.
     */
    void view();

    /**
     * Check symmetry of the local part of the graph. The graph has to be already finalized.
     */
    bool is_symmetric();
    virtual ~SparseGraph();

    //friend ostream & operator <<(ostream & out, const SparseGraph &sg);
protected:
    // Auxiliary Edge type.
    typedef struct {
        int from, to, weight;           ///< Edge weights for communication (optional).
    } Edge;


    Distribution vtx_distr;             ///< distribution of vertices
    int *rows;                          ///< starts of vtx rows in the adj array;
                                        ///< vtx i has edges to vertices adj[rows[i]] .. adj[rows[i+1]-1]
                                        ///< (see the iteration sketch below)
    float * vtx_XYZ;                    ///< optional vertex coordinates (global array)
    int *vtx_weights;                   ///< Vertex weights for computations (optional).
    int *adj;                           ///< sparse adjacency array
    int *adj_weights;

    int *part_to_check;                 ///< created partitioning used during the connectivity check
    int proc_to_check;                  ///< subgraph to check
    std::vector<int> checked_vtx;       ///< coloring of the DFS algorithm
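
    // Usage sketch for the CSR arrays above: once the graph is finalized,
    // the neighbours of vertex i can be traversed as
    //
    //     for (int j = rows[i]; j < rows[i+1]; j++) {
    //         int neighbour = adj[j];                    // adjacent vertex
    //         int w = adj_weights ? adj_weights[j] : 1;  // optional edge weight (assumed layout)
    //     }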

    // temporary arrays
    vector< stack<Edge> > adj_of_proc;  ///< storage for graph edges for individual processors

    /**
     * Use a virtual method for allocation since for the PETSC interface we have to use PetscMalloc.
     */
    virtual void allocate_sparse_graph(int lsize_vtxs, int lsize_adj)=0;
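
    // A possible shape of the PETSC override (a sketch, not the actual implementation):
    // PetscMalloc is used so that PETSc can later manage and free the arrays, e.g.
    //
    //     PetscMalloc((lsize_vtxs+1)*sizeof(int), &rows);
    //     PetscMalloc(lsize_adj*sizeof(int), &adj);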

    friend bool operator <(const Edge &a,const Edge &b);

};

ostream & operator <<(ostream & out, const SparseGraph &sg);


/**
 * Sparse graph that uses PETSC for partitioning.
 */
class SparseGraphPETSC : public SparseGraph
{


public:
    /**
     * Construct an empty graph from the number of vertices only; use the default initial distribution.
     */
    SparseGraphPETSC(int n_vtxs, MPI_Comm comm)
        : SparseGraph(Distribution(DistributionBlock(), n_vtxs,comm)), petsc_adj_mat(0), petsc_part(0), part_IS(0) {}

    /**
     * Construct an empty graph with a given distribution of vertices.
     */
    SparseGraphPETSC(const Distribution &distr)
        : SparseGraph(distr), petsc_adj_mat(0), petsc_part(0), part_IS(0) {}

    /**
     * Implementation of partitioning using the PETSC interface to various partitioning software.
     */
    virtual void partition(int *loc_part);

private:
    Mat petsc_adj_mat;
    MatPartitioning petsc_part;
    IS part_IS;

    virtual void allocate_sparse_graph(int lsize_vtxs, int lsize_adj);
    virtual ~SparseGraphPETSC();

};


/**
 * Sparse graph that uses METIS for partitioning.
 */
class SparseGraphMETIS : public SparseGraph
{
public:
    /**
     * Construct an empty graph from the number of vertices only;
     * use a localized distribution in order to use the sequential METIS library.
     */
    SparseGraphMETIS(int n_vtxs, MPI_Comm comm)
        : SparseGraph(Distribution(DistributionLocalized(), n_vtxs,comm)) {}

    /**
     * Construct an empty graph with a given distribution of vertices.
     */
    SparseGraphMETIS(const Distribution &distr)
        : SparseGraph(distr) {}

    /**
     * Implementation of partitioning using METIS.
     */
    virtual void partition(int *loc_part);

private:
    virtual void allocate_sparse_graph(int lsize_vtxs, int lsize_adj);
    virtual ~SparseGraphMETIS();
};
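
// Usage sketch (hypothetical 'use_metis' flag): both derived classes share the
// SparseGraph interface, so the partitioning backend can be selected at run time:
//
//     SparseGraph *graph;
//     if (use_metis) graph = new SparseGraphMETIS(n_vtxs, comm);
//     else           graph = new SparseGraphPETSC(n_vtxs, comm);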


#endif /* SPARSE_GRAPH_HH_ */