#include "boost/lambda/lambda.hpp"
adj_of_proc( vtx_distr.np() )
: vtx_distr(loc_size, comm),
  adj_of_proc( vtx_distr.np() )
memcpy( vtx_XYZ + 3*loc_index, xyz, 3*sizeof(float) );
ASSERT( adj==NULL, "Graph is already finalized\n");
unsigned int edge_size = 3;   // three ints per edge: from, to, weight
sdispls[proc] = total_size;
scounts[proc] = edge_size * (s)->size();
total_size += edge_size * (s)->size();
int * sendbuf = (int *) xmalloc( (total_size+1) * sizeof(int) );
ASSERT(sdispls[proc] == buf_pos,
       "Mismatch between displacement %d and buffer position %d.\n", sdispls[proc], buf_pos );
while ( ! (s)->empty() ) {
    Edge edge = (s)->top(); (s)->pop();   // take next edge destined for this processor
    sendbuf[buf_pos++] = edge.from;
    sendbuf[buf_pos++] = edge.to;
    sendbuf[buf_pos++] = edge.weight;
rdispls[proc] = total_size;
total_size += rcounts[proc];
int * recvbuf = (int *) xmalloc( (total_size+1) * sizeof(int) );
MPI_Alltoallv( sendbuf, scounts, sdispls, MPI_INT,
               recvbuf, rcounts, rdispls, MPI_INT,
               vtx_distr.get_comm() );
int size = total_size / edge_size;   // number of received edges
std::sort(edges, edges + size);
Edge unknown_edge = {-1, -1, 0};
Edge *last_edge = &unknown_edge;
for (i = 0; i < size; i++) {
    if ( ! (*last_edge < edges[i]) ) continue;   // skip duplicates in the sorted sequence
200 "Received non-local edge: %d %d at position %d\n",edges[i].
from, edges[i].
to,i);
ASSERT( row <= loc_from, "Decrease in sorted edges at %d\n", i);
while ( row < loc_from ) rows[++row] = i;
adj[i_adj] = edges[i].to;
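The fragment above fills the CSR arrays from the sorted, deduplicated edge list. A sequential sketch of that conversion (the function name, the vector types, and the assumption that duplicates were already skipped are illustrative):

#include <vector>

struct Edge { int from, to, weight; };   // local stand-in for SparseGraph::Edge

// Sorted (from, to) edges -> CSR (rows, adj); 'from' indices are local, starting at 0.
void edges_to_csr(const Edge *edges, int n_edges, int n_rows,
                  std::vector<int> &rows, std::vector<int> &adj) {
    rows.assign(n_rows + 1, 0);
    adj.resize(n_edges);
    int row = 0;
    for (int i = 0; i < n_edges; i++) {
        while (row < edges[i].from) rows[++row] = i;   // close rows with no more edges
        adj[i] = edges[i].to;
    }
    while (row < n_rows) rows[++row] = n_edges;        // close trailing (possibly empty) rows
}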
unsigned int n_proc = 0;
DBGMSG("Connectivity of subgraphs is OK.\n");
for (int i_neigh = rows[vtx]; i_neigh < rows[vtx+1]; i_neigh++) {
    neighbour = adj[i_neigh];
ASSERT( adj, "Cannot view a non-finalized graph.\n");
for (col = rows[row]; col < rows[row+1]; col++) {
int loc_row, row, row_pos;
int col_pos, col, loc_col;
for (row_pos = rows[loc_row]; row_pos < rows[loc_row+1]; row_pos++) {
for (col_pos = rows[loc_col]; col_pos < rows[loc_col+1]; col_pos++)
    if ( adj[col_pos] == row ) break;
if ( col_pos == rows[loc_col+1] ) return false;   // reverse edge not found
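The fragment above is the heart of the symmetry test. A sequential sketch of the same check on purely local CSR arrays (the function name and the single-processor simplification are assumptions):

// True iff for every stored edge (r, c) the reverse edge (c, r) is stored too.
bool csr_is_symmetric(int n, const int *rows, const int *adj) {
    for (int r = 0; r < n; r++)
        for (int pos = rows[r]; pos < rows[r + 1]; pos++) {
            int c = adj[pos];
            bool found = false;
            for (int back = rows[c]; back < rows[c + 1]; back++)
                if (adj[back] == r) { found = true; break; }
            if (!found) return false;
        }
    return true;
}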
PetscMalloc(lsize_vtxs * sizeof(int), &rows);
PetscMalloc(lsize_adj * sizeof(int), &adj);
ASSERT( adj && rows, "Cannot make partition of a non-finalized graph.\n");
const PetscInt *is_array;
ISGetIndices(part_IS, &is_array);
ISRestoreIndices(part_IS, &is_array);
rows = new int[lsize_vtxs];
adj  = new int[lsize_adj];
388 "METIS could be used only with localized distribution.\n");
#if (METIS_VER_MAJOR >= 5)
if ( sizeof(idx_t) != sizeof(int) ) {
    printf("ERROR in GRAPH_DIVIDE_C: Wrong type of integers for METIS.\n");
int options[METIS_NOPTIONS];
for (unsigned int i = 0; i < METIS_NOPTIONS; i++) options[i] = -1;   // -1 selects the METIS default
options[METIS_OPTION_OBJTYPE]   = METIS_OBJTYPE_CUT;
options[METIS_OPTION_CTYPE]     = METIS_CTYPE_RM;
options[METIS_OPTION_IPTYPE]    = METIS_IPTYPE_GROW;
options[METIS_OPTION_RTYPE]     = METIS_RTYPE_GREEDY;
options[METIS_OPTION_NCUTS]     = 1;
options[METIS_OPTION_NSEPS]     = 1;
options[METIS_OPTION_NUMBERING] = num_flag;
options[METIS_OPTION_NITER]     = 10;
options[METIS_OPTION_SEED]      = 12345;
options[METIS_OPTION_MINCONN]   = 1;
options[METIS_OPTION_CONTIG]    = 0;
options[METIS_OPTION_COMPRESS]  = 0;
options[METIS_OPTION_CCORDER]   = 0;
options[METIS_OPTION_UFACTOR]   = 0;
options[METIS_OPTION_DBGLVL]    = 0;
for (unsigned int i = 0; i < 5; i++) options[i] = 0;
for (unsigned int i = 0; i < vtx_distr.lsize(); i++) part[i] = num_flag;
#if (METIS_VER_MAJOR >= 5)
options[METIS_OPTION_PTYPE] = METIS_PTYPE_RB;
options[METIS_OPTION_UFACTOR] = 1;
METIS_PartGraphRecursive(&n_vtx, &ncon, rows, adj,
                         /* ... */ ubvec, options, &edgecut, part);
#else
METIS_PartGraphRecursive(&n_vtx, rows, adj,
                         /* ... */ &n_proc, options, &edgecut, part);
#endif
#if (METIS_VER_MAJOR >= 5)
options[METIS_OPTION_PTYPE] = METIS_PTYPE_KWAY;
options[METIS_OPTION_UFACTOR] = 30;
METIS_PartGraphKway(&n_vtx, &ncon, rows, adj,
                    /* ... */ ubvec, options, &edgecut, part);
#else
METIS_PartGraphKway(&n_vtx, rows, adj,
                    /* ... */ &n_proc, options, &edgecut, part);
#endif
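For comparison with the fragments above, a self-contained METIS 5 call with the full argument list that the listing elides; the NULL arguments request unit vertex/edge weights and uniform part sizes, and the small path graph is purely illustrative:

#include <metis.h>
#include <cstdio>

int main() {
    // CSR description of the path graph 0-1-2-3.
    idx_t xadj[]   = {0, 1, 3, 5, 6};
    idx_t adjncy[] = {1, 0, 2, 1, 3, 2};
    idx_t nvtxs = 4, ncon = 1, nparts = 2, objval;
    idx_t part[4];
    idx_t options[METIS_NOPTIONS];
    METIS_SetDefaultOptions(options);
    int ret = METIS_PartGraphKway(&nvtxs, &ncon, xadj, adjncy,
                                  NULL, NULL, NULL,     // vwgt, vsize, adjwgt: unit weights
                                  &nparts, NULL, NULL,  // tpwgts, ubvec: defaults
                                  options, &objval, part);
    if (ret != METIS_OK) { std::printf("METIS failed.\n"); return 1; }
    for (idx_t v = 0; v < nvtxs; v++)
        std::printf("vertex %d -> part %d\n", (int)v, (int)part[v]);
    return 0;
}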
unsigned int size() const
Get global size.
MatPartitioning petsc_part
unsigned int get_proc(unsigned int idx) const
Get processor of the given index.
bool check_subgraph_connectivity(int *part)
SparseGraph(const Distribution &distr)
void set_vtx_position(const int vtx, const float xyz[3], int weight=1)
virtual ~SparseGraphPETSC()
int * part_to_check
Partitioning created and used during the connectivity check.
Global macros to enhance readability and debugging, general constants.
int weight
Edge weights for communication (optional).
bool is_local(unsigned int idx) const
Identify whether the given index is local.
#define MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm)
void finalize()
Make sparse graph structures: rows, adj.
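Based on the interface listed here, a typical build-finalize-partition sequence might look as follows; the header path, the concrete edges, and the driver function are assumptions, not code from this file:

#include "la/sparse_graph.hh"   // assumed header location

void partition_example(const Distribution &distr, int *loc_part) {
    SparseGraphMETIS graph(distr);
    graph.set_edge(0, 1);        // global vertex indices, default weight 1
    graph.set_edge(1, 0);        // reverse direction, so the graph stays symmetric
    graph.finalize();            // exchanges edges and builds rows/adj
    graph.partition(loc_part);   // fills loc_part for the local vertices
}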
Virtual class for construction and partitioning of a distributed sparse graph.
Distribution vtx_distr
Distribution of vertices.
virtual void allocate_sparse_graph(int lsize_vtxs, int lsize_adj)=0
unsigned int begin(int proc) const
Get starting local index.
vector< stack< Edge > > adj_of_proc
Storage of graph edges destined for individual processors.
virtual void partition(int *loc_part)
int proc_to_check
Subgraph to check.
unsigned int np() const
Get number of processors.
std::vector< int > checked_vtx
Coloring array of the DFS algorithm.
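A sketch of the DFS pass that such a coloring array supports, over the CSR arrays (rows, adj); the function name and the iterative formulation are assumptions:

#include <stack>
#include <vector>

// Mark every vertex reachable from 'start'; checked_vtx[v] != 0 means visited.
void dfs_mark(int start, const int *rows, const int *adj, std::vector<int> &checked_vtx) {
    std::stack<int> to_visit;
    to_visit.push(start);
    checked_vtx[start] = 1;
    while (!to_visit.empty()) {
        int v = to_visit.top(); to_visit.pop();
        for (int pos = rows[v]; pos < rows[v + 1]; pos++) {
            int u = adj[pos];
            if (checked_vtx[u] == 0) { checked_vtx[u] = 1; to_visit.push(u); }
        }
    }
}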
void * xmalloc(size_t size)
Memory allocation with checking.
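A minimal sketch of what a checked allocator like this typically does; the exact error handling is an assumption:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

void * xmalloc(size_t size) {
    void *p = malloc(size ? size : 1);   // malloc(0) may legally return NULL
    if (p == NULL) {
        fprintf(stderr, "Out of memory: failed to allocate %zu bytes.\n", size);
        abort();
    }
    return p;
}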
STREAM & operator<<(STREAM &s, UpdateFlags u)
unsigned int myp() const
Get my processor.
Support classes for parallel programming.
float * vtx_XYZ
Optional vertex coordinates (global array).
bool operator<(const SparseGraph::Edge &a, const SparseGraph::Edge &b)
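Given that finalize() sorts edges and skips duplicates, this comparison most plausibly orders edges lexicographically by (from, to), ignoring weight; a sketch under that assumption:

bool operator<(const SparseGraph::Edge &a, const SparseGraph::Edge &b) {
    return a.from < b.from || (a.from == b.from && a.to < b.to);
}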
virtual ~SparseGraphMETIS()
virtual void allocate_sparse_graph(int lsize_vtxs, int lsize_adj)
MPI_Comm get_comm() const
Returns communicator.
Distributed sparse graphs, partitioning.
virtual void allocate_sparse_graph(int lsize_vtxs, int lsize_adj)
int * vtx_weights
Vertex weights for computations (optional).
void set_edge(const int a, const int b, int weight=1)
virtual void partition(int *loc_part)
#define MPI_Alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf, recvcounts, rdispls, recvtype, comm)
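The count/displacement protocol behind such a call, as used by finalize(), in a compact sketch; the function and variable names are illustrative:

#include <mpi.h>
#include <vector>

// Every rank sends scounts[p] ints to rank p; displacements are prefix sums.
void exchange(const std::vector<int> &scounts, std::vector<int> &sendbuf,
              std::vector<int> &recvbuf, MPI_Comm comm) {
    int np; MPI_Comm_size(comm, &np);
    std::vector<int> rcounts(np), sdispls(np), rdispls(np);
    MPI_Alltoall(scounts.data(), 1, MPI_INT, rcounts.data(), 1, MPI_INT, comm);
    int ssum = 0, rsum = 0;
    for (int p = 0; p < np; p++) {
        sdispls[p] = ssum; ssum += scounts[p];
        rdispls[p] = rsum; rsum += rcounts[p];
    }
    recvbuf.resize(rsum);
    MPI_Alltoallv(sendbuf.data(), scounts.data(), sdispls.data(), MPI_INT,
                  recvbuf.data(), rcounts.data(), rdispls.data(), MPI_INT, comm);
}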
unsigned int lsize(int proc) const
Get local size.
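A hypothetical illustration tying together the Distribution queries listed above; the function is not part of the interface:

#include <cstdio>

void describe(const Distribution &distr, unsigned int gidx) {
    unsigned int owner = distr.get_proc(gidx);   // processor owning global index gidx
    std::printf("index %u: owner %u, range [%u, %u), local here: %d\n",
                gidx, owner, distr.begin(owner),
                distr.begin(owner) + distr.lsize(owner),
                (int)distr.is_local(gidx));
}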