Flow123d
release_3.0.0-973-g92f55e826
34 "Different algorithms to make the sparse graph with weighted edges\n"
35 "from the multidimensional mesh. Main difference is dealing with \n"
36 "neighboring of elements of different dimension.")
38 .
add_value(
any_weight_lower_dim_cuts,
"any_weight_lower_dim_cuts",
"Same as before and assign higher weight to cuts of lower dimension in order to make them stick to one face.")
    return IT::Selection("PartTool",
            "Select the partitioning tool to use.")
        .add_value(PETSc, "PETSc",
            "Use PETSc interface to various partitioning tools.")
static IT::Record input_type =
    IT::Record("Partition",
        "Setting for various types of mesh partitioning.")
: mesh_(mesh), in_(in), graph_(NULL), loc_part_(NULL), init_el_ds_(NULL)
// Skip elements that are not local in the initial distribution.
if ( !edistr.is_local( ele.idx() ) )

// Connect the element with every element sharing one of its edges.
for (unsigned int si = 0; si < ele->n_sides(); si++) {
    for (unsigned int li = 0; li < edg->n_sides; li++) {
        if ( e_idx != ele.idx() ) {

// Connect the element with its dimension-crossing (VB) neighbors.
for (i_neigh = 0; i_neigh < ele->n_neighs_vb(); i_neigh++) {
    n_s = ele->neigh_vb[i_neigh]->edge()->n_sides;
    for (i_s = 0; i_s < n_s; i_s++) {
        e_idx = ele->neigh_vb[i_neigh]->edge()->side(i_s)->element().idx();
// Refuse to decompose a mesh with fewer elements than processors.
if (mesh_size < num_of_procs) {
    THROW( ExcDecomposeMesh() << EI_NElems( mesh_size ) << EI_NProcs( num_of_procs ) );
}
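THROW is the wrapper macro documented in the reference list below; ExcDecomposeMesh carries the two error-info tags streamed into it. Flow123d-style exception declarations typically look like the following sketch; the exact macro arguments and message text are assumptions:

// Hypothetical declaration of the exception used above.
TYPEDEF_ERR_INFO( EI_NElems, int );
TYPEDEF_ERR_INFO( EI_NProcs, int );
DECLARE_EXCEPTION( ExcDecomposeMesh,
    << "Cannot decompose " << EI_NElems::val
    << " mesh elements among " << EI_NProcs::val << " processors." );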
IS part, new_numbering;
unsigned int size = old_ds.size();
int new_counts[old_ds.np()];

// Wrap the local part of the partition vector in a PETSc index set.
ISCreateGeneral(PETSC_COMM_WORLD, old_ds.lsize(), loc_part, PETSC_COPY_VALUES, &part);

// Count how many elements every processor owns in the new distribution.
ISPartitioningCount(part, old_ds.np(), new_counts);
new_ds = new Distribution((unsigned int *) new_counts, PETSC_COMM_WORLD);

// Global numbering of elements consistent with the new distribution.
ISPartitioningToNumbering(part, &new_numbering);

old_4_new = new int [size];
new_4_id = new LongIdx [ n_ids + 1 ];

// Application order (new numbering) -> PETSc order (original numbering).
AOCreateBasicIS(new_numbering, PETSC_NULL, &new_old_ao);
ISDestroy(&new_numbering);
for (unsigned int i = 0; i < size; i++)
    old_4_new[i] = i;   // start from the identity permutation
AOApplicationToPetsc(new_old_ao, size, old_4_new);
AODestroy(&(new_old_ao));

// id_4_loc: IDs of the elements local to the new distribution.
for (unsigned int i_new = new_ds->begin(); i_new < new_ds->end(); i_new++) {
    id_4_loc[i_loc++] = id_4_old[old_4_new[i_new]];
}

// new_4_id: new index for every ID, -1 for IDs that are not used.
for (i_loc = 0; i_loc <= n_ids; i_loc++)
    new_4_id[i_loc] = -1;
for (unsigned int i_new = 0; i_new < size; i_new++)
    new_4_id[id_4_old[old_4_new[i_new]]] = i_new;
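The calls above follow the standard PETSc redistribution pipeline: ISCreateGeneral wraps the per-element target ranks, ISPartitioningCount derives the new per-process sizes, ISPartitioningToNumbering produces a global numbering consistent with the new distribution, and an AO built from that numbering translates indices between the two orderings. A self-contained sketch with made-up toy data (two items per rank, every second item sent to the next rank):

#include <petsc.h>

int main(int argc, char **argv) {
    PetscInitialize(&argc, &argv, NULL, NULL);
    PetscMPIInt rank, np;
    MPI_Comm_rank(PETSC_COMM_WORLD, &rank);
    MPI_Comm_size(PETSC_COMM_WORLD, &np);

    // Toy partition: each rank owns two items, the second moves to the next rank.
    PetscInt loc_part[2] = { rank, (rank + 1) % np };

    IS part, new_numbering;
    ISCreateGeneral(PETSC_COMM_WORLD, 2, loc_part, PETSC_COPY_VALUES, &part);

    // New local sizes after the redistribution.
    PetscInt *new_counts;
    PetscMalloc1(np, &new_counts);
    ISPartitioningCount(part, np, new_counts);

    // New global numbering consistent with the target distribution.
    ISPartitioningToNumbering(part, &new_numbering);

    // The AO maps the new numbering (application side) back to the original
    // natural numbering (PETSc side), mirroring new_old_ao above.
    AO ao;
    AOCreateBasicIS(new_numbering, NULL, &ao);
    PetscInt idx[2] = { 2 * rank, 2 * rank + 1 };  // indices in the new numbering
    AOApplicationToPetsc(ao, 2, idx);              // now indices in the old numbering

    AODestroy(&ao);
    ISDestroy(&new_numbering);
    ISDestroy(&part);
    PetscFree(new_counts);
    PetscFinalize();
    return 0;
}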
seq_part_ = make_shared< vector<int> >(seq_size);
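The sequential vector is then filled from the per-process loc_part_ arrays. Given the MPI_Gatherv wrapper and the Distribution accessors documented in the reference list below, the gather plausibly looks like this sketch; the buffer casts and the root rank 0 are assumptions:

// Hypothetical gather of the local partition arrays onto the root rank.
MPI_Gatherv(loc_part_, init_el_ds_->lsize( init_el_ds_->myp() ), MPI_INT,
            seq_part_->data(), (int *) init_el_ds_->get_lsizes_array(),
            (int *) init_el_ds_->get_starts_array(), MPI_INT,
            0, init_el_ds_->get_comm());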
void set_edge(const int a, const int b, int weight=1)
unsigned int np() const
get num of processors
Partitioning(Mesh *mesh, Input::Record in)
Distributed sparse graphs, partitioning.
Input::Record in_
Input Record accessor.
unsigned int lsize(int proc) const
get local size
@ METIS
Use direct interface to Metis.
unsigned int myp() const
get my processor
#define ASSERT(expr)
Allows using shorter versions of the macro names if these names are not used by an external library.
int LongIdx
Define type that represents indices of large arrays (elements, nodes, dofs etc.)
virtual Range< ElementAccessor< 3 > > elements_range() const
Returns range of bulk elements.
Support classes for parallel programming.
shared_ptr< vector< int > > subdomain_id_field_data()
Mesh * mesh_
The input mesh.
~Partitioning()
Destructor.
LongIdx * loc_part_
Partition numbers for local elements in the original distribution of elements given by init_el_ds_.
#define THROW(whole_exception_expr)
Wrapper for throw. Saves the throwing point.
const Edge * edge() const
unsigned int size() const
get global size
#define MPI_Gatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm)
const LongIdx * get_loc_part() const
static const Input::Type::Selection & get_graph_type_sel()
Input specification objects.
SparseGraph * graph_
Graph used for partitioning the mesh.
Distribution * init_el_ds_
Original distribution of elements. Depends on type of partitioner.
static const Input::Type::Selection & get_tool_sel()
SideIter side(const unsigned int i) const
@ any_neighboring
Add an edge for any pair of neighboring elements.
const unsigned int * get_lsizes_array()
get local sizes array
void finalize()
Make sparse graph structures: rows, adj.
virtual void partition(int *loc_part)=0
ElementAccessor< 3 > element() const
@ same_dimension_neighboring
Add an edge for any pair of neighboring elements of the same dimension (bad for matrix multiply).
MPI_Comm get_comm() const
static const Input::Type::Record & get_input_type()
const unsigned int * get_starts_array() const
get local starts array
const Distribution * get_init_distr() const
unsigned int idx() const
Return local idx of element in boundary / bulk part of element vector.
shared_ptr< vector< int > > seq_part_
Sequential partitioning for output.
void id_maps(int n_ids, LongIdx *id_4_old, Distribution *&new_ds, LongIdx *&id_4_loc, LongIdx *&new_4_id)
void make_element_connection_graph()
@ PETSc
Use PETSc interface to various partitioning tools.
@ any_weight_lower_dim_cuts
Same as before and assign higher weight to cuts of lower dimension in order to make them stick to one face.
virtual unsigned int n_elements(bool boundary=false) const
Returns count of boundary or bulk elements.
MPI_Comm get_comm() const
Returns communicator.
bool is_local(unsigned int idx) const
identify local index
Implementation of range helper class.
unsigned int begin(int proc) const
get starting local index