Flow123d  jenkins-Flow123d-linux-release-multijob-282
distribution.hh
Go to the documentation of this file.
1 /*!
2  *
3  * Copyright (C) 2007 Technical University of Liberec. All rights reserved.
4  *
5  * Please make the following reference to Flow123d on your project site if you use the program for any purpose,
6  * especially for academic research:
7  * Flow123d, Research Centre: Advanced Remedial Technologies, Technical University of Liberec, Czech Republic
8  *
9  * This program is free software; you can redistribute it and/or modify it under the terms
10  * of the GNU General Public License version 3 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
13  * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14  * See the GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License along with this program; if not,
17  * write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18  *
19  *
20  * $Id$
21  * $Revision$
22  * $LastChangedBy$
23  * $LastChangedDate$
24  *
25  * @file
26  * @brief Support classes for parallel programming.
27  *
28  * TODO:
29  *
30  * * need better resolution of constructors
31  */
32 
33 
34 #ifndef MESH_PARTITION_HH_
35 #define MESH_PARTITION_HH_
36 
37 #include <mpi.h>
38 #include <ostream>
39 #include <petscvec.h>
40 
/**
 * Tag class carrying an integer distribution-scheme code.
 *
 * Used by the Distribution(const DistributionType &, ...) constructor to
 * select how a global index range is split among processes.
 * NOTE(review): the class-header line was lost in extraction and is
 * restored here from the visible constructor and closing brace.
 */
class DistributionType {
public:
    /// Wraps the raw scheme code (e.g. Distribution::Block, Distribution::Localized).
    explicit DistributionType(int type) : type_(type) {}
    /// The raw scheme code; public so Distribution can read it directly.
    int type_;
};
46 
49 };
50 
53 };
54 
55 
56 
57 /**
58  * Continuous distribution of an 1D array of indexes.
59  * TODO: after inclusion of LibMesh sources:
60  * - replace constructor Distribution(Vec) with NumericVector.get_distribution()
61  * - derive from ParalellObjectS
62  */
63 
64 class Distribution {
65 public:
66  /**
67  * Type of distribution automatically created only from global number of indices.
68  */
69  typedef enum {
70  /** Distribute indices evenly. */
71  Block=-1,
72  /** Put all on the first processor. */
75 
76  /**
77  * Constructor. It makes distribution from given local number of indices on every processor.
78  *
79  * COLLECTIVE
80  * @param size Local size on calling processor.
81  */
82  //@param comm (optional) MPI Communicator.
83  Distribution(const unsigned int size, MPI_Comm comm);
84 
85  /**
86  * Constructor. It makes distribution from given array of sizes of processors.
87  *
88  * NOT COLLECTIVE, but still use MPI to provide information about processors.
89  *
90  * @param sizes Int array with sizes.
91  */
92  //@param comm (optional) MPI Communicator.
93  Distribution(const unsigned int * const sizes, MPI_Comm comm);
94 
95  /**
96  * Constructor. It makes distribution from distribution of a PETSC vector.
97  *
98  * NOT COLLECTIVE, but still use MPI to provide information about processors.
99  * @param petsc_vector
100  */
101  Distribution(const Vec &petsc_vector);
102 
103  /**
104  * Constructor. It makes distribution from global number of indices according to given scheme.
105  *
106  * NOT COLLECTIVE, but still use MPI to provide information about processors.
107  * @param type Either Block or Localized distribution of indices.
108  * @param global_size Total number of indices to distribute.
109  */
110  //@param comm (optional) MPI Communicator.
111  Distribution(const DistributionType &type, unsigned int global_size, MPI_Comm comm);
112 
113  /**
114  * Copy Constructor.
115  */
116  Distribution(const Distribution &distr);
117 
118  /// get num of processors
119  inline unsigned int np() const {return num_of_procs;}
120  /// get my processor
121  inline unsigned int myp() const {return my_proc;}
122  /// get starting local index
123  inline unsigned int begin(int proc) const {return (starts[proc]);}
124  inline unsigned int begin() const {return ( begin(myp()) );}
125  /// get last local index +1
126  inline unsigned int end(int proc) const {return (starts[proc+1]);}
127  inline unsigned int end() const {return ( end(myp()) );}
128  /// get local size
129  inline unsigned int lsize(int proc) const {return (end(proc)-begin(proc));}
130  inline unsigned int lsize() const {return ( lsize(myp()) );}
131  /// get global size
132  inline unsigned int size() const {return (starts[np()]);}
133  /// identify local index
134  inline bool is_local(unsigned int idx) const {return ( begin()<=(idx) && (idx)<end() );}
135  inline bool is_on_proc(unsigned int idx, unsigned int proc) const {return ( begin(proc)<=(idx) && (idx)<end(proc) );}
136  /// get processor of the given index
137  unsigned int get_proc(unsigned int idx) const;
138  /// get local sizes array
139  const unsigned int * get_lsizes_array();
140  /// get local starts array
141  const unsigned int * get_starts_array() const;
142  /// Returns communicator.
143  inline MPI_Comm get_comm() const {return communicator;}
144  /// distribution view
145  void view(std::ostream &stream) const;
146  ~Distribution();
147 private:
148  /// communicator
150  /// number of procs
152  /// my proc number
153  int my_proc;
154  /// starts[i] index of the first index on the proc i; starts[n_procs]=size of whole array
155  unsigned int *starts;
156  /// local sizes
157  unsigned int *lsizes;
158 };
159 
160 inline std::ostream & operator <<(std::ostream &stream, const Distribution &distr)
161  { distr.view(stream); return stream; }
162 
163 
164 
165 #endif // MESH_PARTITION_HH_
unsigned int size() const
get global size
unsigned int get_proc(unsigned int idx) const
get processor of the given index
unsigned int end() const
int my_proc
my proc number
unsigned int * starts
starts[i] index of the first index on the proc i; starts[n_procs]=size of whole array ...
int MPI_Comm
Definition: mpi.h:141
unsigned int begin() const
Distribution(const unsigned int size, MPI_Comm comm)
Definition: distribution.cc:39
int num_of_procs
number of procs
unsigned int lsize() const
const unsigned int * get_lsizes_array()
get local sizes array
void view(std::ostream &stream) const
distribution view
bool is_local(unsigned int idx) const
identify local index
const MPI_Comm communicator
communicator
const unsigned int * get_starts_array() const
get local starts array
unsigned int begin(int proc) const
get starting local index
DistributionType(int type)
Definition: distribution.hh:43
unsigned int np() const
get num of processors
STREAM & operator<<(STREAM &s, UpdateFlags u)
unsigned int myp() const
get my processor
unsigned int end(int proc) const
get last local index +1
unsigned int * lsizes
local sizes
MPI_Comm get_comm() const
Returns communicator.
bool is_on_proc(unsigned int idx, unsigned int proc) const
unsigned int lsize(int proc) const
get local size