distribution.hh
/*!
 *
 * Copyright (C) 2015 Technical University of Liberec. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 3 as published by the
 * Free Software Foundation. (http://www.gnu.org/licenses/gpl-3.0.en.html)
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 *
 * @file distribution.hh
 * @brief Support classes for parallel programming.
 * @todo
 * - need better resolution of constructors
 */

#ifndef MESH_PARTITION_HH_
#define MESH_PARTITION_HH_

#include <mpi.h>
#include <ostream>
#include <petscvec.h>

class DistributionType {
public:
    explicit DistributionType(int type) : type_(type) {}
    int type_;
};

class DistributionBlock : public DistributionType {
public: DistributionBlock() : DistributionType(-1) {}
};

class DistributionLocalized : public DistributionType {
public: DistributionLocalized() : DistributionType(-2) {}
};
/**
 * Continuous distribution of a 1D array of indexes.
 * TODO: after inclusion of LibMesh sources:
 * - replace the constructor Distribution(Vec) with NumericVector.get_distribution()
 * - derive from ParallelObject
 */

class Distribution {
public:
    /**
     * Type of distribution automatically created from the global number of indices alone.
     */
    typedef enum {
        /** Distribute indices evenly. */
        Block=-1,
        /** Put all on the first processor. */
        Localized=-2
    } SpecialDistribution;

    /**
     * Constructor. Creates the distribution from the given local number of indices on every processor.
     *
     * COLLECTIVE
     * @param size Local size on the calling processor.
     */
    //@param comm (optional) MPI Communicator.
    Distribution(const unsigned int size, MPI_Comm comm);
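
    // Usage sketch (illustrative, not part of the original header): every rank
    // passes its own local count, and all ranks of the communicator must call
    // the constructor.
    //
    //     unsigned int local_n = ...;                // indices owned by this rank
    //     Distribution ds(local_n, MPI_COMM_WORLD);  // COLLECTIVE over the communicator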

    /**
     * Constructor. Creates the distribution from a given array of local sizes, one per processor.
     *
     * NOT COLLECTIVE, but still uses MPI to provide information about the processors.
     *
     * @param sizes Array of local sizes, one entry per processor.
     */
    //@param comm (optional) MPI Communicator.
    Distribution(const unsigned int * const sizes, MPI_Comm comm);
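
    // Usage sketch (illustrative): the caller already knows every rank's size,
    // so the layout can be built without communicating the sizes themselves.
    //
    //     // with 3 ranks: rank 0 owns 4 indices, rank 1 owns 2, rank 2 owns 3
    //     const unsigned int sizes[3] = {4, 2, 3};
    //     Distribution ds(sizes, MPI_COMM_WORLD);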

    /**
     * Constructor. Creates the distribution from the distribution of a PETSc vector.
     *
     * NOT COLLECTIVE, but still uses MPI to provide information about the processors.
     * @param petsc_vector Distributed PETSc vector whose layout is copied.
     */
    Distribution(const Vec &petsc_vector);
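
    // Usage sketch (illustrative): reuse the ownership layout of an existing
    // distributed PETSc vector 'v' created elsewhere, e.g. with VecCreateMPI.
    //
    //     Vec v;
    //     VecCreateMPI(PETSC_COMM_WORLD, local_n, PETSC_DETERMINE, &v);
    //     Distribution ds(v);   // same local ranges as v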

    /**
     * Constructor. Creates the distribution from the global number of indices according to the given scheme.
     *
     * NOT COLLECTIVE, but still uses MPI to provide information about the processors.
     * @param type Either Block or Localized distribution of indices.
     * @param global_size Total number of indices to distribute.
     */
    //@param comm (optional) MPI Communicator.
    Distribution(const DistributionType &type, unsigned int global_size, MPI_Comm comm);
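
    // Usage sketch (illustrative), using the DistributionBlock / DistributionLocalized
    // helpers declared above: split 1000 indices evenly, or keep them all on rank 0.
    //
    //     Distribution even(DistributionBlock(), 1000, MPI_COMM_WORLD);
    //     Distribution on_root(DistributionLocalized(), 1000, MPI_COMM_WORLD);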

    /**
     * Copy constructor.
     */
    Distribution(const Distribution &distr);

    /// get number of processors
    inline unsigned int np() const {return num_of_procs;}
    /// get my processor
    inline unsigned int myp() const {return my_proc;}
    /// get starting local index
    inline unsigned int begin(int proc) const {return (starts[proc]);}
    inline unsigned int begin() const {return ( begin(myp()) );}
    /// get last local index +1
    inline unsigned int end(int proc) const {return (starts[proc+1]);}
    inline unsigned int end() const {return ( end(myp()) );}
    /// get local size
    inline unsigned int lsize(int proc) const {return (end(proc)-begin(proc));}
    inline unsigned int lsize() const {return ( lsize(myp()) );}
    /// get global size
    inline unsigned int size() const {return (starts[np()]);}
    /// test whether the given global index is local
    inline bool is_local(unsigned int idx) const {return ( begin()<=(idx) && (idx)<end() );}
    inline bool is_on_proc(unsigned int idx, unsigned int proc) const {return ( begin(proc)<=(idx) && (idx)<end(proc) );}
    /// get processor of the given index
    unsigned int get_proc(unsigned int idx) const;
    /// get local sizes array
    const unsigned int * get_lsizes_array();
    /// get local starts array
    const unsigned int * get_starts_array() const;
    /// Returns communicator.
    inline MPI_Comm get_comm() const {return communicator;}
    /// write the distribution view to the given stream
    void view(std::ostream &stream) const;
    ~Distribution();
private:
    /// communicator
    const MPI_Comm communicator;
    /// number of processors
    int num_of_procs;
    /// my processor number
    int my_proc;
    /// starts[i] is the first global index on processor i; starts[n_procs] equals the size of the whole array
    unsigned int *starts;
    /// local sizes
    unsigned int *lsizes;
};
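
// End-to-end sketch (illustrative, not part of the original header): each rank
// visits exactly the contiguous range of global indices it owns.
//
//     Distribution ds(local_n, MPI_COMM_WORLD);
//     for (unsigned int i = ds.begin(); i < ds.end(); ++i) {
//         // i is owned by this rank, so ds.is_local(i) holds
//     }
//     unsigned int owner = ds.get_proc(0);   // rank that owns global index 0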

inline std::ostream & operator <<(std::ostream &stream, const Distribution &distr)
    { distr.view(stream); return stream; }
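
// Usage sketch (illustrative): stream a distribution for debugging; this is
// shorthand for calling ds.view(std::cout) directly.
//
//     std::cout << ds;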


#endif // MESH_PARTITION_HH_