/*!
 *
 * Copyright (C) 2015 Technical University of Liberec. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 3 as published by the
 * Free Software Foundation. (http://www.gnu.org/licenses/gpl-3.0.en.html)
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 *
 * @file    element_data_cache.cc
 * @brief
 */

#include <limits>
#include <ostream>
#include "io/element_data_cache.hh"
#include "io/msh_basereader.hh"
#include "la/distribution.hh"
#include "system/armadillo_tools.hh"
#include "system/system.hh"
#include "system/tokenizer.hh"
#include "boost/lexical_cast.hpp"

/// Default constructor.
template <typename T>
ElementDataCache<T>::ElementDataCache()
: ElementDataCacheBase(),
  check_scale_data_(CheckScaleData::none) {}

template <typename T>
ElementDataCache<T>::ElementDataCache(std::string field_name, double time, unsigned int row_vec_size, unsigned int boundary_begin)
: check_scale_data_(CheckScaleData::none)
{
    this->time_ = time;
    this->field_input_name_ = field_name;
    this->data_ = create_data_cache(row_vec_size);
    this->n_dofs_per_element_ = 1;
    this->boundary_begin_ = boundary_begin;
}

template <typename T>
ElementDataCache<T>::ElementDataCache(std::string field_name, unsigned int n_comp, unsigned int size, std::string fe_type, unsigned int n_dofs_per_element)
: check_scale_data_(CheckScaleData::none)
{
    this->set_vtk_type<T>();
    this->field_name_ = field_name;
    this->field_input_name_ = this->field_name_;

    this->n_values_ = size;
    ASSERT_GT(n_comp, 0)(field_name).error("Output field returning variable size vectors. Try convert to MultiField.");
    this->n_comp_ = n_comp;
    this->fe_type_ = fe_type;
    this->n_dofs_per_element_ = n_dofs_per_element;
    this->boundary_begin_ = size;

    this->data_ = create_data_cache(this->n_comp_ * this->n_dofs_per_element_ * this->n_values_);
}

/// Destructor of ElementDataCache.
template <typename T>
ElementDataCache<T>::~ElementDataCache() {}

/// Return underlying vector of element data.
template <typename T>
typename ElementDataCache<T>::CacheData ElementDataCache<T>::get_data() {
    return data_;
}

template <typename T>
typename ElementDataCache<T>::CacheData ElementDataCache<T>::create_data_cache(unsigned int row_vec_size) {
    typename ElementDataCache<T>::CacheData data_cache = std::make_shared<std::vector<T>>();
    data_cache->resize(row_vec_size, std::numeric_limits<T>::signaling_NaN());
    return data_cache;
}

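/// Implements ElementDataCacheBase::read_ascii_data.
/// Reads @p n_components ASCII tokens from @p tok and stores them at row @p i_row of the cache.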
template <typename T>
void ElementDataCache<T>::read_ascii_data(Tokenizer &tok, unsigned int n_components, unsigned int i_row) {
    unsigned int idx = i_row * n_components;
    std::vector<T> &vec = *( data_.get() );
    for (unsigned int i_col=0; i_col < n_components; ++i_col, ++idx) {
        ASSERT_LT(idx, vec.size());
        vec[idx] = boost::lexical_cast<T>(*tok);
        ++tok;
    }
}

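/// Implements ElementDataCacheBase::read_binary_data.
/// Reads @p n_components binary values of type T from @p data_stream and stores them at row @p i_row of the cache.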
template <typename T>
void ElementDataCache<T>::read_binary_data(std::istream &data_stream, unsigned int n_components, unsigned int i_row) {
    unsigned int idx = i_row * n_components;
    std::vector<T> &vec = *( data_.get() );
    for (unsigned int i_col=0; i_col < n_components; ++i_col, ++idx) {
        data_stream.read(reinterpret_cast<char *>(&vec[idx]), sizeof(T));
    }
}

/**
 * Output the data element at the given index @p idx. Method for writing data
 * to an output stream.
 *
 * \note This method is used only by the MSH file format.
 */
template <typename T>
void ElementDataCache<T>::print_ascii(ostream &out_stream, unsigned int idx)
{
    ASSERT_LT(idx, this->n_values_).error();
    std::vector<T> &vec = *( this->data_.get() );
    for(unsigned int i = n_comp_*n_dofs_per_element_*idx; i < n_comp_*n_dofs_per_element_*(idx+1); ++i )
        out_stream << vec[i] << " ";
}

/**
 * \brief Print all data stored in the output data cache.
 *
 * TODO: indicate whether tensor data are output in column-first or row-first order
 * and possibly implement transposition. Set such a property for individual file formats.
 * Class OutputData always stores data in row-first order.
 */
template <typename T>
void ElementDataCache<T>::print_ascii_all(ostream &out_stream, unsigned int start)
{
    std::vector<T> &vec = *( this->data_.get() );
    for(unsigned int idx = start; idx < this->n_values_; idx++) {
        for(unsigned int i = n_comp_*n_dofs_per_element_*idx; i < n_comp_*n_dofs_per_element_*(idx+1); ++i )
            out_stream << vec[i] << " ";
    }
}


/// Prints the whole data vector into stream.
template <typename T>
void ElementDataCache<T>::print_binary_all(ostream &out_stream, bool print_data_size, unsigned int start)
{
    if (print_data_size) {
        // write size of data in bytes
        unsigned long long int data_byte_size = this->n_values_ * n_comp_ * sizeof(T);
        out_stream.write(reinterpret_cast<const char*>(&data_byte_size), sizeof(unsigned long long int));
    }
    // write data
    std::vector<T> &vec = *( this->data_.get() );
    for(unsigned int idx = start; idx < this->n_values_; idx++) {
        for(unsigned int i = n_comp_*idx; i < n_comp_*(idx+1); ++i )
            out_stream.write(reinterpret_cast<const char*>(&(vec[i])), sizeof(T));
    }
}

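/// Prints the cache subrange [ @p begin, @p end ) as a YAML flow sequence; each value is
/// written as a scalar, a 3-component vector or a 3x3 tensor according to n_comp_.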
template <typename T>
void ElementDataCache<T>::print_yaml_subarray(ostream &out_stream, unsigned int precision, unsigned int begin, unsigned int end)
{
    out_stream << "[ ";
    std::vector<T> &vec = *( this->data_.get() );
    for(unsigned int idx = begin; idx < end; idx++) {
        if (idx != begin) out_stream << " , ";
        unsigned int vec_pos = n_comp_ * idx; // position of element value in data cache
        switch (this->n_comp_) {
            case NumCompValueType::N_SCALAR: {
                out_stream << field_value_to_yaml( vec[vec_pos], precision );
                break;
            }
            case NumCompValueType::N_VECTOR: {
                typename arma::Col<T>::template fixed<3> vec_val;
                for (unsigned int i=0; i<3; ++i, ++vec_pos)
                    vec_val(i) = vec[vec_pos];
                out_stream << field_value_to_yaml( vec_val, precision );
                break;
            }
            case NumCompValueType::N_TENSOR: {
                typename arma::Mat<T>::template fixed<3,3> mat_val;
                for (unsigned int i=0; i<3; ++i)
                    for (unsigned int j=0; j<3; ++j, ++vec_pos)
                        mat_val(i,j) = vec[vec_pos];
                out_stream << field_value_to_yaml( mat_val, precision );
                break;
            }
        }
    }
    out_stream << " ]";
}

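/// Computes the minimal and maximal value stored in the cache (over all components).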
template <typename T>
void ElementDataCache<T>::get_min_max_range(double &min, double &max)
{
    min = std::numeric_limits<double>::max();
    max = std::numeric_limits<double>::lowest();
    std::vector<T> &vec = *( this->data_.get() );
    for(unsigned int idx = 0; idx < this->n_values_; idx++) {
        for(unsigned int i = n_comp_*n_dofs_per_element_*idx; i < n_comp_*n_dofs_per_element_*(idx+1); ++i ) {
            if (vec[i] < min) min = vec[i];
            if (vec[i] > max) max = vec[i];
        }
    }
}

/**
 * Store the given data value at the given element index.
 */
template <typename T>
void ElementDataCache<T>::store_value(unsigned int idx, const T * value) {
    ASSERT_LT(idx, this->n_values_)(this->field_name_);
    std::vector<T> &vec = *( this->data_.get() );
    unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
    for(unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
        vec[vec_idx] = value[i];
    }
}

/**
 * Add the given value to the data at the given index.
 */
template <typename T>
void ElementDataCache<T>::add(unsigned int idx, const T * value) {
    ASSERT_LT(idx, this->n_values_);
    std::vector<T> &vec = *( this->data_.get() );
    unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
    for(unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
        vec[vec_idx] += value[i];
    }
}

/**
 * Reset the values at the given index to zero.
 */
template <typename T>
void ElementDataCache<T>::zero(unsigned int idx) {
    ASSERT_LT(idx, this->n_values_);
    std::vector<T> &vec = *( this->data_.get() );
    unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
    for(unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
        vec[vec_idx] = 0;
    }
}

/**
 * Normalize (divide) the values at the given index by the given divisor.
 */
template <typename T>
void ElementDataCache<T>::normalize(unsigned int idx, unsigned int divisor) {
    ASSERT_LT(idx, this->n_values_);
    std::vector<T> &vec = *( this->data_.get() );
    unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
    for(unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
        vec[vec_idx] /= divisor;
    }
}
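
/*
 * The zero / add / normalize triple supports simple averaging of per-index
 * contributions, as used by compute_node_data() below. A minimal sketch with
 * hypothetical names ('cache' is a scalar ElementDataCache<double>; 'a' and 'b'
 * each point to one value):
 *
 *     cache.zero(idx);           // reset the accumulator at idx
 *     cache.add(idx, a);         // accumulate contributions
 *     cache.add(idx, b);
 *     cache.normalize(idx, 2);   // divide by the number of contributions
 */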
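/**
 * Implements ElementDataCacheBase::gather.
 *
 * Collects the distributed data (and the local-to-global index map) from all
 * processes on rank 0 and returns a serial cache there; on other ranks the
 * returned pointer stays empty.
 */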
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::gather(Distribution *distr, LongIdx *local_to_global) {
    std::shared_ptr< ElementDataCache<T> > gather_cache;
    int rank = distr->myp();
    int n_proc = distr->np();

    unsigned int n_global_data;             // global number of data values
    std::vector<int> rec_starts(n_proc);    // displacement of the first value received from each process
    std::vector<int> rec_counts(n_proc);    // number of values received from each process
    int *rec_indices_ids = nullptr;         // collected values of the local-to-global index maps
    T *rec_data = nullptr;                  // collected data values

    n_global_data = distr->size();
    // collect the local-to-global index maps of all processes on rank 0
    if (rank==0) {
        for (int i=0; i<n_proc; ++i) {
            rec_starts[i] = distr->begin(i);
            rec_counts[i] = distr->lsize(i);
        }
        rec_indices_ids = new int [ n_global_data ];
    }
    MPI_Gatherv( local_to_global, distr->lsize(), MPI_INT, rec_indices_ids, rec_counts.data(), rec_starts.data(), MPI_INT, 0, MPI_COMM_WORLD);
    // collect the data vectors of all processes on rank 0
    if (rank==0) {
        for (int i=0; i<n_proc; ++i) {
            rec_starts[i] = this->n_comp()*this->n_dofs_per_element()*rec_starts[i];
            rec_counts[i] = this->n_comp()*this->n_dofs_per_element()*rec_counts[i];
        }
        rec_data = new T [ this->n_comp() * this->n_dofs_per_element() * n_global_data ];
    }
    auto &local_cache_vec = *( this->get_data().get() );
    MPI_Gatherv( &local_cache_vec[0], this->n_comp()*this->n_dofs_per_element()*distr->lsize(), this->mpi_data_type(), rec_data, rec_counts.data(), rec_starts.data(), this->mpi_data_type(), 0, MPI_COMM_WORLD);

    // create and fill the serial cache on rank 0
    if (rank==0) {
        gather_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, (unsigned int)this->n_comp(), n_global_data, this->fe_type_, this->n_dofs_per_element_);
        auto &gather_vec = *( gather_cache->get_data().get() );
        unsigned int i_global_coord; // position in the serial (gathered) cache
        for (unsigned int i=0; i<n_global_data; ++i) {
            i_global_coord = this->n_comp() * this->n_dofs_per_element() * rec_indices_ids[i];
            for (unsigned int j=0; j<this->n_comp() * this->n_dofs_per_element(); ++j) { // loop over components
                ASSERT_LT(i_global_coord+j, gather_vec.size());
                gather_vec[ i_global_coord+j ] = rec_data[ this->n_comp()*this->n_dofs_per_element()*i+j ];
            }
        }

        delete[] rec_indices_ids;
        delete[] rec_data;
    }

    return gather_cache;
}
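
/*
 * A minimal usage sketch of gather() with hypothetical names: 'distr'
 * describes the local partition and 'local_to_global' maps local rows to
 * global indices. Only rank 0 receives a non-null serial cache.
 *
 *     std::shared_ptr<ElementDataCacheBase> serial = cache.gather(&distr, local_to_global);
 *     if (distr.myp() == 0) {
 *         // 'serial' now holds all n_values in global ordering
 *     }
 */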
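/**
 * Implements ElementDataCacheBase::element_node_cache_fixed_size.
 *
 * Expands the cache to a fixed four-nodes-per-element layout given by the element
 * offsets in @p offset_vec; positions of nodes missing on an element are zero-filled.
 */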
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::element_node_cache_fixed_size(std::vector<unsigned int> &offset_vec) {
    unsigned int n_elem = offset_vec.size()-1;
    std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, 4*this->n_comp(), n_elem, this->fe_type_, this->n_dofs_per_element_);
    auto &data_out_vec = *( elem_node_cache->get_data().get() );
    std::fill( data_out_vec.begin(), data_out_vec.end(), (T)0 );
    auto &data_in_vec = *( this->get_data().get() );

    unsigned int i_node, i_old, i_new;
    for (unsigned int i_el=0, i_conn=0; i_el<offset_vec.size()-1; i_el++) {
        for(i_node=4*i_el; i_conn<offset_vec[i_el+1]; i_conn++, i_node++) {
            i_old = i_conn*this->n_comp_*this->n_dofs_per_element_;
            i_new = i_node*this->n_comp_*this->n_dofs_per_element_;
            for(unsigned int i = 0; i < this->n_comp_*this->n_dofs_per_element_; i++) {
                ASSERT_LT(i_new+i, data_out_vec.size());
                ASSERT_LT(i_old+i, data_in_vec.size());
                data_out_vec[i_new+i] = data_in_vec[i_old+i];
            }
        }
    }

    return elem_node_cache;
}

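/**
 * Implements ElementDataCacheBase::element_node_cache_optimize_size.
 *
 * Inverse of element_node_cache_fixed_size: compresses the fixed four-nodes-per-element
 * layout back to the variable-size layout given by @p offset_vec.
 */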
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::element_node_cache_optimize_size(std::vector<unsigned int> &offset_vec) {
    std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_,
            this->n_comp()/4, offset_vec[offset_vec.size()-1], this->fe_type_, this->n_dofs_per_element_);
    auto &data_out_vec = *( elem_node_cache->get_data().get() );
    auto &data_in_vec = *( this->get_data().get() );

    unsigned int i_node, i_old, i_new;
    for (unsigned int i_el=0, i_conn=0; i_el<offset_vec.size()-1; i_el++) {
        for(i_node=4*i_el; i_conn<offset_vec[i_el+1]; i_conn++, i_node++) {
            i_old = i_node*elem_node_cache->n_comp_*this->n_dofs_per_element_;
            i_new = i_conn*elem_node_cache->n_comp_*this->n_dofs_per_element_;
            for(unsigned int i = 0; i < elem_node_cache->n_comp_*this->n_dofs_per_element_; i++) {
                ASSERT_LT(i_new+i, data_out_vec.size());
                ASSERT_LT(i_old+i, data_in_vec.size());
                data_out_vec[i_new+i] = data_in_vec[i_old+i];
            }
        }
    }
    return elem_node_cache;
}

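/**
 * Implements ElementDataCacheBase::compute_node_data.
 *
 * Computes node data as the arithmetic mean of the values of all elements connected
 * to each node; @p conn_vec holds the element-to-node connectivity and @p data_size
 * is the number of nodes.
 */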
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::compute_node_data(std::vector<unsigned int> &conn_vec, unsigned int data_size) {
    ASSERT_EQ(conn_vec.size(), this->n_values());
    unsigned int idx;

    // set output data to zero
    std::shared_ptr< ElementDataCache<T> > node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, this->n_comp(), data_size, this->fe_type_, this->n_dofs_per_element_);
    std::vector<unsigned int> count(data_size, 0);
    for (idx=0; idx < node_cache->n_values(); idx++)
        node_cache->zero(idx);

    // accumulate contributions of connected elements at each node
    auto &data_in_vec = *( this->get_data().get() );
    for (idx=0; idx < conn_vec.size(); idx++) {
        ASSERT_LT(conn_vec[idx], node_cache->n_values());
        ASSERT_LT(this->n_comp()*this->n_dofs_per_element()*idx, data_in_vec.size());
        node_cache->add( conn_vec[idx], &(data_in_vec[this->n_comp() * this->n_dofs_per_element() * idx]) );
        count[ conn_vec[idx] ]++;
    }

    // compute mean values at nodes
    for(idx=0; idx < node_cache->n_values(); idx++)
        node_cache->normalize(idx, count[idx]);

    return node_cache;
}

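/// Return the MPI data type corresponding with the template parameter of the cache
/// (explicit specializations for the instantiated types).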
template<>
MPI_Datatype ElementDataCache<double>::mpi_data_type() {
    return MPI_DOUBLE;
}

template<>
MPI_Datatype ElementDataCache<int>::mpi_data_type() {
    return MPI_INT;
}

template<>
MPI_Datatype ElementDataCache<unsigned int>::mpi_data_type() {
    return MPI_UNSIGNED;
}

/// Access i-th element in the data vector.
template <class T>
T& ElementDataCache<T>::operator[](unsigned int i)
{
    std::vector<T> &vec = *( this->data_.get() );
    ASSERT_LT(i, vec.size());
    return vec[i];
}

// explicit instantiation of template class
template class ElementDataCache<unsigned int>;
template class ElementDataCache<int>;
template class ElementDataCache<double>;