/*!
 *
 * Copyright (C) 2015 Technical University of Liberec. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 3 as published by the
 * Free Software Foundation. (http://www.gnu.org/licenses/gpl-3.0.en.html)
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 *
 * @file    element_data_cache.cc
 * @brief
 */


#include <limits>
#include <ostream>
#include "io/element_data_cache.hh"
#include "io/msh_basereader.hh"
#include "la/distribution.hh"
#include "system/armadillo_tools.hh"
#include "system/system.hh"
#include "system/tokenizer.hh"
#include "boost/lexical_cast.hpp"


template <typename T>
ElementDataCache<T>::ElementDataCache()
: ElementDataCacheBase(),
  check_scale_data_(CheckScaleData::none) {}


template <typename T>
ElementDataCache<T>::ElementDataCache(std::string field_name, double time, unsigned int row_vec_size)
: check_scale_data_(CheckScaleData::none)
{
    this->time_ = time;
    this->field_input_name_ = field_name;
    this->data_ = create_data_cache(row_vec_size);
    this->n_dofs_per_element_ = 1;
}


template <typename T>
ElementDataCache<T>::ElementDataCache(std::string field_name, unsigned int n_comp, unsigned int size, std::string fe_type, unsigned int n_dofs_per_element)
: check_scale_data_(CheckScaleData::none)
{
    this->set_vtk_type<T>();
    this->field_name_ = field_name;
    this->field_input_name_ = this->field_name_;

    this->n_values_ = size;
    ASSERT_GT(n_comp, 0)(field_name).error("Output field returning variable size vectors. Try convert to MultiField.");
    this->n_comp_ = n_comp;
    this->fe_type_ = fe_type;
    this->n_dofs_per_element_ = n_dofs_per_element;

    this->data_ = create_data_cache(n_comp * n_dofs_per_element * size);
}


template <typename T>
ElementDataCache<T>::~ElementDataCache() {}


template <typename T>
typename ElementDataCache<T>::CacheData ElementDataCache<T>::get_data() {
    return data_;
}


template <typename T>
typename ElementDataCache<T>::CacheData ElementDataCache<T>::create_data_cache(unsigned int row_vec_size) {
    typename ElementDataCache<T>::CacheData data_cache = std::make_shared<std::vector<T>>();
    data_cache->resize(row_vec_size, std::numeric_limits<T>::signaling_NaN());
    return data_cache;
}

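/* Usage sketch (illustrative): filling a cache that holds one scalar per element
 * and printing it. The field name and the element count are made-up examples.
 *
 *     ElementDataCache<double> cache("pressure", 1, 10, "", 1);   // 1 component, 10 elements
 *     double val;
 *     for (unsigned int i = 0; i < 10; ++i) {
 *         val = 0.1 * i;
 *         cache.store_value(i, &val);          // copies n_comp_ * n_dofs_per_element_ values to slot i
 *     }
 *     cache.print_ascii_all(std::cout);        // "0 0.1 0.2 ..."
 */
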
template <typename T>
void ElementDataCache<T>::read_ascii_data(Tokenizer &tok, unsigned int n_components, unsigned int i_row) {
    unsigned int idx = i_row * n_components;
    std::vector<T> &vec = *( data_.get() );
    for (unsigned int i_col=0; i_col < n_components; ++i_col, ++idx) {
        ASSERT_LT(idx, vec.size());
        vec[idx] = boost::lexical_cast<T>(*tok);
        ++tok;
    }
}


template <typename T>
void ElementDataCache<T>::read_binary_data(std::istream &data_stream, unsigned int n_components, unsigned int i_row) {
    unsigned int idx = i_row * n_components;
    std::vector<T> &vec = *( data_.get() );
    for (unsigned int i_col=0; i_col < n_components; ++i_col, ++idx) {
        data_stream.read(reinterpret_cast<char *>(&vec[idx]), sizeof(T));
    }
}

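/* Reader sketch (illustrative): a mesh reader owning the cache calls
 * read_binary_data() once per row; row i lands at offset i * n_components in the
 * flat data vector. The stream, sizes and field name below are made-up
 * stand-ins for what a concrete reader would supply.
 *
 *     std::ifstream data_stream("data.bin", std::ios::binary);
 *     unsigned int n_components = 3, n_rows = 100;
 *     ElementDataCache<double> cache("velocity", n_components, n_rows, "", 1);
 *     for (unsigned int i_row = 0; i_row < n_rows; ++i_row)
 *         cache.read_binary_data(data_stream, n_components, i_row);
 */
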
/**
 * Output the data element at index @p idx. Method for writing data
 * to output stream.
 *
 * \note This method is used only by the MSH file format.
 */
template <typename T>
void ElementDataCache<T>::print_ascii(ostream &out_stream, unsigned int idx)
{
    ASSERT_LT(idx, this->n_values_).error();
    std::vector<T> &vec = *( this->data_.get() );
    for(unsigned int i = n_comp_*n_dofs_per_element_*idx; i < n_comp_*n_dofs_per_element_*(idx+1); ++i )
        out_stream << vec[i] << " ";
}

/**
 * \brief Print all data stored in output data.
 *
 * TODO: indicate whether tensor data are output in column-first or row-first order
 * and possibly implement transposition. Set such property for individual file formats.
 * Class OutputData always stores in row-first order.
 */
template <typename T>
void ElementDataCache<T>::print_ascii_all(ostream &out_stream, unsigned int start)
{
    std::vector<T> &vec = *( this->data_.get() );
    for(unsigned int idx = start; idx < this->n_values_; idx++) {
        for(unsigned int i = n_comp_*n_dofs_per_element_*idx; i < n_comp_*n_dofs_per_element_*(idx+1); ++i )
            out_stream << vec[i] << " ";
    }
}


/// Prints the whole data vector into stream.
template <typename T>
void ElementDataCache<T>::print_binary_all(ostream &out_stream, bool print_data_size, unsigned int start)
{
    if (print_data_size) {
        // write size of data
        unsigned long long int data_byte_size = this->n_values_ * n_comp_ * sizeof(T);
        out_stream.write(reinterpret_cast<const char*>(&data_byte_size), sizeof(unsigned long long int));
    }
    // write data
    std::vector<T> &vec = *( this->data_.get() );
    for(unsigned int idx = start; idx < this->n_values_; idx++) {
        for(unsigned int i = n_comp_*idx; i < n_comp_*(idx+1); ++i )
            out_stream.write(reinterpret_cast<const char*>(&(vec[i])), sizeof(T));
    }
}

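/* Layout sketch (illustrative): the appended-binary block written above is an
 * optional byte-count header followed by the raw values. For a cache of two
 * scalar doubles the stream would contain
 *
 *     [ 16 ][ v0 ][ v1 ]     // sizeof(unsigned long long int) header, then 2 * sizeof(double) bytes
 *
 * where 16 = n_values_ * n_comp_ * sizeof(double).
 */
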
template <typename T>
void ElementDataCache<T>::print_yaml_subarray(ostream &out_stream, unsigned int precision, unsigned int begin, unsigned int end)
{
    out_stream << "[ ";
    std::vector<T> &vec = *( this->data_.get() );
    for(unsigned int idx = begin; idx < end; idx++) {
        if (idx != begin) out_stream << " , ";
        unsigned int vec_pos = n_comp_ * idx; // position of element value in data cache
        switch (this->n_comp_) {
            case NumCompValueType::N_SCALAR: {
                out_stream << field_value_to_yaml( vec[vec_pos], precision );
                break;
            }
            case NumCompValueType::N_VECTOR: {
                typename arma::Col<T>::template fixed<3> vec_val;
                for (unsigned int i=0; i<3; ++i, ++vec_pos)
                    vec_val(i) = vec[vec_pos];
                out_stream << field_value_to_yaml( vec_val, precision );
                break;
            }
            case NumCompValueType::N_TENSOR: {
                typename arma::Mat<T>::template fixed<3,3> mat_val;
                for (unsigned int i=0; i<3; ++i)
                    for (unsigned int j=0; j<3; ++j, ++vec_pos)
                        mat_val(i,j) = vec[vec_pos];
                out_stream << field_value_to_yaml( mat_val, precision );
                break;
            }
        }
    }
    out_stream << " ]";
}

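/* Output sketch (illustrative): for a vector-valued cache (n_comp_ == 3) holding
 * {1,2,3, 4,5,6}, a call print_yaml_subarray(out, 4, 0, 2) produces a YAML flow
 * sequence along the lines of
 *
 *     [ [1, 2, 3] , [4, 5, 6] ]
 *
 * with the exact per-element formatting delegated to field_value_to_yaml().
 */
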
template <typename T>
void ElementDataCache<T>::get_min_max_range(double &min, double &max)
{
    min = std::numeric_limits<double>::max();
    max = std::numeric_limits<double>::lowest();   // lowest finite value (min() would be the smallest positive double)
    std::vector<T> &vec = *( this->data_.get() );
    for(unsigned int idx = 0; idx < this->n_values_; idx++) {
        for(unsigned int i = n_comp_*n_dofs_per_element_*idx; i < n_comp_*n_dofs_per_element_*(idx+1); ++i ) {
            if (vec[i] < min) min = vec[i];
            if (vec[i] > max) max = vec[i];
        }
    }
}

/**
 * Store the given data value under the given index.
 */
template <typename T>
void ElementDataCache<T>::store_value(unsigned int idx, const T * value) {
    ASSERT_LT(idx, this->n_values_)(this->field_name_);
    std::vector<T> &vec = *( this->data_.get() );
    unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
    for(unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
        vec[vec_idx] = value[i];
    }
}

/**
 * Add value at the given index.
 */
template <typename T>
void ElementDataCache<T>::add(unsigned int idx, const T * value) {
    ASSERT_LT(idx, this->n_values_);
    std::vector<T> &vec = *( this->data_.get() );
    unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
    for(unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
        vec[vec_idx] += value[i];
    }
}

/**
 * Reset values at the given index.
 */
template <typename T>
void ElementDataCache<T>::zero(unsigned int idx) {
    ASSERT_LT(idx, this->n_values_);
    std::vector<T> &vec = *( this->data_.get() );
    unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
    for(unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
        vec[vec_idx] = 0;
    }
}

/**
 * Normalize values at the given index.
 */
template <typename T>
void ElementDataCache<T>::normalize(unsigned int idx, unsigned int divisor) {
    ASSERT_LT(idx, this->n_values_);
    std::vector<T> &vec = *( this->data_.get() );
    unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
    for(unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
        vec[vec_idx] /= divisor;
    }
}

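/* Accumulation sketch (illustrative): zero(), add() and normalize() are meant to
 * be combined when averaging several contributions into one cache slot, as done
 * in compute_node_data() below. The flat contribution array is made up here:
 *
 *     cache.zero(idx);                                   // clear slot idx
 *     for (unsigned int k = 0; k < n_contrib; ++k)
 *         cache.add(idx, &contrib_values[k * n_comp]);   // each call sums n_comp values into slot idx
 *     cache.normalize(idx, n_contrib);                   // divide by the number of contributions
 */
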
template <typename T>
CheckResult ElementDataCache<T>::check_values(double default_val, double lower_bound, double upper_bound) {
    if (check_scale_data_ != CheckScaleData::none) return CheckResult::ok; // method is executed only once
    check_scale_data_ = CheckScaleData::check;

    bool is_nan = false, out_of_limit = false;
    std::vector<T> &vec = *( this->data_.get() );
    for(unsigned int i=0; i<vec.size(); ++i) {
        if ( std::isnan(vec[i]) ) {
            if ( std::isnan(default_val) ) is_nan = true;
            else vec[i] = default_val;
        }
        if ( (vec[i] < lower_bound) || (vec[i] > upper_bound) ) out_of_limit = true;
    }

    if (is_nan) return CheckResult::not_a_number;
    else if (out_of_limit) return CheckResult::out_of_limits;
    else return CheckResult::ok;
}

template <typename T>
void ElementDataCache<T>::scale_data(double coef) {
    if (check_scale_data_ == CheckScaleData::scale) return; // method is executed only once
    ASSERT(check_scale_data_ == CheckScaleData::check).warning("Data should be checked before scaling. Rather call 'check_values'!\n");

    std::vector<T> &vec = *( this->data_.get() );
    for(unsigned int i=0; i<vec.size(); ++i) {
        vec[i] *= coef;
    }

    check_scale_data_ = CheckScaleData::scale;
}

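/* Intended call order (illustrative): data loaded from input are first checked,
 * then optionally scaled; both methods guard themselves so that each runs at
 * most once per cache. The bounds and the coefficient are made-up examples:
 *
 *     CheckResult res = cache.check_values(0.0, 0.0, 1.0e6);   // replace NaNs by 0.0, check limits
 *     if (res == CheckResult::ok)
 *         cache.scale_data(1000.0);                            // e.g. a unit-conversion coefficient
 */
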
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::gather(Distribution *distr, LongIdx *local_to_global) {
    std::shared_ptr< ElementDataCache<T> > gather_cache;
    int rank = distr->myp();
    int n_proc = distr->np();

    unsigned int n_global_data;     // global number of data
    int rec_starts[n_proc];         // displacement of the first value received from each process
    int rec_counts[n_proc];         // number of values received from each process
    int *rec_indices_ids = nullptr; // collected values of the local-to-global index map
    T *rec_data = nullptr;          // collected data values

    n_global_data = distr->size();
    // collect data vectors and the local-to-global index map from each process
    if (rank==0) {
        for (int i=0; i<n_proc; ++i) {
            rec_starts[i] = distr->begin(i);
            rec_counts[i] = distr->lsize(i);
        }
        rec_indices_ids = new int [ n_global_data ];
    }
    MPI_Gatherv( local_to_global, distr->lsize(), MPI_INT, rec_indices_ids, rec_counts, rec_starts, MPI_INT, 0, MPI_COMM_WORLD);
    if (rank==0) {
        for (int i=0; i<n_proc; ++i) {
            rec_starts[i] = this->n_comp()*this->n_dofs_per_element()*rec_starts[i];
            rec_counts[i] = this->n_comp()*this->n_dofs_per_element()*rec_counts[i];
        }
        rec_data = new T [ this->n_comp() * this->n_dofs_per_element() * n_global_data ];
    }
    auto &local_cache_vec = *( this->get_data().get() );
    MPI_Gatherv( &local_cache_vec[0], this->n_comp()*this->n_dofs_per_element()*distr->lsize(), this->mpi_data_type(), rec_data, rec_counts, rec_starts, this->mpi_data_type(), 0, MPI_COMM_WORLD);

    // create and fill the serial cache
    if (rank==0) {
        gather_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, (unsigned int)this->n_comp(), n_global_data, this->fe_type_, this->n_dofs_per_element_);
        auto &gather_vec = *( gather_cache->get_data().get() );
        unsigned int i_global_coord; // counter over the serial cache
        for (unsigned int i=0; i<n_global_data; ++i) {
            i_global_coord = this->n_comp() * this->n_dofs_per_element() * rec_indices_ids[i];
            for (unsigned int j=0; j<this->n_comp() * this->n_dofs_per_element(); ++j) { // loop over components
                ASSERT_LT(i_global_coord+j, gather_vec.size());
                gather_vec[ i_global_coord+j ] = rec_data[ this->n_comp()*this->n_dofs_per_element()*i+j ];
            }
        }

        delete[] rec_indices_ids;
        delete[] rec_data;
    }

    return gather_cache;
}

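/* Gather sketch (illustrative): every rank calls gather() with its local cache,
 * the distribution and the local-to-global index map it already owns; only
 * rank 0 receives a cache holding the complete, globally ordered data.
 *
 *     auto serial_cache = cache.gather(distr, local_to_global);
 *     if (distr->myp() == 0)
 *         serial_cache->print_ascii_all(out_stream);   // full data exist on the root rank only
 */
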
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::element_node_cache_fixed_size(std::vector<unsigned int> &offset_vec) {
    unsigned int n_elem = offset_vec.size()-1;
    std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, 4*this->n_comp(), n_elem, this->fe_type_, this->n_dofs_per_element_);
    auto &data_out_vec = *( elem_node_cache->get_data().get() );
    std::fill( data_out_vec.begin(), data_out_vec.end(), (T)0 );
    auto &data_in_vec = *( this->get_data().get() );

    unsigned int i_node, i_old, i_new;
    for (unsigned int i_el=0, i_conn=0; i_el<offset_vec.size()-1; i_el++) {
        for(i_node=4*i_el; i_conn<offset_vec[i_el+1]; i_conn++, i_node++) {
            i_old = i_conn*this->n_comp_*this->n_dofs_per_element_;
            i_new = i_node*this->n_comp_*this->n_dofs_per_element_;
            for(unsigned int i = 0; i < this->n_comp_*this->n_dofs_per_element_; i++) {
                ASSERT_LT(i_new+i, data_out_vec.size());
                ASSERT_LT(i_old+i, data_in_vec.size());
                data_out_vec[i_new+i] = data_in_vec[i_old+i];
            }
        }
    }

    return elem_node_cache;
}

template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::element_node_cache_optimize_size(std::vector<unsigned int> &offset_vec) {
    std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_,
            this->n_comp()/4, offset_vec[offset_vec.size()-1], this->fe_type_, this->n_dofs_per_element_);
    auto &data_out_vec = *( elem_node_cache->get_data().get() );
    auto &data_in_vec = *( this->get_data().get() );

    unsigned int i_node, i_old, i_new;
    for (unsigned int i_el=0, i_conn=0; i_el<offset_vec.size()-1; i_el++) {
        for(i_node=4*i_el; i_conn<offset_vec[i_el+1]; i_conn++, i_node++) {
            i_old = i_node*elem_node_cache->n_comp_*this->n_dofs_per_element_;
            i_new = i_conn*elem_node_cache->n_comp_*this->n_dofs_per_element_;
            for(unsigned int i = 0; i < elem_node_cache->n_comp_*this->n_dofs_per_element_; i++) {
                ASSERT_LT(i_new+i, data_out_vec.size());
                ASSERT_LT(i_old+i, data_in_vec.size());
                data_out_vec[i_new+i] = data_in_vec[i_old+i];
            }
        }
    }
    return elem_node_cache;
}

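/* Layout sketch (illustrative): element_node_cache_fixed_size() expands per-corner
 * data into a fixed block of 4 node slots per element (unused slots stay zero),
 * while element_node_cache_optimize_size() packs such a fixed-size cache back to
 * the compact per-corner layout given by offset_vec. For two elements with 3 and
 * 4 corners and scalar data c0..c6:
 *
 *     offset_vec       = { 0, 3, 7 }
 *     fixed-size cache : | c0 c1 c2 0 | c3 c4 c5 c6 |   // 4 slots per element
 *     optimized cache  : | c0 c1 c2 | c3 c4 c5 c6 |     // sizes taken from offset_vec
 */
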
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::compute_node_data(std::vector<unsigned int> &conn_vec, unsigned int data_size) {
    ASSERT_EQ(conn_vec.size(), this->n_values());
    unsigned int idx;

    // set output data to zero
    std::shared_ptr< ElementDataCache<T> > node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, this->n_comp(), data_size, this->fe_type_, this->n_dofs_per_element_);
    std::vector<unsigned int> count(data_size, 0);
    for (idx=0; idx < node_cache->n_values(); idx++)
        node_cache->zero(idx);

    auto &data_in_vec = *( this->get_data().get() );
    for (idx=0; idx < conn_vec.size(); idx++) {
        ASSERT_LT(conn_vec[idx], node_cache->n_values());
        ASSERT_LT(this->n_comp()*this->n_dofs_per_element()*idx, data_in_vec.size());
        node_cache->add( conn_vec[idx], &(data_in_vec[this->n_comp() * this->n_dofs_per_element() * idx]) );
        count[ conn_vec[idx] ]++;
    }

    // compute mean values at nodes
    for(idx=0; idx < node_cache->n_values(); idx++)
        node_cache->normalize(idx, count[idx]);

    return node_cache;
}

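/* Node-averaging sketch (illustrative): compute_node_data() sums the per-corner
 * values into the nodes listed in conn_vec and divides by the number of
 * contributions, i.e. a plain arithmetic average. For scalar data:
 *
 *     conn_vec   = { 0, 1, 1, 2 }       // corner entry -> node index
 *     cache data = { 10, 20, 30, 40 }   // one value per corner entry
 *     node cache = { 10, 25, 40 }       // node 1 averages its two contributions
 */
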
template<>
MPI_Datatype ElementDataCache<double>::mpi_data_type() {
    return MPI_DOUBLE;
}

template<>
MPI_Datatype ElementDataCache<int>::mpi_data_type() {
    return MPI_INT;
}

template<>
MPI_Datatype ElementDataCache<unsigned int>::mpi_data_type() {
    return MPI_UNSIGNED;
}


/// Access i-th element in the data vector.
template <class T>
T& ElementDataCache<T>::operator[](unsigned int i)
{
    std::vector<T> &vec = *( this->data_.get() );
    ASSERT_LT(i, vec.size());
    return vec[i];
}

// explicit instantiation of template class
template class ElementDataCache<unsigned int>;
template class ElementDataCache<int>;
template class ElementDataCache<double>;