#include "boost/lexical_cast.hpp"

// Constructor fragment: register the VTK type of the template parameter.
    this->set_vtk_type<T>();
    ASSERT_GT(n_comp, 0)(field_name).error(
            "Output field returning variable size vectors. Try to convert to MultiField.");
template <typename T>
typename ElementDataCache<T>::ComponentDataPtr ElementDataCache<T>::get_component_data(unsigned int component_idx) {
    ASSERT_LT(component_idx, data_.size()).error("Index of component is out of range.\n");
    return data_[component_idx];
}
template <typename T>
typename ElementDataCache<T>::CacheData ElementDataCache<T>::create_data_cache(unsigned int size_of_cache, unsigned int row_vec_size) {
    typename ElementDataCache<T>::CacheData data_cache(size_of_cache);
    for (unsigned int i=0; i<size_of_cache; ++i) {
        auto row_vec = std::make_shared<std::vector<T>>();
        // pre-fill with signaling NaN so that unwritten slots are detectable
        row_vec->resize(row_vec_size, numeric_limits<T>::signaling_NaN());
        data_cache[i] = row_vec;
    }
    return data_cache;
}
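
The signaling-NaN pre-fill gives a cheap way to detect slots that were never written. A minimal standalone sketch (illustrative only, not part of this file):

    #include <cmath>
    #include <iostream>
    #include <limits>
    #include <vector>

    int main() {
        // Pre-fill a row with signaling NaN, mirroring create_data_cache:
        // any slot that is never overwritten by real input stays NaN.
        std::vector<double> row(4, std::numeric_limits<double>::signaling_NaN());
        row[0] = 1.5; row[2] = -2.0;
        for (unsigned int i = 0; i < row.size(); ++i)
            std::cout << "slot " << i << (std::isnan(row[i]) ? ": unset" : ": set") << "\n";
        return 0;
    }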
template <typename T>
void ElementDataCache<T>::read_ascii_data(Tokenizer &tok, unsigned int n_components, unsigned int i_row)
{
    unsigned int idx;
    for (unsigned int i_vec=0; i_vec<data_.size(); ++i_vec) {
        idx = i_row * n_components;
        std::vector<T> &vec = *( data_[i_vec].get() );
        for (unsigned int i_col=0; i_col < n_components; ++i_col, ++idx) {
            vec[idx] = boost::lexical_cast<T>(*tok);
            ++tok;
        }
    }
}
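
boost::lexical_cast does the per-token string-to-T conversion; it throws boost::bad_lexical_cast on malformed input, which makes it safer than plain atof. A small standalone usage sketch:

    #include <boost/lexical_cast.hpp>
    #include <iostream>
    #include <string>

    int main() {
        std::string token = "3.14";
        try {
            // Parse one whitespace-delimited token into the target type.
            double v = boost::lexical_cast<double>(token);
            std::cout << v << "\n";
        } catch (const boost::bad_lexical_cast &e) {
            std::cerr << "bad token: " << e.what() << "\n";
        }
        return 0;
    }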
template <typename T>
void ElementDataCache<T>::read_binary_data(std::istream &data_stream, unsigned int n_components, unsigned int i_row)
{
    unsigned int idx;
    for (unsigned int i_vec=0; i_vec<data_.size(); ++i_vec) {
        idx = i_row * n_components;
        std::vector<T> &vec = *( data_[i_vec].get() );
        for (unsigned int i_col=0; i_col < n_components; ++i_col, ++idx) {
            data_stream.read(reinterpret_cast<char *>(&vec[idx]), sizeof(T));
        }
    }
}
template <typename T>
void ElementDataCache<T>::print_ascii(ostream &out_stream, unsigned int idx)
{
    std::vector<T> &vec = *( this->data_[0].get() );
    // print the n_comp_ components of the value at position idx
    for (unsigned int i = n_comp_*idx; i < n_comp_*(idx+1); ++i)
        out_stream << vec[i] << " ";
}
template <typename T>
void ElementDataCache<T>::print_ascii_all(ostream &out_stream, unsigned int start)
{
    std::vector<T> &vec = *( this->data_[0].get() );
    for (unsigned int idx = start; idx < this->n_values_; idx++) {
        for (unsigned int i = n_comp_*idx; i < n_comp_*(idx+1); ++i)
            out_stream << vec[i] << " ";
    }
}
template <typename T>
void ElementDataCache<T>::print_binary_all(ostream &out_stream, bool print_data_size, unsigned int start)
{
    if (print_data_size) {
        // write the size of the data block in bytes
        unsigned long long int data_byte_size = this->n_values_ * n_comp_ * sizeof(T);
        out_stream.write(reinterpret_cast<const char*>(&data_byte_size), sizeof(unsigned long long int));
    }
    // write the raw data
    std::vector<T> &vec = *( this->data_[0].get() );
    for (unsigned int idx = start; idx < this->n_values_; idx++) {
        for (unsigned int i = n_comp_*idx; i < n_comp_*(idx+1); ++i)
            out_stream.write(reinterpret_cast<const char*>(&(vec[i])), sizeof(T));
    }
}
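
The appended binary layout is simply an unsigned 64-bit byte count followed by the raw values. A self-contained round-trip sketch of that layout (illustrative, using string streams instead of a file):

    #include <iostream>
    #include <sstream>
    #include <vector>

    int main() {
        // Same layout as print_binary_all: a byte count, then raw values.
        std::vector<double> vals = {1.0, 2.5, -3.0};
        std::ostringstream out(std::ios::binary);
        unsigned long long int byte_size = vals.size() * sizeof(double);
        out.write(reinterpret_cast<const char*>(&byte_size), sizeof(byte_size));
        for (double v : vals)
            out.write(reinterpret_cast<const char*>(&v), sizeof(double));

        // Read it back, as a reader of the appended format would.
        std::istringstream in(out.str(), std::ios::binary);
        unsigned long long int n_bytes;
        in.read(reinterpret_cast<char*>(&n_bytes), sizeof(n_bytes));
        std::vector<double> back(n_bytes / sizeof(double));
        in.read(reinterpret_cast<char*>(back.data()), n_bytes);
        std::cout << back.size() << " values, first = " << back[0] << "\n";
        return 0;
    }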
template <typename T>
void ElementDataCache<T>::print_yaml_subarray(ostream &out_stream, unsigned int precision, unsigned int begin, unsigned int end)
{
    std::vector<T> &vec = *( this->data_[0].get() );
    for (unsigned int idx = begin; idx < end; idx++) {
        if (idx != begin) out_stream << " , ";
        unsigned int vec_pos = n_comp_ * idx;  // position of the first component of the value
        switch (this->n_comp_) {
            case NumCompValueType::N_SCALAR: {
                // ... print vec[vec_pos] with the given precision ...
                break;
            }
            case NumCompValueType::N_VECTOR: {
                typename arma::Col<T>::template fixed<3> vec_val;
                for (unsigned int i=0; i<3; ++i, ++vec_pos)
                    vec_val(i) = vec[vec_pos];
                // ... print vec_val with the given precision ...
                break;
            }
            case NumCompValueType::N_TENSOR: {
                typename arma::Mat<T>::template fixed<3,3> mat_val;
                for (unsigned int i=0; i<3; ++i)
                    for (unsigned int j=0; j<3; ++j, ++vec_pos)
                        mat_val(i,j) = vec[vec_pos];
                // ... print mat_val with the given precision ...
                break;
            }
        }
    }
}
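
The N_VECTOR and N_TENSOR branches unpack consecutive cache values into fixed-size Armadillo objects. A standalone sketch of the tensor case (assumes Armadillo is available):

    #include <armadillo>
    #include <vector>

    int main() {
        // Copy nine consecutive cache values into a fixed 3x3 matrix,
        // row by row, as the N_TENSOR branch does.
        std::vector<double> vec = {1,2,3, 4,5,6, 7,8,9};
        unsigned int vec_pos = 0;
        arma::Mat<double>::fixed<3,3> mat_val;
        for (unsigned int i = 0; i < 3; ++i)
            for (unsigned int j = 0; j < 3; ++j, ++vec_pos)
                mat_val(i, j) = vec[vec_pos];
        mat_val.print("tensor:");
        return 0;
    }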
template <typename T>
void ElementDataCache<T>::get_min_max_range(double &min, double &max)
{
    min = std::numeric_limits<double>::max();
    max = std::numeric_limits<double>::lowest();  // lowest(), not min(), so all-negative data is handled
    std::vector<T> &vec = *( this->data_[0].get() );
    for (unsigned int idx = 0; idx < this->n_values_; idx++) {
        for (unsigned int i = n_comp_*idx; i < n_comp_*(idx+1); ++i) {
            if (vec[i] < min) min = vec[i];
            if (vec[i] > max) max = vec[i];
        }
    }
}
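
Note the seed for max: for floating-point types std::numeric_limits<double>::min() is the smallest positive value, so seeding a maximum with it (as the original line did) fails on all-negative data; lowest() is the correct seed. A short demonstration:

    #include <iostream>
    #include <limits>
    #include <vector>

    int main() {
        std::vector<double> data = {-5.0, -1.0, -9.5};
        double max_wrong = std::numeric_limits<double>::min();     // ~2.2e-308, positive
        double max_right = std::numeric_limits<double>::lowest();  // ~-1.8e308
        for (double v : data) {
            if (v > max_wrong) max_wrong = v;   // never updates: all data < min()
            if (v > max_right) max_right = v;   // correctly finds -1.0
        }
        std::cout << "min()-seeded max: " << max_wrong
                  << ", lowest()-seeded max: " << max_right << "\n";
        return 0;
    }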
template <typename T>
void ElementDataCache<T>::store_value(unsigned int idx, const T *value) {
    std::vector<T> &vec = *( this->data_[0].get() );
    unsigned int vec_idx = idx*this->n_comp_;
    for (unsigned int i = 0; i < this->n_comp_; i++, vec_idx++) {
        vec[vec_idx] = value[i];
    }
}
template <typename T>
void ElementDataCache<T>::add(unsigned int idx, const T *value) {
    std::vector<T> &vec = *( this->data_[0].get() );
    unsigned int vec_idx = idx*this->n_comp_;
    for (unsigned int i = 0; i < this->n_comp_; i++, vec_idx++) {
        vec[vec_idx] += value[i];
    }
}
template <typename T>
void ElementDataCache<T>::zero(unsigned int idx) {
    std::vector<T> &vec = *( this->data_[0].get() );
    unsigned int vec_idx = idx*this->n_comp_;
    for (unsigned int i = 0; i < this->n_comp_; i++, vec_idx++) {
        vec[vec_idx] = 0;
    }
}
template <typename T>
void ElementDataCache<T>::normalize(unsigned int idx, unsigned int divisor) {
    std::vector<T> &vec = *( this->data_[0].get() );
    unsigned int vec_idx = idx*this->n_comp_;
    for (unsigned int i = 0; i < this->n_comp_; i++, vec_idx++) {
        vec[vec_idx] /= divisor;
    }
}
template <typename T>
typename ElementDataCache<T>::CheckResult ElementDataCache<T>::check_values(double default_val, double lower_bound, double upper_bound) {
    bool is_nan = false, out_of_limit = false;
    for (unsigned int j=0; j<data_.size(); ++j) {
        std::vector<T> &vec = *( this->data_[j].get() );
        for (unsigned int i=0; i<vec.size(); ++i) {
            if ( std::isnan(vec[i]) ) {
                if ( std::isnan(default_val) ) is_nan = true;  // no usable default, report NaN
                else vec[i] = default_val;                     // replace NaN by the default value
            }
            if ( (vec[i] < lower_bound) || (vec[i] > upper_bound) ) out_of_limit = true;
        }
    }
    // ... mark the cache as checked and return the CheckResult corresponding
    //     to the is_nan / out_of_limit flags ...
}
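
Since NaN compares false with everything including itself, std::isnan is the only reliable test, and a NaN default_val can serve as a sentinel for "no replacement available". A standalone sketch of the same check-and-replace loop:

    #include <cmath>
    #include <iostream>
    #include <limits>
    #include <vector>

    int main() {
        std::vector<double> vec = {1.0, std::numeric_limits<double>::quiet_NaN(), 7.0};
        double default_val = 0.0, lower = 0.0, upper = 5.0;
        bool is_nan = false, out_of_limit = false;
        for (double &v : vec) {
            if (std::isnan(v)) {
                if (std::isnan(default_val)) is_nan = true;  // no default available
                else v = default_val;                        // replace the NaN
            }
            if (v < lower || v > upper) out_of_limit = true; // 7.0 > 5.0 triggers this
        }
        std::cout << "nan: " << is_nan << ", out of limits: " << out_of_limit << "\n";
        return 0;
    }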
template <typename T>
void ElementDataCache<T>::scale_data(double coef) {
    for (unsigned int j=0; j<data_.size(); ++j) {
        std::vector<T> &vec = *( this->data_[j].get() );
        for (unsigned int i=0; i<vec.size(); ++i)
            vec[i] *= coef;  // scale every stored value by the coefficient
    }
    // ... mark the cache as scaled ...
}
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::gather(Distribution *distr, LongIdx *local_to_global) {
    std::shared_ptr< ElementDataCache<T> > gather_cache;
    int rank = distr->myp();
    int n_proc = distr->np();

    unsigned int n_global_data;      // global number of values
    int rec_starts[n_proc];          // displacement of the first value received from each process
    int rec_counts[n_proc];          // number of values received from each process
    int *rec_indices_ids = nullptr;  // gathered local-to-global index maps
    T *rec_data = nullptr;           // gathered data values

    // gather the local-to-global index maps on the root process
    if (rank == 0) {
        for (int i=0; i<n_proc; ++i) {
            rec_starts[i] = distr->begin(i);
            rec_counts[i] = distr->lsize(i);
        }
        n_global_data = distr->size();
        rec_indices_ids = new int [ n_global_data ];
    }
    MPI_Gatherv( local_to_global, distr->lsize(), MPI_INT,
                 rec_indices_ids, rec_counts, rec_starts, MPI_INT, 0, MPI_COMM_WORLD );

    // rescale displacements and counts by the number of components, then gather the data
    if (rank == 0) {
        for (int i=0; i<n_proc; ++i) {
            rec_starts[i] = this->n_comp()*rec_starts[i];
            rec_counts[i] = this->n_comp()*rec_counts[i];
        }
        rec_data = new T [ this->n_comp() * n_global_data ];
    }
    auto &local_cache_vec = *( this->get_component_data(0).get() );
    MPI_Gatherv( &local_cache_vec[0], this->n_comp()*distr->lsize(), this->mpi_data_type(),
                 rec_data, rec_counts, rec_starts, this->mpi_data_type(), 0, MPI_COMM_WORLD );

    // create and fill the serial cache on the root process
    if (rank == 0) {
        gather_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, (unsigned int)this->n_comp(), n_global_data);
        auto &gather_vec = *( gather_cache->get_component_data(0).get() );
        unsigned int i_global_coord;
        for (unsigned int i=0; i<n_global_data; ++i) {
            i_global_coord = this->n_comp() * rec_indices_ids[i];
            for (unsigned int j=0; j<this->n_comp(); ++j) {
                ASSERT_LT(i_global_coord+j, gather_vec.size());
                gather_vec[ i_global_coord+j ] = rec_data[ this->n_comp()*i+j ];
            }
        }
        delete[] rec_indices_ids;
        delete[] rec_data;
    }

    return gather_cache;
}
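
The two MPI_Gatherv calls collect variable-length pieces from all ranks at per-rank displacements, first the index maps and then the values scaled by the number of components. A minimal standalone MPI_Gatherv example of the same pattern:

    #include <mpi.h>
    #include <cstdio>
    #include <vector>

    int main(int argc, char **argv) {
        MPI_Init(&argc, &argv);
        int rank, n_proc;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &n_proc);

        // Each rank contributes (rank+1) ints; the root receives the
        // variable-length pieces at per-rank displacements.
        std::vector<int> local(rank + 1, rank);
        std::vector<int> counts(n_proc), starts(n_proc);
        for (int i = 0; i < n_proc; ++i) {
            counts[i] = i + 1;
            starts[i] = (i == 0) ? 0 : starts[i-1] + counts[i-1];
        }
        std::vector<int> gathered(starts[n_proc-1] + counts[n_proc-1]);
        MPI_Gatherv(local.data(), (int)local.size(), MPI_INT,
                    gathered.data(), counts.data(), starts.data(), MPI_INT,
                    0, MPI_COMM_WORLD);
        if (rank == 0) printf("gathered %zu values\n", gathered.size());
        MPI_Finalize();
        return 0;
    }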
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::element_node_cache_fixed_size(std::vector< unsigned int > &offset_vec) {
    unsigned int n_elem = offset_vec.size()-1;
    std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, 4*this->n_comp(), n_elem);
    auto &data_out_vec = *( elem_node_cache->get_component_data(0).get() );
    std::fill( data_out_vec.begin(), data_out_vec.end(), (T)0 );
    auto &data_in_vec = *( this->get_component_data(0).get() );

    unsigned int i_node, i_old, i_new;
    for (unsigned int i_el=0, i_conn=0; i_el<offset_vec.size()-1; i_el++) {
        for (i_node=4*i_el; i_conn<offset_vec[i_el+1]; i_conn++, i_node++) {
            i_old = i_conn*this->n_comp_;
            i_new = i_node*this->n_comp_;
            for (unsigned int i = 0; i < this->n_comp_; i++) {
                data_out_vec[i_new+i] = data_in_vec[i_old+i];
            }
        }
    }

    return elem_node_cache;
}
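
The loop converts offset-based connectivity into a fixed four-slots-per-element layout, zero-padding elements with fewer corners. A small standalone sketch of that index transformation:

    #include <iostream>
    #include <vector>

    int main() {
        // Fixed-size layout: every element gets 4 node slots; elements with
        // fewer corners (offset differences < 4) leave their tail slots zero.
        std::vector<unsigned int> offsets = {0, 3, 7};  // triangle (3 corners), 4-corner element
        std::vector<double> in = {1,2,3, 4,5,6,7};      // one value per corner
        std::vector<double> out(4 * (offsets.size()-1), 0.0);
        for (unsigned int el = 0, conn = 0; el < offsets.size()-1; ++el)
            for (unsigned int node = 4*el; conn < offsets[el+1]; ++conn, ++node)
                out[node] = in[conn];
        for (double v : out) std::cout << v << " ";     // prints: 1 2 3 0 4 5 6 7
        std::cout << "\n";
        return 0;
    }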
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::element_node_cache_optimize_size(std::vector< unsigned int > &offset_vec) {
    std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(
            this->field_input_name_, this->n_comp()/4, offset_vec[offset_vec.size()-1]);
    auto &data_out_vec = *( elem_node_cache->get_component_data(0).get() );
    auto &data_in_vec = *( this->get_component_data(0).get() );

    unsigned int i_node, i_old, i_new;
    for (unsigned int i_el=0, i_conn=0; i_el<offset_vec.size()-1; i_el++) {
        for (i_node=4*i_el; i_conn<offset_vec[i_el+1]; i_conn++, i_node++) {
            i_old = i_node*elem_node_cache->n_comp_;
            i_new = i_conn*elem_node_cache->n_comp_;
            for (unsigned int i = 0; i < elem_node_cache->n_comp_; i++) {
                data_out_vec[i_new+i] = data_in_vec[i_old+i];
            }
        }
    }

    return elem_node_cache;
}
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::compute_node_data(std::vector< unsigned int > &conn_vec, unsigned int data_size) {
    unsigned int idx;

    // zero the output cache and prepare a counter of contributions per node
    std::shared_ptr< ElementDataCache<T> > node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, this->n_comp(), data_size);
    std::vector<unsigned int> count(data_size, 0);
    for (idx=0; idx < node_cache->n_values(); idx++)
        node_cache->zero(idx);

    // accumulate corner values at the nodes given by the connectivity vector
    auto &data_in_vec = *( this->get_component_data(0).get() );
    for (idx=0; idx < conn_vec.size(); idx++) {
        ASSERT_LT(conn_vec[idx], node_cache->n_values());
        node_cache->add( conn_vec[idx], &(data_in_vec[this->n_comp() * idx]) );
        count[ conn_vec[idx] ]++;
    }

    // average the accumulated values at each node
    for (idx=0; idx < node_cache->n_values(); idx++)
        node_cache->normalize(idx, count[idx]);

    return node_cache;
}
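
The zero/add/normalize sequence is a plain accumulate-and-average: every corner value is added to its node and the sum is divided by the number of contributions. A standalone sketch:

    #include <iostream>
    #include <vector>

    int main() {
        // Corner-to-node averaging: accumulate every corner value at its
        // node, count contributions, then divide to get the nodal mean.
        std::vector<unsigned int> conn = {0, 1, 1, 2};   // corner -> node map
        std::vector<double> corner_vals = {1.0, 2.0, 4.0, 8.0};
        std::vector<double> node_vals(3, 0.0);
        std::vector<unsigned int> count(3, 0);
        for (unsigned int i = 0; i < conn.size(); ++i) {
            node_vals[conn[i]] += corner_vals[i];
            count[conn[i]]++;
        }
        for (unsigned int n = 0; n < node_vals.size(); ++n)
            if (count[n] > 0) node_vals[n] /= count[n];
        // prints: 1 3 8 (node 1 averages corners 2.0 and 4.0)
        std::cout << node_vals[0] << " " << node_vals[1] << " " << node_vals[2] << "\n";
        return 0;
    }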
Referenced declarations:

unsigned int size() const
    Get the global size.
double time_
    Time step stored in the cache.
void read_binary_data(std::istream &data_stream, unsigned int n_components, unsigned int i_row) override
    Implements ElementDataCacheBase::read_binary_data.
std::shared_ptr< std::vector< T > > ComponentDataPtr
    Shared pointer to the data vector of a single component.
CheckScaleData
    Flag type recording whether the data in the cache has been checked and scaled (each can be executed only once); initially the data is neither checked nor scaled.
void print_ascii(ostream &out_stream, unsigned int idx) override
CheckScaleData check_scale_data_
    Flag recording whether the data in the cache has been checked and scaled.
unsigned int n_values() const
std::shared_ptr< ElementDataCacheBase > compute_node_data(std::vector< unsigned int > &conn_vec, unsigned int data_size) override
    Implements ElementDataCacheBase::compute_node_data.
void store_value(unsigned int idx, const T *value)
#define ASSERT_GT(a, b)
    Definition of comparative assert macro (Greater Than).
#define MPI_Gatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm)
T & operator[](unsigned int i)
    Access the i-th element in the data vector of the 0th component.
void print_ascii_all(ostream &out_stream, unsigned int start=0) override
    Print all data stored in the output cache in ASCII format.
MPI_Datatype mpi_data_type()
    Return the MPI data type corresponding to the template parameter of the cache. Needs template specialization.
void read_ascii_data(Tokenizer &tok, unsigned int n_components, unsigned int i_row) override
    Implements ElementDataCacheBase::read_ascii_data.
void print_binary_all(ostream &out_stream, bool print_data_size=true, unsigned int start=0) override
    Print all data stored in the output cache in appended binary format.
ElementDataCache()
    Default constructor.
void get_min_max_range(double &min, double &max) override
void add(unsigned int idx, const T *value)
void print_yaml_subarray(ostream &out_stream, unsigned int precision, unsigned int begin, unsigned int end) override
std::shared_ptr< ElementDataCacheBase > gather(Distribution *distr, LongIdx *local_to_global) override
    Implements ElementDataCacheBase::gather.
virtual ~ElementDataCache() override
    Destructor of ElementDataCache.
static CacheData create_data_cache(unsigned int size_of_cache, unsigned int row_vec_size)
unsigned int begin(int proc) const
    Get the starting local index.
void zero(unsigned int idx)
void scale_data(double coef)
unsigned int np() const
    Get the number of processors.
CheckResult
    Return type of the method that checks the data stored in ElementDataCache (NaN values, limits). Possible results: all values are valid (not NaN) and within limits; some value(s) are out of limits; some value(s) are set to NaN.
CheckResult check_values(double default_val, double lower_bound, double upper_bound)
void normalize(unsigned int idx, unsigned int divisor)
unsigned int myp() const
    Get the rank of this processor.
Support classes for parallel programming.
int LongIdx
    Type that represents indices of large arrays (elements, nodes, dofs, etc.).
std::string field_input_name_
    Name of the field stored in the cache.
std::shared_ptr< ElementDataCacheBase > element_node_cache_fixed_size(std::vector< unsigned int > &offset_vec) override
    Implements ElementDataCacheBase::element_node_cache_fixed_size.
#define ASSERT_LT(a, b)
    Definition of comparative assert macro (Less Than).
std::shared_ptr< ElementDataCacheBase > element_node_cache_optimize_size(std::vector< unsigned int > &offset_vec) override
    Implements ElementDataCacheBase::element_node_cache_optimize_size.
ComponentDataPtr get_component_data(unsigned int component_idx)
    Return the vector of element data for the given component.
unsigned int n_comp() const
#define ASSERT_EQ(a, b)
    Definition of comparative assert macro (EQual).
#define ASSERT_LT_DBG(a, b)
    Definition of comparative assert macro (Less Than), active only in debug mode.
unsigned int lsize(int proc) const
    Get the local size.