#include "boost/lexical_cast.hpp"


// Constructor fragments: tag the cache with the VTK type of T and reject
// fields that would produce variable-size vectors.
    this->set_vtk_type<T>();

    ASSERT_GT(n_comp, 0)(field_name).error(
            "Output field returning variable size vectors. Try to convert to MultiField.");
template <typename T>
typename ElementDataCache<T>::ComponentDataPtr ElementDataCache<T>::get_component_data(unsigned int component_idx) {
    ASSERT_LT(component_idx, data_.size()).error("Index of component is out of range.\n");
    return data_[component_idx];
}
template <typename T>
typename ElementDataCache<T>::CacheData ElementDataCache<T>::create_data_cache(unsigned int size_of_cache, unsigned int row_vec_size) {
    // ... allocate the cache ...
    for (unsigned int i=0; i<size_of_cache; ++i) {
        ComponentDataPtr row_vec = std::make_shared<std::vector<T>>();
        row_vec->resize(row_vec_size, numeric_limits<T>::signaling_NaN());  // mark unset slots with NaN
        data_cache[i] = row_vec;
    }
    return data_cache;
}
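A note on the signaling_NaN fill: prefilled NaNs let later checks detect slots that were never overwritten by real data (see check_values below). A minimal standalone sketch of the same idea, using only the standard library:

#include <cmath>
#include <limits>
#include <vector>

int main() {
    std::vector<double> row(8, std::numeric_limits<double>::signaling_NaN());
    row[0] = 1.5;                          // only slot 0 receives real data
    int unset = 0;
    for (double x : row)
        if (std::isnan(x)) ++unset;        // NaN marks slots that were never written
    return unset;                          // 7
}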
template <typename T>
void ElementDataCache<T>::read_ascii_data(Tokenizer &tok, unsigned int n_components, unsigned int i_row) {
    unsigned int idx;
    for (unsigned int i_vec=0; i_vec<data_.size(); ++i_vec) {
        idx = i_row * n_components;
        std::vector<T> &vec = *( data_[i_vec].get() );
        for (unsigned int i_col=0; i_col < n_components; ++i_col, ++idx) {
            vec[idx] = boost::lexical_cast<T>(*tok);
            ++tok;   // advance the tokenizer (elided in the fragment)
        }
    }
}
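The ASCII reader converts each token with boost::lexical_cast. A self-contained sketch of the same parsing pattern, with a plain istream standing in for the Tokenizer (hypothetical stand-in, not the original API):

#include <boost/lexical_cast.hpp>
#include <istream>
#include <string>
#include <vector>

// Parse n whitespace-separated numbers the way read_ascii_data does.
std::vector<double> parse_row(std::istream &in, unsigned int n) {
    std::vector<double> out;
    std::string token;
    for (unsigned int i=0; i<n && (in >> token); ++i)
        out.push_back(boost::lexical_cast<double>(token));  // throws bad_lexical_cast on malformed input
    return out;
}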
template <typename T>
void ElementDataCache<T>::read_binary_data(std::istream &data_stream, unsigned int n_components, unsigned int i_row) {
    unsigned int idx;
    for (unsigned int i_vec=0; i_vec<data_.size(); ++i_vec) {
        idx = i_row * n_components;
        std::vector<T> &vec = *( data_[i_vec].get() );
        for (unsigned int i_col=0; i_col < n_components; ++i_col, ++idx) {
            data_stream.read(reinterpret_cast<char *>(&vec[idx]), sizeof(T));
        }
    }
}
template <typename T>
void ElementDataCache<T>::print_ascii(ostream &out_stream, unsigned int idx) {
    // ... for each component index i of value 'idx':
        out_stream << vec[i] << " ";
    // ...
}
template <typename T>
void ElementDataCache<T>::print_ascii_all(ostream &out_stream) {
    // ...
    for (unsigned int idx = 0; idx < this->n_values_; idx++) {
        // ... for each component index i of value 'idx':
        out_stream << vec[i] << " ";
    }
}
template <typename T>
void ElementDataCache<T>::print_binary_all(ostream &out_stream, bool print_data_size) {
    if (print_data_size) {
        // write the size of the data block in bytes (appended-data header)
        unsigned long long int data_byte_size = this->n_values_ * n_comp_ * sizeof(T);
        out_stream.write(reinterpret_cast<const char*>(&data_byte_size), sizeof(unsigned long long int));
    }
    // ...
    for (unsigned int idx = 0; idx < this->n_values_; idx++) {
        // ... for each component index i of value 'idx':
        out_stream.write(reinterpret_cast<const char*>(&(vec[i])), sizeof(T));
    }
}
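The layout written here is a 64-bit byte count followed by the raw values, as used by VTK appended data blocks. A minimal standalone writer in the same layout (a sketch; assumes native endianness and a hypothetical function name):

#include <ostream>
#include <vector>

// Write one block: an unsigned 64-bit byte count, then the raw values.
template <typename T>
void write_appended_block(std::ostream &os, const std::vector<T> &vals) {
    unsigned long long int n_bytes = vals.size() * sizeof(T);
    os.write(reinterpret_cast<const char*>(&n_bytes), sizeof(n_bytes));
    os.write(reinterpret_cast<const char*>(vals.data()), n_bytes);
}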
template <typename T>
void ElementDataCache<T>::print_yaml_subarray(ostream &out_stream, unsigned int precision, unsigned int begin, unsigned int end) {
    // ...
    for (unsigned int idx = begin; idx < end; idx++) {
        if (idx != begin) out_stream << " , ";
        unsigned int vec_pos = n_comp_ * idx;  // position of the first component of the printed value
        // switch on the field's value type (the switch expression is elided here)
        switch ( /* NumCompValueType */ ) {
            case NumCompValueType::N_SCALAR: {
                // ... print vec[vec_pos] with the given precision ...
                break;
            }
            case NumCompValueType::N_VECTOR: {
                typename arma::Col<T>::template fixed<3> vec_val;
                for (unsigned int i=0; i<3; ++i, ++vec_pos)
                    vec_val(i) = vec[vec_pos];
                // ... print vec_val ...
                break;
            }
            case NumCompValueType::N_TENSOR: {
                typename arma::Mat<T>::template fixed<3,3> mat_val;
                for (unsigned int i=0; i<3; ++i)
                    for (unsigned int j=0; j<3; ++j, ++vec_pos)
                        mat_val(i,j) = vec[vec_pos];
                // ... print mat_val ...
                break;
            }
        }
    }
}
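The tensor branch reassembles nine flat cache values into a fixed-size Armadillo matrix so Armadillo's formatted output can be reused. A self-contained version of that reassembly (the flat values are hypothetical):

#include <armadillo>

int main() {
    double flat[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};   // hypothetical cache content
    arma::Mat<double>::fixed<3,3> mat_val;
    unsigned int pos = 0;
    for (unsigned int i=0; i<3; ++i)
        for (unsigned int j=0; j<3; ++j, ++pos)
            mat_val(i,j) = flat[pos];
    mat_val.print();   // row-major reassembly: rows (1,2,3), (4,5,6), (7,8,9)
}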
template <typename T>
void ElementDataCache<T>::get_min_max_range(double &min, double &max) {
    min = std::numeric_limits<double>::max();
    max = std::numeric_limits<double>::lowest();  // note: min() would be wrong here, it is the smallest positive double
    // ...
    for (unsigned int idx = 0; idx < this->n_values_; idx++) {
        // ... for each component index i of value 'idx':
        if (vec[i] < min) min = vec[i];
        if (vec[i] > max) max = vec[i];
    }
}
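Why lowest() and not min() for the max-initializer: for floating-point types, numeric_limits::min() is the smallest positive normalized value, not the most negative one, so an all-negative data set would never update a max initialized with min(). A two-line demonstration:

#include <iostream>
#include <limits>

int main() {
    std::cout << std::numeric_limits<double>::min()    << "\n";  // ~2.2e-308, smallest positive normalized double
    std::cout << std::numeric_limits<double>::lowest() << "\n";  // ~-1.8e+308, the true lower bound
}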
template <typename T>
void ElementDataCache<T>::store_value(unsigned int idx, const T *value) {
    // ...
    unsigned int vec_idx = idx*this->n_comp_;
    for(unsigned int i = 0; i < this->n_comp_; i++, vec_idx++) {
        vec[vec_idx] = value[i];
    }
}
template <typename T>
void ElementDataCache<T>::add(unsigned int idx, const T *value) {
    // ...
    unsigned int vec_idx = idx*this->n_comp_;
    for(unsigned int i = 0; i < this->n_comp_; i++, vec_idx++) {
        vec[vec_idx] += value[i];
    }
}
template <typename T>
void ElementDataCache<T>::zero(unsigned int idx) {
    // ...
    unsigned int vec_idx = idx*this->n_comp_;
    for(unsigned int i = 0; i < this->n_comp_; i++, vec_idx++) {
        vec[vec_idx] = 0;   // body elided in the fragment; implied by the method name
    }
}
template <typename T>
void ElementDataCache<T>::normalize(unsigned int idx, unsigned int divisor) {
    // ...
    unsigned int vec_idx = idx*this->n_comp_;
    for(unsigned int i = 0; i < this->n_comp_; i++, vec_idx++) {
        vec[vec_idx] /= divisor;
    }
}
template <typename T>
CheckResult ElementDataCache<T>::check_values(double default_val, double lower_bound, double upper_bound) {
    // ... skip if the cache has already been checked (check_scale_data_) ...
    bool is_nan = false, out_of_limit = false;
    for (unsigned int j=0; j<data_.size(); ++j) {
        std::vector<T> &vec = *( data_[j].get() );
        for(unsigned int i=0; i<vec.size(); ++i) {
            if ( std::isnan(vec[i]) ) {   // NaN test reconstructed; implied by the else branch
                if ( std::isnan(default_val) ) is_nan = true;
                else vec[i] = default_val;
            }
            if ( (vec[i] < lower_bound) || (vec[i] > upper_bound) ) out_of_limit = true;
        }
    }
    // ... record the check in check_scale_data_ and return the combined CheckResult ...
}
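A standalone version of the same check over a plain vector (a sketch; the function name and the bool-by-reference interface are illustrative, not the class API):

#include <cmath>
#include <vector>

// Replace NaNs with a default (when the default itself is not NaN) and flag
// out-of-range entries; returns true if any value falls outside [lo, hi].
bool check_and_fix(std::vector<double> &v, double default_val,
                   double lo, double hi, bool &has_nan) {
    bool out_of_limit = false;
    for (double &x : v) {
        if (std::isnan(x)) {
            if (std::isnan(default_val)) has_nan = true;
            else x = default_val;
        }
        if (x < lo || x > hi) out_of_limit = true;
    }
    return out_of_limit;
}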
template <typename T>
void ElementDataCache<T>::scale_data(double coef) {
    // ... skip if the cache has already been scaled (check_scale_data_) ...
    for (unsigned int j=0; j<data_.size(); ++j) {
        std::vector<T> &vec = *( data_[j].get() );
        for(unsigned int i=0; i<vec.size(); ++i) {
            vec[i] *= coef;   // body elided in the fragment; implied by the method name
        }
    }
}
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::gather(Distribution *distr, LongIdx *local_to_global) {
    std::shared_ptr< ElementDataCache<T> > gather_cache;
    int rank = distr->myp();
    int n_proc = distr->np();

    unsigned int n_global_data;      // global number of data values
    int rec_starts[n_proc];          // displacement of the first value received from each process
    int rec_counts[n_proc];          // number of values received from each process
    int *rec_indices_ids = nullptr;  // gathered values of the local-to-global index map
    T *rec_data = nullptr;           // gathered data values

    // prepare receive buffers for the index map on the zero rank (guard reconstructed)
    if (rank == 0) {
        for (int i=0; i<n_proc; ++i) {
            rec_starts[i] = distr->begin(i);
            rec_counts[i] = distr->lsize(i);
        }
        n_global_data = distr->size();
        rec_indices_ids = new int [ n_global_data ];
    }
    // ... MPI_Gatherv of local_to_global into rec_indices_ids (elided) ...

    // prepare receive buffers for the data, scaled by the number of components
    if (rank == 0) {
        for (int i=0; i<n_proc; ++i) {
            rec_starts[i] = this->n_comp()*rec_starts[i];
            rec_counts[i] = this->n_comp()*rec_counts[i];
        }
        rec_data = new T [ this->n_comp() * n_global_data ];
    }
    // ... MPI_Gatherv of the local cache into rec_data (elided) ...

    // create the serial cache on the zero rank and scatter received values to their global positions
    if (rank == 0) {
        gather_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, (unsigned int)this->n_comp(), n_global_data);
        auto &gather_vec = *( gather_cache->get_component_data(0).get() );
        unsigned int i_global_coord;  // global position of the first component of the i-th received value
        for (unsigned int i=0; i<n_global_data; ++i) {
            i_global_coord = this->n_comp() * rec_indices_ids[i];
            for (unsigned int j=0; j<this->n_comp(); ++j) {
                ASSERT_LT(i_global_coord+j, gather_vec.size());
                gather_vec[ i_global_coord+j ] = rec_data[ this->n_comp()*i+j ];
            }
        }
        delete[] rec_indices_ids;
        delete[] rec_data;
    }
    return gather_cache;
}
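The two elided collectives presumably follow the standard MPI_Gatherv pattern: each rank sends its local block and the root receives all blocks at the displacements computed above. A minimal grounded sketch of that call shape (the wrapper function is hypothetical; MPI_Gatherv itself is the standard MPI API):

#include <mpi.h>
#include <vector>

// counts/displs are only significant at the root (rank 0 here).
void gather_doubles(const std::vector<double> &local, std::vector<double> &global,
                    const std::vector<int> &counts, const std::vector<int> &displs,
                    MPI_Comm comm) {
    MPI_Gatherv(local.data(), (int)local.size(), MPI_DOUBLE,
                global.data(), counts.data(), displs.data(), MPI_DOUBLE,
                0, comm);
}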
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::element_node_cache_fixed_size(std::vector<unsigned int> &offset_vec) {
    unsigned int n_elem = offset_vec.size();
    std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, 4*this->n_comp(), n_elem);
    auto &data_out_vec = *( elem_node_cache->get_component_data(0).get() );
    std::fill( data_out_vec.begin(), data_out_vec.end(), (T)0 );
    auto &data_in_vec = *( this->get_component_data(0).get() );

    unsigned int i_node, i_old, i_new;
    for (unsigned int i_el=0, i_conn=0; i_el<offset_vec.size(); i_el++) {
        for(i_node=4*i_el; i_conn<offset_vec[i_el]; i_conn++, i_node++) {
            i_old = i_conn*this->n_comp_;  // position in the variable-size input (index lines reconstructed)
            i_new = i_node*this->n_comp_;  // position in the padded, 4-values-per-element output
            for(unsigned int i = 0; i < this->n_comp_; i++) {
                data_out_vec[i_new+i] = data_in_vec[i_old+i];
            }
        }
    }
    return elem_node_cache;
}
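A worked example of the padding (hypothetical offsets, n_comp = 1):

// offset_vec = {3, 7}: element 0 is a triangle (connectivity entries 0..2),
// element 1 a tetrahedron (entries 3..6).
//   element 0: i_conn = 0,1,2   -> i_node = 0,1,2; output slot 3 keeps the zero fill
//   element 1: i_conn = 3,4,5,6 -> i_node = 4,5,6,7; all four output slots used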
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::element_node_cache_optimize_size(std::vector<unsigned int> &offset_vec) {
    std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_,
            this->n_comp()/4, offset_vec[offset_vec.size()-1]);
    auto &data_out_vec = *( elem_node_cache->get_component_data(0).get() );
    auto &data_in_vec = *( this->get_component_data(0).get() );

    unsigned int i_node, i_old, i_new;
    for (unsigned int i_el=0, i_conn=0; i_el<offset_vec.size(); i_el++) {
        for(i_node=4*i_el; i_conn<offset_vec[i_el]; i_conn++, i_node++) {
            i_old = i_node*elem_node_cache->n_comp_;  // position in the padded input
            i_new = i_conn*elem_node_cache->n_comp_;  // position in the compacted output
            for(unsigned int i = 0; i < elem_node_cache->n_comp_; i++) {
                data_out_vec[i_new+i] = data_in_vec[i_old+i];
            }
        }
    }
    return elem_node_cache;
}
template <typename T>
std::shared_ptr< ElementDataCacheBase > ElementDataCache<T>::compute_node_data(std::vector<unsigned int> &conn_vec, unsigned int data_size) {
    unsigned int idx;

    // set zero values of the node data cache
    std::shared_ptr< ElementDataCache<T> > node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, this->n_comp(), data_size);
    for (idx=0; idx < node_cache->n_values(); idx++)
        node_cache->zero(idx);

    // sum the values of all elements joined to each node and count the contributions
    auto &data_in_vec = *( this->get_component_data(0).get() );
    std::vector<unsigned int> count(data_size, 0);   // declaration reconstructed; implied by its use below
    for (idx=0; idx < conn_vec.size(); idx++) {
        ASSERT_LT(conn_vec[idx], node_cache->n_values());
        node_cache->add( conn_vec[idx], &(data_in_vec[this->n_comp() * idx]) );
        count[ conn_vec[idx] ]++;
    }

    // divide the sums by the number of contributing elements
    for(idx=0; idx < node_cache->n_values(); idx++)
        node_cache->normalize(idx, count[idx]);

    return node_cache;
}
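The zero/add/normalize sequence is an element-to-node averaging. A self-contained scalar (n_comp = 1) version of the same computation, with hypothetical names:

#include <vector>

// Accumulate each element's value into its nodes, then divide by the number
// of contributing elements.
std::vector<double> average_to_nodes(const std::vector<unsigned int> &conn,
                                     const std::vector<double> &elem_vals,
                                     unsigned int n_nodes) {
    std::vector<double> sums(n_nodes, 0.0);
    std::vector<unsigned int> count(n_nodes, 0);
    for (unsigned int i=0; i<conn.size(); ++i) {
        sums[conn[i]] += elem_vals[i];
        ++count[conn[i]];
    }
    for (unsigned int n=0; n<n_nodes; ++n)
        if (count[n] > 0) sums[n] /= count[n];
    return sums;
}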
Referenced symbols (from the generated class documentation):

    int LongIdx
        Type that represents indices of large arrays (elements, nodes, dofs, etc.).
    unsigned int size() const
        Get global size.
    double time_
        Time step stored in the cache.
    void read_binary_data(std::istream &data_stream, unsigned int n_components, unsigned int i_row) override
        Implements ElementDataCacheBase::read_binary_data.
    std::shared_ptr< std::vector< T > > ComponentDataPtr
    CheckScaleData
        Holds flags recording whether the data in the cache have been checked and scaled (each can be done only once). Documented value: data is neither checked nor scaled.
    void print_ascii(ostream &out_stream, unsigned int idx) override
    CheckScaleData check_scale_data_
        Flag recording whether the data in the cache have been checked and scaled.
    unsigned int n_values() const
    std::shared_ptr< ElementDataCacheBase > compute_node_data(std::vector< unsigned int > &conn_vec, unsigned int data_size) override
        Implements ElementDataCacheBase::compute_node_data.
    void store_value(unsigned int idx, const T *value)
    #define ASSERT_GT(a, b)
        Comparative assert macro (Greater Than).
    #define MPI_Gatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm)
    T & operator[](unsigned int i)
        Access the i-th element in the data vector of the 0th component.
    MPI_Datatype mpi_data_type()
        Return the MPI data type corresponding to the template parameter of the cache. Needs template specialization.
    void read_ascii_data(Tokenizer &tok, unsigned int n_components, unsigned int i_row) override
        Implements ElementDataCacheBase::read_ascii_data.
    ElementDataCache()
        Default constructor.
    void get_min_max_range(double &min, double &max) override
    void add(unsigned int idx, const T *value)
    void print_yaml_subarray(ostream &out_stream, unsigned int precision, unsigned int begin, unsigned int end) override
    std::shared_ptr< ElementDataCacheBase > gather(Distribution *distr, LongIdx *local_to_global) override
        Implements ElementDataCacheBase::gather.
    virtual ~ElementDataCache() override
        Destructor of ElementDataCache.
    static CacheData create_data_cache(unsigned int size_of_cache, unsigned int row_vec_size)
    unsigned int begin(int proc) const
        Get starting local index.
    void zero(unsigned int idx)
    void scale_data(double coef)
    unsigned int np() const
        Get number of processors.
    CheckResult
        Return type of the method that checks data stored in ElementDataCache (NaN values, limits). Documented values: all values are not NaN and are within limits; some value(s) are set to NaN; some value(s) are out of limits.
    CheckResult check_values(double default_val, double lower_bound, double upper_bound)
    void normalize(unsigned int idx, unsigned int divisor)
    unsigned int myp() const
        Get the rank of this processor.
    (Distribution header) Support classes for parallel programming.
    void print_binary_all(ostream &out_stream, bool print_data_size=true) override
        Print all data stored in the output data cache in appended binary format.
    std::string field_input_name_
        Name of the field stored in the cache.
    std::shared_ptr< ElementDataCacheBase > element_node_cache_fixed_size(std::vector< unsigned int > &offset_vec) override
        Implements ElementDataCacheBase::element_node_cache_fixed_size.
    #define ASSERT_LT(a, b)
        Comparative assert macro (Less Than).
    std::shared_ptr< ElementDataCacheBase > element_node_cache_optimize_size(std::vector< unsigned int > &offset_vec) override
        Implements ElementDataCacheBase::element_node_cache_optimize_size.
    ComponentDataPtr get_component_data(unsigned int component_idx)
        Return the vector of element data for the given component.
    void print_ascii_all(ostream &out_stream) override
        Print all data stored in the output data cache in ASCII format.
    unsigned int n_comp() const
    #define ASSERT_EQ(a, b)
        Comparative assert macro (EQual).
    #define ASSERT_LT_DBG(a, b)
        Comparative assert macro (Less Than), active only in debug mode.
    unsigned int lsize(int proc) const
        Get local size.
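Finally, a minimal usage sketch tying the pieces together. This is illustrative only: the header name and field name are assumptions, and the three-argument constructor is inferred from the make_shared call in gather.

#include <iostream>
#include "element_data_cache.hh"   // assumed header name

void example(unsigned int n_elements) {
    ElementDataCache<double> cache("pressure", 3, n_elements);  // field name, n_comp, n_values (inferred)
    double vals[3] = {1.0, 2.0, 3.0};
    cache.store_value(0, vals);        // set the three components of value 0
    double lo, hi;
    cache.get_min_max_range(lo, hi);   // scan all stored components
    cache.print_ascii_all(std::cout);  // space-separated ASCII dump
}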