27 #include "boost/lexical_cast.hpp"
53 this->set_vtk_type<T>();
58 ASSERT_GT(
n_comp, 0)(field_name).error(
"Output field returning variable size vectors. Try convert to MultiField.");
81 data_cache->resize(row_vec_size, numeric_limits<T>::signaling_NaN());
88 unsigned int idx = i_row * n_components;
90 for (
unsigned int i_col=0; i_col < n_components; ++i_col, ++idx) {
92 vec[idx] = boost::lexical_cast<T>(*tok);
100 unsigned int idx = i_row * n_components;
102 for (
unsigned int i_col=0; i_col < n_components; ++i_col, ++idx) {
103 data_stream.read(
reinterpret_cast<char *
>(&
vec[idx]),
sizeof(T));
114 template <
typename T>
119 for(
unsigned int i = n_comp_*n_dofs_per_element_*idx; i < n_comp_*n_dofs_per_element_*(idx+1); ++i )
120 out_stream <<
vec[i] <<
" ";
130 template <
typename T>
134 for(
unsigned int idx = start; idx < this->n_values_; idx++) {
135 for(
unsigned int i = n_comp_*n_dofs_per_element_*idx; i < n_comp_*n_dofs_per_element_*(idx+1); ++i )
136 out_stream <<
vec[i] <<
" ";
142 template <
typename T>
145 if (print_data_size) {
147 unsigned long long int data_byte_size = this->n_values_ * n_comp_ *
sizeof(T);
148 out_stream.write(
reinterpret_cast<const char*
>(&data_byte_size),
sizeof(
unsigned long long int));
152 for(
unsigned int idx = start; idx < this->n_values_; idx++) {
153 for(
unsigned int i = n_comp_*idx; i < n_comp_*(idx+1); ++i )
154 out_stream.write(
reinterpret_cast<const char*
>(&(
vec[i])),
sizeof(T));
159 template <
typename T>
164 for(
unsigned int idx = begin; idx < end; idx++) {
165 if (idx != begin) out_stream <<
" , ";
166 unsigned int vec_pos = n_comp_ * idx;
167 switch (this->n_comp_) {
168 case NumCompValueType::N_SCALAR: {
172 case NumCompValueType::N_VECTOR: {
173 typename arma::Col<T>::template fixed<3> vec_val;
174 for (
unsigned int i=0; i<3; ++i, ++vec_pos)
175 vec_val(i) =
vec[vec_pos];
179 case NumCompValueType::N_TENSOR: {
180 typename arma::Mat<T>::template fixed<3,3> mat_val;
181 for (
unsigned int i=0; i<3; ++i)
182 for (
unsigned int j=0; j<3; ++j, ++vec_pos)
183 mat_val(i,j) =
vec[vec_pos];
193 template <
typename T>
196 min = std::numeric_limits<double>::max();
197 max = std::numeric_limits<double>::min();
199 for(
unsigned int idx = 0; idx < this->n_values_; idx++) {
200 for(
unsigned int i = n_comp_*n_dofs_per_element_*idx; i < n_comp_*n_dofs_per_element_*(idx+1); ++i ) {
201 if (
vec[i] < min) min =
vec[i];
202 if (
vec[i] > max) max =
vec[i];
211 template <
typename T>
213 ASSERT_LT(idx, this->n_values_)(this->field_name_);
215 unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
216 for(
unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
224 template <
typename T>
228 unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
229 for(
unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
237 template <
typename T>
241 unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
242 for(
unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
250 template <
typename T>
254 unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
255 for(
unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
256 vec[vec_idx] /= divisor;
260 template <
typename T>
262 std::shared_ptr< ElementDataCache<T> > gather_cache;
263 int rank = distr->
myp();
264 int n_proc = distr->
np();
266 unsigned int n_global_data;
267 int rec_starts[n_proc];
268 int rec_counts[n_proc];
269 int *rec_indices_ids =
nullptr;
270 T *rec_data =
nullptr;
272 n_global_data = distr->
size();
275 for (
int i=0; i<n_proc; ++i) {
276 rec_starts[i] = distr->
begin(i);
277 rec_counts[i] = distr->
lsize(i);
279 rec_indices_ids =
new int [ n_global_data ];
283 for (
int i=0; i<n_proc; ++i) {
284 rec_starts[i] = this->n_comp()*this->n_dofs_per_element()*rec_starts[i];
285 rec_counts[i] = this->n_comp()*this->n_dofs_per_element()*rec_counts[i];
287 rec_data =
new T [ this->n_comp() * this->n_dofs_per_element() * n_global_data ];
289 auto &local_cache_vec = *( this->get_data().get() );
290 MPI_Gatherv( &local_cache_vec[0], this->n_comp()*this->n_dofs_per_element()*distr->
lsize(), this->mpi_data_type(), rec_data, rec_counts, rec_starts, this->mpi_data_type(), 0,
MPI_COMM_WORLD) ;
294 gather_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, (
unsigned int)this->n_comp(), n_global_data, this->fe_type_, this->n_dofs_per_element_);
295 auto &gather_vec = *( gather_cache->get_data().get() );
296 unsigned int i_global_coord;
297 for (
unsigned int i=0; i<n_global_data; ++i) {
298 i_global_coord = this->n_comp() * this->n_dofs_per_element() * rec_indices_ids[i];
299 for (
unsigned int j=0; j<this->n_comp() * this->n_dofs_per_element(); ++j) {
300 ASSERT_LT(i_global_coord+j, gather_vec.size());
301 gather_vec[ i_global_coord+j ] = rec_data[ this->n_comp()*this->n_dofs_per_element()*i+j ];
305 delete[] rec_indices_ids;
313 template <
typename T>
315 unsigned int n_elem = offset_vec.size()-1;
316 std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, 4*this->n_comp(), n_elem, this->fe_type_, this->n_dofs_per_element_);
317 auto &data_out_vec = *( elem_node_cache->get_data().get() );
318 std::fill( data_out_vec.begin(), data_out_vec.end(), (T)0 );
319 auto &data_in_vec = *( this->get_data().get() );
321 unsigned int i_node, i_old, i_new;
322 for (
unsigned int i_el=0, i_conn=0; i_el<offset_vec.size()-1; i_el++) {
323 for(i_node=4*i_el; i_conn<offset_vec[i_el+1]; i_conn++, i_node++) {
324 i_old = i_conn*this->n_comp_*this->n_dofs_per_element_;
325 i_new = i_node*this->n_comp_*this->n_dofs_per_element_;
326 for(
unsigned int i = 0; i < this->n_comp_*this->n_dofs_per_element_; i++) {
329 data_out_vec[i_new+i] = data_in_vec[i_old+i];
334 return elem_node_cache;
338 template <
typename T>
340 std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_,
341 this->n_comp()/4, offset_vec[offset_vec.size()-1], this->fe_type_, this->n_dofs_per_element_);
342 auto &data_out_vec = *( elem_node_cache->get_data().get() );
343 auto &data_in_vec = *( this->get_data().get() );
345 unsigned int i_node, i_old, i_new;
346 for (
unsigned int i_el=0, i_conn=0; i_el<offset_vec.size()-1; i_el++) {
347 for(i_node=4*i_el; i_conn<offset_vec[i_el+1]; i_conn++, i_node++) {
348 i_old = i_node*elem_node_cache->n_comp_*this->n_dofs_per_element_;
349 i_new = i_conn*elem_node_cache->n_comp_*this->n_dofs_per_element_;
350 for(
unsigned int i = 0; i < elem_node_cache->n_comp_*this->n_dofs_per_element_; i++) {
353 data_out_vec[i_new+i] = data_in_vec[i_old+i];
357 return elem_node_cache;
361 template <
typename T>
363 ASSERT_EQ(conn_vec.size(), this->n_values());
367 std::shared_ptr< ElementDataCache<T> > node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, this->n_comp(), data_size, this->fe_type_, this->n_dofs_per_element_);
369 for (idx=0; idx < node_cache->n_values(); idx++)
370 node_cache->zero(idx);
372 auto &data_in_vec = *( this->get_data().get() );
373 for (idx=0; idx < conn_vec.size(); idx++) {
374 ASSERT_LT(conn_vec[idx], node_cache->n_values());
375 ASSERT_LT(this->n_comp()*this->n_dofs_per_element()*idx, data_in_vec.size());
376 node_cache->add( conn_vec[idx], &(data_in_vec[this->n_comp() * this->n_dofs_per_element() * idx]) );
377 count[ conn_vec[idx] ]++;
381 for(idx=0; idx < node_cache->n_values(); idx++)
382 node_cache->normalize(idx, count[idx]);
#define ASSERT_LT(a, b)
Definition of comparative assert macro (Less Than) only for debug mode.
#define ASSERT_EQ(a, b)
Definition of comparative assert macro (EQual) only for debug mode.
#define ASSERT_GT(a, b)
Definition of comparative assert macro (Greater Than) only for debug mode.
unsigned int myp() const
get my processor
unsigned int begin(int proc) const
get starting local index
unsigned int lsize(int proc) const
get local size
unsigned int size() const
get global size
unsigned int np() const
get num of processors
std::string fe_type() const
unsigned int n_comp() const
unsigned int n_dofs_per_element() const
double time_
time step stored in cache
unsigned int n_dofs_per_element_
unsigned int boundary_begin_
Start position of boundary data in cache.
std::string field_input_name_
name of field stored in cache
void read_binary_data(std::istream &data_stream, unsigned int n_components, unsigned int i_row) override
Implements ElementDataCacheBase::read_binary_data.
std::shared_ptr< ElementDataCacheBase > gather(Distribution *distr, LongIdx *local_to_global) override
Implements ElementDataCacheBase::gather.
void print_ascii_all(ostream &out_stream, unsigned int start=0) override
Print all data stored in output data to ascii format.
void get_min_max_range(double &min, double &max) override
MPI_Datatype mpi_data_type()
Return MPI data type corresponding with template parameter of cache. Needs template specialization.
ElementDataCache()
Default constructor.
void print_yaml_subarray(ostream &out_stream, unsigned int precision, unsigned int begin, unsigned int end) override
void read_ascii_data(Tokenizer &tok, unsigned int n_components, unsigned int i_row) override
Implements ElementDataCacheBase::read_ascii_data.
CheckScaleData
Holds a flag indicating whether the data in the cache has been checked and scaled (each can be executed only once).
void store_value(unsigned int idx, const T *value)
static CacheData create_data_cache(unsigned int row_vec_size)
std::shared_ptr< std::vector< T > > CacheData
void print_binary_all(ostream &out_stream, bool print_data_size=true, unsigned int start=0) override
Print all data stored in output data to appended binary format.
void zero(unsigned int idx)
std::shared_ptr< ElementDataCacheBase > compute_node_data(std::vector< unsigned int > &conn_vec, unsigned int data_size) override
Implements ElementDataCacheBase::compute_node_data.
std::shared_ptr< ElementDataCacheBase > element_node_cache_fixed_size(std::vector< unsigned int > &offset_vec) override
Implements ElementDataCacheBase::element_node_cache_fixed_size.
CacheData get_data()
Return underlying vector of element data.
T & operator[](unsigned int i)
Access i-th element in the data vector of 0th component.
void print_ascii(ostream &out_stream, unsigned int idx) override
virtual ~ElementDataCache() override
Destructor of ElementDataCache.
void add(unsigned int idx, const T *value)
void normalize(unsigned int idx, unsigned int divisor)
std::shared_ptr< ElementDataCacheBase > element_node_cache_optimize_size(std::vector< unsigned int > &offset_vec) override
Implements ElementDataCacheBase::element_node_cache_optimize_size.
Support classes for parallel programming.
int LongIdx
Define type that represents indices of large arrays (elements, nodes, dofs etc.)
static constexpr bool value
#define MPI_Gatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm)