#include "boost/lexical_cast.hpp"
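
// Constructor fragment (presumably): sets the VTK output type matching T and
// requires a positive, fixed number of components per value.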
    this->set_vtk_type<T>();
    ASSERT_GT(n_comp, 0)(field_name).error(
            "Output field returning variable size vectors. Try converting to MultiField.");
    ASSERT_LT(component_idx, data_.size()).error("Index of component is out of range.\n");
    return data_[component_idx];
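
// Cache allocation loop: each row of the cache is resized to row_vec_size and
// pre-filled with signaling NaNs, presumably so unwritten values stay detectable.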
    for (unsigned int i=0; i<size_of_cache; ++i) {
        row_vec->resize(row_vec_size, numeric_limits<T>::signaling_NaN());
        data_cache[i] = row_vec;
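
// ASCII input path (assumed): for each component cache, one row of n_components
// values is parsed from the tokenizer tok via boost::lexical_cast<T>.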
    for (unsigned int i_vec=0; i_vec<data_.size(); ++i_vec) {
        idx = i_row * n_components;
        for (unsigned int i_col=0; i_col < n_components; ++i_col, ++idx) {
            vec[idx] = boost::lexical_cast<T>(*tok);
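
// Binary input path (assumed): same indexing as the ASCII branch, but each value
// is read as sizeof(T) raw bytes directly from data_stream.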
template <typename T>
    for (unsigned int i_vec=0; i_vec<data_.size(); ++i_vec) {
        idx = i_row * n_components;
        for (unsigned int i_col=0; i_col < n_components; ++i_col, ++idx) {
            data_stream.read(reinterpret_cast<char *>(&vec[idx]), sizeof(T));
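
// ASCII output of a single entry: writes the n_comp_ components stored for the
// given idx, separated by spaces.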
template <typename T>
    for(unsigned int i = n_comp_*idx; i < n_comp_*(idx+1); ++i )
        out_stream << vec[i] << " ";
template <typename T>
    for(unsigned int idx = 0; idx < this->n_values_; idx++) {
        for(unsigned int i = n_comp_*idx; i < n_comp_*(idx+1); ++i )
            out_stream << vec[i] << " ";
template <typename T>
    if (print_data_size) {
        unsigned long long int data_byte_size = this->n_values_ * n_comp_ * sizeof(T);
        out_stream.write(reinterpret_cast<const char*>(&data_byte_size), sizeof(unsigned long long int));
    for(unsigned int idx = 0; idx < this->n_values_; idx++) {
        for(unsigned int i = n_comp_*idx; i < n_comp_*(idx+1); ++i )
            out_stream.write(reinterpret_cast<const char*>(&(vec[i])), sizeof(T));
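
// YAML-style output of the range [begin, end): each entry is formatted according
// to n_comp_ as a scalar, a fixed 3-component Armadillo column, or a fixed 3x3
// Armadillo matrix assembled from the flat cache.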
template <typename T>
    for(unsigned int idx = begin; idx < end; idx++) {
        if (idx != begin) out_stream << " , ";
        unsigned int vec_pos = n_comp_ * idx;
        switch (this->n_comp_) {
            case NumCompValueType::N_SCALAR: {
            case NumCompValueType::N_VECTOR: {
                typename arma::Col<T>::template fixed<3> vec_val;
                for (unsigned int i=0; i<3; ++i, ++vec_pos)
                    vec_val(i) = vec[vec_pos];
            case NumCompValueType::N_TENSOR: {
                typename arma::Mat<T>::template fixed<3,3> mat_val;
                for (unsigned int i=0; i<3; ++i)
                    for (unsigned int j=0; j<3; ++j, ++vec_pos)
                        mat_val(i,j) = vec[vec_pos];
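
// Range scan: min and max are initialized to extreme values and tightened over
// every component of every cached entry.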
template <typename T>
    min = std::numeric_limits<double>::max();
    max = std::numeric_limits<double>::lowest();   // lowest(), not min(): min() is the smallest positive double
    for(unsigned int idx = 0; idx < this->n_values_; idx++) {
        for(unsigned int i = n_comp_*idx; i < n_comp_*(idx+1); ++i ) {
            if (vec[i] < min) min = vec[i];
            if (vec[i] > max) max = vec[i];
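
// Store: copies the n_comp_ components of the supplied value array into the
// cache slots belonging to entry idx.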
template <typename T>
    unsigned int vec_idx = idx*this->n_comp_;
    for(unsigned int i = 0; i < this->n_comp_; i++, vec_idx++) {
        vec[vec_idx] = value[i];
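
// Accumulate: adds the n_comp_ components of value onto the slots of entry idx
// (used below when element contributions are summed at nodes).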
template <typename T>
    unsigned int vec_idx = idx*this->n_comp_;
    for(unsigned int i = 0; i < this->n_comp_; i++, vec_idx++) {
        vec[vec_idx] += value[i];
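
// Another per-entry helper with the same indexing; vec_idx walks the n_comp_
// slots of entry idx (presumably the zero() reset that is called further below).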
template <typename T>
    unsigned int vec_idx = idx*this->n_comp_;
    for(unsigned int i = 0; i < this->n_comp_; i++, vec_idx++) {
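
// Normalize: divides each of the n_comp_ components of entry idx by divisor,
// used below to turn summed node contributions into averages.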
template <typename T>
    unsigned int vec_idx = idx*this->n_comp_;
    for(unsigned int i = 0; i < this->n_comp_; i++, vec_idx++) {
        vec[vec_idx] /= divisor;
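
// Value check (assumed): scans every cached component, replaces missing entries
// (presumably NaNs) with default_val, and flags values outside
// [lower_bound, upper_bound].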
template <typename T>
    bool is_nan = false, out_of_limit = false;
    for (unsigned int j=0; j<data_.size(); ++j) {
        for(unsigned int i=0; i<vec.size(); ++i) {
            else vec[i] = default_val;
            if ( (vec[i] < lower_bound) || (vec[i] > upper_bound) ) out_of_limit = true;
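
// Scaling pass (assumed): guarded by check_scale_data_ so it runs at most once,
// it walks every component of every cached vector.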
template <typename T>
    if (check_scale_data_ == CheckScaleData::scale) return;
    for (unsigned int j=0; j<data_.size(); ++j) {
        for(unsigned int i=0; i<vec.size(); ++i) {
    check_scale_data_ = CheckScaleData::scale;
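
// Parallel gather (assumed): collects the distributed per-process caches with
// MPI_Gatherv, using the Distribution object distr for counts/offsets and a
// local-to-global index map to place the received values on the root rank.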
template <typename T>
    std::shared_ptr< ElementDataCache<T> > gather_cache;
    int rank = distr->myp();
    int n_proc = distr->np();
    unsigned int n_global_data;
    int rec_starts[n_proc];
    int rec_counts[n_proc];
    int *rec_indices_ids;
    for (int i=0; i<n_proc; ++i) {
        rec_starts[i] = distr->begin(i);
        rec_counts[i] = distr->lsize(i);
    n_global_data = distr->size();
    rec_indices_ids = new int [ n_global_data ];
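
// Receive counts and offsets are rescaled by n_comp() so that whole value tuples
// are gathered; the local component vector is then sent with the matching MPI type.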
    for (int i=0; i<n_proc; ++i) {
        rec_starts[i] = this->n_comp()*rec_starts[i];
        rec_counts[i] = this->n_comp()*rec_counts[i];
    rec_data = new T [ this->n_comp() * n_global_data ];
    auto &local_cache_vec = *( this->get_component_data(0).get() );
    MPI_Gatherv( &local_cache_vec[0], this->n_comp()*distr->lsize(), this->mpi_data_type(), rec_data, rec_counts, rec_starts, this->mpi_data_type(), 0, MPI_COMM_WORLD);
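
// A serial ElementDataCache is then built (presumably only on the root) and filled:
// each received value tuple is copied to the position given by its global index
// from rec_indices_ids, then the temporary index buffer is released.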
    gather_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, (unsigned int)this->n_comp(), n_global_data);
    auto &gather_vec = *( gather_cache->get_component_data(0).get() );
    unsigned int i_global_coord;
    for (unsigned int i=0; i<n_global_data; ++i) {
        i_global_coord = this->n_comp() * rec_indices_ids[i];
        for (unsigned int j=0; j<this->n_comp(); ++j) {
            ASSERT_LT(i_global_coord+j, gather_vec.size());
            gather_vec[ i_global_coord+j ] = rec_data[ this->n_comp()*i+j ];
    delete[] rec_indices_ids;
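
// Element-to-node expansion (assumed): builds a zero-filled cache with a fixed
// 4*n_comp() components per element and copies each element's values into the
// slots of its nodes according to offset_vec.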
template <typename T>
    unsigned int n_elem = offset_vec.size();
    std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, 4*this->n_comp(), n_elem);
    auto &data_out_vec = *( elem_node_cache->get_component_data(0).get() );
    std::fill( data_out_vec.begin(), data_out_vec.end(), (T)0 );
    auto &data_in_vec = *( this->get_component_data(0).get() );
    unsigned int i_node, i_old, i_new;
    for (unsigned int i_el=0, i_conn=0; i_el<offset_vec.size(); i_el++) {
        for(i_node=4*i_el; i_conn<offset_vec[i_el]; i_conn++, i_node++) {
            i_old = i_conn*this->n_comp_;
            i_new = i_node*this->n_comp_;
            for(unsigned int i = 0; i < this->n_comp_; i++) {
                data_out_vec[i_new+i] = data_in_vec[i_old+i];
    return elem_node_cache;
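
// Inverse repacking (assumed): shrinks the fixed 4-per-element layout back to a
// compact cache with n_comp()/4 components and offset_vec.back() values, copying
// in the opposite direction.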
template <typename T>
    std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_,
            this->n_comp()/4, offset_vec[offset_vec.size()-1]);
    auto &data_out_vec = *( elem_node_cache->get_component_data(0).get() );
    auto &data_in_vec = *( this->get_component_data(0).get() );
    unsigned int i_node, i_old, i_new;
    for (unsigned int i_el=0, i_conn=0; i_el<offset_vec.size(); i_el++) {
        for(i_node=4*i_el; i_conn<offset_vec[i_el]; i_conn++, i_node++) {
            i_old = i_node*elem_node_cache->n_comp_;
            i_new = i_conn*elem_node_cache->n_comp_;
            for(unsigned int i = 0; i < elem_node_cache->n_comp_; i++) {
                data_out_vec[i_new+i] = data_in_vec[i_old+i];
    return elem_node_cache;
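
// Node averaging (assumed): zeroes a node-sized cache, adds each element's values
// to the nodes listed in conn_vec while counting contributions, then normalizes
// every node by its contribution count to obtain the mean.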
template <typename T>
    ASSERT_EQ(conn_vec.size(), this->n_values());
    std::shared_ptr< ElementDataCache<T> > node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_, this->n_comp(), data_size);
    for (idx=0; idx < node_cache->n_values(); idx++)
        node_cache->zero(idx);
    auto &data_in_vec = *( this->get_component_data(0).get() );
    for (idx=0; idx < conn_vec.size(); idx++) {
        ASSERT_LT(conn_vec[idx], node_cache->n_values());
        ASSERT_LT(this->n_comp()*idx, data_in_vec.size());
        node_cache->add( conn_vec[idx], &(data_in_vec[this->n_comp() * idx]) );
        count[ conn_vec[idx] ]++;
    for(idx=0; idx < node_cache->n_values(); idx++)
        node_cache->normalize(idx, count[idx]);