#include "boost/lexical_cast.hpp"
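
// Excerpts from the ElementDataCache<T> template implementation (the class
// name is taken from the make_shared<ElementDataCache<T>> calls below);
// intervening lines, including most method signatures, are elided.
// Constructor: presumably registers the cached scalar type for VTK output.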
this->set_vtk_type<T>();
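
// Input caches need a fixed number of components per element; fields that
// return variable-size vectors are rejected: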
ASSERT_GT(n_comp, 0)(field_name).error("Output field returning variable size vectors. Try converting to MultiField.");
data_cache->resize(row_vec_size, std::numeric_limits<T>::signaling_NaN());
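
// ASCII reader: one row of n_components values is parsed from the tokenizer
// with boost::lexical_cast into the row's slice of the cache vector.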
unsigned int idx = i_row * n_components;
for (unsigned int i_col = 0; i_col < n_components; ++i_col, ++idx) {
    vec[idx] = boost::lexical_cast<T>(*tok);
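
// Binary reader: same row layout, but each value is read as sizeof(T) raw
// bytes straight from the stream.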
unsigned int idx = i_row * n_components;
for (unsigned int i_col = 0; i_col < n_components; ++i_col, ++idx) {
    data_stream.read(reinterpret_cast<char *>(&vec[idx]), sizeof(T));
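
// Per-entry ASCII output: all n_comp_ * n_dofs_per_element_ components of
// entry idx are streamed space-separated.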
template <typename T>
for (unsigned int i = n_comp_*n_dofs_per_element_*idx; i < n_comp_*n_dofs_per_element_*(idx+1); ++i)
    out_stream << vec[i] << " ";
template <typename T>
for (unsigned int idx = start; idx < this->n_values_; idx++) {
    for (unsigned int i = n_comp_*n_dofs_per_element_*idx; i < n_comp_*n_dofs_per_element_*(idx+1); ++i)
        out_stream << vec[i] << " ";
template <typename T>
if (print_data_size) {
    unsigned long long int data_byte_size = this->n_values_ * n_comp_ * sizeof(T);
    out_stream.write(reinterpret_cast<const char*>(&data_byte_size), sizeof(unsigned long long int));
for (unsigned int idx = start; idx < this->n_values_; idx++) {
    for (unsigned int i = n_comp_*idx; i < n_comp_*(idx+1); ++i)
        out_stream.write(reinterpret_cast<const char*>(&(vec[i])), sizeof(T));
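
// YAML-style output of entries [begin, end): entries are comma-separated
// and re-packed according to n_comp_ into a scalar, a fixed 3-vector, or a
// fixed 3x3 tensor (Armadillo types) before being printed by elided code.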
template <typename T>
for (unsigned int idx = begin; idx < end; idx++) {
    if (idx != begin) out_stream << " , ";
    unsigned int vec_pos = n_comp_ * idx;
    switch (this->n_comp_) {
        case NumCompValueType::N_SCALAR: {
        case NumCompValueType::N_VECTOR: {
            typename arma::Col<T>::template fixed<3> vec_val;
            for (unsigned int i=0; i<3; ++i, ++vec_pos)
                vec_val(i) = vec[vec_pos];
        case NumCompValueType::N_TENSOR: {
            typename arma::Mat<T>::template fixed<3,3> mat_val;
            for (unsigned int i=0; i<3; ++i)
                for (unsigned int j=0; j<3; ++j, ++vec_pos)
                    mat_val(i,j) = vec[vec_pos];
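
// Min/max scan over all stored components: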
template <typename T>
min = std::numeric_limits<double>::max();
max = std::numeric_limits<double>::lowest();  // lowest(), not min(): min() is the smallest positive double
for (unsigned int idx = 0; idx < this->n_values_; idx++) {
    for (unsigned int i = n_comp_*n_dofs_per_element_*idx; i < n_comp_*n_dofs_per_element_*(idx+1); ++i) {
        if (vec[i] < min) min = vec[i];
        if (vec[i] > max) max = vec[i];
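
// Element-wise mutators sharing one indexing scheme (vec_idx walks the
// components of entry idx): a store, add (accumulate), zero (reset), and
// normalize (divide by a count); the last three are used by the node
// averaging at the end of this excerpt.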
template <typename T>
ASSERT_LT(idx, this->n_values_)(this->field_name_);
unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
for (unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
template <typename T>
unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
for (unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
template <typename T>
unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
for (unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
template <typename T>
unsigned int vec_idx = idx*this->n_comp_*n_dofs_per_element_;
for (unsigned int i = 0; i < this->n_comp_*n_dofs_per_element_; i++, vec_idx++) {
    vec[vec_idx] /= divisor;
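
// Validation pass: NaN entries are presumably replaced by default_val (the
// matching 'if' is elided; only its 'else' branch is visible) and values
// outside [lower_bound, upper_bound] raise the out_of_limit flag.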
template <typename T>
bool is_nan = false, out_of_limit = false;
for (unsigned int i=0; i<vec.size(); ++i) {
        else vec[i] = default_val;
    if ( (vec[i] < lower_bound) || (vec[i] > upper_bound) ) out_of_limit = true;
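
// Scaling pass, guarded so the data is scaled at most once; the
// check_scale_data_ state records whether values were checked and/or scaled.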
template <typename T>
if (check_scale_data_ == CheckScaleData::scale) return;
ASSERT(check_scale_data_ == CheckScaleData::check).warning("Data should be checked before scaling; call 'check_values' first.\n");
for (unsigned int i=0; i<vec.size(); ++i) {
check_scale_data_ = CheckScaleData::scale;
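
// MPI gather of the distributed cache onto rank 0: rec_starts/rec_counts
// first hold per-rank index ranges (evidently used to gather the
// local-to-global index map into rec_indices_ids), then are rescaled by
// n_comp()*n_dofs_per_element() so the data itself can be gathered with
// MPI_Gatherv and finally permuted into global order.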
template <typename T>
std::shared_ptr< ElementDataCache<T> > gather_cache;
int rank = distr->myp();
int n_proc = distr->np();
unsigned int n_global_data;
std::vector<int> rec_starts(n_proc);  // per-rank displacement/count buffers for MPI_Gatherv
std::vector<int> rec_counts(n_proc);
int *rec_indices_ids = nullptr;
T *rec_data = nullptr;
n_global_data = distr->size();
for (int i=0; i<n_proc; ++i) {
    rec_starts[i] = distr->begin(i);
    rec_counts[i] = distr->lsize(i);
rec_indices_ids = new int [ n_global_data ];
for (int i=0; i<n_proc; ++i) {
    rec_starts[i] = this->n_comp()*this->n_dofs_per_element()*rec_starts[i];
    rec_counts[i] = this->n_comp()*this->n_dofs_per_element()*rec_counts[i];
rec_data = new T [ this->n_comp() * this->n_dofs_per_element() * n_global_data ];
auto &local_cache_vec = *( this->get_data().get() );
MPI_Gatherv( &local_cache_vec[0], this->n_comp()*this->n_dofs_per_element()*distr->lsize(), this->mpi_data_type(),
        rec_data, rec_counts.data(), rec_starts.data(), this->mpi_data_type(), 0, MPI_COMM_WORLD);
gather_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_,
        (unsigned int)this->n_comp(), n_global_data, this->fe_type_, this->n_dofs_per_element_);
auto &gather_vec = *( gather_cache->get_data().get() );
unsigned int i_global_coord;
for (unsigned int i=0; i<n_global_data; ++i) {
    i_global_coord = this->n_comp() * this->n_dofs_per_element() * rec_indices_ids[i];
    for (unsigned int j=0; j<this->n_comp() * this->n_dofs_per_element(); ++j) {
        ASSERT_LT(i_global_coord+j, gather_vec.size());
        gather_vec[ i_global_coord+j ] = rec_data[ this->n_comp()*this->n_dofs_per_element()*i+j ];
delete[] rec_indices_ids;
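
// Conversion to a fixed-size element-node cache: every element gets four
// node slots (4*n_comp() components), presumably the maximum node count per
// element; slots not covered by offset_vec stay zero.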
template <typename T>
unsigned int n_elem = offset_vec.size()-1;
std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_,
        4*this->n_comp(), n_elem, this->fe_type_, this->n_dofs_per_element_);
auto &data_out_vec = *( elem_node_cache->get_data().get() );
std::fill( data_out_vec.begin(), data_out_vec.end(), (T)0 );
auto &data_in_vec = *( this->get_data().get() );
unsigned int i_node, i_old, i_new;
for (unsigned int i_el=0, i_conn=0; i_el<n_elem; i_el++) {
    for (i_node=4*i_el; i_conn<offset_vec[i_el+1]; i_conn++, i_node++) {
        i_old = i_conn*this->n_comp_*this->n_dofs_per_element_;
        i_new = i_node*this->n_comp_*this->n_dofs_per_element_;
        for (unsigned int i = 0; i < this->n_comp_*this->n_dofs_per_element_; i++) {
            data_out_vec[i_new+i] = data_in_vec[i_old+i];
return elem_node_cache;
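
// The inverse conversion: repack the fixed four-slot layout back into the
// compressed connectivity described by offset_vec, dividing n_comp() by 4.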
template <typename T>
std::shared_ptr< ElementDataCache<T> > elem_node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_,
        this->n_comp()/4, offset_vec[offset_vec.size()-1], this->fe_type_, this->n_dofs_per_element_);
auto &data_out_vec = *( elem_node_cache->get_data().get() );
auto &data_in_vec = *( this->get_data().get() );
unsigned int i_node, i_old, i_new;
for (unsigned int i_el=0, i_conn=0; i_el<offset_vec.size()-1; i_el++) {
    for (i_node=4*i_el; i_conn<offset_vec[i_el+1]; i_conn++, i_node++) {
        i_old = i_node*elem_node_cache->n_comp_*this->n_dofs_per_element_;
        i_new = i_conn*elem_node_cache->n_comp_*this->n_dofs_per_element_;
        for (unsigned int i = 0; i < elem_node_cache->n_comp_*this->n_dofs_per_element_; i++) {
            data_out_vec[i_new+i] = data_in_vec[i_old+i];
return elem_node_cache;
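
// Node data averaging: element values are scattered to nodes through
// conn_vec, accumulated with add(), occurrences counted, and every node
// value finally divided by its count via normalize().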
template <typename T>
ASSERT_EQ(conn_vec.size(), this->n_values());
std::shared_ptr< ElementDataCache<T> > node_cache = std::make_shared<ElementDataCache<T>>(this->field_input_name_,
        this->n_comp(), data_size, this->fe_type_, this->n_dofs_per_element_);
for (idx=0; idx < node_cache->n_values(); idx++)
    node_cache->zero(idx);
auto &data_in_vec = *( this->get_data().get() );
for (idx=0; idx < conn_vec.size(); idx++) {
    ASSERT_LT(conn_vec[idx], node_cache->n_values());
    ASSERT_LT(this->n_comp()*this->n_dofs_per_element()*idx, data_in_vec.size());
    node_cache->add( conn_vec[idx], &(data_in_vec[this->n_comp() * this->n_dofs_per_element() * idx]) );
    count[ conn_vec[idx] ]++;
for (idx=0; idx < node_cache->n_values(); idx++)
    node_cache->normalize(idx, count[idx]);