Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
enumerable_thread_specific.h
1 /*
2  Copyright (c) 2005-2020 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_enumerable_thread_specific_H
18 #define __TBB_enumerable_thread_specific_H
19 
20 #define __TBB_enumerable_thread_specific_H_include_area
21 #include "internal/_warning_suppress_enable_notice.h"
22 
23 #include "atomic.h"
24 #include "concurrent_vector.h"
25 #include "tbb_thread.h"
26 #include "tbb_allocator.h"
27 #include "cache_aligned_allocator.h"
28 #include "aligned_space.h"
29 #include "internal/_template_helpers.h"
30 #include "internal/_tbb_hash_compare_impl.h"
31 #include "tbb_profiling.h"
32 #include <string.h> // for memcpy
33 
34 #if __TBB_PREVIEW_RESUMABLE_TASKS
35 #include "task.h" // for task::suspend_point
36 #endif
37 
38 #if _WIN32||_WIN64
39 #include "machine/windows_api.h"
40 #else
41 #include <pthread.h>
42 #endif
43 
44 #define __TBB_ETS_USE_CPP11 \
45  (__TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT \
46  && __TBB_CPP11_DECLTYPE_PRESENT && __TBB_CPP11_LAMBDAS_PRESENT)
47 
48 namespace tbb {
49 
50 //! enum for selecting between single key and key-per-instance versions
51 enum ets_key_usage_type {
52  ets_key_per_instance
53  , ets_no_key
54 #if __TBB_PREVIEW_RESUMABLE_TASKS
55  , ets_suspend_aware
56 #endif
57 };
58 
59 namespace interface6 {
60 
61  // Forward declaration to use in internal classes
62  template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>
63  class enumerable_thread_specific;
64 
66  namespace internal {
67 
68  using namespace tbb::internal;
69 
70  template <ets_key_usage_type ETS_key_type>
71  struct ets_key_selector {
72  typedef tbb_thread::id key_type;
73  static key_type current_key() {
74  return tbb::internal::thread_get_id_v3();
75  }
76  };
77 
78 #if __TBB_PREVIEW_RESUMABLE_TASKS
79  template <>
80  struct ets_key_selector<ets_suspend_aware> {
81  typedef task::suspend_point key_type;
82  static key_type current_key() {
83  return internal_current_suspend_point();
84  }
85  };
86 
87  inline task::suspend_point atomic_compare_and_swap(task::suspend_point& location,
88  const task::suspend_point& value, const task::suspend_point& comparand) {
89  return as_atomic(location).compare_and_swap(value, comparand);
90  }
91 #endif
92 
93  template<ets_key_usage_type ETS_key_type>
94  class ets_base: tbb::internal::no_copy {
95  protected:
96  typedef typename ets_key_selector<ETS_key_type>::key_type key_type;
97 #if __TBB_PROTECTED_NESTED_CLASS_BROKEN
98  public:
99 #endif
100  struct slot;
101 
102  struct array {
103  array* next;
104  size_t lg_size;
105  slot& at( size_t k ) {
106  return ((slot*)(void*)(this+1))[k];
107  }
108  size_t size() const {return size_t(1)<<lg_size;}
109  size_t mask() const {return size()-1;}
110  size_t start( size_t h ) const {
111  return h>>(8*sizeof(size_t)-lg_size);
112  }
113  };
114  struct slot {
115  key_type key;
116  void* ptr;
117  bool empty() const {return key == key_type();}
118  bool match( key_type k ) const {return key == k;}
119  bool claim( key_type k ) {
120  // TODO: maybe claim ptr, because key_type is not guaranteed to fit into word size
121  return atomic_compare_and_swap(key, k, key_type()) == key_type();
122  }
123  };
124 #if __TBB_PROTECTED_NESTED_CLASS_BROKEN
125  protected:
126 #endif
127 
129 
131  atomic<array*> my_root;
132  atomic<size_t> my_count;
133  virtual void* create_local() = 0;
134  virtual void* create_array(size_t _size) = 0; // _size in bytes
135  virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes
136  array* allocate( size_t lg_size ) {
137  size_t n = size_t(1)<<lg_size;
138  array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
139  a->lg_size = lg_size;
140  std::memset( a+1, 0, n*sizeof(slot) );
141  return a;
142  }
143  void free(array* a) {
144  size_t n = size_t(1)<<(a->lg_size);
145  free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
146  }
147 
148  ets_base() {my_root=NULL; my_count=0;}
149  virtual ~ets_base(); // g++ complains if this is not virtual
150  void* table_lookup( bool& exists );
151  void table_clear();
152  // The following functions are not used in concurrent context,
153  // so we don't need synchronization and ITT annotations there.
154  template <ets_key_usage_type E2>
155  void table_elementwise_copy( const ets_base& other,
156  void*(*add_element)(ets_base<E2>&, void*) ) {
157  __TBB_ASSERT(!my_root,NULL);
158  __TBB_ASSERT(!my_count,NULL);
159  if( !other.my_root ) return;
160  array* root = my_root = allocate(other.my_root->lg_size);
161  root->next = NULL;
162  my_count = other.my_count;
163  size_t mask = root->mask();
164  for( array* r=other.my_root; r; r=r->next ) {
165  for( size_t i=0; i<r->size(); ++i ) {
166  slot& s1 = r->at(i);
167  if( !s1.empty() ) {
168  for( size_t j = root->start(tbb::tbb_hash<key_type>()(s1.key)); ; j=(j+1)&mask ) {
169  slot& s2 = root->at(j);
170  if( s2.empty() ) {
171  s2.ptr = add_element(static_cast<ets_base<E2>&>(*this), s1.ptr);
172  s2.key = s1.key;
173  break;
174  }
175  else if( s2.match(s1.key) )
176  break;
177  }
178  }
179  }
180  }
181  }
182  void table_swap( ets_base& other ) {
183  __TBB_ASSERT(this!=&other, "Don't swap an instance with itself");
184  tbb::internal::swap<relaxed>(my_root, other.my_root);
185  tbb::internal::swap<relaxed>(my_count, other.my_count);
186  }
187  };
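 // Note added for exposition (not in the original header): ets_base keeps the
 // thread-local slots in a linked list of open-addressed hash arrays, each a power
 // of two in size. A new, larger array is prepended once the element count exceeds
 // half of the root array's capacity; older arrays stay reachable so existing
 // entries never move, and table_lookup() re-inserts hits from older arrays into
 // the root array so that subsequent lookups succeed at the top level.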
188 
189  template<ets_key_usage_type ETS_key_type>
190  ets_base<ETS_key_type>::~ets_base() {
191  __TBB_ASSERT(!my_root, NULL);
192  }
193 
194  template<ets_key_usage_type ETS_key_type>
195  void ets_base<ETS_key_type>::table_clear() {
196  while( array* r = my_root ) {
197  my_root = r->next;
198  free(r);
199  }
200  my_count = 0;
201  }
202 
203  template<ets_key_usage_type ETS_key_type>
204  void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
205  const key_type k = ets_key_selector<ETS_key_type>::current_key();
206 
207  __TBB_ASSERT(k != key_type(),NULL);
208  void* found;
209  size_t h = tbb::tbb_hash<key_type>()(k);
210  for( array* r=my_root; r; r=r->next ) {
211  call_itt_notify(acquired,r);
212  size_t mask=r->mask();
213  for(size_t i = r->start(h); ;i=(i+1)&mask) {
214  slot& s = r->at(i);
215  if( s.empty() ) break;
216  if( s.match(k) ) {
217  if( r==my_root ) {
218  // Success at top level
219  exists = true;
220  return s.ptr;
221  } else {
222  // Success at some other level. Need to insert at top level.
223  exists = true;
224  found = s.ptr;
225  goto insert;
226  }
227  }
228  }
229  }
230  // Key does not yet exist. The density of slots in the table never exceeds 0.5:
231  // whenever it would, a new table of double the current size is allocated and
232  // swapped in as the new root table. So an empty slot is guaranteed.
233  exists = false;
234  found = create_local();
235  {
236  size_t c = ++my_count;
237  array* r = my_root;
238  call_itt_notify(acquired,r);
239  if( !r || c>r->size()/2 ) {
240  size_t s = r ? r->lg_size : 2;
241  while( c>size_t(1)<<(s-1) ) ++s;
242  array* a = allocate(s);
243  for(;;) {
244  a->next = r;
245  call_itt_notify(releasing,a);
246  array* new_r = my_root.compare_and_swap(a,r);
247  if( new_r==r ) break;
248  call_itt_notify(acquired, new_r);
249  if( new_r->lg_size>=s ) {
250  // Another thread inserted an equal or bigger array, so our array is superfluous.
251  free(a);
252  break;
253  }
254  r = new_r;
255  }
256  }
257  }
258  insert:
259  // Whether the slot was found in an older table or freshly created at this level,
260  // it has already been accounted for in the total. Room for it is guaranteed, and it
261  // is not yet present in the root table, so search for an empty slot and use it.
262  array* ir = my_root;
263  call_itt_notify(acquired, ir);
264  size_t mask = ir->mask();
265  for(size_t i = ir->start(h);;i=(i+1)&mask) {
266  slot& s = ir->at(i);
267  if( s.empty() ) {
268  if( s.claim(k) ) {
269  s.ptr = found;
270  return found;
271  }
272  }
273  }
274  }
275 
276  //! Specialization that exploits native TLS
277  template <>
278  class ets_base<ets_key_per_instance>: public ets_base<ets_no_key> {
279  typedef ets_base<ets_no_key> super;
280 #if _WIN32||_WIN64
281 #if __TBB_WIN8UI_SUPPORT
282  typedef DWORD tls_key_t;
283  void create_key() { my_key = FlsAlloc(NULL); }
284  void destroy_key() { FlsFree(my_key); }
285  void set_tls(void * value) { FlsSetValue(my_key, (LPVOID)value); }
286  void* get_tls() { return (void *)FlsGetValue(my_key); }
287 #else
288  typedef DWORD tls_key_t;
289  void create_key() { my_key = TlsAlloc(); }
290  void destroy_key() { TlsFree(my_key); }
291  void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
292  void* get_tls() { return (void *)TlsGetValue(my_key); }
293 #endif
294 #else
295  typedef pthread_key_t tls_key_t;
296  void create_key() { pthread_key_create(&my_key, NULL); }
297  void destroy_key() { pthread_key_delete(my_key); }
298  void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
299  void* get_tls() const { return pthread_getspecific(my_key); }
300 #endif
301  tls_key_t my_key;
302  virtual void* create_local() __TBB_override = 0;
303  virtual void* create_array(size_t _size) __TBB_override = 0; // _size in bytes
304  virtual void free_array(void* ptr, size_t _size) __TBB_override = 0; // size in bytes
305  protected:
306  ets_base() {create_key();}
307  ~ets_base() {destroy_key();}
308  void* table_lookup( bool& exists ) {
309  void* found = get_tls();
310  if( found ) {
311  exists=true;
312  } else {
313  found = super::table_lookup(exists);
314  set_tls(found);
315  }
316  return found;
317  }
318  void table_clear() {
319  destroy_key();
320  create_key();
321  super::table_clear();
322  }
323  void table_swap( ets_base& other ) {
324  using std::swap;
325  __TBB_ASSERT(this!=&other, "Don't swap an instance with itself");
326  swap(my_key, other.my_key);
327  super::table_swap(other);
328  }
329  };
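 // Note added for exposition (not in the original header): the ets_key_per_instance
 // specialization layers a native TLS slot (FlsAlloc/TlsAlloc on Windows,
 // pthread_key_create elsewhere) over the generic hash table. Its table_lookup()
 // first consults get_tls(); only on a miss does it fall back to the base-class hash
 // lookup and then cache the result with set_tls(), so the common case is a single
 // TLS read per call.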
330 
332  template< typename Container, typename Value >
333  class enumerable_thread_specific_iterator
334 #if defined(_WIN64) && defined(_MSC_VER)
335  // Ensure that Microsoft's internal template function _Val_type works correctly.
336  : public std::iterator<std::random_access_iterator_tag,Value>
337 #endif /* defined(_WIN64) && defined(_MSC_VER) */
338  {
340 
341  Container *my_container;
342  typename Container::size_type my_index;
343  mutable Value *my_value;
344 
345  template<typename C, typename T>
346  friend enumerable_thread_specific_iterator<C,T>
347  operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator<C,T>& v );
348 
349  template<typename C, typename T, typename U>
350  friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,
351  const enumerable_thread_specific_iterator<C,U>& j );
352 
353  template<typename C, typename T, typename U>
354  friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i,
355  const enumerable_thread_specific_iterator<C,U>& j );
356 
357  template<typename C, typename T, typename U>
358  friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i,
359  const enumerable_thread_specific_iterator<C,U>& j );
360 
361  template<typename C, typename U>
362  friend class enumerable_thread_specific_iterator;
363 
364  public:
365 
366  enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) :
367  my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}
368 
370  enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}
371 
372  template<typename U>
373  enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
374  my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}
375 
376  enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
377  return enumerable_thread_specific_iterator(*my_container, my_index + offset);
378  }
379 
380  enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
381  my_index += offset;
382  my_value = NULL;
383  return *this;
384  }
385 
386  enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
387  return enumerable_thread_specific_iterator( *my_container, my_index-offset );
388  }
389 
390  enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
391  my_index -= offset;
392  my_value = NULL;
393  return *this;
394  }
395 
396  Value& operator*() const {
397  Value* value = my_value;
398  if( !value ) {
399  value = my_value = (*my_container)[my_index].value();
400  }
401  __TBB_ASSERT( value==(*my_container)[my_index].value(), "corrupt cache" );
402  return *value;
403  }
404 
405  Value& operator[]( ptrdiff_t k ) const {
406  return (*my_container)[my_index + k].value;
407  }
408 
409  Value* operator->() const {return &operator*();}
410 
411  enumerable_thread_specific_iterator& operator++() {
412  ++my_index;
413  my_value = NULL;
414  return *this;
415  }
416 
417  enumerable_thread_specific_iterator& operator--() {
418  --my_index;
419  my_value = NULL;
420  return *this;
421  }
422 
424  enumerable_thread_specific_iterator operator++(int) {
425  enumerable_thread_specific_iterator result = *this;
426  ++my_index;
427  my_value = NULL;
428  return result;
429  }
430 
432  enumerable_thread_specific_iterator operator--(int) {
433  enumerable_thread_specific_iterator result = *this;
434  --my_index;
435  my_value = NULL;
436  return result;
437  }
438 
439  // STL support
440  typedef ptrdiff_t difference_type;
441  typedef Value value_type;
442  typedef Value* pointer;
443  typedef Value& reference;
444  typedef std::random_access_iterator_tag iterator_category;
445  };
446 
447  template<typename Container, typename T>
448  enumerable_thread_specific_iterator<Container,T>
449  operator+( ptrdiff_t offset, const enumerable_thread_specific_iterator<Container,T>& v ) {
450  return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );
451  }
452 
453  template<typename Container, typename T, typename U>
454  bool operator==( const enumerable_thread_specific_iterator<Container,T>& i,
455  const enumerable_thread_specific_iterator<Container,U>& j ) {
456  return i.my_index==j.my_index && i.my_container == j.my_container;
457  }
458 
459  template<typename Container, typename T, typename U>
460  bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i,
461  const enumerable_thread_specific_iterator<Container,U>& j ) {
462  return !(i==j);
463  }
464 
465  template<typename Container, typename T, typename U>
466  bool operator<( const enumerable_thread_specific_iterator<Container,T>& i,
467  const enumerable_thread_specific_iterator<Container,U>& j ) {
468  return i.my_index<j.my_index;
469  }
470 
471  template<typename Container, typename T, typename U>
472  bool operator>( const enumerable_thread_specific_iterator<Container,T>& i,
473  const enumerable_thread_specific_iterator<Container,U>& j ) {
474  return j<i;
475  }
476 
477  template<typename Container, typename T, typename U>
478  bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i,
479  const enumerable_thread_specific_iterator<Container,U>& j ) {
480  return !(i<j);
481  }
482 
483  template<typename Container, typename T, typename U>
484  bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i,
485  const enumerable_thread_specific_iterator<Container,U>& j ) {
486  return !(j<i);
487  }
488 
489  template<typename Container, typename T, typename U>
490  ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i,
491  const enumerable_thread_specific_iterator<Container,U>& j ) {
492  return i.my_index-j.my_index;
493  }
494 
495  template<typename SegmentedContainer, typename Value >
496  class segmented_iterator
497 #if defined(_WIN64) && defined(_MSC_VER)
498  : public std::iterator<std::input_iterator_tag, Value>
499 #endif
500  {
501  template<typename C, typename T, typename U>
502  friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
503 
504  template<typename C, typename T, typename U>
505  friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
506 
507  template<typename C, typename U>
508  friend class segmented_iterator;
509 
510  public:
511 
512  segmented_iterator() {my_segcont = NULL;}
513 
514  segmented_iterator( const SegmentedContainer& _segmented_container ) :
515  my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
516  outer_iter(my_segcont->end()) { }
517 
518  ~segmented_iterator() {}
519 
520  typedef typename SegmentedContainer::iterator outer_iterator;
521  typedef typename SegmentedContainer::value_type InnerContainer;
522  typedef typename InnerContainer::iterator inner_iterator;
523 
524  // STL support
525  typedef ptrdiff_t difference_type;
526  typedef Value value_type;
527  typedef typename SegmentedContainer::size_type size_type;
528  typedef Value* pointer;
529  typedef Value& reference;
530  typedef std::input_iterator_tag iterator_category;
531 
532  // Copy Constructor
533  template<typename U>
534  segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
535  my_segcont(other.my_segcont),
536  outer_iter(other.outer_iter),
537  // can we assign a default-constructed iterator to inner if we're at the end?
538  inner_iter(other.inner_iter)
539  {}
540 
541  // assignment
542  template<typename U>
543  segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
544  if(this != &other) {
545  my_segcont = other.my_segcont;
546  outer_iter = other.outer_iter;
547  if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
548  }
549  return *this;
550  }
551 
552  // allow assignment of outer iterator to segmented iterator. Once it is
553  // assigned, move forward until a non-empty inner container is found or
554  // the end of the outer container is reached.
555  segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
556  __TBB_ASSERT(my_segcont != NULL, NULL);
557  // check that this iterator points to something inside the segmented container
558  for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
559  if( !outer_iter->empty() ) {
560  inner_iter = outer_iter->begin();
561  break;
562  }
563  }
564  return *this;
565  }
566 
567  // pre-increment
568  segmented_iterator& operator++() {
569  advance_me();
570  return *this;
571  }
572 
573  // post-increment
574  segmented_iterator operator++(int) {
575  segmented_iterator tmp = *this;
576  operator++();
577  return tmp;
578  }
579 
580  bool operator==(const outer_iterator& other_outer) const {
581  __TBB_ASSERT(my_segcont != NULL, NULL);
582  return (outer_iter == other_outer &&
583  (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
584  }
585 
586  bool operator!=(const outer_iterator& other_outer) const {
587  return !operator==(other_outer);
588 
589  }
590 
591  // (i)* RHS
592  reference operator*() const {
593  __TBB_ASSERT(my_segcont != NULL, NULL);
594  __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
595  __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
596  return *inner_iter;
597  }
598 
599  // i->
600  pointer operator->() const { return &operator*();}
601 
602  private:
603  SegmentedContainer* my_segcont;
604  outer_iterator outer_iter;
605  inner_iterator inner_iter;
606 
607  void advance_me() {
608  __TBB_ASSERT(my_segcont != NULL, NULL);
609  __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
610  __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
611  ++inner_iter;
612  while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
613  inner_iter = outer_iter->begin();
614  }
615  }
616  }; // segmented_iterator
617 
618  template<typename SegmentedContainer, typename T, typename U>
619  bool operator==( const segmented_iterator<SegmentedContainer,T>& i,
620  const segmented_iterator<SegmentedContainer,U>& j ) {
621  if(i.my_segcont != j.my_segcont) return false;
622  if(i.my_segcont == NULL) return true;
623  if(i.outer_iter != j.outer_iter) return false;
624  if(i.outer_iter == i.my_segcont->end()) return true;
625  return i.inner_iter == j.inner_iter;
626  }
627 
628  // !=
629  template<typename SegmentedContainer, typename T, typename U>
630  bool operator!=( const segmented_iterator<SegmentedContainer,T>& i,
631  const segmented_iterator<SegmentedContainer,U>& j ) {
632  return !(i==j);
633  }
634 
635  template<typename T>
636  struct construct_by_default: tbb::internal::no_assign {
637  void construct(void*where) {new(where) T();} // C++ note: the () in T() ensures zero initialization.
638  construct_by_default( int ) {}
639  };
640 
641  template<typename T>
642  struct construct_by_exemplar: tbb::internal::no_assign {
643  const T exemplar;
644  void construct(void*where) {new(where) T(exemplar);}
645  construct_by_exemplar( const T& t ) : exemplar(t) {}
646 #if __TBB_ETS_USE_CPP11
647  construct_by_exemplar( T&& t ) : exemplar(std::move(t)) {}
648 #endif
649  };
650 
651  template<typename T, typename Finit>
652  struct construct_by_finit: tbb::internal::no_assign {
653  Finit f;
654  void construct(void* where) {new(where) T(f());}
655  construct_by_finit( const Finit& f_ ) : f(f_) {}
656 #if __TBB_ETS_USE_CPP11
657  construct_by_finit( Finit&& f_ ) : f(std::move(f_)) {}
658 #endif
659  };
660 
661 #if __TBB_ETS_USE_CPP11
662  template<typename T, typename... P>
663  struct construct_by_args: tbb::internal::no_assign {
664  internal::stored_pack<P...> pack;
665  void construct(void* where) {
666  internal::call( [where](const typename strip<P>::type&... args ){
667  new(where) T(args...);
668  }, pack );
669  }
670  construct_by_args( P&& ... args ) : pack(std::forward<P>(args)...) {}
671  };
672 #endif
673 
674  // storage for initialization function pointer
675  // TODO: consider removing the template parameter T here and in callback_leaf
676  template<typename T>
677  class callback_base {
678  public:
679  // Clone *this
680  virtual callback_base* clone() const = 0;
681  // Destruct and free *this
682  virtual void destroy() = 0;
683  // Need virtual destructor to satisfy GCC compiler warning
684  virtual ~callback_base() { }
685  // Construct T at where
686  virtual void construct(void* where) = 0;
687  };
688 
689  template <typename T, typename Constructor>
690  class callback_leaf: public callback_base<T>, Constructor {
691 #if __TBB_ETS_USE_CPP11
692  template<typename... P> callback_leaf( P&& ... params ) : Constructor(std::forward<P>(params)...) {}
693 #else
694  template<typename X> callback_leaf( const X& x ) : Constructor(x) {}
695 #endif
696  // TODO: make the construction/destruction consistent (use allocator.construct/destroy)
697  typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;
698 
699  callback_base<T>* clone() const __TBB_override {
700  return make(*this);
701  }
702 
703  void destroy() __TBB_override {
704  my_allocator_type().destroy(this);
705  my_allocator_type().deallocate(this,1);
706  }
707 
708  void construct(void* where) __TBB_override {
709  Constructor::construct(where);
710  }
711  public:
712 #if __TBB_ETS_USE_CPP11
713  template<typename... P>
714  static callback_base<T>* make( P&& ... params ) {
715  void* where = my_allocator_type().allocate(1);
716  return new(where) callback_leaf( std::forward<P>(params)... );
717  }
718 #else
719  template<typename X>
720  static callback_base<T>* make( const X& x ) {
721  void* where = my_allocator_type().allocate(1);
722  return new(where) callback_leaf(x);
723  }
724 #endif
725  };
726 
728 
736  template<typename U>
737  struct ets_element {
738  tbb::aligned_space<U> my_space;
739  bool is_built;
740  ets_element() { is_built = false; } // not currently-built
741  U* value() { return my_space.begin(); }
742  U* value_committed() { is_built = true; return my_space.begin(); }
743  ~ets_element() {
744  if(is_built) {
745  my_space.begin()->~U();
746  is_built = false;
747  }
748  }
749  };
750 
751  // A predicate that can be used for a compile-time compatibility check of ETS instances
752  // Ideally, it should have been declared inside the ETS class, but unfortunately
753  // in that case VS2013 does not enable the variadic constructor.
754  template<typename T, typename ETS> struct is_compatible_ets { static const bool value = false; };
755  template<typename T, typename U, typename A, ets_key_usage_type C>
756  struct is_compatible_ets< T, enumerable_thread_specific<U,A,C> > { static const bool value = internal::is_same_type<T,U>::value; };
757 
758 #if __TBB_ETS_USE_CPP11
759  // A predicate that checks whether, for a variable 'foo' of type T, foo() is a valid expression
760  template <typename T>
761  class is_callable_no_args {
762  private:
763  typedef char yes[1];
764  typedef char no [2];
765 
766  template<typename U> static yes& decide( decltype(declval<U>()())* );
767  template<typename U> static no& decide(...);
768  public:
769  static const bool value = (sizeof(decide<T>(NULL)) == sizeof(yes));
770  };
771 #endif
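 // Illustrative note (not in the original header): is_callable_no_args<F>::value is
 // true exactly when an expression f() is well-formed for an object f of type F, e.g.
 //     struct zero_functor { int operator()() const { return 0; } };
 //     // is_callable_no_args<zero_functor>::value == true
 //     // is_callable_no_args<int>::value          == false
 // The enumerable_thread_specific constructors use this to tell an initializer
 // functor apart from an exemplar value.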
772 
773  } // namespace internal
775 
777 
796  template <typename T,
797  typename Allocator=cache_aligned_allocator<T>,
798  ets_key_usage_type ETS_key_type=ets_no_key >
799  class enumerable_thread_specific: internal::ets_base<ETS_key_type> {
800 
801  template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
802 
803  typedef internal::padded< internal::ets_element<T> > padded_element;
804 
805  //! A generic range, used to create range objects from the iterators
806  template<typename I>
807  class generic_range_type: public blocked_range<I> {
808  public:
809  typedef T value_type;
810  typedef T& reference;
811  typedef const T& const_reference;
812  typedef I iterator;
813  typedef ptrdiff_t difference_type;
814  generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
815  template<typename U>
816  generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
818  };
819 
820  typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
821  typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;
822 
823  internal::callback_base<T> *my_construct_callback;
824 
825  internal_collection_type my_locals;
826 
827  // TODO: consider unifying the callback mechanism for all create_local* methods below
828  // (likely non-compatible and requires interface version increase)
829  void* create_local() __TBB_override {
830  padded_element& lref = *my_locals.grow_by(1);
831  my_construct_callback->construct(lref.value());
832  return lref.value_committed();
833  }
834 
835  static void* create_local_by_copy( internal::ets_base<ETS_key_type>& base, void* p ) {
836  enumerable_thread_specific& ets = static_cast<enumerable_thread_specific&>(base);
837  padded_element& lref = *ets.my_locals.grow_by(1);
838  new(lref.value()) T(*static_cast<T*>(p));
839  return lref.value_committed();
840  }
841 
842 #if __TBB_ETS_USE_CPP11
843  static void* create_local_by_move( internal::ets_base<ETS_key_type>& base, void* p ) {
844  enumerable_thread_specific& ets = static_cast<enumerable_thread_specific&>(base);
845  padded_element& lref = *ets.my_locals.grow_by(1);
846  new(lref.value()) T(std::move(*static_cast<T*>(p)));
847  return lref.value_committed();
848  }
849 #endif
850 
851  typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;
852 
853  // _size is in bytes
854  void* create_array(size_t _size) __TBB_override {
855  size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
856  return array_allocator_type().allocate(nelements);
857  }
858 
859  void free_array( void* _ptr, size_t _size) __TBB_override {
860  size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
861  array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
862  }
863 
864  public:
865 
866  //! Basic types
867  typedef Allocator allocator_type;
868  typedef T value_type;
869  typedef T& reference;
870  typedef const T& const_reference;
871  typedef T* pointer;
872  typedef const T* const_pointer;
873  typedef typename internal_collection_type::size_type size_type;
874  typedef typename internal_collection_type::difference_type difference_type;
875 
876  // Iterator types
877  typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
878  typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;
879 
880  // Parallel range types
881  typedef generic_range_type< iterator > range_type;
882  typedef generic_range_type< const_iterator > const_range_type;
883 
884  //! Default constructor.  Each local instance of T is default constructed.
885  enumerable_thread_specific() : my_construct_callback(
886  internal::callback_leaf<T,internal::construct_by_default<T> >::make(/*dummy argument*/0)
887  ){}
888 
889  //! Constructor with initializer functor.  Each local instance of T is constructed by T(finit()).
890  template <typename Finit
891 #if __TBB_ETS_USE_CPP11
892  , typename = typename internal::enable_if<internal::is_callable_no_args<typename internal::strip<Finit>::type>::value>::type
893 #endif
894  >
895  explicit enumerable_thread_specific( Finit finit ) : my_construct_callback(
896  internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( tbb::internal::move(finit) )
897  ){}
898 
899  //! Constructor with exemplar.  Each local instance of T is copy-constructed from the exemplar.
900  explicit enumerable_thread_specific( const T& exemplar ) : my_construct_callback(
901  internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar )
902  ){}
903 
904 #if __TBB_ETS_USE_CPP11
905  explicit enumerable_thread_specific( T&& exemplar ) : my_construct_callback(
906  internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( std::move(exemplar) )
907  ){}
908 
909  //! Variadic constructor with initializer arguments.  Each local instance of T is constructed by T(args...)
910  template <typename P1, typename... P,
911  typename = typename internal::enable_if<!internal::is_callable_no_args<typename internal::strip<P1>::type>::value
912  && !internal::is_compatible_ets<T, typename internal::strip<P1>::type>::value
913  && !internal::is_same_type<T, typename internal::strip<P1>::type>::value
914  >::type>
915  enumerable_thread_specific( P1&& arg1, P&& ... args ) : my_construct_callback(
916  internal::callback_leaf<T,internal::construct_by_args<T,P1,P...> >::make( std::forward<P1>(arg1), std::forward<P>(args)... )
917  ){}
918 #endif
919 
920  //! Destructor
921  ~enumerable_thread_specific() {
922  if(my_construct_callback) my_construct_callback->destroy();
923  // Deallocate the hash table before overridden free_array() becomes inaccessible
924  this->internal::ets_base<ETS_key_type>::table_clear();
925  }
926 
927  //! returns reference to local, discarding exists
928  reference local() {
929  bool exists;
930  return local(exists);
931  }
932 
933  //! Returns reference to calling thread's local copy, creating one if necessary.
934  reference local(bool& exists) {
935  void* ptr = this->table_lookup(exists);
936  return *(T*)ptr;
937  }
938 
939  //! Get the number of local copies
940  size_type size() const { return my_locals.size(); }
941 
942  //! true if there have been no local copies created
943  bool empty() const { return my_locals.empty(); }
944 
945  //! begin iterator
946  iterator begin() { return iterator( my_locals, 0 ); }
947  //! end iterator
948  iterator end() { return iterator(my_locals, my_locals.size() ); }
949 
950  //! begin const iterator
951  const_iterator begin() const { return const_iterator(my_locals, 0); }
952 
953  //! end const iterator
954  const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }
955 
956  //! Get range for parallel algorithms
957  range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); }
958 
959  //! Get const range for parallel algorithms
960  const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }
961 
963  void clear() {
964  my_locals.clear();
965  this->table_clear();
966  // callback is not destroyed
967  }
968 
969  private:
970 
971  template<typename A2, ets_key_usage_type C2>
972  void internal_copy( const enumerable_thread_specific<T, A2, C2>& other ) {
973 #if __TBB_ETS_USE_CPP11 && TBB_USE_ASSERT
974  // this tests is_compatible_ets
975  __TBB_STATIC_ASSERT( (internal::is_compatible_ets<T, typename internal::strip<decltype(other)>::type>::value), "is_compatible_ets fails" );
976 #endif
977  // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception.
978  my_construct_callback = other.my_construct_callback->clone();
979  __TBB_ASSERT(my_locals.size()==0,NULL);
980  my_locals.reserve(other.size());
981  this->table_elementwise_copy( other, create_local_by_copy );
982  }
983 
984  void internal_swap( enumerable_thread_specific& other ) {
985  using std::swap;
986  __TBB_ASSERT( this!=&other, NULL );
987  swap(my_construct_callback, other.my_construct_callback);
988  // concurrent_vector::swap() preserves storage space,
989  // so addresses to the vector kept in ETS hash table remain valid.
990  swap(my_locals, other.my_locals);
991  this->internal::ets_base<ETS_key_type>::table_swap(other);
992  }
993 
994 #if __TBB_ETS_USE_CPP11
995  template<typename A2, ets_key_usage_type C2>
996  void internal_move( enumerable_thread_specific<T, A2, C2>&& other ) {
997 #if TBB_USE_ASSERT
998  // this tests is_compatible_ets
999  __TBB_STATIC_ASSERT( (internal::is_compatible_ets<T, typename internal::strip<decltype(other)>::type>::value), "is_compatible_ets fails" );
1000 #endif
1001  my_construct_callback = other.my_construct_callback;
1002  other.my_construct_callback = NULL;
1003  __TBB_ASSERT(my_locals.size()==0,NULL);
1004  my_locals.reserve(other.size());
1005  this->table_elementwise_copy( other, create_local_by_move );
1006  }
1007 #endif
1008 
1009  public:
1010 
1011  enumerable_thread_specific( const enumerable_thread_specific& other )
1012  : internal::ets_base<ETS_key_type>() /* prevents GCC warnings with -Wextra */
1013  {
1014  internal_copy(other);
1015  }
1016 
1017  template<typename Alloc, ets_key_usage_type Cachetype>
1018  enumerable_thread_specific( const enumerable_thread_specific<T, Alloc, Cachetype>& other )
1019  {
1020  internal_copy(other);
1021  }
1022 
1023 #if __TBB_ETS_USE_CPP11
1024  enumerable_thread_specific( enumerable_thread_specific&& other ) : my_construct_callback()
1025  {
1026  internal_swap(other);
1027  }
1028 
1029  template<typename Alloc, ets_key_usage_type Cachetype>
1030  enumerable_thread_specific( enumerable_thread_specific<T, Alloc, Cachetype>&& other ) : my_construct_callback()
1031  {
1032  internal_move(std::move(other));
1033  }
1034 #endif
1035 
1036  enumerable_thread_specific& operator=( const enumerable_thread_specific& other )
1037  {
1038  if( this != &other ) {
1039  this->clear();
1040  my_construct_callback->destroy();
1041  internal_copy( other );
1042  }
1043  return *this;
1044  }
1045 
1046  template<typename Alloc, ets_key_usage_type Cachetype>
1047  enumerable_thread_specific& operator=( const enumerable_thread_specific<T, Alloc, Cachetype>& other )
1048  {
1049  __TBB_ASSERT( static_cast<void*>(this)!=static_cast<const void*>(&other), NULL ); // Objects of different types
1050  this->clear();
1051  my_construct_callback->destroy();
1052  internal_copy(other);
1053  return *this;
1054  }
1055 
1056 #if __TBB_ETS_USE_CPP11
1057  enumerable_thread_specific& operator=( enumerable_thread_specific&& other )
1058  {
1059  if( this != &other )
1060  internal_swap(other);
1061  return *this;
1062  }
1063 
1064  template<typename Alloc, ets_key_usage_type Cachetype>
1065  enumerable_thread_specific& operator=( enumerable_thread_specific<T, Alloc, Cachetype>&& other )
1066  {
1067  __TBB_ASSERT( static_cast<void*>(this)!=static_cast<const void*>(&other), NULL ); // Objects of different types
1068  this->clear();
1069  my_construct_callback->destroy();
1070  internal_move(std::move(other));
1071  return *this;
1072  }
1073 #endif
1074 
1075  // combine_func_t has signature T(T,T) or T(const T&, const T&)
1076  template <typename combine_func_t>
1077  T combine(combine_func_t f_combine) {
1078  if(begin() == end()) {
1079  internal::ets_element<T> location;
1080  my_construct_callback->construct(location.value());
1081  return *location.value_committed();
1082  }
1083  const_iterator ci = begin();
1084  T my_result = *ci;
1085  while(++ci != end())
1086  my_result = f_combine( my_result, *ci );
1087  return my_result;
1088  }
1089 
1090  // combine_func_t takes T by value or by [const] reference, and returns nothing
1091  template <typename combine_func_t>
1092  void combine_each(combine_func_t f_combine) {
1093  for(iterator ci = begin(); ci != end(); ++ci) {
1094  f_combine( *ci );
1095  }
1096  }
1097 
1098  }; // enumerable_thread_specific
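 // Usage sketch (illustrative only, not part of the original header), assuming
 // tbb/parallel_for.h and C++11 lambdas are available; the names data and n are
 // hypothetical, the member calls are those declared above.
 //
 //     tbb::enumerable_thread_specific<int> counters(0);        // exemplar-initialized
 //     tbb::parallel_for(0, n, [&](int i) {
 //         if( data[i] % 2 == 0 )
 //             ++counters.local();                              // lazily creates this thread's copy
 //     });
 //     int even_total = counters.combine(std::plus<int>());     // reduce over all local copies
 //     counters.clear();                                        // drop the local copies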
1099 
1100  template< typename Container >
1101  class flattened2d {
1102 
1103  // This intermediate typedef is to address issues with VC7.1 compilers
1104  typedef typename Container::value_type conval_type;
1105 
1106  public:
1107 
1108  //! Basic types
1109  typedef typename conval_type::size_type size_type;
1110  typedef typename conval_type::difference_type difference_type;
1111  typedef typename conval_type::allocator_type allocator_type;
1112  typedef typename conval_type::value_type value_type;
1113  typedef typename conval_type::reference reference;
1114  typedef typename conval_type::const_reference const_reference;
1115  typedef typename conval_type::pointer pointer;
1116  typedef typename conval_type::const_pointer const_pointer;
1117 
1118  typedef typename internal::segmented_iterator<Container, value_type> iterator;
1119  typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;
1120 
1121  flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) :
1122  my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }
1123 
1124  explicit flattened2d( const Container &c ) :
1125  my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }
1126 
1127  iterator begin() { return iterator(*my_container) = my_begin; }
1128  iterator end() { return iterator(*my_container) = my_end; }
1129  const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
1130  const_iterator end() const { return const_iterator(*my_container) = my_end; }
1131 
1132  size_type size() const {
1133  size_type tot_size = 0;
1134  for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
1135  tot_size += i->size();
1136  }
1137  return tot_size;
1138  }
1139 
1140  private:
1141 
1142  Container *my_container;
1143  typename Container::const_iterator my_begin;
1144  typename Container::const_iterator my_end;
1145 
1146  };
1147 
1148  template <typename Container>
1149  flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
1150  return flattened2d<Container>(c, b, e);
1151  }
1152 
1153  template <typename Container>
1154  flattened2d<Container> flatten2d(const Container &c) {
1155  return flattened2d<Container>(c);
1156  }
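 // Usage sketch (illustrative only, not part of the original header): flatten2d
 // provides a single begin()/end() view over every element of every thread-local
 // container. The vectors variable is hypothetical.
 //
 //     typedef tbb::enumerable_thread_specific< std::vector<int> > ets_vectors;
 //     ets_vectors vectors;
 //     // ... each thread appends to vectors.local() ...
 //     tbb::flattened2d<ets_vectors> flat = tbb::flatten2d(vectors);
 //     long sum = 0;
 //     for( tbb::flattened2d<ets_vectors>::iterator it = flat.begin(); it != flat.end(); ++it )
 //         sum += *it;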
1157 
1158 } // interface6
1159 
1160 namespace internal {
1161 using interface6::internal::segmented_iterator;
1162 }
1163 
1164 using interface6::enumerable_thread_specific;
1165 using interface6::flattened2d;
1166 using interface6::flatten2d;
1167 
1168 } // namespace tbb
1169 
1170 #include "internal/_warning_suppress_disable_notice.h"
1171 #undef __TBB_enumerable_thread_specific_H_include_area
1172 
1173 #endif
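 // A final illustrative sketch (not part of the original header), assuming C++11
 // support (__TBB_ETS_USE_CPP11): constructing each local copy with an initializer
 // functor and visiting every copy with combine_each. The open_log_for_this_thread
 // helper is hypothetical; <cstdio> is assumed for std::fclose.
 //
 //     tbb::enumerable_thread_specific<FILE*> logs( [] { return open_log_for_this_thread(); } );
 //     // ... worker threads write to logs.local() ...
 //     logs.combine_each( [](FILE* f) { if( f ) std::fclose(f); } );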