Intel(R) Threading Building Blocks Doxygen Documentation, version 4.2.3
concurrent_vector.h
1 /*
2  Copyright (c) 2005-2020 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_concurrent_vector_H
18 #define __TBB_concurrent_vector_H
19 
20 #define __TBB_concurrent_vector_H_include_area
21 #include "internal/_warning_suppress_enable_notice.h"
22 
23 #include "tbb_stddef.h"
24 #include "tbb_exception.h"
25 #include "atomic.h"
26 #include "cache_aligned_allocator.h"
27 #include "blocked_range.h"
28 #include "tbb_machine.h"
29 #include "tbb_profiling.h"
30 #include <new>
31 #include <cstring> // for memset()
32 #include __TBB_STD_SWAP_HEADER
33 #include <algorithm>
34 #include <iterator>
35 
36 #include "internal/_allocator_traits.h"
37 
38 #if _MSC_VER==1500 && !__INTEL_COMPILER
39  // VS2008/VC9 seems to have an issue; limits pull in math.h
40  #pragma warning( push )
41  #pragma warning( disable: 4985 )
42 #endif
43 #include <limits> /* std::numeric_limits */
44 #if _MSC_VER==1500 && !__INTEL_COMPILER
45  #pragma warning( pop )
46 #endif
47 
48 #if __TBB_INITIALIZER_LISTS_PRESENT
49  #include <initializer_list>
50 #endif
51 
52 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
53  // Workaround for overzealous compiler warnings in /Wp64 mode
54  #pragma warning (push)
55 #if defined(_Wp64)
56  #pragma warning (disable: 4267)
57 #endif
58  #pragma warning (disable: 4127) //warning C4127: conditional expression is constant
59 #endif
60 
61 namespace tbb {
62 
63 template<typename T, class A = cache_aligned_allocator<T> >
64 class concurrent_vector;
65 
67 namespace internal {
68 
69  template<typename Container, typename Value>
70  class vector_iterator;
71 
72  //! Bad allocation marker
73  static void *const vector_allocation_error_flag = reinterpret_cast<void*>(size_t(63));
74 
75  //! Exception helper function
76  template<typename T>
77  void handle_unconstructed_elements(T* array, size_t n_of_elements){
78  std::memset( static_cast<void*>(array), 0, n_of_elements * sizeof( T ) );
79  }
80 
81  //! Base class of concurrent vector implementation.
82 
83  class concurrent_vector_base_v3 {
84  protected:
85 
86  // Basic types declarations
87  typedef size_t segment_index_t;
88  typedef size_t size_type;
89 
90  // Using enumerations due to Mac linking problems of static const variables
91  enum {
92  // Size constants
93  default_initial_segments = 1, // 2 initial items
94  //! Number of slots for segment pointers inside the class
95  pointers_per_short_table = 3, // to fit into 8 words of entire structure
96  pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit
97  };
98 
99  struct segment_not_used {};
100  struct segment_allocated {};
101  struct segment_allocation_failed {};
102 
103  class segment_t;
104  class segment_value_t {
105  void* array;
106  private:
107  //TODO: More elegant way to grant access to selected functions _only_?
108  friend class segment_t;
109  explicit segment_value_t(void* an_array):array(an_array) {}
110  public:
111  friend bool operator==(segment_value_t const& lhs, segment_not_used ) { return lhs.array == 0;}
112  friend bool operator==(segment_value_t const& lhs, segment_allocation_failed ) { return lhs.array == vector_allocation_error_flag;}
113  friend bool operator==(segment_value_t const& lhs, segment_allocated ) { return lhs.array > vector_allocation_error_flag;}
114  template<typename argument_type>
115  friend bool operator!=(segment_value_t const& lhs, argument_type arg) { return ! (lhs == arg);}
116 
117  template<typename T>
118  T* pointer() const { return static_cast<T*>(const_cast<void*>(array)); }
119  };
120 
121  friend void enforce_segment_allocated(segment_value_t const& s, internal::exception_id exception = eid_bad_last_alloc){
122  if(s != segment_allocated()){
123  internal::throw_exception(exception);
124  }
125  }
126 
127  // Segment pointer.
128  class segment_t {
129  atomic<void*> array;
130  public:
131  segment_t(){ store<relaxed>(segment_not_used());}
132  //Copy ctor and assignment operator are defined to ease the use of STL algorithms.
133  //These algorithms are usually not a synchronization point, so the semantics here
134  //are intentionally relaxed.
135  segment_t(segment_t const& rhs ){ array.store<relaxed>(rhs.array.load<relaxed>());}
136 
137  void swap(segment_t & rhs ){
138  tbb::internal::swap<relaxed>(array, rhs.array);
139  }
140 
141  segment_t& operator=( segment_t const& rhs ){
142  array.store<relaxed>(rhs.array.load<relaxed>());
143  return *this;
144  }
145 
146  template<memory_semantics M>
147  segment_value_t load() const { return segment_value_t(array.load<M>());}
148 
149  template<memory_semantics M>
150  void store(segment_not_used) __TBB_NOEXCEPT(true){
151  array.store<M>(0);
152  }
153 
154  template<memory_semantics M>
155  void store(segment_allocation_failed) __TBB_NOEXCEPT(true){
156  __TBB_ASSERT(load<relaxed>() != segment_allocated(),"transition from \"allocated\" to \"allocation failed\" state looks non-logical");
157  array.store<M>(vector_allocation_error_flag);
158  }
159 
160  template<memory_semantics M>
161  void store(void* allocated_segment_pointer) __TBB_NOEXCEPT(true) {
162  __TBB_ASSERT(segment_value_t(allocated_segment_pointer) == segment_allocated(),
163  "other overloads of store should be used for marking segment as not_used or allocation_failed" );
164  array.store<M>(allocated_segment_pointer);
165  }
166 
167 #if TBB_USE_ASSERT
168  ~segment_t() {
169  __TBB_ASSERT(load<relaxed>() != segment_allocated(), "should have been freed by clear" );
170  }
171 #endif /* TBB_USE_ASSERT */
172  };
173  friend void swap(segment_t & , segment_t & ) __TBB_NOEXCEPT(true);
174 
175  // Data fields
176 
177  //! allocator function pointer
178  void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t);
179 
180  //! count of segments in the first block
181  atomic<size_type> my_first_block;
182 
183  //! Requested size of vector
184  atomic<size_type> my_early_size;
185 
186  //! Pointer to the segments table
187  atomic<segment_t*> my_segment;
188 
189  //! embedded storage of segment pointers
190  segment_t my_storage[pointers_per_short_table];
191 
192  // Methods
193 
194  concurrent_vector_base_v3() {
195  //Here the semantics are intentionally relaxed.
196  //The reason is the following:
197  //An object that is in the middle of construction (i.e. its constructor has not yet finished)
198  //cannot be used concurrently until the construction is finished.
199  //Thus, to signal other threads that construction is finished, synchronization with
200  //acquire-release semantics should be done by the (external) code that uses the vector.
201  //So there is no need to do the synchronization inside the vector.
202 
203  my_early_size.store<relaxed>(0);
204  my_first_block.store<relaxed>(0); // deliberately 0, not default_initial_segments
205  my_segment.store<relaxed>(my_storage);
206  }
207 
208  __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3();
209 
210  //these helper methods rely on the fact that segments are allocated so
211  //that every segment size is an (increasing) power of 2,
212  //with one exception: segment 0 has size 2, the same as segment 1;
213  //e.g. the segment with index 3 has size 2^3=8;
214  static segment_index_t segment_index_of( size_type index ) {
215  return segment_index_t( __TBB_Log2( index|1 ) );
216  }
217 
218  static segment_index_t segment_base( segment_index_t k ) {
219  return (segment_index_t(1)<<k & ~segment_index_t(1));
220  }
221 
222  static segment_index_t segment_base_index_of( segment_index_t &index ) {
223  segment_index_t k = segment_index_of( index );
224  index -= segment_base(k);
225  return k;
226  }
227 
228  static size_type segment_size( segment_index_t k ) {
229  return segment_index_t(1)<<k; // fake value for k==0
230  }
231 
232 
233  static bool is_first_element_in_segment(size_type element_index){
234  //check if element_index is a power of 2 that is at least 2.
235  //The idea is to detect if the iterator crosses a segment boundary,
236  //and 2 is the minimal index for which it's true
237  __TBB_ASSERT(element_index, "there should be no need to call "
238  "is_first_element_in_segment for 0th element" );
239  return is_power_of_two_at_least( element_index, 2 );
240  }
241 
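// Worked example of the index arithmetic above (assuming __TBB_Log2(x) returns floor(log2(x))):
//   for element index 5:  segment_index_of(5) == __TBB_Log2(5|1) == 2,
//                         segment_base(2)     == (1<<2) & ~1     == 4  (segment 2 starts at element 4),
//                         segment_size(2)     == 1<<2            == 4  (segment 2 spans elements 4..7),
//   so element 5 lives at offset 1 within segment 2, which is what segment_base_index_of() computes;
//   elements 0..3 occupy segments 0 and 1 (two elements each, the "first block" exception noted above).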
242  //! An operation on an n-element array starting at begin.
243  typedef void (__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n );
244 
245  //! An operation on n-element destination array and n-element source array.
246  typedef void (__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n );
247 
248  //! Internal structure for compact()
249  struct internal_segments_table {
250  segment_index_t first_block;
251  segment_t table[pointers_per_long_table];
252  };
253 
254  void __TBB_EXPORTED_METHOD internal_reserve( size_type n, size_type element_size, size_type max_size );
255  size_type __TBB_EXPORTED_METHOD internal_capacity() const;
256  void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src );
257  size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src );
258  void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index );
259  segment_index_t __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy );
260  void* __TBB_EXPORTED_METHOD internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy );
261  void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy );
262  void __TBB_EXPORTED_METHOD internal_assign( const concurrent_vector_base_v3& src, size_type element_size,
263  internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy );
264  //! Obsolete
265  void __TBB_EXPORTED_METHOD internal_throw_exception(size_type) const;
266  void __TBB_EXPORTED_METHOD internal_swap(concurrent_vector_base_v3& v);
267 
268  void __TBB_EXPORTED_METHOD internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
269  internal_array_op1 destroy, internal_array_op2 init );
270  size_type __TBB_EXPORTED_METHOD internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
271 
272  //! Deprecated entry point for backwards compatibility to TBB 2.1.
273  void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src );
274 private:
276  class helper;
277  friend class helper;
278 
279  template<typename Container, typename Value>
280  friend class vector_iterator;
281 
282  };
283 
284  inline void swap(concurrent_vector_base_v3::segment_t & lhs, concurrent_vector_base_v3::segment_t & rhs) __TBB_NOEXCEPT(true) {
285  lhs.swap(rhs);
286  }
287 
288  typedef concurrent_vector_base_v3 concurrent_vector_base;
289 
291 
293  template<typename Container, typename Value>
294  class vector_iterator
295  {
297  Container* my_vector;
298 
300  size_t my_index;
301 
303 
304  mutable Value* my_item;
305 
306  template<typename C, typename T>
307  friend vector_iterator<C,T> operator+( ptrdiff_t offset, const vector_iterator<C,T>& v );
308 
309  template<typename C, typename T, typename U>
310  friend bool operator==( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
311 
312  template<typename C, typename T, typename U>
313  friend bool operator<( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
314 
315  template<typename C, typename T, typename U>
316  friend ptrdiff_t operator-( const vector_iterator<C,T>& i, const vector_iterator<C,U>& j );
317 
318  template<typename C, typename U>
319  friend class internal::vector_iterator;
320 
321 #if !__TBB_TEMPLATE_FRIENDS_BROKEN
322  template<typename T, class A>
323  friend class tbb::concurrent_vector;
324 #else
325 public:
326 #endif
327 
328  vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) :
329  my_vector(const_cast<Container*>(&vector)),
330  my_index(index),
331  my_item(static_cast<Value*>(ptr))
332  {}
333 
334  public:
336  vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {}
337 
338  vector_iterator( const vector_iterator<Container,typename Container::value_type>& other ) :
339  my_vector(other.my_vector),
340  my_index(other.my_index),
341  my_item(other.my_item)
342  {}
343 
344  vector_iterator& operator=( const vector_iterator<Container,typename Container::value_type>& other )
345  {
346  my_vector=other.my_vector;
347  my_index=other.my_index;
348  my_item=other.my_item;
349  return *this;
350  }
351 
352  vector_iterator operator+( ptrdiff_t offset ) const {
353  return vector_iterator( *my_vector, my_index+offset );
354  }
355  vector_iterator &operator+=( ptrdiff_t offset ) {
356  my_index+=offset;
357  my_item = NULL;
358  return *this;
359  }
360  vector_iterator operator-( ptrdiff_t offset ) const {
361  return vector_iterator( *my_vector, my_index-offset );
362  }
363  vector_iterator &operator-=( ptrdiff_t offset ) {
364  my_index-=offset;
365  my_item = NULL;
366  return *this;
367  }
368  Value& operator*() const {
369  Value* item = my_item;
370  if( !item ) {
371  item = my_item = &my_vector->internal_subscript(my_index);
372  }
373  __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" );
374  return *item;
375  }
376  Value& operator[]( ptrdiff_t k ) const {
377  return my_vector->internal_subscript(my_index+k);
378  }
379  Value* operator->() const {return &operator*();}
380 
381  //! Pre increment
382  vector_iterator& operator++() {
383  size_t element_index = ++my_index;
384  if( my_item ) {
385  //TODO: consider using knowledge about the "first_block optimization" here as well?
386  if( concurrent_vector_base::is_first_element_in_segment(element_index)) {
387  //if the iterator crosses a segment boundary, the cached pointer becomes invalid,
388  //as the next segment may be in another memory location
389  my_item= NULL;
390  } else {
391  ++my_item;
392  }
393  }
394  return *this;
395  }
396 
397  //! Pre decrement
398  vector_iterator& operator--() {
399  __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" );
400  size_t element_index = my_index--;
401  if( my_item ) {
402  if( concurrent_vector_base::is_first_element_in_segment(element_index)) {
403  //if the iterator crosses a segment boundary, the cached pointer becomes invalid,
404  //as the next segment may be in another memory location
405  my_item= NULL;
406  } else {
407  --my_item;
408  }
409  }
410  return *this;
411  }
412 
413  //! Post increment
414  vector_iterator operator++(int) {
415  vector_iterator result = *this;
416  operator++();
417  return result;
418  }
419 
420  //! Post decrement
421  vector_iterator operator--(int) {
422  vector_iterator result = *this;
423  operator--();
424  return result;
425  }
426 
427  // STL support
428 
429  typedef ptrdiff_t difference_type;
430  typedef Value value_type;
431  typedef Value* pointer;
432  typedef Value& reference;
433  typedef std::random_access_iterator_tag iterator_category;
434  };
435 
436  template<typename Container, typename T>
437  vector_iterator<Container,T> operator+( ptrdiff_t offset, const vector_iterator<Container,T>& v ) {
438  return vector_iterator<Container,T>( *v.my_vector, v.my_index+offset );
439  }
440 
441  template<typename Container, typename T, typename U>
442  bool operator==( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
443  return i.my_index==j.my_index && i.my_vector == j.my_vector;
444  }
445 
446  template<typename Container, typename T, typename U>
447  bool operator!=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
448  return !(i==j);
449  }
450 
451  template<typename Container, typename T, typename U>
452  bool operator<( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
453  return i.my_index<j.my_index;
454  }
455 
456  template<typename Container, typename T, typename U>
457  bool operator>( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
458  return j<i;
459  }
460 
461  template<typename Container, typename T, typename U>
462  bool operator>=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
463  return !(i<j);
464  }
465 
466  template<typename Container, typename T, typename U>
467  bool operator<=( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
468  return !(j<i);
469  }
470 
471  template<typename Container, typename T, typename U>
472  ptrdiff_t operator-( const vector_iterator<Container,T>& i, const vector_iterator<Container,U>& j ) {
473  return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index);
474  }
475 
476  template<typename T, class A>
477  class allocator_base {
478  public:
479  typedef typename tbb::internal::allocator_rebind<A, T>::type allocator_type;
480  allocator_type my_allocator;
481  allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {}
482  };
483 
484 } // namespace internal
486 
488 
549 template<typename T, class A>
550 class concurrent_vector: protected internal::allocator_base<T, A>,
551  public internal::concurrent_vector_base {
552 private:
553  template<typename I>
554  class generic_range_type: public blocked_range<I> {
555  public:
556  typedef T value_type;
557  typedef T& reference;
558  typedef const T& const_reference;
559  typedef I iterator;
560  typedef ptrdiff_t difference_type;
561  generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
562  template<typename U>
563  generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
564  generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
565  };
566 
567  template<typename C, typename U>
568  friend class internal::vector_iterator;
569 
570 public:
571  //------------------------------------------------------------------------
572  // STL compatible types
573  //------------------------------------------------------------------------
574  typedef internal::concurrent_vector_base_v3::size_type size_type;
575  typedef typename internal::allocator_base<T, A>::allocator_type allocator_type;
576 
577  typedef T value_type;
578  typedef ptrdiff_t difference_type;
579  typedef T& reference;
580  typedef const T& const_reference;
581  typedef T *pointer;
582  typedef const T *const_pointer;
583 
584  typedef internal::vector_iterator<concurrent_vector,T> iterator;
585  typedef internal::vector_iterator<concurrent_vector,const T> const_iterator;
586 
587 #if !defined(_MSC_VER) || _CPPLIB_VER>=300
588  // Assume ISO standard definition of std::reverse_iterator
589  typedef std::reverse_iterator<iterator> reverse_iterator;
590  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
591 #else
592  // Use non-standard std::reverse_iterator
593  typedef std::reverse_iterator<iterator,T,T&,T*> reverse_iterator;
594  typedef std::reverse_iterator<const_iterator,T,const T&,const T*> const_reverse_iterator;
595 #endif /* defined(_MSC_VER) && (_MSC_VER<1300) */
596 
597  //------------------------------------------------------------------------
598  // Parallel algorithm support
599  //------------------------------------------------------------------------
600  typedef generic_range_type<iterator> range_type;
601  typedef generic_range_type<const_iterator> const_range_type;
602 
603  //------------------------------------------------------------------------
604  // STL compatible constructors & destructors
605  //------------------------------------------------------------------------
606 
607  //! Construct empty vector.
608  explicit concurrent_vector(const allocator_type &a = allocator_type())
609  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
610  {
611  vector_allocator_ptr = &internal_allocator;
612  }
613 
614  //Constructors are not required to have synchronization
615  //(for more details see comment in the concurrent_vector_base constructor).
616 #if __TBB_INITIALIZER_LISTS_PRESENT
617  concurrent_vector(std::initializer_list<T> init_list, const allocator_type &a = allocator_type())
618  : internal::allocator_base<T, A>(a),
619  internal::concurrent_vector_base()
620  {
621  vector_allocator_ptr = &internal_allocator;
622  __TBB_TRY {
623  internal_assign_iterators(init_list.begin(), init_list.end());
624  } __TBB_CATCH(...) {
625  segment_t *table = my_segment.load<relaxed>();
626  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
627  __TBB_RETHROW();
628  }
629 
630  }
631 #endif //# __TBB_INITIALIZER_LISTS_PRESENT
632 
633  //! Copying constructor
634  concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() )
635  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
636  {
637  vector_allocator_ptr = &internal_allocator;
638  __TBB_TRY {
639  internal_copy(vector, sizeof(T), &copy_array);
640  } __TBB_CATCH(...) {
641  segment_t *table = my_segment.load<relaxed>();
642  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
643  __TBB_RETHROW();
644  }
645  }
646 
647 #if __TBB_CPP11_RVALUE_REF_PRESENT
648  //TODO add __TBB_NOEXCEPT(true) and static_assert(std::has_nothrow_move_constructor<A>::value)
649  //! Move constructor
650  concurrent_vector( concurrent_vector&& source)
651  : internal::allocator_base<T, A>(std::move(source)), internal::concurrent_vector_base()
652  {
653  vector_allocator_ptr = &internal_allocator;
654  concurrent_vector_base_v3::internal_swap(source);
655  }
656 
657  concurrent_vector( concurrent_vector&& source, const allocator_type& a)
658  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
659  {
660  vector_allocator_ptr = &internal_allocator;
661  //The C++ standard requires allocator instances to be comparable for equality,
662  //which means that memory allocated by one instance may be deallocated through another equal one.
663  if (a == source.my_allocator) {
664  concurrent_vector_base_v3::internal_swap(source);
665  } else {
666  __TBB_TRY {
667  internal_copy(source, sizeof(T), &move_array);
668  } __TBB_CATCH(...) {
669  segment_t *table = my_segment.load<relaxed>();
670  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
671  __TBB_RETHROW();
672  }
673  }
674  }
675 
676 #endif
677 
678  //! Copying constructor for vector with different allocator type
679  template<class M>
680  __TBB_DEPRECATED concurrent_vector( const concurrent_vector<T, M>& vector, const allocator_type& a = allocator_type() )
681  : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
682  {
683  vector_allocator_ptr = &internal_allocator;
684  __TBB_TRY {
685  internal_copy(vector.internal_vector_base(), sizeof(T), &copy_array);
686  } __TBB_CATCH(...) {
687  segment_t *table = my_segment.load<relaxed>();
688  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
689  __TBB_RETHROW();
690  }
691  }
692 
693  //! Construction with initial size specified by argument n
694  explicit concurrent_vector(size_type n)
695  {
696  vector_allocator_ptr = &internal_allocator;
697  __TBB_TRY {
698  internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
699  } __TBB_CATCH(...) {
700  segment_t *table = my_segment.load<relaxed>();
701  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
702  __TBB_RETHROW();
703  }
704  }
705 
706  //! Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance
707  concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type())
708  : internal::allocator_base<T, A>(a)
709  {
710  vector_allocator_ptr = &internal_allocator;
711  __TBB_TRY {
712  internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
713  } __TBB_CATCH(...) {
714  segment_t *table = my_segment.load<relaxed>();
715  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
716  __TBB_RETHROW();
717  }
718  }
719 
720  //! Construction with copying iteration range and given allocator instance
721  template<class I>
722  concurrent_vector(I first, I last, const allocator_type &a = allocator_type())
723  : internal::allocator_base<T, A>(a)
724  {
725  vector_allocator_ptr = &internal_allocator;
726  __TBB_TRY {
727  internal_assign_range(first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
728  } __TBB_CATCH(...) {
729  segment_t *table = my_segment.load<relaxed>();
730  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
731  __TBB_RETHROW();
732  }
733  }
734 
735  //! Assignment
736  concurrent_vector& operator=( const concurrent_vector& vector ) {
737  if( this != &vector )
738  internal_assign(vector, sizeof(T), &destroy_array, &assign_array, &copy_array);
739  return *this;
740  }
741 
742 #if __TBB_CPP11_RVALUE_REF_PRESENT
743  //TODO: add __TBB_NOEXCEPT()
744  //! Move assignment
745  concurrent_vector& operator=( concurrent_vector&& other ) {
746  __TBB_ASSERT(this != &other, "Move assignment to itself is prohibited ");
747  typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_move_assignment pocma_t;
748  if(pocma_t::value || this->my_allocator == other.my_allocator) {
749  concurrent_vector trash (std::move(*this));
750  internal_swap(other);
751  tbb::internal::allocator_move_assignment(this->my_allocator, other.my_allocator, pocma_t());
752  } else {
753  internal_assign(other, sizeof(T), &destroy_array, &move_assign_array, &move_array);
754  }
755  return *this;
756  }
757 #endif
758  //TODO: add a templated assignment operator (i.e. with a different element type)?
759 
760  //! Assignment for vector with different allocator type
761  template<class M>
762  __TBB_DEPRECATED concurrent_vector& operator=( const concurrent_vector<T, M>& vector ) {
763  if( static_cast<void*>( this ) != static_cast<const void*>( &vector ) )
764  internal_assign(vector.internal_vector_base(),
765  sizeof(T), &destroy_array, &assign_array, &copy_array);
766  return *this;
767  }
768 
769 #if __TBB_INITIALIZER_LISTS_PRESENT
770  concurrent_vector& operator=( std::initializer_list<T> init_list ) {
772  internal_clear(&destroy_array);
773  internal_assign_iterators(init_list.begin(), init_list.end());
774  return *this;
775  }
776 #endif //#if __TBB_INITIALIZER_LISTS_PRESENT
777 
778  //------------------------------------------------------------------------
779  // Concurrent operations
780  //------------------------------------------------------------------------
782 
783  iterator grow_by( size_type delta ) {
784  return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size.load());
785  }
786 
788 
789  iterator grow_by( size_type delta, const_reference t ) {
790  return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast<const void*>(&t) ) : my_early_size.load());
791  }
792 
794  template<typename I>
795  iterator grow_by( I first, I last ) {
796  typename std::iterator_traits<I>::difference_type delta = std::distance(first, last);
797  __TBB_ASSERT( delta >= 0, NULL);
798 
799  return iterator(*this, delta ? internal_grow_by(delta, sizeof(T), &copy_range<I>, static_cast<const void*>(&first)) : my_early_size.load());
800  }
801 
802 #if __TBB_INITIALIZER_LISTS_PRESENT
803 
804  iterator grow_by( std::initializer_list<T> init_list ) {
805  return grow_by( init_list.begin(), init_list.end() );
806  }
807 #endif //#if __TBB_INITIALIZER_LISTS_PRESENT
808 
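// Usage sketch (illustrative): grow_by() may be called by many threads at once; each call
// atomically reserves its own contiguous block of indices and returns an iterator to the
// first element it appended, e.g.
//   tbb::concurrent_vector<record> log;                       // 'record' and 'batch' are hypothetical
//   iterator first_new = log.grow_by(batch.size());
//   std::copy(batch.begin(), batch.end(), first_new);         // fill only the block this thread owns
// Elements appended by other threads may still be under construction when the call returns.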
810 
814  iterator grow_to_at_least( size_type n ) {
815  size_type m=0;
816  if( n ) {
817  m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL );
818  if( m>n ) m=n;
819  }
820  return iterator(*this, m);
821  };
822 
825  iterator grow_to_at_least( size_type n, const_reference t ) {
826  size_type m=0;
827  if( n ) {
828  m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array_by, &t );
829  if( m>n ) m=n;
830  }
831  return iterator(*this, m);
832  };
833 
835 
836  iterator push_back( const_reference item )
837  {
838  push_back_helper prolog(*this);
839  new(prolog.internal_push_back_result()) T(item);
840  return prolog.return_iterator_and_dismiss();
841  }
842 
843 #if __TBB_CPP11_RVALUE_REF_PRESENT
844 
845  //! Push item, move-aware
846  iterator push_back( T&& item )
847  {
848  push_back_helper prolog(*this);
849  new(prolog.internal_push_back_result()) T(std::move(item));
850  return prolog.return_iterator_and_dismiss();
851  }
852 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
853 
854  //! Push item, create item "in place" with provided arguments
855  template<typename... Args>
856  iterator emplace_back( Args&&... args )
857  {
858  push_back_helper prolog(*this);
859  new(prolog.internal_push_back_result()) T(std::forward<Args>(args)...);
860  return prolog.return_iterator_and_dismiss();
861  }
862 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
863 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
864 
867  reference operator[]( size_type index ) {
868  return internal_subscript(index);
869  }
870 
871  //! Get const reference to element at given index.
872  const_reference operator[]( size_type index ) const {
873  return internal_subscript(index);
874  }
875 
877  reference at( size_type index ) {
878  return internal_subscript_with_exceptions(index);
879  }
880 
882  const_reference at( size_type index ) const {
883  return internal_subscript_with_exceptions(index);
884  }
885 
887  range_type range( size_t grainsize = 1 ) {
888  return range_type( begin(), end(), grainsize );
889  }
890 
892  const_range_type range( size_t grainsize = 1 ) const {
893  return const_range_type( begin(), end(), grainsize );
894  }
895 
896  //------------------------------------------------------------------------
897  // Capacity
898  //------------------------------------------------------------------------
900  size_type size() const {
901  size_type sz = my_early_size, cp = internal_capacity();
902  return cp < sz ? cp : sz;
903  }
904 
906  bool empty() const {return !my_early_size;}
907 
908  //! Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value.
909  size_type capacity() const {return internal_capacity();}
910 
912 
914  void reserve( size_type n ) {
915  if( n )
916  internal_reserve(n, sizeof(T), max_size());
917  }
918 
920  void resize( size_type n ) {
921  internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array );
922  }
923 
924  //! Resize the vector, copy t for new elements. Not thread-safe.
925  void resize( size_type n, const_reference t ) {
926  internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
927  }
928 
930  void shrink_to_fit();
931 
933  size_type max_size() const {return (~size_type(0))/sizeof(T);}
934 
935  //------------------------------------------------------------------------
936  // STL support
937  //------------------------------------------------------------------------
938 
940  iterator begin() {return iterator(*this,0);}
942  iterator end() {return iterator(*this,size());}
944  const_iterator begin() const {return const_iterator(*this,0);}
946  const_iterator end() const {return const_iterator(*this,size());}
948  const_iterator cbegin() const {return const_iterator(*this,0);}
950  const_iterator cend() const {return const_iterator(*this,size());}
951  //! reverse start iterator
952  reverse_iterator rbegin() {return reverse_iterator(end());}
953  //! reverse end iterator
954  reverse_iterator rend() {return reverse_iterator(begin());}
955  //! reverse start const iterator
956  const_reverse_iterator rbegin() const {return const_reverse_iterator(end());}
957  //! reverse end const iterator
958  const_reverse_iterator rend() const {return const_reverse_iterator(begin());}
959  //! reverse start const iterator
960  const_reverse_iterator crbegin() const {return const_reverse_iterator(end());}
961  //! reverse end const iterator
962  const_reverse_iterator crend() const {return const_reverse_iterator(begin());}
963  //! the first item
964  reference front() {
965  __TBB_ASSERT( size()>0, NULL);
966  const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
967  return (segment_value.template pointer<T>())[0];
968  }
969  //! the first item const
970  const_reference front() const {
971  __TBB_ASSERT( size()>0, NULL);
972  const segment_value_t& segment_value = my_segment[0].template load<relaxed>();
973  return (segment_value.template pointer<const T>())[0];
974  }
975  //! the last item
976  reference back() {
977  __TBB_ASSERT( size()>0, NULL);
978  return internal_subscript( size()-1 );
979  }
980  //! the last item const
981  const_reference back() const {
982  __TBB_ASSERT( size()>0, NULL);
983  return internal_subscript( size()-1 );
984  }
986  allocator_type get_allocator() const { return this->my_allocator; }
987 
989  void assign(size_type n, const_reference t) {
990  clear();
991  internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(&t), &destroy_array, &initialize_array_by );
992  }
993 
995  template<class I>
996  void assign(I first, I last) {
997  clear(); internal_assign_range( first, last, static_cast<is_integer_tag<std::numeric_limits<I>::is_integer> *>(0) );
998  }
999 
1000 #if __TBB_INITIALIZER_LISTS_PRESENT
1001  void assign(std::initializer_list<T> init_list) {
1003  clear(); internal_assign_iterators( init_list.begin(), init_list.end());
1004  }
1005 #endif //# __TBB_INITIALIZER_LISTS_PRESENT
1006 
1008  void swap(concurrent_vector &vector) {
1009  typedef typename tbb::internal::allocator_traits<A>::propagate_on_container_swap pocs_t;
1010  if( this != &vector && (this->my_allocator == vector.my_allocator || pocs_t::value) ) {
1011  concurrent_vector_base_v3::internal_swap(static_cast<concurrent_vector_base_v3&>(vector));
1012  tbb::internal::allocator_swap(this->my_allocator, vector.my_allocator, pocs_t());
1013  }
1014  }
1015 
1017 
1018  void clear() {
1019  internal_clear(&destroy_array);
1020  }
1021 
1022  //! Clear and destroy vector.
1023  ~concurrent_vector() {
1024  segment_t *table = my_segment.load<relaxed>();
1025  internal_free_segments( table, internal_clear(&destroy_array), my_first_block.load<relaxed>());
1026  // the base class destructor is called after this
1027  }
1028 
1030 private:
1031  //! Allocate k items
1032  static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) {
1033  return static_cast<concurrent_vector<T, A>&>(vb).my_allocator.allocate(k);
1034  }
1036  void internal_free_segments(segment_t table[], segment_index_t k, segment_index_t first_block);
1037 
1039  T& internal_subscript( size_type index ) const;
1040 
1041  //! Get reference to element at given index with errors checks
1042  T& internal_subscript_with_exceptions( size_type index ) const;
1043 
1044  //! assign n items by copying t
1045  void internal_assign_n(size_type n, const_pointer p) {
1046  internal_resize( n, sizeof(T), max_size(), static_cast<const void*>(p), &destroy_array, p? &initialize_array_by : &initialize_array );
1047  }
1048 
1050  /* Functions declarations:
1051  * void foo(is_integer_tag<true>*);
1052  * void foo(is_integer_tag<false>*);
1053  * Usage example:
1054  * foo(static_cast<is_integer_tag<std::numeric_limits<T>::is_integer>*>(0));
1055  */
1056  template<bool B> class is_integer_tag;
1057 
1059  template<class I>
1060  void internal_assign_range(I first, I last, is_integer_tag<true> *) {
1061  internal_assign_n(static_cast<size_type>(first), &static_cast<T&>(last));
1062  }
1064  template<class I>
1065  void internal_assign_range(I first, I last, is_integer_tag<false> *) {
1066  internal_assign_iterators(first, last);
1067  }
1069  template<class I>
1070  void internal_assign_iterators(I first, I last);
1071 
1072  //these functions are marked __TBB_EXPORTED_FUNC as they are called from within the library
1073 
1075  static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n );
1076 
1078  static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n );
1079 
1081  static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n );
1082 
1083 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1084  static void __TBB_EXPORTED_FUNC move_array_if_noexcept( void* dst, const void* src, size_type n );
1086 #endif //__TBB_MOVE_IF_NO_EXCEPT_PRESENT
1087 
1088 #if __TBB_CPP11_RVALUE_REF_PRESENT
1089  static void __TBB_EXPORTED_FUNC move_array( void* dst, const void* src, size_type n );
1091 
1093  static void __TBB_EXPORTED_FUNC move_assign_array( void* dst, const void* src, size_type n );
1094 #endif
1095  template<typename Iterator>
1097  static void __TBB_EXPORTED_FUNC copy_range( void* dst, const void* p_type_erased_iterator, size_type n );
1098 
1100  static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n );
1101 
1103  static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n );
1104 
1105  //! Exception-aware helper class for filling a segment by exception-danger operators of user class
1106  class internal_loop_guide : internal::no_copy {
1107  public:
1108  T* const array;
1109  const size_type n;
1110  size_type i;
1111 
1112  static const T* as_const_pointer(const void *ptr) { return static_cast<const T *>(ptr); }
1113  static T* as_pointer(const void *src) { return static_cast<T*>(const_cast<void *>(src)); }
1114 
1115  internal_loop_guide(size_type ntrials, void *ptr)
1116  : array(as_pointer(ptr)), n(ntrials), i(0) {}
1117  void init() { for(; i < n; ++i) new( &array[i] ) T(); }
1118  void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*as_const_pointer(src)); }
1119  void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(as_const_pointer(src)[i]); }
1120  void assign(const void *src) { for(; i < n; ++i) array[i] = as_const_pointer(src)[i]; }
1121 #if __TBB_CPP11_RVALUE_REF_PRESENT
1122  void move_assign(const void *src) { for(; i < n; ++i) array[i] = std::move(as_pointer(src)[i]); }
1123  void move_construct(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move(as_pointer(src)[i]) ); }
1124 #endif
1125 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1126  void move_construct_if_noexcept(const void *src) { for(; i < n; ++i) new( &array[i] ) T( std::move_if_noexcept(as_pointer(src)[i]) ); }
1127 #endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
1128 
1129  //TODO: rename to construct_range
1130  template<class I> void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); }
1131  ~internal_loop_guide() {
1132  if(i < n) {// if an exception was raised, fill the rest of items with zeros
1133  internal::handle_unconstructed_elements(array+i, n-i);
1134  }
1135  }
1136  };
1137 
1138  struct push_back_helper : internal::no_copy{
1139  struct element_construction_guard : internal::no_copy{
1140  pointer element;
1141 
1142  element_construction_guard(pointer an_element) : element (an_element){}
1143  void dismiss(){ element = NULL; }
1144  ~element_construction_guard(){
1145  if (element){
1146  internal::handle_unconstructed_elements(element, 1);
1147  }
1148  }
1149  };
1150 
1151  concurrent_vector & v;
1152  size_type k;
1153  element_construction_guard g;
1154 
1155  push_back_helper(concurrent_vector & vector) :
1156  v(vector),
1157  g (static_cast<T*>(v.internal_push_back(sizeof(T),k)))
1158  {}
1159 
1160  pointer internal_push_back_result(){ return g.element;}
1161  iterator return_iterator_and_dismiss(){
1162  pointer ptr = g.element;
1163  g.dismiss();
1164  return iterator(v, k, ptr);
1165  }
1166  };
1167 };
1168 
1169 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1170 // Deduction guide for the constructor from two iterators
1171 template<typename I,
1172  typename T = typename std::iterator_traits<I>::value_type,
1173  typename A = cache_aligned_allocator<T>
1174 > concurrent_vector(I, I, const A& = A())
1175 -> concurrent_vector<T, A>;
1176 
1177 // Deduction guide for the constructor from a vector and allocator
1178 template<typename T, typename A1, typename A2>
1179 concurrent_vector(const concurrent_vector<T, A1> &, const A2 &)
1180 -> concurrent_vector<T, A2>;
1181 
1182 // Deduction guide for the constructor from an initializer_list
1183 template<typename T, typename A = cache_aligned_allocator<T>
1184 > concurrent_vector(std::initializer_list<T>, const A& = A())
1185 -> concurrent_vector<T, A>;
1186 #endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
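// Deduction guides in action (C++17, illustrative):
//   std::vector<int> src{1, 2, 3};
//   tbb::concurrent_vector cv1(src.begin(), src.end());   // deduces concurrent_vector<int> via the iterator guide
//   tbb::concurrent_vector cv2{1.0, 2.5};                 // deduces concurrent_vector<double> via the initializer_list guide
//   tbb::concurrent_vector cv3(cv2, cv2.get_allocator()); // keeps double, allocator type taken from the second argument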
1187 
1188 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1189 #pragma warning (push)
1190 #pragma warning (disable: 4701) // potentially uninitialized local variable "old"
1191 #endif
1192 template<typename T, class A>
1193 void concurrent_vector<T, A>::shrink_to_fit() {
1194  internal_segments_table old;
1195  __TBB_TRY {
1196  internal_array_op2 copy_or_move_array =
1197 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1198  &move_array_if_noexcept
1199 #else
1200  &copy_array
1201 #endif
1202  ;
1203  if( internal_compact( sizeof(T), &old, &destroy_array, copy_or_move_array ) )
1204  internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments
1205  } __TBB_CATCH(...) {
1206  if( old.first_block ) // free segment allocated for compacting. Only for support of exceptions in ctor of user T[ype]
1207  internal_free_segments( old.table, 1, old.first_block );
1208  __TBB_RETHROW();
1209  }
1210 }
1211 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1212 #pragma warning (pop)
1213 #endif // warning 4701 is back
1214 
1215 template<typename T, class A>
1216 void concurrent_vector<T, A>::internal_free_segments( segment_t table[], segment_index_t k, segment_index_t first_block ) {
1217  // Free the arrays
1218  while( k > first_block ) {
1219  --k;
1220  segment_value_t segment_value = table[k].load<relaxed>();
1221  table[k].store<relaxed>(segment_not_used());
1222  if( segment_value == segment_allocated() ) // check for correct segment pointer
1223  this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(k) );
1224  }
1225  segment_value_t segment_value = table[0].load<relaxed>();
1226  if( segment_value == segment_allocated() ) {
1227  __TBB_ASSERT( first_block > 0, NULL );
1228  while(k > 0) table[--k].store<relaxed>(segment_not_used());
1229  this->my_allocator.deallocate( (segment_value.pointer<T>()), segment_size(first_block) );
1230  }
1231 }
1232 
1233 template<typename T, class A>
1234 T& concurrent_vector<T, A>::internal_subscript( size_type index ) const {
1235  //TODO: unify both versions of internal_subscript
1236  __TBB_ASSERT( index < my_early_size, "index out of bounds" );
1237  size_type j = index;
1238  segment_index_t k = segment_base_index_of( j );
1239  __TBB_ASSERT( my_segment.load<acquire>() != my_storage || k < pointers_per_short_table, "index is being allocated" );
1240  //no need in load with acquire (load<acquire>) since thread works in own space or gets
1241  //the information about added elements via some form of external synchronization
1242  //TODO: why not make a load of my_segment relaxed as well ?
1243  //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
1244  segment_value_t segment_value = my_segment[k].template load<relaxed>();
1245  __TBB_ASSERT( segment_value != segment_allocation_failed(), "the instance is broken by bad allocation. Use at() instead" );
1246  __TBB_ASSERT( segment_value != segment_not_used(), "index is being allocated" );
1247  return (( segment_value.pointer<T>()))[j];
1248 }
1249 
1250 template<typename T, class A>
1251 T& concurrent_vector<T, A>::internal_subscript_with_exceptions( size_type index ) const {
1252  if( index >= my_early_size )
1253  internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range
1254  size_type j = index;
1255  segment_index_t k = segment_base_index_of( j );
1256  //TODO: refactor this condition into separate helper function, e.g. fits_into_small_table
1257  if( my_segment.load<acquire>() == my_storage && k >= pointers_per_short_table )
1258  internal::throw_exception(internal::eid_index_range_error); // throw std::range_error
1259  // no need in load with acquire (load<acquire>) since thread works in own space or gets
1260  //the information about added elements via some form of external synchronization
1261  //TODO: why not make a load of my_segment relaxed as well ?
1262  //TODO: add an assertion that my_segment[k] is properly aligned to please ITT
1263  segment_value_t segment_value = my_segment[k].template load<relaxed>();
1264  enforce_segment_allocated(segment_value, internal::eid_index_range_error);
1265  return (segment_value.pointer<T>())[j];
1266 }
1267 
1268 template<typename T, class A> template<class I>
1269 void concurrent_vector<T, A>::internal_assign_iterators(I first, I last) {
1270  __TBB_ASSERT(my_early_size == 0, NULL);
1271  size_type n = std::distance(first, last);
1272  if( !n ) return;
1273  internal_reserve(n, sizeof(T), max_size());
1274  my_early_size = n;
1275  segment_index_t k = 0;
1276  //TODO: unify segment iteration code with concurrent_base_v3::helper
1277  size_type sz = segment_size( my_first_block );
1278  while( sz < n ) {
1279  internal_loop_guide loop(sz, my_segment[k].template load<relaxed>().template pointer<void>());
1280  loop.iterate(first);
1281  n -= sz;
1282  if( !k ) k = my_first_block;
1283  else { ++k; sz <<= 1; }
1284  }
1285  internal_loop_guide loop(n, my_segment[k].template load<relaxed>().template pointer<void>());
1286  loop.iterate(first);
1287 }
1288 
1289 template<typename T, class A>
1290 void concurrent_vector<T, A>::initialize_array( void* begin, const void *, size_type n ) {
1291  internal_loop_guide loop(n, begin); loop.init();
1292 }
1293 
1294 template<typename T, class A>
1295 void concurrent_vector<T, A>::initialize_array_by( void* begin, const void *src, size_type n ) {
1296  internal_loop_guide loop(n, begin); loop.init(src);
1297 }
1298 
1299 template<typename T, class A>
1300 void concurrent_vector<T, A>::copy_array( void* dst, const void* src, size_type n ) {
1301  internal_loop_guide loop(n, dst); loop.copy(src);
1302 }
1303 
1304 #if __TBB_CPP11_RVALUE_REF_PRESENT
1305 template<typename T, class A>
1306 void concurrent_vector<T, A>::move_array( void* dst, const void* src, size_type n ) {
1307  internal_loop_guide loop(n, dst); loop.move_construct(src);
1308 }
1309 template<typename T, class A>
1310 void concurrent_vector<T, A>::move_assign_array( void* dst, const void* src, size_type n ) {
1311  internal_loop_guide loop(n, dst); loop.move_assign(src);
1312 }
1313 #endif
1314 
1315 #if __TBB_MOVE_IF_NOEXCEPT_PRESENT
1316 template<typename T, class A>
1317 void concurrent_vector<T, A>::move_array_if_noexcept( void* dst, const void* src, size_type n ) {
1318  internal_loop_guide loop(n, dst); loop.move_construct_if_noexcept(src);
1319 }
1320 #endif //__TBB_MOVE_IF_NOEXCEPT_PRESENT
1321 
1322 template<typename T, class A>
1323 template<typename I>
1324 void concurrent_vector<T, A>::copy_range( void* dst, const void* p_type_erased_iterator, size_type n ){
1325  internal_loop_guide loop(n, dst);
1326  loop.iterate( *(static_cast<I*>(const_cast<void*>(p_type_erased_iterator))) );
1327 }
1328 
1329 template<typename T, class A>
1330 void concurrent_vector<T, A>::assign_array( void* dst, const void* src, size_type n ) {
1331  internal_loop_guide loop(n, dst); loop.assign(src);
1332 }
1333 
1334 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1335  // Workaround for overzealous compiler warning
1336  #pragma warning (push)
1337  #pragma warning (disable: 4189)
1338 #endif
1339 template<typename T, class A>
1340 void concurrent_vector<T, A>::destroy_array( void* begin, size_type n ) {
1341  T* array = static_cast<T*>(begin);
1342  for( size_type j=n; j>0; --j )
1343  array[j-1].~T(); // destructors are supposed to not throw any exceptions
1344 }
1345 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1346  #pragma warning (pop)
1347 #endif // warning 4189 is back
1348 
1349 // concurrent_vector's template functions
1350 template<typename T, class A1, class A2>
1351 bool operator==(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b) {
1352  //TODO: call size() only once per vector (in operator==)
1353  // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin());
1354  if(a.size() != b.size()) return false;
1355  typename concurrent_vector<T, A1>::const_iterator i(a.begin());
1356  typename concurrent_vector<T, A2>::const_iterator j(b.begin());
1357  for(; i != a.end(); ++i, ++j)
1358  if( !(*i == *j) ) return false;
1359  return true;
1360 }
1361 
1362 template<typename T, class A1, class A2>
1363 bool operator!=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1364 { return !(a == b); }
1365 
1366 template<typename T, class A1, class A2>
1367 bool operator<(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1368 { return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); }
1369 
1370 template<typename T, class A1, class A2>
1371 bool operator>(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1372 { return b < a; }
1373 
1374 template<typename T, class A1, class A2>
1375 bool operator<=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1376 { return !(b < a); }
1377 
1378 template<typename T, class A1, class A2>
1379 bool operator>=(const concurrent_vector<T, A1> &a, const concurrent_vector<T, A2> &b)
1380 { return !(a < b); }
1381 
1382 template<typename T, class A>
1383 void swap(concurrent_vector<T, A> &a, concurrent_vector<T, A> &b)
1384 { a.swap( b ); }
1385 
1386 } // namespace tbb
1387 
1388 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1389  #pragma warning (pop)
1390 #endif // warning 4267,4127 are back
1391 
1392 
1393 #undef __TBB_concurrent_vector_H_include_area
1394 #include "internal/_warning_suppress_disable_notice.h"
1395 
1396 #endif /* __TBB_concurrent_vector_H */
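A minimal usage sketch of the container defined above (illustrative only; it assumes the usual TBB headers tbb/concurrent_vector.h and tbb/parallel_for.h and a C++11 compiler, and is not part of concurrent_vector.h):

#include <cstdio>
#include "tbb/concurrent_vector.h"
#include "tbb/parallel_for.h"

int main() {
    tbb::concurrent_vector<int> v;

    // push_back() is safe to call from many threads at once; parallel_for stands in
    // for an arbitrary set of concurrent producers here.
    tbb::parallel_for(0, 1000, [&v](int i) {
        v.push_back(i);                       // returns an iterator to the new element (ignored here)
    });

    // grow_by() appends a whole block and returns an iterator to its first element.
    tbb::concurrent_vector<int>::iterator block = v.grow_by(10);
    for (int k = 0; k < 10; ++k)
        block[k] = -k;

    // range() produces a blocked_range-compatible range for parallel traversal;
    // at() is the bounds-checked counterpart of operator[].
    tbb::parallel_for(v.range(), [](const tbb::concurrent_vector<int>::range_type& r) {
        for (tbb::concurrent_vector<int>::iterator it = r.begin(); it != r.end(); ++it)
            *it += 1;
    });

    std::printf("size=%zu first=%d\n", v.size(), v.at(0));
    return 0;
}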