Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
tbb::internal::concurrent_vector_base_v3::helper Class Reference

Classes

struct  destroy_body
 
struct  init_body
 TODO: turn into lambda functions when available. More...
 
struct  safe_init_body
 
struct  segment_not_used_predicate
 

Public Member Functions

 helper (segment_t *segments, size_type fb, size_type esize, size_type index, size_type s, size_type f) throw ()
 
void first_segment () throw ()
 
void next_segment () throw ()
 
template<typename F >
size_type apply (const F &func)
 
segment_value_t get_segment_value (size_type index, bool wait)
 
 ~helper ()
 
void cleanup ()
 Out-of-line code that assists the destructor in infrequent cases. More...
 

Static Public Member Functions

static bool incompact_predicate (size_type size)
 
static size_type find_segment_end (const concurrent_vector_base_v3 &v)
 
static void assign_first_segment_if_necessary (concurrent_vector_base_v3 &v, segment_index_t k)
 Assign the first segment size. k is the index of the last segment to be allocated, not a count of segments. More...
 
static void * allocate_segment (concurrent_vector_base_v3 &v, size_type n)
 
template<typename argument_type >
static void publish_segment (segment_t &s, argument_type rhs)
 Publish segment so other threads can see it. More...
 
static size_type enable_segment (concurrent_vector_base_v3 &v, size_type k, size_type element_size, bool mark_as_not_used_on_failure=false)
 
static void extend_table_if_necessary (concurrent_vector_base_v3 &v, size_type k, size_type start)
 
static void extend_segment_table (concurrent_vector_base_v3 &v, size_type start)
 
static segment_t & acquire_segment (concurrent_vector_base_v3 &v, size_type index, size_type element_size, bool owner)
 

Public Attributes

segment_t * table
 
size_type first_block
 
size_type k
 
size_type sz
 
size_type start
 
size_type finish
 
size_type element_size
 

Static Public Attributes

static const size_type page_size = 4096
 memory page size More...
 

Additional Inherited Members

- Private Member Functions inherited from tbb::internal::no_assign
void operator= (const no_assign &)=delete
 
 no_assign (const no_assign &)=default
 
 no_assign ()=default
 

Detailed Description

Definition at line 40 of file concurrent_vector.cpp.

Constructor & Destructor Documentation

◆ helper()

tbb::internal::concurrent_vector_base_v3::helper::helper ( segment_t *  segments,
size_type  fb,
size_type  esize,
size_type  index,
size_type  s,
size_type  f 
)
throw ()
inline

◆ ~helper()

tbb::internal::concurrent_vector_base_v3::helper::~helper ( )
inline

Definition at line 160 of file concurrent_vector.cpp.

160  {
161  if( sz >= finish ) return; // the work is done correctly
162  cleanup();
163  }

References cleanup(), finish, and sz.


Member Function Documentation

◆ acquire_segment()

static segment_t& tbb::internal::concurrent_vector_base_v3::helper::acquire_segment ( concurrent_vector_base_v3 &  v,
size_type  index,
size_type  element_size,
bool  owner 
)
inline static

Definition at line 104 of file concurrent_vector.cpp.

104  {
105  segment_t &s = v.my_segment[index]; // TODO: pass v.my_segment as argument
106  if( s.load<acquire>() == segment_not_used() ) { // do not check for segment_allocation_failed state
107  if( owner ) {
108  enable_segment( v, index, element_size );
109  } else {
110  ITT_NOTIFY(sync_prepare, &s);
111  spin_wait_while(segment_not_used_predicate(s));
112  ITT_NOTIFY(sync_acquired, &s);
113  }
114  } else {
115  ITT_NOTIFY(sync_acquired, &s);
116  }
117  enforce_segment_allocated(s.load<relaxed>()); //it's hard to recover correctly after segment_allocation_failed state
118  return s;
119  }

References tbb::acquire, element_size, enable_segment(), tbb::internal::concurrent_vector_base_v3::enforce_segment_allocated, ITT_NOTIFY, tbb::internal::concurrent_vector_base_v3::my_segment, tbb::relaxed, s, and tbb::internal::spin_wait_while().

Referenced by tbb::internal::concurrent_vector_base_v3::internal_grow(), and tbb::internal::concurrent_vector_base_v3::internal_push_back().

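The owner/waiter split above is a common lock-free publication idiom: the thread that owns a slot allocates and publishes it, while every other thread spins until the published pointer becomes visible. A minimal standalone sketch of the same idea with std::atomic follows; the names Slot, make_payload and acquire_slot are illustrative and are not part of TBB.

    #include <atomic>
    #include <thread>

    struct Slot { std::atomic<void*> ptr{nullptr}; };

    // Illustrative payload factory; stands in for allocate_segment().
    void* make_payload() { return new int(42); }

    // The owner publishes the payload; every other thread spins until it appears.
    void* acquire_slot(Slot& s, bool owner) {
        void* p = s.ptr.load(std::memory_order_acquire);
        if (p == nullptr) {
            if (owner) {
                p = make_payload();
                s.ptr.store(p, std::memory_order_release);     // publish for the waiters
            } else {
                while ((p = s.ptr.load(std::memory_order_acquire)) == nullptr)
                    std::this_thread::yield();                  // spin-wait, as spin_wait_while does
            }
        }
        return p;
    }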

◆ allocate_segment()

static void* tbb::internal::concurrent_vector_base_v3::helper::allocate_segment ( concurrent_vector_base_v3 &  v,
size_type  n 
)
inline static

Definition at line 75 of file concurrent_vector.cpp.

75  {
76  void *ptr = v.vector_allocator_ptr(v, n);
77  if(!ptr) throw_exception(eid_bad_alloc); // check for bad allocation, throw exception
78  return ptr;
79  }

References tbb::internal::eid_bad_alloc, tbb::internal::throw_exception(), and tbb::internal::concurrent_vector_base_v3::vector_allocator_ptr.

Referenced by tbb::internal::concurrent_vector_base_v3::internal_compact().


◆ apply()

template<typename F >
size_type tbb::internal::concurrent_vector_base_v3::helper::apply ( const F &  func)
inline

Definition at line 141 of file concurrent_vector.cpp.

141  {
142  first_segment();
143  while( sz < finish ) { // work for more than one segment
144  //TODO: remove extra load() of table[k] inside func
145  func( table[k], table[k].load<relaxed>().pointer<char>() + element_size*start, sz - start );
146  next_segment();
147  }
148  func( table[k], table[k].load<relaxed>().pointer<char>() + element_size*start, finish - start );
149  return k;
150  }

References element_size, finish, first_segment(), k, next_segment(), start, sz, and table.

Referenced by tbb::internal::concurrent_vector_base_v3::internal_clear(), tbb::internal::concurrent_vector_base_v3::internal_compact(), tbb::internal::concurrent_vector_base_v3::internal_grow(), and tbb::internal::concurrent_vector_base_v3::internal_resize().

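apply() visits the covered index range one segment at a time and calls func(segment, pointer to the first covered element, element count) for each chunk, so a body only has to handle a contiguous run. A functor with the matching call shape might look like the sketch below; segment_t and size_type here are simplified stand-ins, not the internal types used by init_body and destroy_body.

    #include <cstddef>
    #include <cstring>

    // Simplified stand-ins so the sketch is self-contained.
    struct segment_t { void* array; };
    using size_type = std::size_t;

    // Zero-fills one contiguous chunk, receiving exactly what apply() passes:
    // the segment slot, a pointer to the first covered element, and a count.
    struct zero_fill_body {
        size_type element_size;
        void operator()(segment_t&, void* first, size_type n) const {
            std::memset(first, 0, n * element_size);
        }
    };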

◆ assign_first_segment_if_necessary()

static void tbb::internal::concurrent_vector_base_v3::helper::assign_first_segment_if_necessary ( concurrent_vector_base_v3 &  v,
segment_index_t  k 
)
inline static

Assign the first segment size. k is the index of the last segment to be allocated, not a count of segments.

Definition at line 60 of file concurrent_vector.cpp.

60  {
61  if( !v.my_first_block ) {
62  /* There was a suggestion to set first segment according to incompact_predicate:
63  while( k && !helper::incompact_predicate(segment_size( k ) * element_size) )
64  --k; // while previous vector size is compact, decrement
65  // reasons to not do it:
66  // * constructor(n) is not ready to accept fragmented segments
67  // * backward compatibility due to that constructor
68  // * current version gives additional guarantee and faster init.
69  // * two calls to reserve() will give the same effect.
70  */
71  v.my_first_block.compare_and_swap(k+1, 0); // store number of segments
72  }
73  }

References k, and tbb::internal::concurrent_vector_base_v3::my_first_block.

Referenced by tbb::internal::concurrent_vector_base_v3::internal_assign(), tbb::internal::concurrent_vector_base_v3::internal_copy(), tbb::internal::concurrent_vector_base_v3::internal_grow(), and tbb::internal::concurrent_vector_base_v3::internal_reserve().

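The compare_and_swap against 0 turns my_first_block into a write-once value: the first thread to reach this point stores k+1, and every later caller keeps the winner's value. A minimal sketch of the same first-writer-wins idiom with std::atomic; set_first_block_once is an illustrative name.

    #include <atomic>
    #include <cstddef>

    std::atomic<std::size_t> my_first_block{0};

    // Stores `blocks` only while the value is still 0; a losing thread simply
    // observes what the winner already published.
    void set_first_block_once(std::size_t blocks) {
        std::size_t expected = 0;
        my_first_block.compare_exchange_strong(expected, blocks);
        // On failure, `expected` now holds the previously stored value.
    }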

◆ cleanup()

void tbb::internal::concurrent_vector_base_v3::helper::cleanup ( )

Out-of-line code that assists the destructor in infrequent cases.

Definition at line 281 of file concurrent_vector.cpp.

281  {
282  if( !sz ) { // allocation failed, restore the table
283  segment_index_t k_start = k, k_end = segment_index_of(finish-1);
284  if( segment_base( k_start ) < start )
285  get_segment_value(k_start++, true); // wait
286  if( k_start < first_block ) {
287  segment_value_t segment0 = get_segment_value(0, start>0); // wait if necessary
288  if((segment0 != segment_not_used()) && !k_start ) ++k_start;
289  if(segment0 != segment_allocated())
290  for(; k_start < first_block && k_start <= k_end; ++k_start )
291  publish_segment(table[k_start], segment_allocation_failed());
292  else for(; k_start < first_block && k_start <= k_end; ++k_start )
293  publish_segment(table[k_start], static_cast<void*>(
294  (segment0.pointer<char>()) + segment_base(k_start)*element_size) );
295  }
296  for(; k_start <= k_end; ++k_start ) // not in first block
297  if(table[k_start].load<acquire>() == segment_not_used())
298  publish_segment(table[k_start], segment_allocation_failed());
299  // fill allocated items
300  first_segment();
301  goto recover;
302  }
303  while( sz <= finish ) { // there is still work for at least one segment
304  next_segment();
305 recover:
306  segment_value_t array = table[k].load<relaxed>();
307  if(array == segment_allocated())
308  std::memset( (array.pointer<char>()) + element_size*start, 0, ((sz<finish?sz:finish) - start)*element_size );
309  else __TBB_ASSERT( array == segment_allocation_failed(), NULL );
310  }
311 }

References __TBB_ASSERT, tbb::internal::concurrent_vector_base_v3::segment_t::load(), tbb::internal::concurrent_vector_base_v3::segment_value_t::pointer(), tbb::relaxed, tbb::internal::concurrent_vector_base_v3::segment_base(), and tbb::internal::concurrent_vector_base_v3::segment_index_of().

Referenced by ~helper().


◆ enable_segment()

concurrent_vector_base_v3::size_type tbb::internal::concurrent_vector_base_v3::helper::enable_segment ( concurrent_vector_base_v3 &  v,
concurrent_vector_base_v3::size_type  k,
concurrent_vector_base_v3::size_type  element_size,
bool  mark_as_not_used_on_failure = false 
)
static

Definition at line 222 of file concurrent_vector.cpp.

223  {
224 
225  struct segment_scope_guard : no_copy{
226  segment_t* my_segment_ptr;
227  bool my_mark_as_not_used;
228  segment_scope_guard(segment_t& segment, bool mark_as_not_used) : my_segment_ptr(&segment), my_mark_as_not_used(mark_as_not_used){}
229  void dismiss(){ my_segment_ptr = 0;}
230  ~segment_scope_guard(){
231  if (my_segment_ptr){
232  if (!my_mark_as_not_used){
233  publish_segment(*my_segment_ptr, segment_allocation_failed());
234  }else{
235  publish_segment(*my_segment_ptr, segment_not_used());
236  }
237  }
238  }
239  };
240 
241  segment_t* s = v.my_segment; // TODO: optimize out as argument? Optimize accesses to my_first_block
242  __TBB_ASSERT(s[k].load<relaxed>() != segment_allocated(), "concurrent operation during growth?");
243 
244  size_type size_of_enabled_segment = segment_size(k);
245  size_type size_to_allocate = size_of_enabled_segment;
246  if( !k ) {
247  assign_first_segment_if_necessary(v, default_initial_segments-1);
248  size_of_enabled_segment = 2 ;
249  size_to_allocate = segment_size(v.my_first_block);
250 
251  } else {
252  spin_wait_while_eq( v.my_first_block, segment_index_t(0) );
253  }
254 
255  if( k && (k < v.my_first_block)){ //no need to allocate anything
256  // s[0].array is changed only once ( 0 -> !0 ) and points to uninitialized memory
257  segment_value_t array0 = s[0].load<acquire>();
258  if(array0 == segment_not_used()){
259  // sync_prepare called only if there is a wait
260  ITT_NOTIFY(sync_prepare, &s[0]);
261  spin_wait_while( segment_not_used_predicate(s[0]));
262  array0 = s[0].load<acquire>();
263  }
264  ITT_NOTIFY(sync_acquired, &s[0]);
265 
266  segment_scope_guard k_segment_guard(s[k], false);
267  enforce_segment_allocated(array0); // initial segment should be allocated
268  k_segment_guard.dismiss();
269 
270  publish_segment( s[k],
271  static_cast<void*>(array0.pointer<char>() + segment_base(k)*element_size )
272  );
273  } else {
274  segment_scope_guard k_segment_guard(s[k], mark_as_not_used_on_failure);
275  publish_segment(s[k], allocate_segment(v, size_to_allocate));
276  k_segment_guard.dismiss();
277  }
278  return size_of_enabled_segment;
279 }

References __TBB_ASSERT, tbb::acquire, tbb::internal::concurrent_vector_base_v3::default_initial_segments, tbb::internal::concurrent_vector_base_v3::enforce_segment_allocated, ITT_NOTIFY, tbb::internal::concurrent_vector_base_v3::my_first_block, tbb::internal::concurrent_vector_base_v3::my_segment, tbb::internal::concurrent_vector_base_v3::segment_value_t::pointer(), s, tbb::internal::concurrent_vector_base_v3::segment_base(), tbb::internal::concurrent_vector_base_v3::segment_size(), tbb::internal::spin_wait_while(), and tbb::internal::spin_wait_while_eq().

Referenced by acquire_segment(), tbb::internal::concurrent_vector_base_v3::internal_assign(), tbb::internal::concurrent_vector_base_v3::internal_copy(), and tbb::internal::concurrent_vector_base_v3::internal_reserve().

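segment_scope_guard is a dismissible scope guard: if allocation or publication throws before dismiss() is called, the destructor publishes a failure marker so that threads spinning on the slot are released instead of waiting forever. A generic sketch of that pattern is shown below; failure_guard and the names in the usage comment are illustrative, not TBB types.

    #include <functional>
    #include <utility>

    // Runs `on_fail` during unwinding unless dismiss() was called, mirroring how
    // segment_scope_guard publishes segment_allocation_failed() on an error path.
    class failure_guard {
        std::function<void()> on_fail_;
        bool armed_ = true;
    public:
        explicit failure_guard(std::function<void()> on_fail)
            : on_fail_(std::move(on_fail)) {}
        void dismiss() { armed_ = false; }
        ~failure_guard() { if (armed_) on_fail_(); }
    };

    // Hypothetical usage:
    //   failure_guard guard([&]{ mark_slot_failed(slot); });
    //   publish(slot, allocate(n));   // may throw
    //   guard.dismiss();              // reached only on success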

◆ extend_segment_table()

void tbb::internal::concurrent_vector_base_v3::helper::extend_segment_table ( concurrent_vector_base_v3 &  v,
concurrent_vector_base_v3::size_type  start 
)
static

Definition at line 196 of file concurrent_vector.cpp.

196  {
197  if( start > segment_size(pointers_per_short_table) ) start = segment_size(pointers_per_short_table);
198  // If other threads are trying to set pointers in the short segment, wait for them to finish their
199  // assignments before we copy the short segment to the long segment. Note: grow_to_at_least depends on it
200  for( segment_index_t i = 0; segment_base(i) < start && v.my_segment == v.my_storage; i++ ){
201  if(v.my_storage[i].load<relaxed>() == segment_not_used()) {
202  ITT_NOTIFY(sync_prepare, &v.my_storage[i]);
203  atomic_backoff backoff(true);
204  while( v.my_segment == v.my_storage && (v.my_storage[i].load<relaxed>() == segment_not_used()) )
205  backoff.pause();
206  ITT_NOTIFY(sync_acquired, &v.my_storage[i]);
207  }
208  }
209  if( v.my_segment != v.my_storage ) return;
210 
211  segment_t* new_segment_table = (segment_t*)NFS_Allocate( pointers_per_long_table, sizeof(segment_t), NULL );
212  __TBB_ASSERT(new_segment_table, "NFS_Allocate should throws exception if it cannot allocate the requested storage, and not returns zero pointer" );
213  std::uninitialized_fill_n(new_segment_table,size_t(pointers_per_long_table),segment_t()); //init newly allocated table
214  //TODO: replace with static assert
215  __TBB_STATIC_ASSERT(pointers_per_long_table >= pointers_per_short_table, "size of the big table should be not lesser than of the small one, as we copy values to it" );
216  std::copy(v.my_storage, v.my_storage+pointers_per_short_table, new_segment_table);//copy values from old table, here operator= of segment_t is used
217  if( v.my_segment.compare_and_swap( new_segment_table, v.my_storage ) != v.my_storage )
218  NFS_Free( new_segment_table );
219  // else TODO: add ITT_NOTIFY signals for v.my_segment?
220 }

References __TBB_ASSERT, __TBB_STATIC_ASSERT, ITT_NOTIFY, tbb::internal::concurrent_vector_base_v3::segment_t::load(), tbb::internal::concurrent_vector_base_v3::my_segment, tbb::internal::concurrent_vector_base_v3::my_storage, tbb::internal::NFS_Allocate(), tbb::internal::NFS_Free(), tbb::internal::atomic_backoff::pause(), tbb::internal::concurrent_vector_base_v3::pointers_per_long_table, tbb::internal::concurrent_vector_base_v3::pointers_per_short_table, tbb::relaxed, tbb::internal::concurrent_vector_base_v3::segment_base(), tbb::internal::concurrent_vector_base_v3::segment_size(), and start.

Referenced by extend_table_if_necessary().

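Growing the table follows a standard lock-free publication pattern: build the larger table privately, copy the already-published entries into it, install it with a single compare-and-swap, and free the copy if another thread won the race. A simplified sketch with std::atomic; grow_table and the fixed sizes are illustrative, not the TBB constants, and the wait loop the real code performs before copying is omitted.

    #include <algorithm>
    #include <atomic>
    #include <cstddef>

    constexpr std::size_t kShort = 3, kLong = 64;      // illustrative table sizes

    struct Vector {
        void* short_table[kShort] = {};
        std::atomic<void**> table{short_table};        // starts as the embedded table
    };

    // Replaces the embedded short table with a heap-allocated long one, once.
    void grow_table(Vector& v) {
        void** expected = v.short_table;
        if (v.table.load(std::memory_order_acquire) != expected) return;  // already grown
        void** big = new void*[kLong]();                                  // zero-initialized
        std::copy(expected, expected + kShort, big);                      // carry over published slots
        if (!v.table.compare_exchange_strong(expected, big))
            delete[] big;                                                 // another thread won the race
    }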

◆ extend_table_if_necessary()

static void tbb::internal::concurrent_vector_base_v3::helper::extend_table_if_necessary ( concurrent_vector_base_v3 &  v,
size_type  k,
size_type  start 
)
inline static

Definition at line 92 of file concurrent_vector.cpp.

92  {
93  if(k >= pointers_per_short_table && v.my_segment == v.my_storage)
94  extend_segment_table(v, start );
95  }

References extend_segment_table(), k, tbb::internal::concurrent_vector_base_v3::my_segment, tbb::internal::concurrent_vector_base_v3::my_storage, tbb::internal::concurrent_vector_base_v3::pointers_per_short_table, and start.

Referenced by tbb::internal::concurrent_vector_base_v3::internal_assign(), tbb::internal::concurrent_vector_base_v3::internal_copy(), tbb::internal::concurrent_vector_base_v3::internal_grow(), tbb::internal::concurrent_vector_base_v3::internal_push_back(), and tbb::internal::concurrent_vector_base_v3::internal_reserve().


◆ find_segment_end()

static size_type tbb::internal::concurrent_vector_base_v3::helper::find_segment_end ( const concurrent_vector_base_v3 &  v)
inline static

Definition at line 49 of file concurrent_vector.cpp.

49  {
50  segment_t *s = v.my_segment;
51  segment_index_t u = s==v.my_storage? pointers_per_short_table : pointers_per_long_table;
52  segment_index_t k = 0;
53  while( k < u && (s[k].load<relaxed>()==segment_allocated() ))
54  ++k;
55  return k;
56  }

References k, tbb::internal::concurrent_vector_base_v3::my_segment, tbb::internal::concurrent_vector_base_v3::my_storage, tbb::internal::concurrent_vector_base_v3::pointers_per_long_table, tbb::internal::concurrent_vector_base_v3::pointers_per_short_table, and s.

Referenced by tbb::internal::concurrent_vector_base_v3::internal_capacity(), tbb::internal::concurrent_vector_base_v3::internal_clear(), tbb::internal::concurrent_vector_base_v3::internal_compact(), and tbb::internal::concurrent_vector_base_v3::internal_reserve().


◆ first_segment()

void tbb::internal::concurrent_vector_base_v3::helper::first_segment ( )
throw ()
inline

Definition at line 126 of file concurrent_vector.cpp.

126  {
127  __TBB_ASSERT( start <= finish, NULL );
128  __TBB_ASSERT( first_block || !finish, NULL );
129  if( k < first_block ) k = 0; // process solid segment at a time
130  size_type base = segment_base( k );
131  __TBB_ASSERT( base <= start, NULL );
132  finish -= base; start -= base; // rebase as offsets from segment k
133  sz = k ? base : segment_size( first_block ); // sz==base for k>0
134  }

References __TBB_ASSERT, finish, first_block, k, tbb::internal::concurrent_vector_base_v3::segment_base(), tbb::internal::concurrent_vector_base_v3::segment_size(), start, and sz.

Referenced by apply().

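The rebasing of start and finish above relies on the power-of-two segment geometry used throughout concurrent_vector: segment 0 holds two elements, and segment k >= 1 holds 2^k elements starting at index 2^k. The standalone sketch below reproduces that geometry for illustration only; the real segment_base(), segment_size() and segment_index_of() live in the TBB headers.

    #include <cassert>
    #include <cstddef>

    // Standalone illustrations of the segment geometry (not the library symbols).
    std::size_t seg_base(std::size_t k) { return (std::size_t(1) << k) & ~std::size_t(1); }
    std::size_t seg_size(std::size_t k) { return k ? std::size_t(1) << k : 2; }
    std::size_t seg_index_of(std::size_t i) {
        std::size_t k = 0;
        for (std::size_t v = i | 1; v > 1; v >>= 1) ++k;   // floor(log2(i|1))
        return k;
    }

    int main() {
        assert(seg_base(0) == 0 && seg_size(0) == 2);      // segment 0: indices 0..1
        assert(seg_base(1) == 2 && seg_size(1) == 2);      // segment 1: indices 2..3
        assert(seg_base(3) == 8 && seg_size(3) == 8);      // segment 3: indices 8..15
        assert(seg_index_of(0) == 0 && seg_index_of(5) == 2 && seg_index_of(9) == 3);
        return 0;
    }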

◆ get_segment_value()

segment_value_t tbb::internal::concurrent_vector_base_v3::helper::get_segment_value ( size_type  index,
bool  wait 
)
inline

Definition at line 151 of file concurrent_vector.cpp.

151  {
152  segment_t &s = table[index];
153  if( wait && (s.load<acquire>() == segment_not_used()) ) {
154  ITT_NOTIFY(sync_prepare, &s);
155  spin_wait_while(segment_not_used_predicate(s));
156  ITT_NOTIFY(sync_acquired, &s);
157  }
158  return s.load<relaxed>();
159  }

References tbb::acquire, ITT_NOTIFY, tbb::relaxed, s, tbb::internal::spin_wait_while(), and table.


◆ incompact_predicate()

static bool tbb::internal::concurrent_vector_base_v3::helper::incompact_predicate ( size_type  size)
inline static

Definition at line 45 of file concurrent_vector.cpp.

45  { // assert size != 0, see source/test/test_vector_layout.cpp
46  return size < page_size || ((size-1)%page_size < page_size/2 && size < page_size * 128); // for more details
47  }

References page_size, and size.

Referenced by tbb::internal::concurrent_vector_base_v3::internal_compact().

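Spelled out, the predicate returns true when the size is below one page, or when the final page is no more than half filled and the whole block stays under 128 pages. The short check below evaluates a few concrete sizes with page_size = 4096; incompact() is a standalone copy of the expression above, not the library symbol.

    #include <cassert>
    #include <cstddef>

    constexpr std::size_t page_size = 4096;

    // Standalone copy of the predicate, for illustration only.
    bool incompact(std::size_t size) {
        return size < page_size
            || ((size - 1) % page_size < page_size / 2 && size < page_size * 128);
    }

    int main() {
        assert(incompact(3000));                 // below one page
        assert(incompact(5000));                 // last page holds only 904 bytes
        assert(!incompact(8192));                // last page is completely filled
        assert(!incompact(page_size * 130 + 1)); // small tail, but above the 128-page cap
        return 0;
    }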

◆ next_segment()

void tbb::internal::concurrent_vector_base_v3::helper::next_segment ( )
throw ()
inline

Definition at line 135 of file concurrent_vector.cpp.

References finish, first_block, k, tbb::internal::concurrent_vector_base_v3::segment_size(), start, and sz.

Referenced by apply().


◆ publish_segment()

template<typename argument_type >
static void tbb::internal::concurrent_vector_base_v3::helper::publish_segment ( segment_t &  s,
argument_type  rhs 
)
inline static

Publish segment so other threads can see it.

Definition at line 83 of file concurrent_vector.cpp.

83  {
84  // see also itt_store_pointer_with_release_v3()
85  ITT_NOTIFY( sync_releasing, &s );
86  s.store<release>(rhs);
87  }

References ITT_NOTIFY, tbb::release, s, and sync_releasing.
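
publish_segment() is the producer half of a release/acquire handshake: the pointer is stored with release ordering, so a consumer that reads it with acquire ordering (as get_segment_value() and acquire_segment() do) also sees the memory it points to. A minimal standalone sketch of that handshake; publish() and consume() are illustrative names.

    #include <atomic>
    #include <cassert>
    #include <thread>

    std::atomic<int*> slot{nullptr};

    void publish() {
        int* p = new int(7);                          // prepare the data first
        slot.store(p, std::memory_order_release);     // then publish the pointer
    }

    int consume() {
        int* p;
        while ((p = slot.load(std::memory_order_acquire)) == nullptr)
            std::this_thread::yield();                // spin until the pointer appears
        return *p;                                    // safe: acquire pairs with the release store
    }

    int main() {
        std::thread producer(publish);
        int value = consume();
        producer.join();
        assert(value == 7);
        delete slot.load();
        return 0;
    }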

Member Data Documentation

◆ element_size

size_type tbb::internal::concurrent_vector_base_v3::helper::element_size

Definition at line 123 of file concurrent_vector.cpp.

Referenced by acquire_segment(), and apply().

◆ finish

size_type tbb::internal::concurrent_vector_base_v3::helper::finish

Definition at line 123 of file concurrent_vector.cpp.

Referenced by apply(), first_segment(), next_segment(), and ~helper().

◆ first_block

size_type tbb::internal::concurrent_vector_base_v3::helper::first_block

◆ k

size_type tbb::internal::concurrent_vector_base_v3::helper::k

◆ page_size

const size_type tbb::internal::concurrent_vector_base_v3::helper::page_size = 4096
static

memory page size

Definition at line 43 of file concurrent_vector.cpp.

Referenced by incompact_predicate().

◆ start

size_type tbb::internal::concurrent_vector_base_v3::helper::start

◆ sz

size_type tbb::internal::concurrent_vector_base_v3::helper::sz

Definition at line 123 of file concurrent_vector.cpp.

Referenced by apply(), first_segment(), next_segment(), and ~helper().

◆ table

segment_t* tbb::internal::concurrent_vector_base_v3::helper::table

Definition at line 122 of file concurrent_vector.cpp.

Referenced by apply(), and get_segment_value().


The documentation for this class was generated from the following file:
concurrent_vector.cpp

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.