Home ⌂Doc Index ◂Up ▴
Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
tbb::internal::concurrent_queue_base_v3 Class Reference [abstract]

For internal use only. More...

#include <_concurrent_queue_impl.h>

Inheritance diagram for tbb::internal::concurrent_queue_base_v3:
Collaboration diagram for tbb::internal::concurrent_queue_base_v3:

Classes

struct  padded_page
 
struct  page
 Prefix on a page. More...
 

Protected Types

enum  copy_specifics { copy, move }
 

Protected Member Functions

__TBB_EXPORTED_METHOD concurrent_queue_base_v3 (size_t item_size)
 
virtual __TBB_EXPORTED_METHOD ~concurrent_queue_base_v3 ()
 
void __TBB_EXPORTED_METHOD internal_push (const void *src)
 Enqueue item at tail of queue using copy operation. More...
 
void __TBB_EXPORTED_METHOD internal_pop (void *dst)
 Dequeue item from head of queue. More...
 
void __TBB_EXPORTED_METHOD internal_abort ()
 Abort all pending queue operations. More...
 
bool __TBB_EXPORTED_METHOD internal_push_if_not_full (const void *src)
 Attempt to enqueue item onto queue using copy operation. More...
 
bool __TBB_EXPORTED_METHOD internal_pop_if_present (void *dst)
 Attempt to dequeue item from queue. More...
 
ptrdiff_t __TBB_EXPORTED_METHOD internal_size () const
 Get size of queue. More...
 
bool __TBB_EXPORTED_METHOD internal_empty () const
 Check if the queue is empty. More...
 
void __TBB_EXPORTED_METHOD internal_set_capacity (ptrdiff_t capacity, size_t element_size)
 Set the queue capacity. More...
 
virtual page * allocate_page ()=0
 custom allocator More...
 
virtual void deallocate_page (page *p)=0
 custom de-allocator More...
 
void __TBB_EXPORTED_METHOD internal_finish_clear ()
 free any remaining pages More...
 
void __TBB_EXPORTED_METHOD internal_throw_exception () const
 throw an exception More...
 
void __TBB_EXPORTED_METHOD assign (const concurrent_queue_base_v3 &src)
 copy internal representation More...
 
void internal_swap (concurrent_queue_base_v3 &src)
 swap queues More...
 
void internal_insert_item (const void *src, copy_specifics op_type)
 Enqueues item at tail of queue using specified operation (copy or move) More...
 
bool internal_insert_if_not_full (const void *src, copy_specifics op_type)
 Attempts to enqueue at tail of queue using specified operation (copy or move) More...
 
void internal_assign (const concurrent_queue_base_v3 &src, copy_specifics op_type)
 Assigns one queue to another using specified operation (copy or move) More...
 

Protected Attributes

ptrdiff_t my_capacity
 Capacity of the queue. More...
 
size_t items_per_page
 Always a power of 2. More...
 
size_t item_size
 Size of an item. More...
 

Private Member Functions

virtual void copy_item (page &dst, size_t index, const void *src)=0
 
virtual void assign_and_destroy_item (void *dst, page &src, size_t index)=0
 
virtual void copy_page_item (page &dst, size_t dindex, const page &src, size_t sindex)=0
 
- Private Member Functions inherited from tbb::internal::no_copy
 no_copy (const no_copy &)=delete
 
 no_copy ()=default
 

Private Attributes

concurrent_queue_rep * my_rep
 Internal representation. More...
 

Friends

class concurrent_queue_rep
 
struct micro_queue
 
class micro_queue_pop_finalizer
 
class concurrent_queue_iterator_rep
 
class concurrent_queue_iterator_base_v3
 

Detailed Description

For internal use only.

Type-independent portion of concurrent_queue.

Definition at line 826 of file _concurrent_queue_impl.h.

Member Enumeration Documentation

◆ copy_specifics

Constructor & Destructor Documentation

◆ concurrent_queue_base_v3()

tbb::internal::concurrent_queue_base_v3::concurrent_queue_base_v3 ( size_t  item_size)
protected

Definition at line 337 of file concurrent_queue.cpp.

337  {
338  items_per_page = item_sz<= 8 ? 32 :
339  item_sz<= 16 ? 16 :
340  item_sz<= 32 ? 8 :
341  item_sz<= 64 ? 4 :
342  item_sz<=128 ? 2 :
343  1;
344  my_capacity = size_t(-1)/(item_sz>1 ? item_sz : 2);
345  my_rep = cache_aligned_allocator<concurrent_queue_rep>().allocate(1);
346  __TBB_ASSERT( is_aligned(my_rep, NFS_GetLineSize()), "alignment error" );
347  __TBB_ASSERT( is_aligned(&my_rep->head_counter, NFS_GetLineSize()), "alignment error" );
348  __TBB_ASSERT( is_aligned(&my_rep->tail_counter, NFS_GetLineSize()), "alignment error" );
349  __TBB_ASSERT( is_aligned(&my_rep->array, NFS_GetLineSize()), "alignment error" );
350  std::memset(static_cast<void*>(my_rep),0,sizeof(concurrent_queue_rep));
351  new ( &my_rep->items_avail ) concurrent_monitor();
352  new ( &my_rep->slots_avail ) concurrent_monitor();
353  this->item_size = item_sz;
354 }
bool is_aligned(T *pointer, uintptr_t alignment)
A function to check if passed in pointer is aligned on a specific border.
Definition: tbb_stddef.h:370
ptrdiff_t my_capacity
Capacity of the queue.
concurrent_queue_rep * my_rep
Internal representation.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
size_t __TBB_EXPORTED_FUNC NFS_GetLineSize()
Cache/sector line size.

References __TBB_ASSERT, tbb::cache_aligned_allocator< T >::allocate(), tbb::internal::concurrent_queue_rep::array, tbb::internal::concurrent_queue_rep::head_counter, tbb::internal::is_aligned(), item_size, tbb::internal::concurrent_queue_rep::items_avail, items_per_page, my_capacity, my_rep, tbb::internal::NFS_GetLineSize(), tbb::internal::concurrent_queue_rep::slots_avail, and tbb::internal::concurrent_queue_rep::tail_counter.

Here is the call graph for this function:

◆ ~concurrent_queue_base_v3()

tbb::internal::concurrent_queue_base_v3::~concurrent_queue_base_v3 ( )
protectedvirtual

Definition at line 356 of file concurrent_queue.cpp.

356  {
357  size_t nq = my_rep->n_queue;
358  for( size_t i=0; i<nq; i++ )
359  __TBB_ASSERT( my_rep->array[i].tail_page==NULL, "pages were not freed properly" );
360  cache_aligned_allocator<concurrent_queue_rep>().deallocate(my_rep,1);
361 }
concurrent_queue_rep * my_rep
Internal representation.
static const size_t n_queue
Must be power of 2.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165

References __TBB_ASSERT, tbb::internal::concurrent_queue_rep::array, tbb::cache_aligned_allocator< T >::deallocate(), my_rep, tbb::internal::concurrent_queue_rep::n_queue, and tbb::internal::micro_queue::tail_page.

Here is the call graph for this function:

Member Function Documentation

◆ allocate_page()

virtual page* tbb::internal::concurrent_queue_base_v3::allocate_page ( )
protectedpure virtual

custom allocator

Implemented in tbb::concurrent_bounded_queue< T, A >.

Referenced by tbb::internal::micro_queue::make_copy(), and tbb::internal::micro_queue::push().

Here is the caller graph for this function:

◆ assign()

void __TBB_EXPORTED_METHOD tbb::internal::concurrent_queue_base_v3::assign ( const concurrent_queue_base_v3 &  src)
protected

copy internal representation

Referenced by tbb::concurrent_bounded_queue< T, A >::concurrent_bounded_queue().

Here is the caller graph for this function:

◆ assign_and_destroy_item()

virtual void tbb::internal::concurrent_queue_base_v3::assign_and_destroy_item ( void *  dst,
page &  src,
size_t  index 
)
privatepure virtual

Implemented in tbb::concurrent_bounded_queue< T, A >.

Referenced by tbb::internal::micro_queue::pop().

Here is the caller graph for this function:

◆ copy_item()

virtual void tbb::internal::concurrent_queue_base_v3::copy_item ( page &  dst,
size_t  index,
const void *  src 
)
privatepure virtual

Implemented in tbb::concurrent_bounded_queue< T, A >.

Referenced by tbb::internal::micro_queue::push().

Here is the caller graph for this function:

◆ copy_page_item()

virtual void tbb::internal::concurrent_queue_base_v3::copy_page_item ( page &  dst,
size_t  dindex,
const page &  src,
size_t  sindex 
)
privatepure virtual

Implemented in tbb::concurrent_bounded_queue< T, A >.

Referenced by tbb::internal::micro_queue::make_copy().

Here is the caller graph for this function:

◆ deallocate_page()

virtual void tbb::internal::concurrent_queue_base_v3::deallocate_page ( page *  p)
protectedpure virtual

custom de-allocator

Implemented in tbb::concurrent_bounded_queue< T, A >.

Referenced by internal_finish_clear(), and tbb::internal::micro_queue_pop_finalizer::~micro_queue_pop_finalizer().

Here is the caller graph for this function:

◆ internal_abort()

void tbb::internal::concurrent_queue_base_v3::internal_abort ( )
protected

Abort all pending queue operations.

Definition at line 460 of file concurrent_queue.cpp.

460  {
461  concurrent_queue_rep& r = *my_rep;
462  ++r.abort_counter;
463  r.items_avail.abort_all();
464  r.slots_avail.abort_all();
465 }
concurrent_queue_rep * my_rep
Internal representation.

References tbb::internal::concurrent_monitor::abort_all(), tbb::internal::concurrent_queue_rep::abort_counter, tbb::internal::concurrent_queue_rep::items_avail, my_rep, and tbb::internal::concurrent_queue_rep::slots_avail.

Here is the call graph for this function:

◆ internal_assign()

void tbb::internal::concurrent_queue_base_v3::internal_assign ( const concurrent_queue_base_v3 &  src,
copy_specifics  op_type 
)
protected

Assigns one queue to another using specified operation (copy or move)

Definition at line 551 of file concurrent_queue.cpp.

551  {
552  items_per_page = src.items_per_page;
553  my_capacity = src.my_capacity;
554 
555  // copy concurrent_queue_rep.
556  my_rep->head_counter = src.my_rep->head_counter;
557  my_rep->tail_counter = src.my_rep->tail_counter;
558  my_rep->n_invalid_entries = src.my_rep->n_invalid_entries;
559  my_rep->abort_counter = src.my_rep->abort_counter;
560 
561  // copy micro_queues
562  for( size_t i = 0; i<my_rep->n_queue; ++i )
563  my_rep->array[i].assign( src.my_rep->array[i], *this, op_type );
564 
565  __TBB_ASSERT( my_rep->head_counter==src.my_rep->head_counter && my_rep->tail_counter==src.my_rep->tail_counter,
566  "the source concurrent queue should not be concurrently modified." );
567 }
ptrdiff_t my_capacity
Capacity of the queue.
concurrent_queue_rep * my_rep
Internal representation.
static const size_t n_queue
Must be power of 2.
micro_queue & assign(const micro_queue &src, concurrent_queue_base &base, concurrent_queue_base::copy_specifics op_type)
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165

References __TBB_ASSERT, tbb::internal::concurrent_queue_rep::abort_counter, tbb::internal::concurrent_queue_rep::array, tbb::internal::micro_queue::assign(), tbb::internal::concurrent_queue_rep::head_counter, items_per_page, my_capacity, my_rep, tbb::internal::concurrent_queue_rep::n_invalid_entries, tbb::internal::concurrent_queue_rep::n_queue, and tbb::internal::concurrent_queue_rep::tail_counter.

Referenced by tbb::internal::concurrent_queue_base_v8::move_content().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ internal_empty()

bool tbb::internal::concurrent_queue_base_v3::internal_empty ( ) const
protected

Check if the queue is empty.

Definition at line 524 of file concurrent_queue.cpp.

524  {
525  ticket tc = my_rep->tail_counter;
526  ticket hc = my_rep->head_counter;
527  // if tc!=r.tail_counter, the queue was not empty at some point between the two reads.
528  return ( tc==my_rep->tail_counter && ptrdiff_t(tc-hc-my_rep->n_invalid_entries)<=0 );
529 }
concurrent_queue_rep * my_rep
Internal representation.

References tbb::internal::concurrent_queue_rep::head_counter, my_rep, tbb::internal::concurrent_queue_rep::n_invalid_entries, and tbb::internal::concurrent_queue_rep::tail_counter.

Referenced by tbb::concurrent_bounded_queue< T, A >::empty().

Here is the caller graph for this function:

◆ internal_finish_clear()

void tbb::internal::concurrent_queue_base_v3::internal_finish_clear ( )
protected

free any remaining pages

Definition at line 535 of file concurrent_queue.cpp.

535  {
536  size_t nq = my_rep->n_queue;
537  for( size_t i=0; i<nq; ++i ) {
538  page* tp = my_rep->array[i].tail_page;
539  __TBB_ASSERT( my_rep->array[i].head_page==tp, "at most one page should remain" );
540  if( tp!=NULL) {
541  if( tp!=static_invalid_page ) deallocate_page( tp );
542  my_rep->array[i].tail_page = NULL;
543  }
544  }
545 }
concurrent_queue_rep * my_rep
Internal representation.
static void * static_invalid_page
static const size_t n_queue
Must be power of 2.
virtual void deallocate_page(page *p)=0
custom de-allocator
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165

References __TBB_ASSERT, tbb::internal::concurrent_queue_rep::array, deallocate_page(), tbb::internal::micro_queue::head_page, my_rep, tbb::internal::concurrent_queue_rep::n_queue, tbb::internal::static_invalid_page, and tbb::internal::micro_queue::tail_page.

Here is the call graph for this function:

◆ internal_insert_if_not_full()

bool tbb::internal::concurrent_queue_base_v3::internal_insert_if_not_full ( const void *  src,
copy_specifics  op_type 
)
protected

Attempts to enqueue at tail of queue using specified operation (copy or move)

Definition at line 499 of file concurrent_queue.cpp.

499  {
500  concurrent_queue_rep& r = *my_rep;
501  ticket k = r.tail_counter;
502  for(;;) {
503  if( (ptrdiff_t)(k-r.head_counter)>=my_capacity ) {
504  // Queue is full
505  return false;
506  }
507  // Queue had empty slot with ticket k when we looked. Attempt to claim that slot.
508  ticket tk=k;
509  k = r.tail_counter.compare_and_swap( tk+1, tk );
510  if( k==tk )
511  break;
512  // Another thread claimed the slot, so retry.
513  }
514  r.choose(k).push(src, k, *this, op_type);
515  r.items_avail.notify( predicate_leq(k) );
516  return true;
517 }
ptrdiff_t my_capacity
Capacity of the queue.
concurrent_queue_rep * my_rep
Internal representation.

References tbb::internal::concurrent_queue_rep::choose(), tbb::internal::concurrent_queue_rep::head_counter, tbb::internal::concurrent_queue_rep::items_avail, my_capacity, my_rep, tbb::internal::concurrent_monitor::notify(), tbb::internal::micro_queue::push(), and tbb::internal::concurrent_queue_rep::tail_counter.

Referenced by internal_push_if_not_full(), and tbb::internal::concurrent_queue_base_v8::internal_push_move_if_not_full().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ internal_insert_item()

void tbb::internal::concurrent_queue_base_v3::internal_insert_item ( const void *  src,
copy_specifics  op_type 
)
protected

Enqueues item at tail of queue using specified operation (copy or move)

Definition at line 371 of file concurrent_queue.cpp.

371  {
372  concurrent_queue_rep& r = *my_rep;
373  unsigned old_abort_counter = r.abort_counter;
374  ticket k = r.tail_counter++;
375  ptrdiff_t e = my_capacity;
376 #if DO_ITT_NOTIFY
377  bool sync_prepare_done = false;
378 #endif
379  if( (ptrdiff_t)(k-r.head_counter)>=e ) { // queue is full
380 #if DO_ITT_NOTIFY
381  if( !sync_prepare_done ) {
382  ITT_NOTIFY( sync_prepare, &sync_prepare_done );
383  sync_prepare_done = true;
384  }
385 #endif
386  bool slept = false;
387  concurrent_monitor::thread_context thr_ctx;
388  r.slots_avail.prepare_wait( thr_ctx, ((ptrdiff_t)(k-e)) );
389  while( (ptrdiff_t)(k-r.head_counter)>=const_cast<volatile ptrdiff_t&>(e = my_capacity) ) {
390  __TBB_TRY {
391  if( r.abort_counter!=old_abort_counter ) {
392  r.slots_avail.cancel_wait( thr_ctx );
393  throw_exception( eid_user_abort );
394  }
395  slept = r.slots_avail.commit_wait( thr_ctx );
396  } __TBB_CATCH( tbb::user_abort& ) {
397  r.choose(k).abort_push(k, *this);
398  __TBB_RETHROW();
399  } __TBB_CATCH(...) {
400  __TBB_RETHROW();
401  }
402  if (slept == true) break;
403  r.slots_avail.prepare_wait( thr_ctx, ((ptrdiff_t)(k-e)) );
404  }
405  if( !slept )
406  r.slots_avail.cancel_wait( thr_ctx );
407  }
408  ITT_NOTIFY( sync_acquired, &sync_prepare_done );
409  __TBB_ASSERT( (ptrdiff_t)(k-r.head_counter)<my_capacity, NULL);
410  r.choose( k ).push( src, k, *this, op_type );
411  r.items_avail.notify( predicate_leq(k) );
412 }
Exception for user-initiated abort.
Definition: tbb_exception.h:46
#define __TBB_RETHROW()
Definition: tbb_stddef.h:286
ptrdiff_t my_capacity
Capacity of the queue.
concurrent_queue_rep * my_rep
Internal representation.
#define __TBB_CATCH(e)
Definition: tbb_stddef.h:284
#define __TBB_TRY
Definition: tbb_stddef.h:283
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
#define ITT_NOTIFY(name, obj)
Definition: itt_notify.h:112
void throw_exception(exception_id eid)
Versionless convenience wrapper for throw_exception_v4()

References __TBB_ASSERT, __TBB_CATCH, __TBB_RETHROW, __TBB_TRY, tbb::internal::concurrent_queue_rep::abort_counter, tbb::internal::micro_queue::abort_push(), tbb::internal::concurrent_monitor::cancel_wait(), tbb::internal::concurrent_queue_rep::choose(), tbb::internal::concurrent_monitor::commit_wait(), tbb::internal::eid_user_abort, tbb::internal::concurrent_queue_rep::head_counter, tbb::internal::concurrent_queue_rep::items_avail, ITT_NOTIFY, my_capacity, my_rep, tbb::internal::concurrent_monitor::notify(), tbb::internal::concurrent_monitor::prepare_wait(), tbb::internal::micro_queue::push(), tbb::internal::concurrent_queue_rep::slots_avail, tbb::internal::concurrent_queue_rep::tail_counter, and tbb::internal::throw_exception().

Referenced by internal_push(), and tbb::internal::concurrent_queue_base_v8::internal_push_move().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ internal_pop()

void tbb::internal::concurrent_queue_base_v3::internal_pop ( void *  dst)
protected

Dequeue item from head of queue.

Definition at line 414 of file concurrent_queue.cpp.

414  {
415  concurrent_queue_rep& r = *my_rep;
416  ticket k;
417 #if DO_ITT_NOTIFY
418  bool sync_prepare_done = false;
419 #endif
420  unsigned old_abort_counter = r.abort_counter;
421  // This loop is a single pop operation; abort_counter should not be re-read inside
422  do {
423  k=r.head_counter++;
424  if ( (ptrdiff_t)(r.tail_counter-k)<=0 ) { // queue is empty
425 #if DO_ITT_NOTIFY
426  if( !sync_prepare_done ) {
427  ITT_NOTIFY( sync_prepare, dst );
428  sync_prepare_done = true;
429  }
430 #endif
431  bool slept = false;
432  concurrent_monitor::thread_context thr_ctx;
433  r.items_avail.prepare_wait( thr_ctx, k );
434  while( (ptrdiff_t)(r.tail_counter-k)<=0 ) {
435  __TBB_TRY {
436  if( r.abort_counter!=old_abort_counter ) {
437  r.items_avail.cancel_wait( thr_ctx );
438  throw_exception( eid_user_abort );
439  }
440  slept = r.items_avail.commit_wait( thr_ctx );
441  } __TBB_CATCH( tbb::user_abort& ) {
442  r.head_counter--;
443  __TBB_RETHROW();
444  } __TBB_CATCH(...) {
445  __TBB_RETHROW();
446  }
447  if (slept == true) break;
448  r.items_avail.prepare_wait( thr_ctx, k );
449  }
450  if( !slept )
451  r.items_avail.cancel_wait( thr_ctx );
452  }
453  __TBB_ASSERT((ptrdiff_t)(r.tail_counter-k)>0, NULL);
454  } while( !r.choose(k).pop(dst,k,*this) );
455 
456  // wake up a producer..
457  r.slots_avail.notify( predicate_leq(k) );
458 }
Exception for user-initiated abort.
Definition: tbb_exception.h:46
#define __TBB_RETHROW()
Definition: tbb_stddef.h:286
concurrent_queue_rep * my_rep
Internal representation.
#define __TBB_CATCH(e)
Definition: tbb_stddef.h:284
#define __TBB_TRY
Definition: tbb_stddef.h:283
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
#define ITT_NOTIFY(name, obj)
Definition: itt_notify.h:112
void throw_exception(exception_id eid)
Versionless convenience wrapper for throw_exception_v4()

References __TBB_ASSERT, __TBB_CATCH, __TBB_RETHROW, __TBB_TRY, tbb::internal::concurrent_queue_rep::abort_counter, tbb::internal::concurrent_monitor::cancel_wait(), tbb::internal::concurrent_queue_rep::choose(), tbb::internal::concurrent_monitor::commit_wait(), tbb::internal::eid_user_abort, tbb::internal::concurrent_queue_rep::head_counter, tbb::internal::concurrent_queue_rep::items_avail, ITT_NOTIFY, my_rep, tbb::internal::concurrent_monitor::notify(), tbb::internal::micro_queue::pop(), tbb::internal::concurrent_monitor::prepare_wait(), tbb::internal::concurrent_queue_rep::slots_avail, tbb::internal::concurrent_queue_rep::tail_counter, and tbb::internal::throw_exception().

Referenced by tbb::concurrent_bounded_queue< T, A >::pop().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ internal_pop_if_present()

bool tbb::internal::concurrent_queue_base_v3::internal_pop_if_present ( void *  dst)
protected

Attempt to dequeue item from queue.

NULL if there was no item to dequeue.

Definition at line 467 of file concurrent_queue.cpp.

467  {
468  concurrent_queue_rep& r = *my_rep;
469  ticket k;
470  do {
471  k = r.head_counter;
472  for(;;) {
473  if( (ptrdiff_t)(r.tail_counter-k)<=0 ) {
474  // Queue is empty
475  return false;
476  }
477  // Queue had item with ticket k when we looked. Attempt to get that item.
478  ticket tk=k;
479  k = r.head_counter.compare_and_swap( tk+1, tk );
480  if( k==tk )
481  break;
482  // Another thread snatched the item, retry.
483  }
484  } while( !r.choose( k ).pop( dst, k, *this ) );
485 
486  r.slots_avail.notify( predicate_leq(k) );
487 
488  return true;
489 }
concurrent_queue_rep * my_rep
Internal representation.

References tbb::internal::concurrent_queue_rep::choose(), tbb::internal::concurrent_queue_rep::head_counter, my_rep, tbb::internal::concurrent_monitor::notify(), tbb::internal::micro_queue::pop(), tbb::internal::concurrent_queue_rep::slots_avail, and tbb::internal::concurrent_queue_rep::tail_counter.

Referenced by tbb::concurrent_bounded_queue< T, A >::try_pop().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ internal_push()

void tbb::internal::concurrent_queue_base_v3::internal_push ( const void *  src)
protected

Enqueue item at tail of queue using copy operation.

Definition at line 363 of file concurrent_queue.cpp.

363  {
364  internal_insert_item( src, copy );
365 }
void internal_insert_item(const void *src, copy_specifics op_type)
Enqueues item at tail of queue using specified operation (copy or move)

References copy, and internal_insert_item().

Referenced by tbb::concurrent_bounded_queue< T, A >::push().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ internal_push_if_not_full()

bool tbb::internal::concurrent_queue_base_v3::internal_push_if_not_full ( const void *  src)
protected

Attempt to enqueue item onto queue using copy operation.

Definition at line 491 of file concurrent_queue.cpp.

491  {
492  return internal_insert_if_not_full( src, copy );
493 }
bool internal_insert_if_not_full(const void *src, copy_specifics op_type)
Attempts to enqueue at tail of queue using specified operation (copy or move)

References copy, and internal_insert_if_not_full().

Referenced by tbb::concurrent_bounded_queue< T, A >::concurrent_bounded_queue(), and tbb::concurrent_bounded_queue< T, A >::try_push().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ internal_set_capacity()

void tbb::internal::concurrent_queue_base_v3::internal_set_capacity ( ptrdiff_t  capacity,
size_t  element_size 
)
protected

Set the queue capacity.

Definition at line 531 of file concurrent_queue.cpp.

531  {
532  my_capacity = capacity<0 ? concurrent_queue_rep::infinite_capacity : capacity;
533 }
ptrdiff_t my_capacity
Capacity of the queue.
static const ptrdiff_t infinite_capacity
Value for effective_capacity that denotes unbounded queue.

References tbb::internal::concurrent_queue_rep::infinite_capacity, and my_capacity.

Referenced by tbb::concurrent_bounded_queue< T, A >::set_capacity().

Here is the caller graph for this function:

◆ internal_size()

ptrdiff_t tbb::internal::concurrent_queue_base_v3::internal_size ( ) const
protected

Get size of queue.

Definition at line 519 of file concurrent_queue.cpp.

519  {
520  __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL );
521  return ptrdiff_t(my_rep->tail_counter-my_rep->head_counter-my_rep->n_invalid_entries);
522 }
concurrent_queue_rep * my_rep
Internal representation.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165

References __TBB_ASSERT, tbb::internal::concurrent_queue_rep::head_counter, my_rep, tbb::internal::concurrent_queue_rep::n_invalid_entries, and tbb::internal::concurrent_queue_rep::tail_counter.

Referenced by tbb::concurrent_bounded_queue< T, A >::size().

Here is the caller graph for this function:

◆ internal_swap()

void tbb::internal::concurrent_queue_base_v3::internal_swap ( concurrent_queue_base_v3 &  src)
inlineprotected

swap queues

Definition at line 917 of file _concurrent_queue_impl.h.

917  {
918  std::swap( my_capacity, src.my_capacity );
919  std::swap( items_per_page, src.items_per_page );
920  std::swap( item_size, src.item_size );
921  std::swap( my_rep, src.my_rep );
922  }
ptrdiff_t my_capacity
Capacity of the queue.
concurrent_queue_rep * my_rep
Internal representation.
void swap(atomic< T > &lhs, atomic< T > &rhs)
Definition: atomic.h:564

Referenced by tbb::concurrent_bounded_queue< T, A >::concurrent_bounded_queue().

Here is the caller graph for this function:

◆ internal_throw_exception()

void tbb::internal::concurrent_queue_base_v3::internal_throw_exception ( ) const
protected

throw an exception

Definition at line 547 of file concurrent_queue.cpp.

547  {
548  throw_exception( eid_bad_alloc );
549 }
void throw_exception(exception_id eid)
Versionless convenience wrapper for throw_exception_v4()

References tbb::internal::eid_bad_alloc, and tbb::internal::throw_exception().

Here is the call graph for this function:

Friends And Related Function Documentation

◆ concurrent_queue_iterator_base_v3

friend class concurrent_queue_iterator_base_v3
friend

Definition at line 835 of file _concurrent_queue_impl.h.

◆ concurrent_queue_iterator_rep

friend class concurrent_queue_iterator_rep
friend

Definition at line 834 of file _concurrent_queue_impl.h.

◆ concurrent_queue_rep

friend class concurrent_queue_rep
friend

Definition at line 831 of file _concurrent_queue_impl.h.

◆ micro_queue

friend struct micro_queue
friend

Definition at line 832 of file _concurrent_queue_impl.h.

◆ micro_queue_pop_finalizer

friend class micro_queue_pop_finalizer
friend

Definition at line 833 of file _concurrent_queue_impl.h.

Member Data Documentation

◆ item_size

size_t tbb::internal::concurrent_queue_base_v3::item_size
protected

◆ items_per_page

◆ my_capacity

ptrdiff_t tbb::internal::concurrent_queue_base_v3::my_capacity
protected

◆ my_rep


The documentation for this class was generated from the following files:

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.