#if __TBB_TASK_GROUP_CONTEXT

inline char* duplicate_string ( const char* src ) {
        size_t len = strlen(src) + 1;
        strncpy (dst, src, len);

    my_exception_name = duplicate_string( a_name );
    my_exception_info = duplicate_string( info );
        ::new (e) captured_exception();
        e->my_exception_name = my_exception_name;
        e->my_exception_info = my_exception_info;

        my_exception_name = my_exception_info = NULL;

    __TBB_ASSERT ( my_dynamic, "Method destroy can be used only on objects created by clone or allocate" );

        ::new (e) captured_exception(a_name, info);

    return my_exception_name;

    return my_exception_info;
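The fragments above come from captured_exception's string handling and its allocate()/move()/destroy() members documented further below. As a rough illustration of the same manual ownership-transfer pattern, here is a minimal standalone sketch; the class and member names are hypothetical and plain malloc/free stand in for TBB's allocation handlers:

#include <cstdlib>
#include <cstring>
#include <new>

// Hypothetical stand-in for tbb::captured_exception: it owns two C strings and
// can hand them off to a heap-allocated copy ("move") that must later be freed
// explicitly ("destroy"), mirroring the fragments above.
class captured_error {
    char* name_ = nullptr;
    char* info_ = nullptr;
    bool  dynamic_ = false;

    static char* dup(const char* s) {
        if (!s) return nullptr;
        std::size_t len = std::strlen(s) + 1;
        char* d = static_cast<char*>(std::malloc(len));   // error handling omitted
        std::memcpy(d, s, len);
        return d;
    }
public:
    captured_error() = default;
    captured_error(const char* n, const char* i) : name_(dup(n)), info_(dup(i)) {}
    ~captured_error() { std::free(name_); std::free(info_); }

    // Transfer string ownership into a heap copy; the source keeps nothing.
    captured_error* move_out() {
        void* raw = std::malloc(sizeof(captured_error));
        captured_error* e = ::new (raw) captured_error();
        e->name_ = name_; e->info_ = info_; e->dynamic_ = true;
        name_ = info_ = nullptr;
        return e;
    }
    // Valid only for heap copies produced by move_out().
    void destroy() {
        this->~captured_error();
        std::free(this);
    }
    const char* what() const { return info_; }
};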
#if !TBB_USE_CAPTURED_EXCEPTION

tbb_exception_ptr* AllocateExceptionContainer( const T& src ) {
        new (eptr) tbb_exception_ptr(src);

    return AllocateExceptionContainer( std::current_exception() );

    return AllocateExceptionContainer( std::current_exception() );

    tbb_exception_ptr *res = AllocateExceptionContainer( src );

    this->tbb_exception_ptr::~tbb_exception_ptr();
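tbb_exception_ptr, used when exact exception propagation is available, stores a std::exception_ptr in a container obtained from the allocation handler. A rough standalone analogue of that container pattern (the exception_box type is hypothetical, and plain malloc/free replace the TBB handlers):

#include <cstdlib>
#include <exception>
#include <new>

// Wrap the std::exception_ptr captured inside a catch block in a heap object
// that can be passed across threads and rethrown (or destroyed) later.
struct exception_box {
    std::exception_ptr ptr;

    static exception_box* allocate() {                 // call inside a catch block
        void* raw = std::malloc(sizeof(exception_box));
        return ::new (raw) exception_box{ std::current_exception() };
    }
    void destroy() {
        this->~exception_box();
        std::free(this);
    }
    void rethrow() { std::rethrow_exception(ptr); }    // requires a non-null ptr
};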
            uintptr_t local_count_snapshot = my_owner->my_context_state_propagation_epoch;
            my_owner->my_local_ctx_list_update.store<relaxed>(1);

            if ( my_owner->my_nonlocal_ctx_list_update.load<relaxed>() ) {

                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                my_owner->my_local_ctx_list_update.store<relaxed>(0);

                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;

                my_owner->my_local_ctx_list_update.store<release>(0);
                if ( local_count_snapshot != the_context_state_propagation_epoch ) {

                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;

                my_owner->my_nonlocal_ctx_list_update.fetch_and_increment<full_fence>();

                my_owner->my_context_list_mutex.lock();
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                my_owner->my_context_list_mutex.unlock();

                my_owner->my_nonlocal_ctx_list_update.fetch_and_decrement<full_fence>();

    internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env)->~cpu_ctl_env();

        my_exception->destroy();
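Most of the destructor fragments above are variations of one operation: splicing my_node out of the owner scheduler's intrusive, doubly-linked context list, carried out under different synchronization regimes (local update flag only, or mutex plus the nonlocal-update counter). A minimal sketch of the unlink step itself, with hypothetical names:

// Intrusive doubly-linked node of the same shape as the context list node;
// the real list head lives in the owning scheduler.
struct list_node {
    list_node* my_prev;
    list_node* my_next;
};

// The splice repeated throughout the destructor: point the neighbours at each
// other. The unlinked node is left dangling and must not be traversed again.
inline void unlink(list_node& n) {
    n.my_prev->my_next = n.my_next;
    n.my_next->my_prev = n.my_prev;
}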
    if( ( my_version_and_traits & version_mask ) < 3 )
        my_name = internal::CUSTOM_CTX;

    __TBB_STATIC_ASSERT ( sizeof(my_version_and_traits) >= 4, "Layout of my_version_and_traits must be reconsidered on this platform" );

    __TBB_ASSERT ( (uintptr_t(this) & (sizeof(my_cancellation_requested) - 1)) == 0, "Context is improperly aligned" );

    my_node.my_next = NULL;
    my_node.my_prev = NULL;
    my_cancellation_requested = 0;

#if __TBB_TASK_PRIORITY
    my_priority = normalized_normal_priority;

    __TBB_STATIC_ASSERT( sizeof(my_cpu_ctl_env) == sizeof(internal::uint64_t), "The reserved space for FPU settings are not equal sizeof(uint64_t)" );
    __TBB_STATIC_ASSERT( sizeof(cpu_ctl_env) <= sizeof(my_cpu_ctl_env), "FPU settings storage does not fit to uint64_t" );

    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
    new ( &ctl ) cpu_ctl_env;
    if ( my_version_and_traits & fp_settings )
    my_owner = local_sched;

    my_node.my_prev = &local_sched->my_context_list_head;

    local_sched->my_local_ctx_list_update.store<relaxed>(1);

    if ( local_sched->my_nonlocal_ctx_list_update.load<relaxed>() ) {

        local_sched->my_context_list_head.my_next->my_prev = &my_node;
        my_node.my_next = local_sched->my_context_list_head.my_next;
        my_owner->my_local_ctx_list_update.store<relaxed>(0);
        local_sched->my_context_list_head.my_next = &my_node;

        local_sched->my_context_list_head.my_next->my_prev = &my_node;
        my_node.my_next = local_sched->my_context_list_head.my_next;
        my_owner->my_local_ctx_list_update.store<release>(0);
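register_with() performs the inverse of the destructor's unlink: it pushes my_node onto the head of the scheduler's context list, which keeps the list in LIFO order (newest, and therefore deepest, contexts first). A matching sketch with hypothetical names; in the lock-free branch the real code publishes the head pointer with a release store:

struct list_node {          // same shape as in the previous sketch
    list_node* my_prev;
    list_node* my_next;
};

// Insert n right after the list head: wire up the neighbours first and write
// the head pointer last, so a concurrent reader sees either the old list or a
// fully linked node. TBB performs that final store with release semantics.
inline void push_front(list_node& head, list_node& n) {
    n.my_prev = &head;
    n.my_next = head.my_next;
    head.my_next->my_prev = &n;
    head.my_next = &n;
}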
    __TBB_ASSERT ( !my_parent, "Parent is set before initial binding" );
    my_parent = local_sched->my_innermost_running_task->prefix().context;

    if ( !(my_version_and_traits & fp_settings) )
        copy_fp_settings(*my_parent);

    if ( !(my_parent->my_state & may_have_children) )
        my_parent->my_state |= may_have_children;
    if ( my_parent->my_parent ) {

        uintptr_t local_count_snapshot = __TBB_load_with_acquire( my_parent->my_owner->my_context_state_propagation_epoch );

        my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
        my_priority = my_parent->my_priority;

        register_with( local_sched );

        if ( local_count_snapshot != the_context_state_propagation_epoch ) {

            context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
            my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
            my_priority = my_parent->my_priority;

        register_with( local_sched );

        my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
        my_priority = my_parent->my_priority;
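The bind_to() fragments above follow an epoch-validated speculation pattern: snapshot the propagation epoch, copy the parent's state without a lock, register the context, and only fall back to a locked copy if the epoch changed in the meantime. A simplified standalone sketch with hypothetical names (the real code also issues a full fence via registration before re-checking the epoch):

#include <atomic>
#include <cstdint>
#include <mutex>

std::atomic<std::uintptr_t> propagation_epoch{0};
std::mutex propagation_mutex;

void bind_state(std::uintptr_t& dst, const std::uintptr_t& parent_state) {
    std::uintptr_t snapshot = propagation_epoch.load(std::memory_order_acquire);
    dst = parent_state;                         // speculative, lock-free copy
    // ... here the context would be registered so that any later propagation
    //     is guaranteed to reach it ...
    if (snapshot != propagation_epoch.load(std::memory_order_acquire)) {
        // An epoch bump means a propagation ran while we copied; repeat the
        // copy under the lock so a broadcast state change cannot be missed.
        std::lock_guard<std::mutex> lock(propagation_mutex);
        dst = parent_state;
    }
}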
template <typename T>
    if (this->*mptr_state == new_state) {

    else if (this == &src) {

        for ( task_group_context *ancestor = my_parent; ancestor != NULL; ancestor = ancestor->my_parent ) {
            __TBB_ASSERT(internal::is_alive(ancestor->my_version_and_traits), "context tree was corrupted");
            if ( ancestor == &src ) {
                for ( task_group_context *ctx = this; ctx != ancestor; ctx = ctx->my_parent )
                    ctx->*mptr_state = new_state;
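The nested loops above walk the ancestor chain looking for src and, if found, "paint" the new state onto every context between this one and src. A standalone sketch of the same logic with a hypothetical ctx_t type:

struct ctx_t {
    ctx_t* parent;
    int    state;
};

void propagate(ctx_t* ctx, ctx_t* src, int new_state) {
    if (ctx->state == new_state || ctx == src)
        return;                                  // already painted, or the origin itself
    for (ctx_t* a = ctx->parent; a != nullptr; a = a->parent) {
        if (a == src) {                          // src is an ancestor of ctx
            for (ctx_t* c = ctx; c != a; c = c->parent)
                c->state = new_state;
            break;
        }
    }
}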
template <typename T>
void generic_scheduler::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {

    while ( node != &my_context_list_head ) {

        if ( ctx.*mptr_state != new_state )
            ctx.propagate_task_group_state( mptr_state, src, new_state );
        node = node->my_next;
        __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Local context list contains destroyed object" );
template <typename T>
bool market::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {

    context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
    if ( src.*mptr_state != new_state )

    for ( unsigned i = 0; i < num_workers; ++i ) {

            s->propagate_task_group_state( mptr_state, src, new_state );

        it->propagate_task_group_state( mptr_state, src, new_state );
    __TBB_ASSERT ( my_cancellation_requested == 0 || my_cancellation_requested == 1, "Invalid cancellation state");
    if ( my_cancellation_requested || as_atomic(my_cancellation_requested).compare_and_swap(1, 0) ) {

    return my_cancellation_requested != 0;
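From the user's side, the compare-and-swap above makes cancellation idempotent: only the first request changes the state, later requests return false. A usage sketch against the classic (pre-oneTBB) public headers this source belongs to:

#include <tbb/task.h>
#include <tbb/task_scheduler_init.h>

int main() {
    tbb::task_scheduler_init init;                 // ensure a scheduler exists
    tbb::task_group_context ctx;
    bool first  = ctx.cancel_group_execution();    // true: state changed 0 -> 1
    bool second = ctx.cancel_group_execution();    // false: already cancelled
    bool cancelled = ctx.is_group_execution_cancelled();   // true
    return (first && !second && cancelled) ? 0 : 1;
}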
    if ( my_exception )  {
        my_exception->destroy();

    my_cancellation_requested = 0;
    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
    if ( !(my_version_and_traits & fp_settings) ) {
        new ( &ctl ) cpu_ctl_env;
        my_version_and_traits |= fp_settings;
    __TBB_ASSERT( !(my_version_and_traits & fp_settings), "The context already has FPU settings." );
    __TBB_ASSERT( src.my_version_and_traits & fp_settings, "The source context does not have FPU settings." );

    cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
    cpu_ctl_env &src_ctl = *internal::punned_cast<cpu_ctl_env*>(&src.my_cpu_ctl_env);
    new (&ctl) cpu_ctl_env( src_ctl );
    my_version_and_traits |= fp_settings;
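capture_fp_settings() and copy_fp_settings() store a snapshot of the FPU control state inside the context so it can be reinstated for tasks that run under it. An illustrative analogue in standard C++ (not the internal cpu_ctl_env class, which reads the control registers directly):

#include <cfenv>

// Capture the calling thread's floating-point environment so it can later be
// reapplied before running work that must see the same FPU settings.
struct fp_settings {
    std::fenv_t env;
    void capture()     { std::fegetenv(&env); }   // like capture_fp_settings()
    void apply() const { std::fesetenv(&env); }   // like applying the saved settings
};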
    if ( my_cancellation_requested )

#if TBB_USE_EXCEPTIONS
    } TbbCatchAll( this );
#if __TBB_TASK_PRIORITY
    intptr_t p = normalize_priority(prio);

        s->my_market->update_arena_priority( *s->my_arena, p );

    return static_cast<priority_t>(priority_from_normalized_rep[my_priority]);
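set_priority() stores the normalized priority in the context and, when possible, pushes it to the arena; priority() converts the normalized value back to the public priority_t. A usage sketch, assuming a classic TBB build with __TBB_TASK_PRIORITY enabled (the default for the binary releases):

#include <tbb/task.h>
#include <tbb/task_scheduler_init.h>

int main() {
    tbb::task_scheduler_init init;
    tbb::task_group_context ctx;
    ctx.set_priority(tbb::priority_high);          // deprecated in recent TBB releases
    return ctx.priority() == tbb::priority_high ? 0 : 1;
}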
 bool __TBB_EXPORTED_METHOD cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
 
void __TBB_EXPORTED_METHOD destroy() __TBB_override
Destroys objects created by the move() method.
 
void destroy()
Destroys this object.
 
atomic< unsigned > my_first_unused_worker_idx
Index of the first unused worker.
 
const char *__TBB_EXPORTED_METHOD name() const __TBB_override
Returns RTTI name of the originally intercepted exception.
 
__TBB_EXPORTED_METHOD ~captured_exception()
 
#define ITT_STACK(precond, name, obj)
 
captured_exception *__TBB_EXPORTED_METHOD move() __TBB_override
Creates and returns pointer to the deep copy of this exception object.
 
void __TBB_EXPORTED_METHOD clear()
 
static captured_exception * allocate(const char *name, const char *info)
Functionally equivalent to {captured_exception e(name,info); return e.move();}.
 
void bind_to(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler and binds it to its parent context.
 
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
 
void __TBB_EXPORTED_METHOD reset()
Forcefully reinitializes the context after the task tree it was associated with is completed.
 
market * my_market
The market I am in.
 
void __TBB_EXPORTED_FUNC deallocate_via_handler_v3(void *p)
Deallocates memory using FreeHandler.
 
void __TBB_EXPORTED_METHOD capture_fp_settings()
Captures the current FPU control settings to the context.
 
void __TBB_store_relaxed(volatile T &location, V value)
 
void spin_wait_until_eq(const volatile T &location, const U value)
Spin UNTIL the value of the variable is equal to a given value.
 
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
 
void __TBB_store_with_release(volatile T &location, V value)
 
bool __TBB_EXPORTED_METHOD is_group_execution_cancelled() const
Returns true if the context received cancellation request.
 
__TBB_DEPRECATED_IN_VERBOSE_MODE void set_priority(priority_t)
Changes priority of the task group.
 
#define __TBB_STATIC_ASSERT(condition, msg)
 
void __TBB_EXPORTED_METHOD set(const char *name, const char *info)
 
void propagate_task_group_state(T task_group_context::*mptr_state, task_group_context &src, T new_state)
Propagates any state change detected to *this, and as an optimisation possibly also upward along the ...
 
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
 
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
 
__TBB_EXPORTED_METHOD ~task_group_context()
 
iterator_impl< T > iterator
 
void register_with(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler.
 
executing
Task is running, and will be destroyed after method execute() completes.
 
#define __TBB_get_object_ref(class_name, member_name, member_addr)
Returns address of the object containing a member with the given name and address.
 
#define ITT_TASK_GROUP(type, name, parent)
 
intptr_t my_priority
Priority level of the task group (in normalized representation)
 
static generic_scheduler * local_scheduler_if_initialized()
 
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
 
static generic_scheduler * local_scheduler_weak()
 
__TBB_DEPRECATED_IN_VERBOSE_MODE priority_t priority() const
Retrieves current priority of the current task group.
 
static tbb_exception_ptr * allocate()
 
void atomic_fence()
Sequentially consistent full memory fence.
 
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
 
void __TBB_EXPORTED_METHOD init()
Out-of-line part of the constructor.
 
#define __TBB_FetchAndAddWrelease(P, V)
 
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
 
T __TBB_load_relaxed(const volatile T &location)
 
atomic< T > & as_atomic(T &t)
 
const char *__TBB_EXPORTED_METHOD what() const __TBB_override
Returns the result of originally intercepted exception's what() method.
 
T __TBB_load_with_acquire(const volatile T &location)
 
friend class generic_scheduler
 
void *__TBB_EXPORTED_FUNC allocate_via_handler_v3(size_t n)
Allocates memory using MallocHandler.