Intel(R) Threading Building Blocks Doxygen Documentation, version 4.2.3
task_group_context.cpp
1 /*
2  Copyright (c) 2005-2020 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #include "scheduler.h"
18 
19 #include "itt_notify.h"
20 
21 namespace tbb {
22 
23 #if __TBB_TASK_GROUP_CONTEXT
24 
25 using namespace internal;
26 
27 //------------------------------------------------------------------------
28 // captured_exception
29 //------------------------------------------------------------------------
30 
31 inline char* duplicate_string ( const char* src ) {
32  char* dst = NULL;
33  if ( src ) {
34  size_t len = strlen(src) + 1;
35  dst = (char*)allocate_via_handler_v3(len);
36  strncpy (dst, src, len);
37  }
38  return dst;
39 }
40 
41 captured_exception::~captured_exception () throw() {
42  clear();
43 }
44 
45 void captured_exception::set ( const char* a_name, const char* info ) throw() {
46  my_exception_name = duplicate_string( a_name );
47  my_exception_info = duplicate_string( info );
48 }
49 
50 void captured_exception::clear () throw() {
51  deallocate_via_handler_v3 (const_cast<char*>(my_exception_name));
52  deallocate_via_handler_v3 (const_cast<char*>(my_exception_info));
53 }
54 
55 captured_exception* captured_exception::move () throw() {
56  captured_exception *e = (captured_exception*)allocate_via_handler_v3(sizeof(captured_exception));
57  if ( e ) {
58  ::new (e) captured_exception();
59  e->my_exception_name = my_exception_name;
60  e->my_exception_info = my_exception_info;
61  e->my_dynamic = true;
62  my_exception_name = my_exception_info = NULL;
63  }
64  return e;
65 }
66 
67 void captured_exception::destroy () throw() {
68  __TBB_ASSERT ( my_dynamic, "Method destroy can be used only on objects created by clone or allocate" );
69  if ( my_dynamic ) {
70  this->captured_exception::~captured_exception();
71  deallocate_via_handler_v3 (this);
72  }
73 }
74 
75 captured_exception* captured_exception::allocate ( const char* a_name, const char* info ) {
76  captured_exception *e = (captured_exception*)allocate_via_handler_v3( sizeof(captured_exception) );
77  if ( e ) {
78  ::new (e) captured_exception(a_name, info);
79  e->my_dynamic = true;
80  }
81  return e;
82 }
83 
84 const char* captured_exception::name() const throw() {
85  return my_exception_name;
86 }
87 
88 const char* captured_exception::what() const throw() {
89  return my_exception_info;
90 }
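The two accessors above are what user code ultimately sees when TBB transports an exception between threads by summary (a name string plus a what() string) rather than by exact type. A minimal illustrative sketch, not part of this file, assuming TBB_USE_CAPTURED_EXCEPTION is forced to 1 so the summary path is taken:

    // Illustrative only; forces the captured_exception propagation mode.
    #define TBB_USE_CAPTURED_EXCEPTION 1
    #include <cstdio>
    #include <stdexcept>
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include "tbb/tbb_exception.h"

    int main() {
        try {
            tbb::parallel_for( tbb::blocked_range<int>(0, 1000),
                               []( const tbb::blocked_range<int>& ) {
                                   throw std::runtime_error("worker failed");
                               } );
        } catch ( const tbb::captured_exception& e ) {
            // name() reports the RTTI name of the intercepted exception, what() its message.
            std::printf( "caught %s: %s\n", e.name(), e.what() );
        }
        return 0;
    }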
91 
92 
93 //------------------------------------------------------------------------
94 // tbb_exception_ptr
95 //------------------------------------------------------------------------
96 
97 #if !TBB_USE_CAPTURED_EXCEPTION
98 
99 namespace internal {
100 
101 template<typename T>
102 tbb_exception_ptr* AllocateExceptionContainer( const T& src ) {
103  tbb_exception_ptr *eptr = (tbb_exception_ptr*)allocate_via_handler_v3( sizeof(tbb_exception_ptr) );
104  if ( eptr )
105  new (eptr) tbb_exception_ptr(src);
106  return eptr;
107 }
108 
109 tbb_exception_ptr* tbb_exception_ptr::allocate () {
110  return AllocateExceptionContainer( std::current_exception() );
111 }
112 
113 tbb_exception_ptr* tbb_exception_ptr::allocate ( const tbb_exception& ) {
114  return AllocateExceptionContainer( std::current_exception() );
115 }
116 
117 tbb_exception_ptr* tbb_exception_ptr::allocate ( captured_exception& src ) {
118  tbb_exception_ptr *res = AllocateExceptionContainer( src );
119  src.destroy();
120  return res;
121 }
122 
123 void tbb_exception_ptr::destroy () throw() {
124  this->tbb_exception_ptr::~tbb_exception_ptr();
125  deallocate_via_handler_v3 (this);
126  }
127 
128 } // namespace internal
129 #endif /* !TBB_USE_CAPTURED_EXCEPTION */
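The container above simply stores std::current_exception(), so in a default build (TBB_USE_CAPTURED_EXCEPTION is 0 and the compiler provides std::exception_ptr) the waiting thread re-catches the original exception type rather than a summary. A small illustrative sketch, not part of this file:

    #include <cstdio>
    #include <stdexcept>
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"

    int main() {
        try {
            tbb::parallel_for( tbb::blocked_range<int>(0, 1000),
                               []( const tbb::blocked_range<int>& ) {
                                   throw std::out_of_range("index check failed");
                               } );
        } catch ( const std::out_of_range& e ) {
            // The original type crosses the thread boundary intact via tbb_exception_ptr.
            std::printf( "caught std::out_of_range: %s\n", e.what() );
        }
        return 0;
    }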
130 
131 
132 //------------------------------------------------------------------------
133 // task_group_context
134 //------------------------------------------------------------------------
135 
136 task_group_context::~task_group_context () {
137  if ( __TBB_load_relaxed(my_kind) == binding_completed ) {
138  if ( governor::is_set(my_owner) ) {
139  // Local update of the context list
140  uintptr_t local_count_snapshot = my_owner->my_context_state_propagation_epoch;
141  my_owner->my_local_ctx_list_update.store<relaxed>(1);
142  // Prevent load of nonlocal update flag from being hoisted before the
143  // store to local update flag.
144  atomic_fence();
145  if ( my_owner->my_nonlocal_ctx_list_update.load<relaxed>() ) {
146  spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
147  my_node.my_prev->my_next = my_node.my_next;
148  my_node.my_next->my_prev = my_node.my_prev;
149  my_owner->my_local_ctx_list_update.store<relaxed>(0);
150  }
151  else {
152  my_node.my_prev->my_next = my_node.my_next;
153  my_node.my_next->my_prev = my_node.my_prev;
154  // Release fence is necessary so that update of our neighbors in
155  // the context list was committed when possible concurrent destroyer
156  // proceeds after local update flag is reset by the following store.
157  my_owner->my_local_ctx_list_update.store<release>(0);
158  if ( local_count_snapshot != the_context_state_propagation_epoch ) {
159  // Another thread was propagating cancellation request when we removed
160  // ourselves from the list. We must ensure that it is not accessing us
161  // when this destructor finishes. We'll be able to acquire the lock
162  // below only after the other thread finishes with us.
163  spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
164  }
165  }
166  }
167  else {
168  // Nonlocal update of the context list
169  // Synchronizes with generic_scheduler::cleanup_local_context_list()
170  // TODO: evaluate and perhaps relax, or add some lock instead
171  if ( internal::as_atomic(my_kind).fetch_and_store(dying) == detached ) {
172  my_node.my_prev->my_next = my_node.my_next;
173  my_node.my_next->my_prev = my_node.my_prev;
174  }
175  else {
176  //TODO: evaluate and perhaps relax
177  my_owner->my_nonlocal_ctx_list_update.fetch_and_increment<full_fence>();
178  //TODO: evaluate and perhaps remove
179  spin_wait_until_eq( my_owner->my_local_ctx_list_update, 0u );
180  my_owner->my_context_list_mutex.lock();
181  my_node.my_prev->my_next = my_node.my_next;
182  my_node.my_next->my_prev = my_node.my_prev;
183  my_owner->my_context_list_mutex.unlock();
184  //TODO: evaluate and perhaps relax
185  my_owner->my_nonlocal_ctx_list_update.fetch_and_decrement<full_fence>();
186  }
187  }
188  }
189 #if __TBB_FP_CONTEXT
190  internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env)->~cpu_ctl_env();
191 #endif
192  poison_value(my_version_and_traits);
193  if ( my_exception )
194  my_exception->destroy();
195  ITT_STACK(itt_caller != ITT_CALLER_NULL, caller_destroy, itt_caller);
196 }
197 
198 void task_group_context::init () {
199 #if DO_ITT_NOTIFY
200  // Check version of task group context to avoid reporting misleading identifier.
201  if( ( my_version_and_traits & version_mask ) < 3 )
202  my_name = internal::CUSTOM_CTX;
203 #endif
204  ITT_TASK_GROUP(this, my_name, NULL);
205  __TBB_STATIC_ASSERT ( sizeof(my_version_and_traits) >= 4, "Layout of my_version_and_traits must be reconsidered on this platform" );
206  __TBB_STATIC_ASSERT ( sizeof(task_group_context) == 2 * NFS_MaxLineSize, "Context class has wrong size - check padding and members alignment" );
207  __TBB_ASSERT ( (uintptr_t(this) & (sizeof(my_cancellation_requested) - 1)) == 0, "Context is improperly aligned" );
208  __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == isolated || __TBB_load_relaxed(my_kind) == bound, "Context can be created only as isolated or bound" );
209  my_parent = NULL;
210  my_node.my_next = NULL;
211  my_node.my_prev = NULL;
212  my_cancellation_requested = 0;
213  my_exception = NULL;
214  my_owner = NULL;
215  my_state = 0;
216  itt_caller = ITT_CALLER_NULL;
217 #if __TBB_TASK_PRIORITY
218  my_priority = normalized_normal_priority;
219 #endif /* __TBB_TASK_PRIORITY */
220 #if __TBB_FP_CONTEXT
221  __TBB_STATIC_ASSERT( sizeof(my_cpu_ctl_env) == sizeof(internal::uint64_t), "The reserved space for FPU settings are not equal sizeof(uint64_t)" );
222  __TBB_STATIC_ASSERT( sizeof(cpu_ctl_env) <= sizeof(my_cpu_ctl_env), "FPU settings storage does not fit to uint64_t" );
223  suppress_unused_warning( my_cpu_ctl_env.space );
224 
225  cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
226  new ( &ctl ) cpu_ctl_env;
227  if ( my_version_and_traits & fp_settings )
228  ctl.get_env();
229 #endif
230 }
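init() is the out-of-line part of every task_group_context constructor, so each public construction form below ends up in the code above. An illustrative sketch (the helper make_contexts is hypothetical), using the public kind and trait values from tbb/task.h and assuming a platform where __TBB_FP_CONTEXT is enabled so the fp_settings trait is available:

    #include "tbb/task.h"

    // Illustrative helper, not part of TBB.
    void make_contexts() {
        // Default: a bound context. It binds lazily to the innermost running task's
        // context on first use (see bind_to below), inheriting its cancellation state.
        tbb::task_group_context bound_ctx;

        // Isolated: never binds to a parent, so cancellation of the surrounding
        // task group does not reach work running under this context.
        tbb::task_group_context isolated_ctx( tbb::task_group_context::isolated );

        // Isolated context that also captures the current FPU control settings at
        // construction time (the fp_settings trait drives the ctl.get_env() call above).
        tbb::task_group_context fp_ctx( tbb::task_group_context::isolated,
                                        tbb::task_group_context::default_traits
                                        | tbb::task_group_context::fp_settings );

        (void)bound_ctx; (void)isolated_ctx; (void)fp_ctx;
    }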
231 
232 void task_group_context::register_with ( generic_scheduler *local_sched ) {
233  __TBB_ASSERT( local_sched, NULL );
234  my_owner = local_sched;
235  // state propagation logic assumes new contexts are bound to head of the list
236  my_node.my_prev = &local_sched->my_context_list_head;
237  // Notify threads that may be concurrently destroying contexts registered
238  // in this scheduler's list that local list update is underway.
239  local_sched->my_local_ctx_list_update.store<relaxed>(1);
240  // Prevent load of global propagation epoch counter from being hoisted before
241  // speculative stores above, as well as load of nonlocal update flag from
242  // being hoisted before the store to local update flag.
243  atomic_fence();
244  // Finalize local context list update
245  if ( local_sched->my_nonlocal_ctx_list_update.load<relaxed>() ) {
246  spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
247  local_sched->my_context_list_head.my_next->my_prev = &my_node;
248  my_node.my_next = local_sched->my_context_list_head.my_next;
249  my_owner->my_local_ctx_list_update.store<relaxed>(0);
250  local_sched->my_context_list_head.my_next = &my_node;
251  }
252  else {
253  local_sched->my_context_list_head.my_next->my_prev = &my_node;
254  my_node.my_next = local_sched->my_context_list_head.my_next;
255  my_owner->my_local_ctx_list_update.store<release>(0);
256  // Thread-local list of contexts allows concurrent traversal by another thread
257  // while propagating state change. To ensure visibility of my_node's members
258  // to the concurrently traversing thread, the list's head is updated by means
259  // of store-with-release.
260  __TBB_store_with_release(local_sched->my_context_list_head.my_next, &my_node);
261  }
262 }
263 
264 void task_group_context::bind_to ( generic_scheduler *local_sched ) {
265  __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == binding_required, "Already bound or isolated?" );
266  __TBB_ASSERT ( !my_parent, "Parent is set before initial binding" );
267  my_parent = local_sched->my_innermost_running_task->prefix().context;
268 #if __TBB_FP_CONTEXT
269  // Inherit FPU settings only if the context has not captured FPU settings yet.
270  if ( !(my_version_and_traits & fp_settings) )
271  copy_fp_settings(*my_parent);
272 #endif
273 
274  // Condition below prevents unnecessary thrashing parent context's cache line
275  if ( !(my_parent->my_state & may_have_children) )
276  my_parent->my_state |= may_have_children; // full fence is below
277  if ( my_parent->my_parent ) {
278  // Even if this context were made accessible for state change propagation
279  // (by placing __TBB_store_with_release(s->my_context_list_head.my_next, &my_node)
280  // above), it still could be missed if state propagation from a grand-ancestor
281  // was underway concurrently with binding.
282  // Speculative propagation from the parent together with epoch counters
283  // detecting possibility of such a race allow to avoid taking locks when
284  // there is no contention.
285 
286  // Acquire fence is necessary to prevent reordering subsequent speculative
287  // loads of parent state data out of the scope where epoch counters comparison
288  // can reliably validate it.
289  uintptr_t local_count_snapshot = __TBB_load_with_acquire( my_parent->my_owner->my_context_state_propagation_epoch );
290  // Speculative propagation of parent's state. The speculation will be
291  // validated by the epoch counters check further on.
292  my_cancellation_requested = my_parent->my_cancellation_requested;
293 #if __TBB_TASK_PRIORITY
294  my_priority = my_parent->my_priority;
295 #endif /* __TBB_TASK_PRIORITY */
296  register_with( local_sched ); // Issues full fence
297 
298  // If no state propagation was detected by the following condition, the above
299  // full fence guarantees that the parent had correct state during speculative
300  // propagation before the fence. Otherwise the propagation from parent is
301  // repeated under the lock.
302  if ( local_count_snapshot != the_context_state_propagation_epoch ) {
303  // Another thread may be propagating state change right now. So resort to lock.
304  context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
305  my_cancellation_requested = my_parent->my_cancellation_requested;
306 #if __TBB_TASK_PRIORITY
307  my_priority = my_parent->my_priority;
308 #endif /* __TBB_TASK_PRIORITY */
309  }
310  }
311  else {
312  register_with( local_sched ); // Issues full fence
313  // As we do not have grand-ancestors, concurrent state propagation (if any)
314  // may originate only from the parent context, and thus it is safe to directly
315  // copy the state from it.
316  my_cancellation_requested = my_parent->my_cancellation_requested;
317 #if __TBB_TASK_PRIORITY
318  my_priority = my_parent->my_priority;
319 #endif /* __TBB_TASK_PRIORITY */
320  }
321  __TBB_store_relaxed(my_kind, binding_completed);
322 }
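Binding is what makes cancellation hierarchical: a bound child copies its parent's my_cancellation_requested here, while an isolated context never reaches this code. An illustrative sketch of the visible difference (the helper run_outer is hypothetical):

    #include "tbb/parallel_for.h"
    #include "tbb/task.h"

    // Illustrative helper, not part of TBB.
    void run_outer( int n ) {
        tbb::parallel_for( 0, n, []( int i ) {
            // The nested algorithm below uses an isolated context, so a cancellation
            // request raised on the enclosing group does not propagate into it.
            // With the default bound context it would be cancelled as well.
            tbb::task_group_context isolated( tbb::task_group_context::isolated );
            tbb::parallel_for( 0, 100, []( int ) { /* inner work */ }, isolated );

            if ( i == 0 )
                tbb::task::self().cancel_group_execution();   // cancels the outer group only
        } );
    }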
323 
324 template <typename T>
325 void task_group_context::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
326  if (this->*mptr_state == new_state) {
327  // Nothing to do, whether descending from "src" or not, so no need to scan.
328  // Hopefully this happens often thanks to earlier invocations.
329  // This optimization is enabled by LIFO order in the context lists:
330  // - new contexts are bound to the beginning of lists;
331  // - descendants are newer than ancestors;
332  // - earlier invocations are therefore likely to "paint" long chains.
333  }
334  else if (this == &src) {
335  // This clause is disjunct from the traversal below, which skips src entirely.
336  // Note that src.*mptr_state is not necessarily still equal to new_state (another thread may have changed it again).
337  // Such interference is probably not frequent enough to aim for optimisation by writing new_state again (to make the other thread back down).
338  // Letting the other thread prevail may also be fairer.
339  }
340  else {
341  for ( task_group_context *ancestor = my_parent; ancestor != NULL; ancestor = ancestor->my_parent ) {
342  __TBB_ASSERT(internal::is_alive(ancestor->my_version_and_traits), "context tree was corrupted");
343  if ( ancestor == &src ) {
344  for ( task_group_context *ctx = this; ctx != ancestor; ctx = ctx->my_parent )
345  ctx->*mptr_state = new_state;
346  break;
347  }
348  }
349  }
350 }
351 
352 template <typename T>
353 void generic_scheduler::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
354  spin_mutex::scoped_lock lock(my_context_list_mutex);
355  // Acquire fence is necessary to ensure that the subsequent node->my_next load
356  // returned the correct value in case it was just inserted in another thread.
357  // The fence also ensures visibility of the correct my_parent value.
358  context_list_node_t *node = __TBB_load_with_acquire(my_context_list_head.my_next);
359  while ( node != &my_context_list_head ) {
360  task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node);
361  if ( ctx.*mptr_state != new_state )
362  ctx.propagate_task_group_state( mptr_state, src, new_state );
363  node = node->my_next;
364  __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Local context list contains destroyed object" );
365  }
366  // Sync up local propagation epoch with the global one. Release fence prevents
367  // reordering of possible store to *mptr_state after the sync point.
368  __TBB_store_with_release(my_context_state_propagation_epoch, the_context_state_propagation_epoch);
369 }
370 
371 template <typename T>
372 bool market::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
373  if ( !(src.my_state & task_group_context::may_have_children) )
374  return true;
375  // The whole propagation algorithm is under the lock in order to ensure correctness
376  // in case of concurrent state changes at the different levels of the context tree.
377  // See comment at the bottom of scheduler.cpp
378  context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
379  if ( src.*mptr_state != new_state )
380  // Another thread has concurrently changed the state. Back down.
381  return false;
382  // Advance global state propagation epoch
383  __TBB_FetchAndAddWrelease(&the_context_state_propagation_epoch, 1);
384  // Propagate to all workers and masters and sync up their local epochs with the global one
385  unsigned num_workers = my_first_unused_worker_idx;
386  for ( unsigned i = 0; i < num_workers; ++i ) {
387  generic_scheduler *s = my_workers[i];
388  // If the worker is only about to be registered, skip it.
389  if ( s )
390  s->propagate_task_group_state( mptr_state, src, new_state );
391  }
392  // Propagate to all master threads
393  // The whole propagation sequence is locked, thus no contention is expected
394  for( scheduler_list_type::iterator it = my_masters.begin(); it != my_masters.end(); it++ )
395  it->propagate_task_group_state( mptr_state, src, new_state );
396  return true;
397 }
398 
399 bool task_group_context::cancel_group_execution () {
400  __TBB_ASSERT ( my_cancellation_requested == 0 || my_cancellation_requested == 1, "Invalid cancellation state");
401  if ( my_cancellation_requested || as_atomic(my_cancellation_requested).compare_and_swap(1, 0) ) {
402  // This task group and any descendants have already been canceled.
403  // (A newly added descendant would inherit its parent's my_cancellation_requested,
404  // not missing out on any cancellation still being propagated, and a context cannot be uncanceled.)
405  return false;
406  }
407  governor::local_scheduler_weak()->my_market->propagate_task_group_state( &task_group_context::my_cancellation_requested, *this, (uintptr_t)1 );
408  return true;
409 }
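The typical caller of cancel_group_execution() is a thread other than the one running the algorithm, sharing the context object with it. An illustrative sketch (the helper cancel_from_outside and the 100 ms delay are hypothetical):

    #include <chrono>
    #include <thread>
    #include "tbb/parallel_for.h"
    #include "tbb/task.h"

    // Illustrative helper, not part of TBB.
    void cancel_from_outside( long n ) {
        tbb::task_group_context ctx;

        std::thread watchdog( [&ctx] {
            std::this_thread::sleep_for( std::chrono::milliseconds(100) );
            ctx.cancel_group_execution();          // returns false if already cancelled
        } );

        // Once cancellation is propagated the algorithm stops scheduling new chunks;
        // long-running bodies can additionally poll the shared context themselves.
        tbb::parallel_for( 0L, n, [&ctx]( long ) {
            if ( ctx.is_group_execution_cancelled() )
                return;
            /* ... work ... */
        }, ctx );

        watchdog.join();
    }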
410 
411 bool task_group_context::is_group_execution_cancelled () const {
412  return my_cancellation_requested != 0;
413 }
414 
415 // IMPORTANT: It is assumed that this method is not used concurrently!
416 void task_group_context::reset () {
417  //! TODO: Add assertion that this context does not have children
418  // No fences are necessary since this context can be accessed from another thread
419  // only after stealing happened (which means necessary fences were used).
420  if ( my_exception ) {
421  my_exception->destroy();
422  my_exception = NULL;
423  }
424  my_cancellation_requested = 0;
425 }
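As the note above says, reset() must not be used concurrently; the supported pattern is to reuse a context serially once the task tree that cancelled it (or failed in it) has completed. An illustrative sketch (the helper run_twice is hypothetical):

    #include "tbb/parallel_for.h"
    #include "tbb/task.h"

    // Illustrative helper, not part of TBB.
    void run_twice( int n ) {
        tbb::task_group_context ctx;

        tbb::parallel_for( 0, n, [&ctx, n]( int i ) {
            if ( i == n / 2 )
                ctx.cancel_group_execution();      // first run is abandoned midway
        }, ctx );

        // The first parallel_for has returned, so no task is using ctx any more.
        ctx.reset();                               // clears cancellation and any pending exception
        tbb::parallel_for( 0, n, []( int ) { /* second, independent run */ }, ctx );
    }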
426 
427 #if __TBB_FP_CONTEXT
428 // IMPORTANT: It is assumed that this method is not used concurrently!
429 void task_group_context::capture_fp_settings () {
430  //! TODO: Add assertion that this context does not have children
431  // No fences are necessary since this context can be accessed from another thread
432  // only after stealing happened (which means necessary fences were used).
433  cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
434  if ( !(my_version_and_traits & fp_settings) ) {
435  new ( &ctl ) cpu_ctl_env;
436  my_version_and_traits |= fp_settings;
437  }
438  ctl.get_env();
439 }
440 
441 void task_group_context::copy_fp_settings( const task_group_context &src ) {
442  __TBB_ASSERT( !(my_version_and_traits & fp_settings), "The context already has FPU settings." );
443  __TBB_ASSERT( src.my_version_and_traits & fp_settings, "The source context does not have FPU settings." );
444 
445  cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
446  cpu_ctl_env &src_ctl = *internal::punned_cast<cpu_ctl_env*>(&src.my_cpu_ctl_env);
447  new (&ctl) cpu_ctl_env( src_ctl );
448  my_version_and_traits |= fp_settings;
449 }
450 #endif /* __TBB_FP_CONTEXT */
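User code drives this machinery either through the fp_settings construction trait or through capture_fp_settings(); bound descendants then pick the settings up via copy_fp_settings() during binding. An illustrative sketch of the capture-then-run pattern (the helper run_with_rounding_mode is hypothetical, and FE_TOWARDZERO is just an example of a control-word change the workers should observe):

    #include <cfenv>
    #include "tbb/parallel_for.h"
    #include "tbb/task.h"

    // Illustrative helper, not part of TBB.
    void run_with_rounding_mode( int n ) {
        std::fesetround( FE_TOWARDZERO );          // the FP state the parallel work should run under

        tbb::task_group_context ctx;
        ctx.capture_fp_settings();                 // snapshot the current control settings into ctx

        std::fesetround( FE_TONEAREST );           // the calling thread may restore its own state

        // Tasks executed under ctx apply the captured settings, so the body observes
        // FE_TOWARDZERO on whichever worker thread runs it.
        tbb::parallel_for( 0, n, []( int ) { /* FP-sensitive work */ }, ctx );
    }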
451 
452 void task_group_context::register_pending_exception () {
453  if ( my_cancellation_requested )
454  return;
455 #if TBB_USE_EXCEPTIONS
456  try {
457  throw;
458  } TbbCatchAll( this );
459 #endif /* TBB_USE_EXCEPTIONS */
460 }
461 
462 #if __TBB_TASK_PRIORITY
463 void task_group_context::set_priority ( priority_t prio ) {
464  __TBB_ASSERT( prio == priority_low || prio == priority_normal || prio == priority_high, "Invalid priority level value" );
465  intptr_t p = normalize_priority(prio);
466  if ( my_priority == p && !(my_state & task_group_context::may_have_children))
467  return;
468  my_priority = p;
469  internal::generic_scheduler* s = governor::local_scheduler_if_initialized();
470  if ( !s || !s->my_arena || !s->my_market->propagate_task_group_state(&task_group_context::my_priority, *this, p) )
471  return;
472 
474  // need to find out the right arena for priority update.
475  // The executing status check only guarantees being inside some working arena.
476  if ( s->my_innermost_running_task->state() == task::executing )
477  // Updating arena priority here does not eliminate necessity of checking each
478  // task priority and updating arena priority if necessary before the task execution.
479  // These checks will be necessary because:
480  // a) set_priority() may be invoked before any tasks from this task group are spawned;
481  // b) all spawned tasks from this task group are retrieved from the task pools.
482  // These cases create a time window when arena priority may be lowered.
483  s->my_market->update_arena_priority( *s->my_arena, p );
484 }
485 
486 priority_t task_group_context::priority () const {
487  return static_cast<priority_t>(priority_from_normalized_rep[my_priority]);
488 }
489 #endif /* __TBB_TASK_PRIORITY */
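An illustrative usage sketch for the (deprecated) priority interface above (the helper run_background is hypothetical), assuming a build where __TBB_TASK_PRIORITY is enabled:

    #include "tbb/parallel_for.h"
    #include "tbb/task.h"

    // Illustrative helper, not part of TBB.
    void run_background( int n ) {
        tbb::task_group_context ctx;
        ctx.set_priority( tbb::priority_low );     // background work yields to normal/high groups

        tbb::parallel_for( 0, n, []( int ) { /* background work */ }, ctx );

        tbb::priority_t p = ctx.priority();        // query the level back
        (void)p;
    }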
490 
491 #endif /* __TBB_TASK_GROUP_CONTEXT */
492 
493 } // namespace tbb