Home ⌂Doc Index ◂Up ▴
Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
task_group.h
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2020 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_task_group_H
18 #define __TBB_task_group_H
19 
20 #define __TBB_task_group_H_include_area
22 
23 #include "task.h"
24 #include "tbb_exception.h"
26 #if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
27 #include "task_arena.h"
28 #endif
29 
30 #if __TBB_TASK_GROUP_CONTEXT
31 
32 namespace tbb {
33 
34 namespace internal {
35  template<typename F> class task_handle_task;
36 }
37 
38 class task_group;
40 #if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
41 class isolated_task_group;
42 #endif
43 
44 template<typename F>
46  template<typename _F> friend class internal::task_handle_task;
47  friend class task_group;
48  friend class structured_task_group;
49 #if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
50  friend class isolated_task_group;
51 #endif
52 
53  static const intptr_t scheduled = 0x1;
54 
56  intptr_t my_state;
57 
58  void mark_scheduled () {
59  // The check here is intentionally lax to avoid the impact of interlocked operation
60  if ( my_state & scheduled )
63  }
64 public:
65  task_handle( const F& f ) : my_func(f), my_state(0) {}
66 #if __TBB_CPP11_RVALUE_REF_PRESENT
67  task_handle( F&& f ) : my_func( std::move(f)), my_state(0) {}
68 #endif
69 
70  void operator() () const { my_func(); }
71 };
72 
77 };
78 
79 namespace internal {
80 
81 template<typename F>
82 class task_handle_task : public task {
85  my_handle();
86  return NULL;
87  }
88 public:
89  task_handle_task( task_handle<F>& h ) : my_handle(h) { h.mark_scheduled(); }
90 };
91 
95  public:
98  }
101  }
102  };
103 protected:
106 
107  template<typename F>
109  __TBB_TRY {
111  // We need to increase the reference count of the root task to notify waiters that
112  // this task group has some work in progress.
113  ref_count_guard guard(*my_root);
114  f();
115  }
116  } __TBB_CATCH( ... ) {
118  }
119  return wait();
120  }
121 
122  template<typename Task, typename F>
124  return new( task::allocate_additional_child_of(*my_root) ) Task( internal::forward<F>(f) );
125  }
126 
127 public:
128  task_group_base( uintptr_t traits = 0 )
129  : my_context(task_group_context::bound, task_group_context::default_traits | traits)
130  {
133  }
134 
136  if( my_root->ref_count() > 1 ) {
137 #if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT
138  bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0;
139 #else
140  bool stack_unwinding_in_progress = std::uncaught_exception();
141 #endif
142  // Always attempt to do proper cleanup to avoid inevitable memory corruption
143  // in case of missing wait (for the sake of better testability & debuggability)
144  if ( !is_canceling() )
145  cancel();
146  __TBB_TRY {
148  } __TBB_CATCH (...) {
149  task::destroy(*my_root);
150  __TBB_RETHROW();
151  }
152  task::destroy(*my_root);
153  if ( !stack_unwinding_in_progress )
155  }
156  else {
157  task::destroy(*my_root);
158  }
159  }
160 
161  template<typename F>
162  void run( task_handle<F>& h ) {
163  task::spawn( *prepare_task< internal::task_handle_task<F> >(h) );
164  }
165 
167  __TBB_TRY {
169  } __TBB_CATCH( ... ) {
170  my_context.reset();
171  __TBB_RETHROW();
172  }
174  // TODO: the reset method is not thread-safe. Ensure the correct behavior.
175  my_context.reset();
176  return canceled;
177  }
178  return complete;
179  }
180 
181  bool is_canceling() {
183  }
184 
185  void cancel() {
187  }
188 }; // class task_group_base
189 
190 } // namespace internal
191 
193 public:
194  task_group () : task_group_base( task_group_context::concurrent_wait ) {}
195 
196 #if __SUNPRO_CC
197  template<typename F>
198  void run( task_handle<F>& h ) {
199  internal_run< internal::task_handle_task<F> >( h );
200  }
201 #else
202  using task_group_base::run;
203 #endif
204 
205 #if __TBB_CPP11_RVALUE_REF_PRESENT
206  template<typename F>
207  void run( F&& f ) {
208  task::spawn( *prepare_task< internal::function_task< typename internal::strip<F>::type > >(std::forward<F>(f)) );
209  }
210 #else
211  template<typename F>
212  void run(const F& f) {
213  task::spawn( *prepare_task< internal::function_task<F> >(f) );
214  }
215 #endif
216 
217  template<typename F>
219  return internal_run_and_wait<const F>( f );
220  }
221 
222  // TODO: add task_handle rvalues support
223  template<typename F>
225  h.mark_scheduled();
226  return internal_run_and_wait< task_handle<F> >( h );
227  }
228 }; // class task_group
229 
231 public:
232  // TODO: add task_handle rvalues support
233  template<typename F>
235  h.mark_scheduled();
236  return internal_run_and_wait< task_handle<F> >( h );
237  }
238 
241  my_root->set_ref_count(1);
242  return res;
243  }
244 }; // class structured_task_group
245 
246 #if TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
247 namespace internal {
248  using interface7::internal::delegate_base;
250 
251  class spawn_delegate : public delegate_base {
252  task* task_to_spawn;
253  void operator()() const __TBB_override {
254  task::spawn(*task_to_spawn);
255  }
256  public:
257  spawn_delegate(task* a_task) : task_to_spawn(a_task) {}
258  };
259 
260  class wait_delegate : public delegate_base {
261  void operator()() const __TBB_override {
262  status = tg.wait();
263  }
264  protected:
265  task_group& tg;
266  task_group_status& status;
267  public:
268  wait_delegate(task_group& a_group, task_group_status& tgs)
269  : tg(a_group), status(tgs) {}
270  };
271 
272  template<typename F>
273  class run_wait_delegate : public wait_delegate {
274  F& func;
275  void operator()() const __TBB_override {
276  status = tg.run_and_wait( func );
277  }
278  public:
279  run_wait_delegate(task_group& a_group, F& a_func, task_group_status& tgs)
280  : wait_delegate(a_group, tgs), func(a_func) {}
281  };
282 } // namespace internal
283 
284 class isolated_task_group : public task_group {
285  intptr_t this_isolation() {
286  return reinterpret_cast<intptr_t>(this);
287  }
288 public:
289  isolated_task_group () : task_group() {}
290 
291 #if __TBB_CPP11_RVALUE_REF_PRESENT
292  template<typename F>
293  void run( F&& f ) {
294  internal::spawn_delegate sd(
295  prepare_task< internal::function_task< typename internal::strip<F>::type > >(std::forward<F>(f))
296  );
297  internal::isolate_within_arena( sd, this_isolation() );
298  }
299 #else
300  template<typename F>
301  void run(const F& f) {
302  internal::spawn_delegate sd( prepare_task< internal::function_task<F> >(f) );
303  internal::isolate_within_arena( sd, this_isolation() );
304  }
305 #endif
306 
307  template<typename F>
308  task_group_status run_and_wait( const F& f ) {
310  internal::run_wait_delegate< const F > rwd( *this, f, result );
311  internal::isolate_within_arena( rwd, this_isolation() );
312  __TBB_ASSERT( result!=not_complete, "premature exit from wait?" );
313  return result;
314  }
315 
316  // TODO: add task_handle rvalues support
317  template<typename F>
318  void run( task_handle<F>& h ) {
319  internal::spawn_delegate sd( prepare_task< internal::task_handle_task<F> >(h) );
320  internal::isolate_within_arena( sd, this_isolation() );
321  }
322 
323  template<typename F>
324  task_group_status run_and_wait ( task_handle<F>& h ) {
326  internal::run_wait_delegate< task_handle<F> > rwd( *this, h, result );
327  internal::isolate_within_arena( rwd, this_isolation() );
328  __TBB_ASSERT( result!=not_complete, "premature exit from wait?" );
329  return result;
330  }
331 
332  task_group_status wait() {
334  internal::wait_delegate wd( *this, result );
335  internal::isolate_within_arena( wd, this_isolation() );
336  __TBB_ASSERT( result!=not_complete, "premature exit from wait?" );
337  return result;
338  }
339 }; // class isolated_task_group
340 #endif // TBB_PREVIEW_ISOLATED_TASK_GROUP && __TBB_TASK_ISOLATION
341 
342 inline
344  return task::self().is_cancelled();
345 }
346 
347 #if __TBB_CPP11_RVALUE_REF_PRESENT
348 template<class F>
350  return task_handle< typename internal::strip<F>::type >( std::forward<F>(f) );
351 }
352 #else
353 template<class F>
354 task_handle<F> make_task( const F& f ) {
355  return task_handle<F>( f );
356 }
357 #endif /* __TBB_CPP11_RVALUE_REF_PRESENT */
358 
359 } // namespace tbb
360 
361 #endif /* __TBB_TASK_GROUP_CONTEXT */
362 
364 #undef __TBB_task_group_H_include_area
365 
366 #endif /* __TBB_task_group_H */
intptr_t my_state
Definition: task_group.h:56
task_handle(const F &f)
Definition: task_group.h:65
~task_group_base() __TBB_NOEXCEPT(false)
Definition: task_group.h:135
bool __TBB_EXPORTED_METHOD cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
Used to form groups of tasks.
Definition: task.h:358
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function h
void wait_for_all()
Wait for reference count to become one, and set reference count to zero.
Definition: task.h:819
task * prepare_task(__TBB_FORWARDING_REF(F) f)
Definition: task_group.h:123
task_group_status
Definition: task_group.h:73
bool is_cancelled() const
Returns true if the context has received a cancellation request.
Definition: task.h:974
Base class for user-defined tasks.
Definition: task.h:615
task_handle(F &&f)
Definition: task_group.h:67
task_group_base(uintptr_t traits=0)
Definition: task_group.h:128
task_handle_task(task_handle< F > &h)
Definition: task_group.h:89
task_group_status run_and_wait(task_handle< F > &h)
Definition: task_group.h:224
void run(task_handle< F > &h)
Definition: task_group.h:162
#define __TBB_RETHROW()
Definition: tbb_stddef.h:286
Base class for types that should not be assigned.
Definition: tbb_stddef.h:322
static const intptr_t scheduled
Definition: task_group.h:53
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
void __TBB_EXPORTED_METHOD reset()
Forcefully reinitializes the context after the task tree it was associated with is completed.
#define __TBB_DEPRECATED
Definition: tbb_config.h:636
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
Definition: task.h:663
bool __TBB_EXPORTED_METHOD is_group_execution_cancelled() const
Returns true if the context has received a cancellation request.
task_group_status wait()
Definition: task_group.h:239
task_handle< F > & my_handle
Definition: task_group.h:83
#define __TBB_NOEXCEPT(expression)
Definition: tbb_stddef.h:110
task * execute() __TBB_override
Should be overridden by derived classes.
Definition: task_group.h:84
task_handle< typename internal::strip< F >::type > make_task(F &&f)
Definition: task_group.h:349
A task that does nothing; useful for synchronization.
Definition: task.h:1042
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task * task
static task &__TBB_EXPORTED_FUNC self()
The innermost task being executed or destroyed by the current thread at the moment.
Definition: task.cpp:201
Base class for types that should not be copied or assigned.
Definition: tbb_stddef.h:330
void run(F &&f)
Definition: task_group.h:207
void mark_scheduled()
Definition: task_group.h:58
int ref_count() const
The internal reference count.
Definition: task.h:915
void set_ref_count(int count)
Set reference count.
Definition: task.h:761
#define __TBB_override
Definition: tbb_stddef.h:240
void __TBB_EXPORTED_FUNC isolate_within_arena(delegate_base &d, intptr_t isolation=0)
#define __TBB_CATCH(e)
Definition: tbb_stddef.h:284
task_group_status internal_run_and_wait(F &f)
Definition: task_group.h:108
#define __TBB_TRY
Definition: tbb_stddef.h:283
#define __TBB_FORWARDING_REF(A)
Definition: tbb_stddef.h:517
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
task_group_context my_context
Definition: task_group.h:105
bool is_current_task_group_canceling()
Definition: task_group.h:343
void increment_ref_count()
Atomically increment reference count.
Definition: task.h:771
task_group_status run_and_wait(const F &f)
Definition: task_group.h:218
void throw_exception(exception_id eid)
Versionless convenience wrapper for throw_exception_v4()
The graph class.
task_group_status run_and_wait(task_handle< F > &h)
Definition: task_group.h:234
int decrement_ref_count()
Atomically decrement reference count and returns its new value.
Definition: task.h:788
task_group_status wait()
Definition: task_group.h:166
void operator()() const
Definition: task_group.h:70
void move(tbb_thread &t1, tbb_thread &t2)
Definition: tbb_thread.h:319

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.