/*
    Copyright (c) 2005-2023 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_task_group_H
#define __TBB_task_group_H

#include "detail/_config.h"
#include "detail/_namespace_injection.h"
#include "detail/_assert.h"
#include "detail/_utils.h"
#include "detail/_template_helpers.h"
#include "detail/_exception.h"
#include "detail/_task.h"
#include "detail/_small_object_pool.h"
#include "detail/_intrusive_list_node.h"
#include "detail/_task_handle.h"

#include "profiling.h"

#include <type_traits>

#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress warning: structure was padded due to alignment specifier
    #pragma warning(push)
    #pragma warning(disable:4324)
#endif

namespace tbb {
namespace detail {

namespace d1 {
class delegate_base;
class task_arena_base;
class task_group_context;
class task_group_base;
}

namespace r1 {
// Forward declarations
class tbb_exception_ptr;
class cancellation_disseminator;
class thread_data;
class task_dispatcher;
template <bool>
class context_guard_helper;
struct task_arena_impl;
class context_list;

TBB_EXPORT void __TBB_EXPORTED_FUNC execute(d1::task_arena_base&, d1::delegate_base&);
TBB_EXPORT void __TBB_EXPORTED_FUNC isolate_within_arena(d1::delegate_base&, std::intptr_t);

TBB_EXPORT void __TBB_EXPORTED_FUNC initialize(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC destroy(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC reset(d1::task_group_context&);
TBB_EXPORT bool __TBB_EXPORTED_FUNC cancel_group_execution(d1::task_group_context&);
TBB_EXPORT bool __TBB_EXPORTED_FUNC is_group_execution_cancelled(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC capture_fp_settings(d1::task_group_context&);

struct task_group_context_impl;
}

namespace d2 {

namespace {
template<typename F>
d1::task* task_ptr_or_nullptr(F&& f);
}

template<typename F>
class function_task : public task_handle_task {
    // TODO: apply empty base optimization here
    const F m_func;

private:
    d1::task* execute(d1::execution_data& ed) override {
        __TBB_ASSERT(ed.context == &this->ctx(), "The task group context should be used for all tasks");
        task* res = task_ptr_or_nullptr(m_func);
        finalize(&ed);
        return res;
    }
    d1::task* cancel(d1::execution_data& ed) override {
        finalize(&ed);
        return nullptr;
    }
public:
    template<typename FF>
    function_task(FF&& f, d1::wait_context& wo, d1::task_group_context& ctx, d1::small_object_allocator& alloc)
        : task_handle_task{wo, ctx, alloc},
          m_func(std::forward<FF>(f)) {}
};

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
namespace {
    template<typename F>
    d1::task* task_ptr_or_nullptr_impl(std::false_type, F&& f){
        task_handle th = std::forward<F>(f)();
        return task_handle_accessor::release(th);
    }

    template<typename F>
    d1::task* task_ptr_or_nullptr_impl(std::true_type, F&& f){
        std::forward<F>(f)();
        return nullptr;
    }

    template<typename F>
    d1::task* task_ptr_or_nullptr(F&& f){
        using is_void_t = std::is_void<
            decltype(std::forward<F>(f)())
            >;

        return task_ptr_or_nullptr_impl(is_void_t{}, std::forward<F>(f));
    }
}
#else
namespace {
    template<typename F>
    d1::task* task_ptr_or_nullptr(F&& f){
        std::forward<F>(f)();
        return nullptr;
    }
}  // namespace
#endif // __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
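
// Illustrative sketch (not part of this header): with
// __TBB_PREVIEW_TASK_GROUP_EXTENSIONS enabled, task_ptr_or_nullptr accepts a
// body that returns a task_handle; the released task pointer is handed back to
// the scheduler as the next task to run (task bypass). A void-returning body
// simply yields nullptr. Assuming a task_group `tg` already exists:
//
//     tg.run([&tg]() -> tbb::task_handle {
//         /* ...do some work... */
//         return tg.defer([] { /* continuation */ }); // executed via bypass
//     });
//     tg.wait();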
} // namespace d2

namespace d1 {

// This structure is left here for backward compatibility check
struct context_list_node {
    std::atomic<context_list_node*> prev{};
    std::atomic<context_list_node*> next{};
};

//! Used to form groups of tasks
/** @ingroup task_scheduling
    The context services explicit cancellation requests from user code as well as
    unhandled exceptions intercepted during task execution. Intercepting an exception
    results in an internal cancellation request, which is processed in exactly the
    same way as an external one.

    The context is associated with one or more root tasks and defines the cancellation
    group that includes all the descendants of the corresponding root task(s). Association
    is established when a context object is passed as an argument to the task::allocate_root()
    method. See task_group_context::task_group_context for more details.

    The context can be bound to another one, and other contexts can be bound to it,
    forming a tree-like structure: parent -> this -> children. The arrows designate
    the direction of cancellation propagation. If a task in a cancellation group is
    cancelled, all the other tasks in this group and in the groups bound to it (as
    children) get cancelled too.
**/
class task_group_context : no_copy {
public:
    enum traits_type {
        fp_settings     = 1 << 1,
        concurrent_wait = 1 << 2,
        default_traits  = 0
    };
    enum kind_type {
        isolated,
        bound
    };
private:
    //! Space for platform-specific FPU settings.
    /** Must only be accessed inside TBB binaries, and never directly in user
        code or inline methods. */
    std::uint64_t my_cpu_ctl_env;

    //! Specifies whether cancellation was requested for this task group.
    std::atomic<std::uint32_t> my_cancellation_requested;

    //! Versioning for run-time checks and behavioral traits of the context.
    enum class task_group_context_version : std::uint8_t {
        unused = 1       // ensure that new versions, if any, will not clash with previously used ones
    };
    task_group_context_version my_version;

    //! The context traits.
    struct context_traits {
        bool fp_settings        : 1;
        bool concurrent_wait    : 1;
        bool bound              : 1;
        bool reserved1          : 1;
        bool reserved2          : 1;
        bool reserved3          : 1;
        bool reserved4          : 1;
        bool reserved5          : 1;
    } my_traits;

    static_assert(sizeof(context_traits) == 1, "Traits shall fit into one byte.");

    static constexpr std::uint8_t may_have_children = 1;
    //! The context internal state (currently only may_have_children).
    std::atomic<std::uint8_t> my_may_have_children;

    enum class state : std::uint8_t {
        created,
        locked,
        isolated,
        bound,
        dead,
        proxy = std::uint8_t(-1) // the context is not a real one, but a proxy for another context
    };

    //! The synchronization machine state to manage lifetime.
    std::atomic<state> my_state;

    union {
        //! Pointer to the context of the parent cancellation group. nullptr for isolated contexts.
        task_group_context* my_parent;

        //! Pointer to the actual context 'this' context represents a proxy of.
        task_group_context* my_actual_context;
    };

    //! Thread data instance that registered this context in its list.
    r1::context_list* my_context_list;
    static_assert(sizeof(std::atomic<r1::thread_data*>) == sizeof(r1::context_list*), "To preserve backward compatibility these types should have the same size");

    //! Used to form the thread-specific list of contexts without additional memory allocation.
    /** A context is added to the list of the current thread when it is bound to
        its parent. Any context can be present in the list of at most one thread. **/
    intrusive_list_node my_node;
    static_assert(sizeof(intrusive_list_node) == sizeof(context_list_node), "To preserve backward compatibility these types should have the same size");

    //! Pointer to the container storing the exception being propagated across this task group.
    std::atomic<r1::tbb_exception_ptr*> my_exception;
    static_assert(sizeof(std::atomic<r1::tbb_exception_ptr*>) == sizeof(r1::tbb_exception_ptr*),
        "backward compatibility check");

    //! Used to set and maintain stack stitching point for Intel Performance Tools.
    void* my_itt_caller;

    //! Description of algorithm for scheduler based instrumentation.
    string_resource_index my_name;

    char padding[max_nfs_size
        - sizeof(std::uint64_t)                          // my_cpu_ctl_env
        - sizeof(std::atomic<std::uint32_t>)             // my_cancellation_requested
        - sizeof(std::uint8_t)                           // my_version
        - sizeof(context_traits)                         // my_traits
        - sizeof(std::atomic<std::uint8_t>)              // my_may_have_children
        - sizeof(std::atomic<state>)                     // my_state
        - sizeof(task_group_context*)                    // my_parent
        - sizeof(r1::context_list*)                      // my_context_list
        - sizeof(intrusive_list_node)                    // my_node
        - sizeof(std::atomic<r1::tbb_exception_ptr*>)    // my_exception
        - sizeof(void*)                                  // my_itt_caller
        - sizeof(string_resource_index)                  // my_name
    ];

    task_group_context(context_traits t, string_resource_index name)
        : my_version{task_group_context_version::unused}, my_name{name}
    {
        my_traits = t; // GCC 4.8 issues a missing-field-initializers warning for list initialization of the bit-field struct
        r1::initialize(*this);
    }

    task_group_context(task_group_context* actual_context)
        : my_version{task_group_context_version::unused}
        , my_state{state::proxy}
        , my_actual_context{actual_context}
    {
        __TBB_ASSERT(my_actual_context, "Passed pointer value points to nothing.");
        my_name = actual_context->my_name;

        // No need to initialize 'this' context: it acts as a proxy for my_actual_context,
        // whose initialization is the user's responsibility.
    }

    static context_traits make_traits(kind_type relation_with_parent, std::uintptr_t user_traits) {
        context_traits ct;
        ct.fp_settings = (user_traits & fp_settings) == fp_settings;
        ct.concurrent_wait = (user_traits & concurrent_wait) == concurrent_wait;
        ct.bound = relation_with_parent == bound;
        ct.reserved1 = ct.reserved2 = ct.reserved3 = ct.reserved4 = ct.reserved5 = false;
        return ct;
    }

    bool is_proxy() const {
        return my_state.load(std::memory_order_relaxed) == state::proxy;
    }

    task_group_context& actual_context() noexcept {
        if (is_proxy()) {
            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
            return *my_actual_context;
        }
        return *this;
    }

    const task_group_context& actual_context() const noexcept {
        if (is_proxy()) {
            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
            return *my_actual_context;
        }
        return *this;
    }

public:
    //! Default & binding constructor.
    /** By default a bound context is created. That is, this context will be bound
        (as a child) to the context of the currently executing task. Cancellation
        requests passed to the parent context are propagated to all the contexts
        bound to it. Similarly, a priority change is propagated from the parent context
        to its children.

        If task_group_context::isolated is used as the argument, then the tasks associated
        with this context will never be affected by events in any other context.

        Creating an isolated context involves much less overhead, but isolated contexts
        have limited utility. Normally, when an exception occurs in an algorithm that
        has nested ones running, it is desirable to have all the nested algorithms
        cancelled as well. Such behavior requires nested algorithms to use bound contexts.

        There is one case where using isolated contexts is beneficial: an algorithm
        invoked directly from an external thread (not from a TBB task). Supplying such
        an algorithm with an explicitly created isolated context results in a faster
        algorithm startup.

        VERSIONING NOTE:
        Implementation(s) of task_group_context constructor(s) cannot be made
        entirely out-of-line because the run-time version must be set by the user
        code. This will become critically important for binary compatibility, if
        we ever have to change the size of the context object. **/

    task_group_context(kind_type relation_with_parent = bound,
                       std::uintptr_t t = default_traits)
        : task_group_context(make_traits(relation_with_parent, t), CUSTOM_CTX) {}

    // Custom constructor for instrumentation of oneTBB algorithms
    task_group_context(string_resource_index name)
        : task_group_context(make_traits(bound, default_traits), name) {}

    // Do not introduce any logic on the user side since it might break state propagation assumptions
    ~task_group_context() {
        // When 'this' serves as a proxy, the initialization does not happen - nor should the
        // destruction.
        if (!is_proxy())
        {
            r1::destroy(*this);
        }
    }

    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the context's parent if it is set. **/
    void reset() {
        r1::reset(actual_context());
    }

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise.

        Note that canceling never fails. When false is returned, it just means that
        another thread (or this one) has already sent a cancellation request to this
        context or to one of its ancestors (if this context is bound). It is guaranteed
        that when this method is concurrently called on the same not-yet-cancelled
        context, true will be returned by one and only one invocation. **/
    bool cancel_group_execution() {
        return r1::cancel_group_execution(actual_context());
    }
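
    // Illustrative sketch (not part of this header): when several threads race to
    // cancel the same not-yet-cancelled context, exactly one call returns true.
    // Assuming `ctx` is a live tbb::task_group_context shared between threads:
    //
    //     std::atomic<int> winners{0};
    //     // executed concurrently on multiple threads:
    //     if (ctx.cancel_group_execution()) ++winners;
    //     // after all threads have finished: winners == 1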

    //! Returns true if the context received a cancellation request.
    bool is_group_execution_cancelled() {
        return r1::is_group_execution_cancelled(actual_context());
    }

#if __TBB_FP_CONTEXT
    //! Captures the current FPU control settings to the context.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the FPU control settings of the context's parent. **/
    void capture_fp_settings() {
        r1::capture_fp_settings(actual_context());
    }
#endif

    //! Returns the user-visible context traits
    std::uintptr_t traits() const {
        std::uintptr_t t{};
        const task_group_context& ctx = actual_context();
        t |= ctx.my_traits.fp_settings ? fp_settings : 0;
        t |= ctx.my_traits.concurrent_wait ? concurrent_wait : 0;
        return t;
    }
private:
    // TODO: cleanup friends
    friend class r1::cancellation_disseminator;
    friend class r1::thread_data;
    friend class r1::task_dispatcher;
    template <bool>
    friend class r1::context_guard_helper;
    friend struct r1::task_arena_impl;
    friend struct r1::task_group_context_impl;
    friend class task_group_base;
}; // class task_group_context

static_assert(sizeof(task_group_context) == 128, "Wrong size of task_group_context");
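
// Illustrative sketch (not part of this header): a task_group_context passed to
// a task_group defines the cancellation group of its tasks. Cancelling the
// context cancels the group; a bound context (the default) also receives
// cancellation propagated from its parent, while an isolated one does not.
//
//     tbb::task_group_context ctx(tbb::task_group_context::isolated);
//     tbb::task_group tg(ctx);
//     tg.run([] { /* long-running, cancellation-aware work */ });
//     ctx.cancel_group_execution(); // request cancellation of the whole group
//     tg.wait();                    // returns tbb::canceled if cancelled in flight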

enum task_group_status {
    not_complete,
    complete,
    canceled
};

class task_group;
class structured_task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP
class isolated_task_group;
#endif

template<typename F>
class function_task : public task {
    const F m_func;
    wait_context& m_wait_ctx;
    small_object_allocator m_allocator;

    void finalize(const execution_data& ed) {
        // Take a local reference so that 'this' is not accessed after destruction.
        wait_context& wo = m_wait_ctx;
        // Copy the allocator to the stack
        auto allocator = m_allocator;
        // Destroy the user functor before releasing the wait.
        this->~function_task();
        wo.release();

        allocator.deallocate(this, ed);
    }
    task* execute(execution_data& ed) override {
        task* res = d2::task_ptr_or_nullptr(m_func);
        finalize(ed);
        return res;
    }
    task* cancel(execution_data& ed) override {
        finalize(ed);
        return nullptr;
    }
public:
    function_task(const F& f, wait_context& wo, small_object_allocator& alloc)
        : m_func(f)
        , m_wait_ctx(wo)
        , m_allocator(alloc) {}

    function_task(F&& f, wait_context& wo, small_object_allocator& alloc)
        : m_func(std::move(f))
        , m_wait_ctx(wo)
        , m_allocator(alloc) {}
};

template <typename F>
class function_stack_task : public task {
    const F& m_func;
    wait_context& m_wait_ctx;

    void finalize() {
        m_wait_ctx.release();
    }
    task* execute(execution_data&) override {
        task* res = d2::task_ptr_or_nullptr(m_func);
        finalize();
        return res;
    }
    task* cancel(execution_data&) override {
        finalize();
        return nullptr;
    }
public:
    function_stack_task(const F& f, wait_context& wo) : m_func(f), m_wait_ctx(wo) {}
};

class task_group_base : no_copy {
protected:
    wait_context m_wait_ctx;
    task_group_context m_context;

    template<typename F>
    task_group_status internal_run_and_wait(const F& f) {
        function_stack_task<F> t{ f, m_wait_ctx };
        m_wait_ctx.reserve();
        bool cancellation_status = false;
        try_call([&] {
            execute_and_wait(t, context(), m_wait_ctx, context());
        }).on_completion([&] {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            cancellation_status = context().is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    task_group_status internal_run_and_wait(d2::task_handle&& h) {
        __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");

        using acs = d2::task_handle_accessor;
        __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");

        bool cancellation_status = false;
        try_call([&] {
            execute_and_wait(*acs::release(h), context(), m_wait_ctx, context());
        }).on_completion([&] {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            cancellation_status = context().is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    template<typename F>
    task* prepare_task(F&& f) {
        m_wait_ctx.reserve();
        small_object_allocator alloc{};
        return alloc.new_object<function_task<typename std::decay<F>::type>>(std::forward<F>(f), m_wait_ctx, alloc);
    }

    task_group_context& context() noexcept {
        return m_context.actual_context();
    }

    template<typename F>
    d2::task_handle prepare_task_handle(F&& f) {
        m_wait_ctx.reserve();
        small_object_allocator alloc{};
        using function_task_t = d2::function_task<typename std::decay<F>::type>;
        d2::task_handle_task* function_task_p = alloc.new_object<function_task_t>(std::forward<F>(f), m_wait_ctx, context(), alloc);

        return d2::task_handle_accessor::construct(function_task_p);
    }

public:
    task_group_base(uintptr_t traits = 0)
        : m_wait_ctx(0)
        , m_context(task_group_context::bound, task_group_context::default_traits | traits)
    {}

    task_group_base(task_group_context& ctx)
        : m_wait_ctx(0)
        , m_context(&ctx)
    {}

    ~task_group_base() noexcept(false) {
        if (m_wait_ctx.continue_execution()) {
#if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT
            bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0;
#else
            bool stack_unwinding_in_progress = std::uncaught_exception();
#endif
            // Always attempt to do proper cleanup to avoid inevitable memory corruption
            // in case of missing wait (for the sake of better testability & debuggability)
            if (!context().is_group_execution_cancelled())
                cancel();
            d1::wait(m_wait_ctx, context());
            if (!stack_unwinding_in_progress)
                throw_exception(exception_id::missing_wait);
        }
    }

    task_group_status wait() {
        bool cancellation_status = false;
        try_call([&] {
            d1::wait(m_wait_ctx, context());
        }).on_completion([&] {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            cancellation_status = m_context.is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    void cancel() {
        context().cancel_group_execution();
    }
}; // class task_group_base

class task_group : public task_group_base {
public:
    task_group() : task_group_base(task_group_context::concurrent_wait) {}
    task_group(task_group_context& ctx) : task_group_base(ctx) {}

    template<typename F>
    void run(F&& f) {
        spawn(*prepare_task(std::forward<F>(f)), context());
    }

    void run(d2::task_handle&& h) {
        __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");

        using acs = d2::task_handle_accessor;
        __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");

        spawn(*acs::release(h), context());
    }

    template<typename F>
    d2::task_handle defer(F&& f) {
        return prepare_task_handle(std::forward<F>(f));
    }

    template<typename F>
    task_group_status run_and_wait(const F& f) {
        return internal_run_and_wait(f);
    }

    task_group_status run_and_wait(d2::task_handle&& h) {
        return internal_run_and_wait(std::move(h));
    }
}; // class task_group
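
// Illustrative sketch (not part of this header): basic task_group usage with
// run(), defer(), and wait(). The variables below are hypothetical.
//
//     int x = 0, y = 0;
//     tbb::task_group tg;
//     tg.run([&] { x = 1; });                        // spawn work asynchronously
//     tbb::task_handle h = tg.defer([&] { y = 2; }); // create without scheduling
//     tg.run(std::move(h));                          // schedule the deferred task
//     if (tg.wait() == tbb::complete) {
//         // both tasks finished: x == 1 && y == 2
//     }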

#if TBB_PREVIEW_ISOLATED_TASK_GROUP
class spawn_delegate : public delegate_base {
    task* task_to_spawn;
    task_group_context& context;
    bool operator()() const override {
        spawn(*task_to_spawn, context);
        return true;
    }
public:
    spawn_delegate(task* a_task, task_group_context& ctx)
        : task_to_spawn(a_task), context(ctx)
    {}
};

class wait_delegate : public delegate_base {
    bool operator()() const override {
        status = tg.wait();
        return true;
    }
protected:
    task_group& tg;
    task_group_status& status;
public:
    wait_delegate(task_group& a_group, task_group_status& tgs)
        : tg(a_group), status(tgs) {}
};

template<typename F>
class run_wait_delegate : public wait_delegate {
    F& func;
    bool operator()() const override {
        status = tg.run_and_wait(func);
        return true;
    }
public:
    run_wait_delegate(task_group& a_group, F& a_func, task_group_status& tgs)
        : wait_delegate(a_group, tgs), func(a_func) {}
};

class isolated_task_group : public task_group {
    intptr_t this_isolation() {
        return reinterpret_cast<intptr_t>(this);
    }
public:
    isolated_task_group() : task_group() {}

    isolated_task_group(task_group_context& ctx) : task_group(ctx) {}

    template<typename F>
    void run(F&& f) {
        spawn_delegate sd(prepare_task(std::forward<F>(f)), context());
        r1::isolate_within_arena(sd, this_isolation());
    }

    void run(d2::task_handle&& h) {
        __TBB_ASSERT(h != nullptr, "Attempt to schedule empty task_handle");

        using acs = d2::task_handle_accessor;
        __TBB_ASSERT(&acs::ctx_of(h) == &context(), "Attempt to schedule task_handle into different task_group");

        spawn_delegate sd(acs::release(h), context());
        r1::isolate_within_arena(sd, this_isolation());
    }

    template<typename F>
    task_group_status run_and_wait(const F& f) {
        task_group_status result = not_complete;
        run_wait_delegate<const F> rwd(*this, f, result);
        r1::isolate_within_arena(rwd, this_isolation());
        __TBB_ASSERT(result != not_complete, "premature exit from wait?");
        return result;
    }

    task_group_status wait() {
        task_group_status result = not_complete;
        wait_delegate wd(*this, result);
        r1::isolate_within_arena(wd, this_isolation());
        __TBB_ASSERT(result != not_complete, "premature exit from wait?");
        return result;
    }
}; // class isolated_task_group
#endif // TBB_PREVIEW_ISOLATED_TASK_GROUP
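
// Illustrative sketch (not part of this header): with
// TBB_PREVIEW_ISOLATED_TASK_GROUP defined before including this header, an
// isolated_task_group runs its tasks in an isolation region keyed by the
// group's address, so a thread blocked in wait() does not pick up unrelated
// tasks from the surrounding arena.
//
//     tbb::isolated_task_group itg;
//     itg.run([] { /* work that must not interleave with outer tasks */ });
//     itg.wait();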

inline bool is_current_task_group_canceling() {
    task_group_context* ctx = current_context();
    return ctx ? ctx->is_group_execution_cancelled() : false;
}
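
// Illustrative sketch (not part of this header): long-running task bodies can
// poll is_current_task_group_canceling() and exit early. `tg` and `n` are
// hypothetical.
//
//     tg.run([n] {
//         for (int i = 0; i < n; ++i) {
//             if (tbb::is_current_task_group_canceling()) return; // cooperative exit
//             /* ...process chunk i... */
//         }
//     });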

} // namespace d1
} // namespace detail

inline namespace v1 {
using detail::d1::task_group_context;
using detail::d1::task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP
using detail::d1::isolated_task_group;
#endif

using detail::d1::task_group_status;
using detail::d1::not_complete;
using detail::d1::complete;
using detail::d1::canceled;

using detail::d1::is_current_task_group_canceling;
using detail::r1::missing_wait;

using detail::d2::task_handle;
}

} // namespace tbb

#if _MSC_VER && !defined(__INTEL_COMPILER)
    #pragma warning(pop) // 4324 warning
#endif

#endif // __TBB_task_group_H