
Searched refs:task (Results 1 – 25 of 79) sorted by relevance


/oneTBB/doc/main/tbb_userguide/Migration_Guide/
Task_API.rst
3 Migrating from low-level task API
9 task API is used.
23 #include <tbb/task.h>
58 implement ``tbb::task`` interface that overrides the ``tbb::task* tbb::task::execute()`` virtual
102 #include <tbb/task.h>
121 tbb::task& m_root;
133 tbb::task::spawn(task);
263 task:
339 #include <tbb/task.h>
344 task* execute(){
[all …]
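The Task_API.rst hits above come from the guide on migrating off the low-level task API: deriving from tbb::task, overriding tbb::task* tbb::task::execute(), and calling tbb::task::spawn(). As a hedged illustration of the direction that guide points in, the sketch below expresses the same spawn-and-wait pattern with the modern tbb::task_group; the lambda bodies and counts are illustrative, not code from the guide.

    #include <oneapi/tbb/task_group.h>
    #include <iostream>

    int main() {
        oneapi::tbb::task_group tg;
        int results[4] = {};
        for (int i = 0; i < 4; ++i) {
            // Each run() call replaces an explicit tbb::task subclass plus spawn().
            tg.run([i, &results] { results[i] = i * i; });
        }
        tg.wait();  // replaces waiting on a root task in the old API
        for (int r : results) std::cout << r << ' ';
        std::cout << '\n';
    }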
/oneTBB/src/tbb/
arena_slot.h
44 static d1::task** const EmptyTaskPool = nullptr;
45 static d1::task** const LockedTaskPool = reinterpret_cast<d1::task**>(~std::intptr_t(0));
56 std::atomic<d1::task**> task_pool; in alignas()
85 d1::task** task_pool_ptr; in alignas()
111 my_task_pool_size = byte_size / sizeof(d1::task*); in allocate_task_pool()
158 void spawn(d1::task& t) { in spawn()
229 d1::task** new_task_pool = task_pool_ptr; in prepare_task_pool()
299 d1::task** expected = task_pool_ptr; in acquire_task_pool()
325 d1::task** lock_task_pool() { in lock_task_pool()
326 d1::task** victim_task_pool; in lock_task_pool()
[all …]
task_stream.h
122 using lane_t = queue_and_mutex <d1::task*, mutex>;
123 d1::task* get_item( lane_t::queue_base_t& queue ) { in get_item()
124 d1::task* result = queue.front(); in get_item()
133 using lane_t = queue_and_mutex <d1::task*, mutex>;
134 d1::task* get_item( lane_t::queue_base_t& queue ) { in get_item()
135 d1::task* result = nullptr; in get_item()
194 d1::task* popped = nullptr; in pop()
206 d1::task* result = nullptr; in pop_specific()
245 d1::task* try_pop( unsigned lane_idx ) { in try_pop()
248 d1::task* result = nullptr; in try_pop()
[all …]
scheduler_common.h
111 static isolation_type& isolation(d1::task& t) { in isolation()
115 static void set_proxy_trait(d1::task& t) { in set_proxy_trait()
120 static bool is_proxy_task(d1::task& t) { in is_proxy_task()
123 static void set_resume_trait(d1::task& t) { in set_resume_trait()
127 static bool is_resume_task(d1::task& t) { in is_resume_task()
349 inline void assert_task_valid(const d1::task* t) { in assert_task_valid()
358 inline void assert_task_valid(const d1::task*) {} in assert_task_valid() argument
423 struct resume_task final : public d1::task {
490 d1::task* get_critical_task(d1::task*, execution_data_ext&, isolation_type, bool); in alignas()
497 d1::task* local_wait_for_all(d1::task * t, Waiter& waiter); in alignas()
[all …]
task_dispatcher.h
44 inline d1::task* get_self_recall_task(arena_slot& slot) { in get_self_recall_task()
46 d1::task* t = nullptr; in get_self_recall_task()
125 inline d1::task* task_dispatcher::get_inbox_or_critical_task( in get_inbox_or_critical_task()
146 inline d1::task* task_dispatcher::get_stream_or_critical_task( in get_stream_or_critical_task()
158 inline d1::task* task_dispatcher::steal_or_get_critical( in steal_or_get_critical()
171 d1::task* task_dispatcher::receive_or_steal_task( in receive_or_steal_task()
177 d1::task* t = nullptr; in receive_or_steal_task()
243 d1::task* task_dispatcher::local_wait_for_all(d1::task* t, Waiter& waiter ) { in local_wait_for_all()
396 inline d1::task* task_dispatcher::get_critical_task(d1::task* t, execution_data_ext& ed, isolation_… in get_critical_task()
436 inline d1::task* task_dispatcher::get_critical_task(d1::task* t, execution_data_ext&, isolation_typ… in get_critical_task()
[all …]
waiters.h
29 inline d1::task* get_self_recall_task(arena_slot& slot);
57 bool continue_execution(arena_slot& slot, d1::task*& t) const { in continue_execution()
78 static bool postpone_execution(d1::task&) { in postpone_execution() argument
127 bool continue_execution(arena_slot& slot, d1::task*& t) const { in continue_execution()
149 static bool postpone_execution(d1::task&) { in postpone_execution() argument
163 bool continue_execution(arena_slot& slot, d1::task*& t) const { in continue_execution()
185 static bool postpone_execution(d1::task& t) { in postpone_execution()
arena_slot.cpp
28 d1::task* arena_slot::get_task_impl(size_t T, execution_data_ext& ed, bool& tasks_omitted, isolatio… in get_task_impl()
32 d1::task* result = task_pool_ptr[T]; in get_task_impl()
48 if ( d1::task *t = tp.extract_task<task_proxy::pool_bit>() ) { in get_task_impl()
61 d1::task* arena_slot::get_task(execution_data_ext& ed, isolation_type isolation) { in get_task()
67 d1::task* result = nullptr; in get_task()
148 d1::task* arena_slot::steal_task(arena& a, isolation_type isolation, std::size_t slot_index) { in steal_task()
149 d1::task** victim_pool = lock_task_pool(); in steal_task()
153 d1::task* result = nullptr; in steal_task()
mailbox.h
31 struct task_proxy : public d1::task {
58 static task* task_ptr ( intptr_t tat ) { in task_ptr()
59 return (task*)(tat & ~location_mask); in task_ptr()
64 inline task* extract_task () { in extract_task()
85 task* execute(d1::execution_data&) override { in execute()
89 task* cancel(d1::execution_data&) override { in cancel()
arena.h
333 …d1::task* steal_task(unsigned arena_index, FastRandom& frnd, execution_data_ext& ed, isolation_typ…
337 d1::task* get_stream_task(task_stream<accessor>& stream, unsigned& hint);
341 d1::task* get_critical_task(unsigned& hint, isolation_type isolation);
348 void enqueue_task(d1::task&, d1::task_group_context&, thread_data&);
436 inline d1::task* arena::steal_task(unsigned arena_index, FastRandom& frnd, execution_data_ext& ed, … in steal_task()
453 d1::task **pool = victim->task_pool.load(std::memory_order_relaxed); in steal_task()
454 d1::task *t = nullptr; in steal_task()
479 inline d1::task* arena::get_stream_task(task_stream<accessor>& stream, unsigned& hint) { in get_stream_task()
492 inline d1::task* arena::get_critical_task(unsigned& hint, isolation_type isolation) { in get_critical_task()
/oneTBB/test/tbb/
test_resumable_tasks.cpp
62 void submit(tbb::task::suspend_point ctx) { in submit()
70 tbb::task::suspend_point tag; in asyncLoop()
81 tbb::task::resume(tag); in asyncLoop()
88 std::queue<tbb::task::suspend_point> m_tagQueue;
95 void operator()(tbb::task::suspend_point tag) { in operator ()()
147 tbb::task::suspend([&] (tbb::task::suspend_point sp) { m_asyncActivity.submit(sp); }); in operator ()()
210 void submit(tbb::task::suspend_point ctx) { in submit()
218 tbb::task::suspend_point tag; in asyncLoop()
231 tbb::task::resume(tag); in asyncLoop()
396 tbb::task::suspend([thread_id](tbb::task::suspend_point tag) { in TestObservers()
[all …]
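The test_resumable_tasks.cpp hits exercise the resumable-tasks API: tbb::task::suspend, tbb::task::suspend_point, and tbb::task::resume. A minimal hedged sketch of the same round trip, with the asynchronous activity modeled by a detached std::thread instead of the test's helper classes:

    #include <oneapi/tbb/task_group.h>
    #include <oneapi/tbb/task.h>
    #include <thread>
    #include <iostream>

    int main() {
        oneapi::tbb::task_group tg;
        tg.run([] {
            // suspend() releases the current worker thread to run other tasks
            // until resume() is called on the captured suspend_point.
            oneapi::tbb::task::suspend([](oneapi::tbb::task::suspend_point sp) {
                std::thread([sp] {
                    // Simulated asynchronous activity completing elsewhere.
                    oneapi::tbb::task::resume(sp);
                }).detach();
            });
            std::cout << "resumed\n";
        });
        tg.wait();
    }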
test_task.cpp
48 class CountingTask : public tbb::detail::d1::task {
119 task.reset(); in test_cancellation_on_exception()
367 tbb::task::resume(my_suspend_tag); in execute()
376 tbb::task::suspend_point my_suspend_tag;
401 … tbb::task::suspend([&wait, &test_context, &test_task, thread_id] (tbb::task::suspend_point tag) { in __anon509d20bd0b02()
430 tbb::task::resume(my_suspend_tag); in execute()
458 tbb::task::suspend_point& my_suspend_tag;
492 … tbb::task::suspend([&resume_flag, &test_suspend_tag, thread_id] (tbb::task::suspend_point tag) { in __anon509d20bd0e02()
547 … tbb::task::suspend([&resume_flag, &test_suspend_tag, thread_id] (tbb::task::suspend_point tag) { in __anon509d20bd1102()
642 tbb::task::suspend([&] (tbb::task::suspend_point sp) { in __anon509d20bd1a02()
[all …]
/oneTBB/doc/main/tbb_userguide/
Task_Scheduler_Bypass.rst
6 Scheduler bypass is an optimization where you directly specify the next task to run.
8 the spawning of the new task to be executed by the current thread involves the next steps:
10 - Push a new task onto the thread's deque.
11 - Continue to execute the current task until it is completed.
12 - Take a task from the thread's deque, unless it is stolen by another thread.
15 … using "Task Scheduler Bypass" technique to directly point the preferable task to be executed next
17 the returned task becomes the first candidate for the next task to be executed by the thread. Furth…
18 the task is executed by the current thread and not by any other thread.
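Task_Scheduler_Bypass.rst describes returning the next task to run directly, so the current thread executes it immediately instead of pushing it onto the deque and taking it back later. In these sources the hook is the task* return value of d1::task::execute() visible in the _task.h hits below. The compile-only sketch that follows shows only that shape; the class and member names are invented, recycling and spawning details are omitted, and deriving from the detail-namespace task is not a supported public API.

    // Shape of scheduler bypass against the internal d1::task interface
    // (illustrative only; not a supported entry point for user code).
    #include <oneapi/tbb/detail/_task.h>

    namespace d1 = tbb::detail::d1;

    struct tree_node_task : d1::task {
        tree_node_task* next_child = nullptr;   // hypothetical successor task

        d1::task* execute(d1::execution_data&) override {
            // ... do this node's share of the work ...
            // Returning the successor bypasses the deque: the current thread
            // runs it next, rather than push-then-pop or letting it be stolen.
            return next_child;
        }
        d1::task* cancel(d1::execution_data&) override { return nullptr; }
    };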
Floating_Point_Settings.rst
6 To propagate CPU-specific settings for floating-point computations to tasks executed by the task sc…
8 * When a ``task_arena`` or a task scheduler for a given application thread is initialized, they cap…
33 Floating-point settings captured to a task group context prevail over the settings captured during
34 …r a context is not explicitly specified, the settings captured during the task arena initializatio…
36 In a nested call to a parallel algorithm that does not use the context of a task group with explici…
37 …el contexts capture floating-point settings, the settings captured during task arena initializatio…
41 * Floating-point settings are applied to all tasks executed within a task arena, if they are captur…
43 * To a task group context.
51 * A user code inside a task should:
55 * Restore previous settings before the end of the task.
[all …]
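The Floating_Point_Settings.rst hits say the scheduler captures the calling thread's floating-point environment when a task_arena (or the implicit scheduler for a thread) is initialized, and applies it to tasks executed there unless a task group context supplies its own settings. A hedged sketch of that ordering using the standard <cfenv> controls; the arena and the rounding modes chosen here are arbitrary examples:

    #include <oneapi/tbb/task_arena.h>
    #include <oneapi/tbb/parallel_for.h>
    #include <cfenv>

    int main() {
        // Set the desired FP settings on this thread *before* the arena is
        // initialized, so the arena captures them.
        std::fesetround(FE_TOWARDZERO);

        oneapi::tbb::task_arena arena;
        arena.initialize();              // FP settings are captured here

        std::fesetround(FE_TONEAREST);   // later changes are not propagated

        arena.execute([] {
            oneapi::tbb::parallel_for(0, 100, [](int) {
                // Tasks in this arena run with the settings captured at
                // initialization (round-toward-zero in this sketch).
            });
        });
    }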
How_Task_Scheduler_Works.rst
7 While the task scheduler is not bound to any particular type of parallelism,
12 Let's consider the mapping of fork-join parallelism on the task scheduler in more detail.
20 must be reached. Assuming that the task graph is finite, depth-first is better for
27 - **Minimize space**. Execution of the shallowest task leads to the breadth-first unfolding of a gr…
33 thread spawns a task, it pushes it onto the bottom of its deque.
36 a task obtained by the first rule that applies from the roughly equivalent ruleset:
38 - Get the task returned by the previous one, if any.
40 - Take a task from the bottom of its deque, if any.
42 - Steal a task from the top of another randomly chosen deque. If the
46 The overall effect of rule 2 is to execute the *youngest* task spawned by the thread,
[all …]
destroy_graphs_outside_main_thread.rst
6 Make sure to enqueue a task to wait for and destroy graphs that run outside the main thread.
10 before destroying it. A common solution is to enqueue a task to build
13 Instead you can enqueue a task that creates the graph and waits for it:
39 In the code snippet above, the enqueued task executes at some point, but
41 task, or even ensure that it completes before the program ends, you will
42 need to use some mechanism to signal from the enqueued task that the
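destroy_graphs_outside_main_thread.rst recommends enqueuing a task that both builds the flow graph and waits for it, so the graph is destroyed inside that task rather than racing with program shutdown. A hedged sketch of the pattern with task_arena::enqueue and a trivial one-node graph; the node body and the completion flag are illustrative, and, as the snippet notes, some signal is still needed so the process does not exit before the enqueued task finishes:

    #include <oneapi/tbb/task_arena.h>
    #include <oneapi/tbb/flow_graph.h>
    #include <atomic>
    #include <thread>

    int main() {
        std::atomic<bool> done{false};
        oneapi::tbb::task_arena arena;

        // The enqueued task owns the graph: it builds it, runs it, waits for
        // it, and destroys it before signalling completion.
        arena.enqueue([&done] {
            oneapi::tbb::flow::graph g;
            oneapi::tbb::flow::function_node<int, int> node(
                g, oneapi::tbb::flow::unlimited, [](int v) { return v; });
            node.try_put(42);
            g.wait_for_all();     // the graph is idle and destroyed in here
            done.store(true);
        });

        // Wait for the signal from the enqueued task before exiting.
        while (!done.load()) std::this_thread::yield();
    }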
When_Task-Based_Programming_Is_Inappropriate.rst
7 Using the task scheduler is usually the best approach to threading for
8 performance, however there are cases when the task scheduler is not
9 appropriate. The task scheduler is intended for high-performance
12 performance loss when using the task scheduler because while the thread
17 it is best to use full-blown threads for those. The task scheduler is
The_Task_Scheduler.rst
8 *task scheduler*. The task scheduler is the engine that powers the loop
10 the task scheduler, because the templates hide the complexity of the
12 onto one of the high-level templates, use the task scheduler.
Task-Based_Programming.rst
16 - Faster task startup and shutdown
49 terminating a task is about 18 times faster than starting and
53 id. A task in |full_name|, in contrast, is
54 typically a small routine, and also, cannot be preempted at the task
63 program. In task-based programming, the task scheduler does have some
65 Indeed, it often delays starting a task until it can make useful
80 threads, and let the task scheduler choose the mapping from tasks to
85 they let you think at a higher, task-based, level. With thread-based
Nodes.rst
97 3, the node n will spawn a task to apply the body to the first input, 1.
98 When that task is complete, it will then spawn another task to apply the
99 body to 2. And likewise, the node will wait for that task to complete
100 before spawning a third task to apply the body to 3. The calls to
101 try_put do not block until a task is spawned; if a node cannot
102 immediately spawn a task to process the message, the message will be
104 task will be spawned to process the next buffered message.
124 to spawn a task as soon as a message arrives, regardless of how many
127 important to remember that spawning a task does not mean creating a
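The Nodes.rst lines describe a serial function_node: try_put does not block, at most one task at a time applies the body, and messages that arrive while the body is busy are buffered and processed by later tasks. A hedged sketch of that setup; the body and the inputs are arbitrary:

    #include <oneapi/tbb/flow_graph.h>
    #include <iostream>

    int main() {
        using namespace oneapi::tbb;
        flow::graph g;

        // flow::serial: one task at a time applies the body; extra messages
        // are buffered rather than dropped or run concurrently.
        flow::function_node<int, int> n(g, flow::serial, [](int v) {
            std::cout << "processing " << v << "\n";
            return v * 2;
        });

        n.try_put(1);   // spawns a task for the first message
        n.try_put(2);   // buffered while the body is busy
        n.try_put(3);   // buffered as well; none of these calls block

        g.wait_for_all();   // wait until every buffered message is processed
    }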
/oneTBB/test/conformance/
conformance_resumable_tasks.cpp
37 oneapi::tbb::task::suspend([&t, &suspend, &resume](oneapi::tbb::task::suspend_point sp) { in __anon366fea4d0102()
41 oneapi::tbb::task::resume(sp); in __anon366fea4d0102()
61 … oneapi::tbb::task::suspend([&tg, &suspend, &resume](oneapi::tbb::task::suspend_point sp) { in __anon366fea4d0302()
65 oneapi::tbb::task::resume(sp); in __anon366fea4d0302()
/oneTBB/include/oneapi/tbb/detail/
_task.h
42 class task; variable
50 TBB_EXPORT void __TBB_EXPORTED_FUNC spawn(d1::task& t, d1::task_group_context& ctx);
51 TBB_EXPORT void __TBB_EXPORTED_FUNC spawn(d1::task& t, d1::task_group_context& ctx, d1::slot_id id);
180 inline void spawn(task& t, task_group_context& ctx) { in spawn()
185 inline void spawn(task& t, task_group_context& ctx, slot_id id) { in spawn()
190 inline void execute_and_wait(task& t, task_group_context& t_ctx, wait_context& wait_ctx, task_group… in execute_and_wait()
214 class alignas(task_alignment) task : public task_traits { in alignas()
216 virtual ~task() = default; in alignas()
219 virtual task* execute(execution_data&) = 0; in alignas()
220 virtual task* cancel(execution_data&) = 0; in alignas()
[all …]
_flow_graph_impl.h
126 class graph_task : public task {
160 class priority_task_selector : public task {
164 task* execute(execution_data& ed) override { in execute()
167 task* t_next = my_task->execute(ed); in execute()
171 task* cancel(execution_data& ed) override { in cancel()
176 task* t_next = my_task->cancel(ed); in cancel()
217 void push_back(graph_task& task) { in push_back() argument
218 task.my_next = nullptr; in push_back()
219 *my_next_ptr = &task; in push_back()
220 my_next_ptr = &task.my_next; in push_back()
[all …]
/oneTBB/include/oneapi/tbb/
task_group.h
79 d1::task* task_ptr_or_nullptr(F&& f);
90 task* res = task_ptr_or_nullptr(m_func); in execute()
120 d1::task* task_ptr_or_nullptr(F&& f){ in task_ptr_or_nullptr()
131 d1::task* task_ptr_or_nullptr(F&& f){ in task_ptr_or_nullptr()
435 class function_task : public task {
456 task* cancel(execution_data& ed) override { in cancel()
473 class function_stack_task : public task {
480 task* execute(execution_data&) override { in execute()
485 task* cancel(execution_data&) override { in cancel()
531 task* prepare_task(F&& f) { in prepare_task()
[all …]
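The task_group.h hits show the wrappers behind tbb::task_group: run() packages the callable into a heap-allocated function_task, while run_and_wait() uses a stack-allocated function_stack_task executed as part of the wait. A hedged usage sketch; the group name and lambda bodies are arbitrary:

    #include <oneapi/tbb/task_group.h>

    int main() {
        oneapi::tbb::task_group tg;

        // Wrapped in a heap-allocated function_task and spawned.
        tg.run([] { /* independent work */ });

        // Wrapped in a stack-allocated function_stack_task; the call also
        // waits for every task previously run() in the group.
        oneapi::tbb::task_group_status st = tg.run_and_wait([] { /* more work */ });

        return st == oneapi::tbb::complete ? 0 : 1;
    }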
parallel_for_each.h
117 struct feeder_item_task: public task {
149 task* execute(execution_data& ed) override { in execute()
155 task* cancel(execution_data& ed) override { in cancel()
176 spawn(*task, my_execution_context); in internal_add_copy_impl()
193 spawn(*task, my_execution_context); in internal_add_move()
210 struct for_each_iteration_task: public task {
221 task* execute(execution_data&) override { in execute()
227 task* cancel(execution_data&) override { in cancel()
476 class for_each_root_task_base : public task {
485 task* cancel(execution_data&) override {
[all …]
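The parallel_for_each.h hits show for_each_iteration_task and feeder_item_task, the tasks spawned for the original input items and for items added through the feeder, respectively. A hedged usage sketch in which the body injects extra work via feeder::add; the values are arbitrary:

    #include <oneapi/tbb/parallel_for_each.h>
    #include <atomic>
    #include <vector>
    #include <iostream>

    int main() {
        std::vector<int> roots{16, 9, 25};
        std::atomic<int> visited{0};

        oneapi::tbb::parallel_for_each(roots.begin(), roots.end(),
            [&visited](int v, oneapi::tbb::feeder<int>& f) {
                ++visited;
                // Each add() becomes a feeder_item_task spawned into the same
                // execution context as the iteration that produced it.
                if (v > 1) f.add(v / 2);
            });

        std::cout << "items processed: " << visited.load() << "\n";
    }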
parallel_scan.h
87 struct final_sum : public task {
139 task* execute(execution_data& ed) override { in execute()
146 task* cancel(execution_data& ed) override { in cancel()
170 struct sum_node : public task {
247 task* execute(execution_data& ed) override { in execute()
265 task* cancel(execution_data& ed) override { in cancel()
281 struct finish_scan : public task {
313 task* cancel(execution_data& ed) override { in cancel()
351 struct start_scan : public task {
390 task* execute( execution_data& ) override;
[all …]
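The parallel_scan.h hits (final_sum, sum_node, finish_scan, start_scan) are the task tree behind tbb::parallel_scan. A hedged usage sketch with the functional overload, computing a running sum over a small vector; the data and names are arbitrary:

    #include <oneapi/tbb/parallel_scan.h>
    #include <oneapi/tbb/blocked_range.h>
    #include <vector>
    #include <iostream>

    int main() {
        std::vector<int> in{1, 2, 3, 4, 5};
        std::vector<int> out(in.size());

        int total = oneapi::tbb::parallel_scan(
            oneapi::tbb::blocked_range<std::size_t>(0, in.size()),
            0,                                          // identity element
            [&](const oneapi::tbb::blocked_range<std::size_t>& r, int sum, bool is_final) {
                for (std::size_t i = r.begin(); i != r.end(); ++i) {
                    sum += in[i];
                    if (is_final) out[i] = sum;         // final pass records prefix sums
                }
                return sum;
            },
            [](int left, int right) { return left + right; });  // combine partial sums

        std::cout << "total = " << total << "\n";
    }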
