/*
 * kmp_tasking.cpp -- OpenMP 3.0 tasking support.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_stats.h"
#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#include "tsan_annotations.h"

/* forward declaration */
static void __kmp_enable_tasking(kmp_task_team_t *task_team,
                                 kmp_info_t *this_thr);
static void __kmp_alloc_task_deque(kmp_info_t *thread,
                                   kmp_thread_data_t *thread_data);
static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
                                           kmp_task_team_t *task_team);
static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask);

#ifdef BUILD_TIED_TASK_STACK

// __kmp_trace_task_stack: print the tied tasks from the task stack in order
// from top to bottom
//
// gtid: global thread identifier for thread containing stack
// thread_data: thread data for task team thread containing stack
// threshold: value above which the trace statement triggers
// location: string identifying call site of this function (for trace)
static void __kmp_trace_task_stack(kmp_int32 gtid,
                                   kmp_thread_data_t *thread_data,
                                   int threshold, char *location) {
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_taskdata_t **stack_top = task_stack->ts_top;
  kmp_int32 entries = task_stack->ts_entries;
  kmp_taskdata_t *tied_task;

  KA_TRACE(
      threshold,
      ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
       "first_block = %p, stack_top = %p \n",
       location, gtid, entries, task_stack->ts_first_block, stack_top));

  KMP_DEBUG_ASSERT(stack_top != NULL);
  KMP_DEBUG_ASSERT(entries > 0);

  while (entries != 0) {
    KMP_DEBUG_ASSERT(stack_top != &task_stack->ts_first_block.sb_block[0]);
    // fix up ts_top if we need to pop from previous block
    if ((entries & TASK_STACK_INDEX_MASK) == 0) {
      kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(stack_top);

      stack_block = stack_block->sb_prev;
      stack_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
    }

    // finish bookkeeping
    stack_top--;
    entries--;

    tied_task = *stack_top;

    KMP_DEBUG_ASSERT(tied_task != NULL);
    KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);

    KA_TRACE(threshold,
             ("__kmp_trace_task_stack(%s): gtid=%d, entry=%d, "
              "stack_top=%p, tied_task=%p\n",
              location, gtid, entries, stack_top, tied_task));
  }
  KMP_DEBUG_ASSERT(stack_top == &task_stack->ts_first_block.sb_block[0]);

  KA_TRACE(threshold,
           ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
            location, gtid));
}

// __kmp_init_task_stack: initialize the task stack for the first time
// after a thread_data structure is created.
// It should not be necessary to do this again (assuming the stack works).
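// (The task stack records suspended tied tasks per thread and is only built
// when BUILD_TIED_TASK_STACK is defined. A rough sketch of the state set up
// below: ts_top points at ts_first_block.sb_block[0], ts_entries is
// TASK_STACK_EMPTY, and the first block's sb_next/sb_prev links are NULL.)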
//
// gtid: global thread identifier of calling thread
// thread_data: thread data for task team thread containing stack
static void __kmp_init_task_stack(kmp_int32 gtid,
                                  kmp_thread_data_t *thread_data) {
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_stack_block_t *first_block;

  // set up the first block of the stack
  first_block = &task_stack->ts_first_block;
  task_stack->ts_top = (kmp_taskdata_t **)first_block;
  memset((void *)first_block, '\0',
         TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));

  // initialize the stack to be empty
  task_stack->ts_entries = TASK_STACK_EMPTY;
  first_block->sb_next = NULL;
  first_block->sb_prev = NULL;
}

// __kmp_free_task_stack: free the task stack when thread_data is destroyed.
//
// gtid: global thread identifier for calling thread
// thread_data: thread info for thread containing stack
static void __kmp_free_task_stack(kmp_int32 gtid,
                                  kmp_thread_data_t *thread_data) {
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_stack_block_t *stack_block = &task_stack->ts_first_block;

  KMP_DEBUG_ASSERT(task_stack->ts_entries == TASK_STACK_EMPTY);
  // free from the second block of the stack
  while (stack_block != NULL) {
    kmp_stack_block_t *next_block = (stack_block) ? stack_block->sb_next : NULL;

    stack_block->sb_next = NULL;
    stack_block->sb_prev = NULL;
    if (stack_block != &task_stack->ts_first_block) {
      // free the block, if not the first; the owning thread is derived from
      // gtid since no kmp_info_t* is passed in
      __kmp_thread_free(__kmp_threads[gtid], stack_block);
    }
    stack_block = next_block;
  }
  // initialize the stack to be empty
  task_stack->ts_entries = 0;
  task_stack->ts_top = NULL;
}

// __kmp_push_task_stack: Push the tied task onto the task stack.
// Grow the stack if necessary by allocating another block.
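// A sketch of the growth scheme inferred from the code below: entries live in
// fixed-size blocks of TASK_STACK_BLOCK_SIZE slots chained via sb_next/sb_prev,
// so when a push fills the current block the next block is either reused (if
// already linked) or freshly allocated with __kmp_thread_calloc.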
//
// gtid: global thread identifier for calling thread
// thread: thread info for thread containing stack
// tied_task: the task to push on the stack
static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread,
                                  kmp_taskdata_t *tied_task) {
  // GEH - need to consider what to do if tt_threads_data not allocated yet
  kmp_thread_data_t *thread_data =
      &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;

  if (tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser) {
    return; // Don't push anything on stack if team or team tasks are serialized
  }

  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
  KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);

  KA_TRACE(20,
           ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
            gtid, thread, tied_task));
  // Store entry
  *(task_stack->ts_top) = tied_task;

  // Do bookkeeping for next push
  task_stack->ts_top++;
  task_stack->ts_entries++;

  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
    // Find beginning of this task block
    kmp_stack_block_t *stack_block =
        (kmp_stack_block_t *)(task_stack->ts_top - TASK_STACK_BLOCK_SIZE);

    // Check if we already have a block
    if (stack_block->sb_next !=
        NULL) { // reset ts_top to beginning of next block
      task_stack->ts_top = &stack_block->sb_next->sb_block[0];
    } else { // Alloc new block and link it up
      kmp_stack_block_t *new_block = (kmp_stack_block_t *)__kmp_thread_calloc(
          thread, sizeof(kmp_stack_block_t));

      task_stack->ts_top = &new_block->sb_block[0];
      stack_block->sb_next = new_block;
      new_block->sb_prev = stack_block;
      new_block->sb_next = NULL;

      KA_TRACE(
          30,
          ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
           gtid, tied_task, new_block));
    }
  }
  KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
                tied_task));
}

// __kmp_pop_task_stack: Pop the tied task from the task stack. Don't return
// the task, just check to make sure it matches the ending task passed in.
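// The popped entry is only used for consistency checking: in debug builds the
// asserts below require it to equal the task that is ending, which catches a
// mismatched push/pop sequence on the tied-task stack.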
//
// gtid: global thread identifier for the calling thread
// thread: thread info structure containing stack
// tied_task: the task popped off the stack
// ending_task: the task that is ending (should match popped task)
static void __kmp_pop_task_stack(kmp_int32 gtid, kmp_info_t *thread,
                                 kmp_taskdata_t *ending_task) {
  // GEH - need to consider what to do if tt_threads_data not allocated yet
  kmp_thread_data_t *thread_data =
      &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_taskdata_t *tied_task;

  if (ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser) {
    // Don't pop anything from stack if team or team tasks are serialized
    return;
  }

  KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
  KMP_DEBUG_ASSERT(task_stack->ts_entries > 0);

  KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid,
                thread));

  // fix up ts_top if we need to pop from previous block
  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
    kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(task_stack->ts_top);

    stack_block = stack_block->sb_prev;
    task_stack->ts_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
  }

  // finish bookkeeping
  task_stack->ts_top--;
  task_stack->ts_entries--;

  tied_task = *(task_stack->ts_top);

  KMP_DEBUG_ASSERT(tied_task != NULL);
  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
  KMP_DEBUG_ASSERT(tied_task == ending_task); // If we built the stack correctly

  KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
                tied_task));
  return;
}
#endif /* BUILD_TIED_TASK_STACK */

// returns 1 if new task is allowed to execute, 0 otherwise
// checks Task Scheduling constraint (if requested) and
// mutexinoutset dependencies if any
static bool __kmp_task_is_allowed(int gtid, const kmp_int32 is_constrained,
                                  const kmp_taskdata_t *tasknew,
                                  const kmp_taskdata_t *taskcurr) {
  if (is_constrained && (tasknew->td_flags.tiedness == TASK_TIED)) {
    // Check if the candidate obeys the Task Scheduling Constraints (TSC)
    // only descendant of all deferred tied tasks can be scheduled, checking
    // the last one is enough, as it in turn is the descendant of all others
    kmp_taskdata_t *current = taskcurr->td_last_tied;
    KMP_DEBUG_ASSERT(current != NULL);
    // check if the task is not suspended on barrier
    if (current->td_flags.tasktype == TASK_EXPLICIT ||
        current->td_taskwait_thread > 0) { // <= 0 on barrier
      kmp_int32 level = current->td_level;
      kmp_taskdata_t *parent = tasknew->td_parent;
      while (parent != current && parent->td_level > level) {
        // check generation up to the level of the current task
        parent = parent->td_parent;
        KMP_DEBUG_ASSERT(parent != NULL);
      }
      if (parent != current)
        return false;
    }
  }
  // Check mutexinoutset dependencies, acquire locks
  kmp_depnode_t *node = tasknew->td_depnode;
  if (UNLIKELY(node && (node->dn.mtx_num_locks > 0))) {
    for (int i = 0; i < node->dn.mtx_num_locks; ++i) {
      KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
      if (__kmp_test_lock(node->dn.mtx_locks[i], gtid))
        continue;
      // could not get the lock, release previous locks
      for (int j = i - 1; j >= 0; --j)
        __kmp_release_lock(node->dn.mtx_locks[j], gtid);
      return false;
    }
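    // Lock acquisition is all-or-nothing here: a failure on lock i releases
    // locks [0, i) and reports the task as not allowed yet, so the caller can
    // defer the task rather than sit on a partial set of mutexinoutset locks.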
    // negative num_locks means all locks acquired successfully
    node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
  }
  return true;
}

// __kmp_realloc_task_deque:
// Re-allocates a task deque for a particular thread, copies the content from
// the old deque and adjusts the necessary data structures relating to the
// deque. This operation must be done with the deque_lock being held
static void __kmp_realloc_task_deque(kmp_info_t *thread,
                                     kmp_thread_data_t *thread_data) {
  kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
  KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == size);
  kmp_int32 new_size = 2 * size;

  KE_TRACE(10, ("__kmp_realloc_task_deque: T#%d reallocating deque[from %d to "
                "%d] for thread_data %p\n",
                __kmp_gtid_from_thread(thread), size, new_size, thread_data));

  kmp_taskdata_t **new_deque =
      (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *));

  int i, j;
  for (i = thread_data->td.td_deque_head, j = 0; j < size;
       i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
    new_deque[j] = thread_data->td.td_deque[i];

  __kmp_free(thread_data->td.td_deque);

  thread_data->td.td_deque_head = 0;
  thread_data->td.td_deque_tail = size;
  thread_data->td.td_deque = new_deque;
  thread_data->td.td_deque_size = new_size;
}

// __kmp_push_task: Add a task to the thread's deque
static kmp_int32 __kmp_push_task(kmp_int32 gtid, kmp_task_t *task) {
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);

  // We don't need to map to shadow gtid if it is already hidden helper thread
  if (taskdata->td_flags.hidden_helper && !KMP_HIDDEN_HELPER_THREAD(gtid)) {
    gtid = KMP_GTID_TO_SHADOW_GTID(gtid);
    thread = __kmp_threads[gtid];
  }

  kmp_task_team_t *task_team = thread->th.th_task_team;
  kmp_int32 tid = __kmp_tid_from_gtid(gtid);
  kmp_thread_data_t *thread_data;

  KA_TRACE(20,
           ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata));

  if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
    // untied task needs to increment counter so that the task structure is not
    // freed prematurely
    kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
    KMP_DEBUG_USE_VAR(counter);
    KA_TRACE(
        20,
        ("__kmp_push_task: T#%d untied_count (%d) incremented for task %p\n",
         gtid, counter, taskdata));
  }

  // The first check avoids building task_team thread data if serialized
  if (UNLIKELY(taskdata->td_flags.task_serial)) {
    KA_TRACE(20, ("__kmp_push_task: T#%d team serialized; returning "
                  "TASK_NOT_PUSHED for task %p\n",
                  gtid, taskdata));
    return TASK_NOT_PUSHED;
  }

  // Now that serialized tasks have returned, we can assume that we are not in
  // immediate exec mode
  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
  if (UNLIKELY(!KMP_TASKING_ENABLED(task_team))) {
    __kmp_enable_tasking(task_team, thread);
  }
  KMP_DEBUG_ASSERT(TCR_4(task_team->tt.tt_found_tasks) == TRUE);
  KMP_DEBUG_ASSERT(TCR_PTR(task_team->tt.tt_threads_data) != NULL);

  // Find tasking deque specific to encountering thread
  thread_data = &task_team->tt.tt_threads_data[tid];

  // No lock needed since only owner can allocate. If the task is hidden_helper,
  // we don't need it either because we have initialized the deque for hidden
  // helper thread data.
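  // Rough push sequence from here on: lazily allocate the deque on first use,
  // then either return TASK_NOT_PUSHED when the deque is full and throttling
  // lets the caller execute the task directly, or grow the deque under
  // td_deque_lock and enqueue at the tail (the size stays a power of two so
  // TASK_DEQUE_MASK can wrap the index).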
  if (UNLIKELY(thread_data->td.td_deque == NULL)) {
    __kmp_alloc_task_deque(thread, thread_data);
  }

  int locked = 0;
  // Check if deque is full
  if (TCR_4(thread_data->td.td_deque_ntasks) >=
      TASK_DEQUE_SIZE(thread_data->td)) {
    if (__kmp_enable_task_throttling &&
        __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
                              thread->th.th_current_task)) {
      KA_TRACE(20, ("__kmp_push_task: T#%d deque is full; returning "
                    "TASK_NOT_PUSHED for task %p\n",
                    gtid, taskdata));
      return TASK_NOT_PUSHED;
    } else {
      __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
      locked = 1;
      if (TCR_4(thread_data->td.td_deque_ntasks) >=
          TASK_DEQUE_SIZE(thread_data->td)) {
        // expand deque to push the task which is not allowed to execute
        __kmp_realloc_task_deque(thread, thread_data);
      }
    }
  }
  // Lock the deque for the task push operation
  if (!locked) {
    __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
    // Need to recheck as we can get a proxy task from thread outside of OpenMP
    if (TCR_4(thread_data->td.td_deque_ntasks) >=
        TASK_DEQUE_SIZE(thread_data->td)) {
      if (__kmp_enable_task_throttling &&
          __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
                                thread->th.th_current_task)) {
        __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
        KA_TRACE(20, ("__kmp_push_task: T#%d deque is full on 2nd check; "
                      "returning TASK_NOT_PUSHED for task %p\n",
                      gtid, taskdata));
        return TASK_NOT_PUSHED;
      } else {
        // expand deque to push the task which is not allowed to execute
        __kmp_realloc_task_deque(thread, thread_data);
      }
    }
  }
  // Must have room since no thread can add tasks but calling thread
  KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
                   TASK_DEQUE_SIZE(thread_data->td));

  thread_data->td.td_deque[thread_data->td.td_deque_tail] =
      taskdata; // Push taskdata
  // Wrap index.
  thread_data->td.td_deque_tail =
      (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
  TCW_4(thread_data->td.td_deque_ntasks,
        TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
  KMP_FSYNC_RELEASING(thread->th.th_current_task); // releasing self
  KMP_FSYNC_RELEASING(taskdata); // releasing child
  KA_TRACE(20, ("__kmp_push_task: T#%d returning TASK_SUCCESSFULLY_PUSHED: "
                "task=%p ntasks=%d head=%u tail=%u\n",
                gtid, taskdata, thread_data->td.td_deque_ntasks,
                thread_data->td.td_deque_head, thread_data->td.td_deque_tail));

  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);

  // Signal one worker thread to execute the task
  if (taskdata->td_flags.hidden_helper) {
    // Wake hidden helper threads up if they're sleeping
    __kmp_hidden_helper_worker_thread_signal();
  }

  return TASK_SUCCESSFULLY_PUSHED;
}

// __kmp_pop_current_task_from_thread: set up current task from called thread
// when team ends
//
// this_thr: thread structure to set current_task in.
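// (Mirrors __kmp_push_current_task_to_thread below: when a team ends, the
// thread's current task simply reverts to its parent task.)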
void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr) {
  KF_TRACE(10, ("__kmp_pop_current_task_from_thread(enter): T#%d "
                "this_thread=%p, curtask=%p, "
                "curtask_parent=%p\n",
                0, this_thr, this_thr->th.th_current_task,
                this_thr->th.th_current_task->td_parent));

  this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;

  KF_TRACE(10, ("__kmp_pop_current_task_from_thread(exit): T#%d "
                "this_thread=%p, curtask=%p, "
                "curtask_parent=%p\n",
                0, this_thr, this_thr->th.th_current_task,
                this_thr->th.th_current_task->td_parent));
}

// __kmp_push_current_task_to_thread: set up current task in called thread for a
// new team
//
// this_thr: thread structure to set up
// team: team for implicit task data
// tid: thread within team to set up
void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team,
                                       int tid) {
  // the current task of the thread is the parent of the just-created implicit
  // tasks of the new team
  KF_TRACE(10, ("__kmp_push_current_task_to_thread(enter): T#%d this_thread=%p "
                "curtask=%p "
                "parent_task=%p\n",
                tid, this_thr, this_thr->th.th_current_task,
                team->t.t_implicit_task_taskdata[tid].td_parent));

  KMP_DEBUG_ASSERT(this_thr != NULL);

  if (tid == 0) {
    if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
      team->t.t_implicit_task_taskdata[0].td_parent =
          this_thr->th.th_current_task;
      this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
    }
  } else {
    team->t.t_implicit_task_taskdata[tid].td_parent =
        team->t.t_implicit_task_taskdata[0].td_parent;
    this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
  }

  KF_TRACE(10, ("__kmp_push_current_task_to_thread(exit): T#%d this_thread=%p "
                "curtask=%p "
                "parent_task=%p\n",
                tid, this_thr, this_thr->th.th_current_task,
                team->t.t_implicit_task_taskdata[tid].td_parent));
}

// __kmp_task_start: bookkeeping for a task starting execution
//
// GTID: global thread id of calling thread
// task: task starting execution
// current_task: task suspending
static void __kmp_task_start(kmp_int32 gtid, kmp_task_t *task,
                             kmp_taskdata_t *current_task) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  kmp_info_t *thread = __kmp_threads[gtid];

  KA_TRACE(10,
           ("__kmp_task_start(enter): T#%d starting task %p: current_task=%p\n",
            gtid, taskdata, current_task));

  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);

  // mark currently executing task as suspended
  // TODO: GEH - make sure root team implicit task is initialized properly.
  // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 );
  current_task->td_flags.executing = 0;

// Add task to stack if tied
#ifdef BUILD_TIED_TASK_STACK
  if (taskdata->td_flags.tiedness == TASK_TIED) {
    __kmp_push_task_stack(gtid, thread, taskdata);
  }
#endif /* BUILD_TIED_TASK_STACK */

  // mark starting task as executing and as current task
  thread->th.th_current_task = taskdata;

  KMP_DEBUG_ASSERT(taskdata->td_flags.started == 0 ||
                   taskdata->td_flags.tiedness == TASK_UNTIED);
  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0 ||
                   taskdata->td_flags.tiedness == TASK_UNTIED);
  taskdata->td_flags.started = 1;
  taskdata->td_flags.executing = 1;
  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);

  // GEH TODO: shouldn't we pass some sort of location identifier here?
  // APT: yes, we will pass location here.
  // need to store current thread state (in a thread or taskdata structure)
  // before setting work_state, otherwise wrong state is set after end of task

  KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", gtid, taskdata));

  return;
}

#if OMPT_SUPPORT
//------------------------------------------------------------------------------
// __ompt_task_init:
// Initialize OMPT fields maintained by a task. This will only be called after
// ompt_start_tool, so we already know whether ompt is enabled or not.

static inline void __ompt_task_init(kmp_taskdata_t *task, int tid) {
  // The calls to __ompt_task_init already have the ompt_enabled condition.
  task->ompt_task_info.task_data.value = 0;
  task->ompt_task_info.frame.exit_frame = ompt_data_none;
  task->ompt_task_info.frame.enter_frame = ompt_data_none;
  task->ompt_task_info.frame.exit_frame_flags =
      ompt_frame_runtime | ompt_frame_framepointer;
  task->ompt_task_info.frame.enter_frame_flags =
      ompt_frame_runtime | ompt_frame_framepointer;
}

// __ompt_task_start:
// Build and trigger task-begin event
static inline void __ompt_task_start(kmp_task_t *task,
                                     kmp_taskdata_t *current_task,
                                     kmp_int32 gtid) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  ompt_task_status_t status = ompt_task_switch;
  if (__kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded) {
    status = ompt_task_yield;
    __kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded = 0;
  }
  /* let OMPT know that we're about to run this task */
  if (ompt_enabled.ompt_callback_task_schedule) {
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        &(current_task->ompt_task_info.task_data), status,
        &(taskdata->ompt_task_info.task_data));
  }
  taskdata->ompt_task_info.scheduling_parent = current_task;
}

// __ompt_task_finish:
// Build and trigger final task-schedule event
static inline void __ompt_task_finish(kmp_task_t *task,
                                      kmp_taskdata_t *resumed_task,
                                      ompt_task_status_t status) {
  if (ompt_enabled.ompt_callback_task_schedule) {
    kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
    if (__kmp_omp_cancellation && taskdata->td_taskgroup &&
        taskdata->td_taskgroup->cancel_request == cancel_taskgroup) {
      status = ompt_task_cancel;
    }

    /* let OMPT know that we're returning to the callee task */
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        &(taskdata->ompt_task_info.task_data), status,
        (resumed_task ? &(resumed_task->ompt_task_info.task_data) : NULL));
  }
}
#endif

template <bool ompt>
static void __kmpc_omp_task_begin_if0_template(ident_t *loc_ref, kmp_int32 gtid,
                                               kmp_task_t *task,
                                               void *frame_address,
                                               void *return_address) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;

  KA_TRACE(10, ("__kmpc_omp_task_begin_if0(enter): T#%d loc=%p task=%p "
                "current_task=%p\n",
                gtid, loc_ref, taskdata, current_task));

  if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
    // untied task needs to increment counter so that the task structure is not
    // freed prematurely
    kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
    KMP_DEBUG_USE_VAR(counter);
    KA_TRACE(20, ("__kmpc_omp_task_begin_if0: T#%d untied_count (%d) "
                  "incremented for task %p\n",
                  gtid, counter, taskdata));
  }

  taskdata->td_flags.task_serial =
      1; // Execute this task immediately, not deferred.
  __kmp_task_start(gtid, task, current_task);

#if OMPT_SUPPORT
  if (ompt) {
    if (current_task->ompt_task_info.frame.enter_frame.ptr == NULL) {
      current_task->ompt_task_info.frame.enter_frame.ptr =
          taskdata->ompt_task_info.frame.exit_frame.ptr = frame_address;
      current_task->ompt_task_info.frame.enter_frame_flags =
          taskdata->ompt_task_info.frame.exit_frame_flags =
              ompt_frame_application | ompt_frame_framepointer;
    }
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_task_info_t *parent_info = &(current_task->ompt_task_info);
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(parent_info->task_data), &(parent_info->frame),
          &(taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(taskdata), 0,
          return_address);
    }
    __ompt_task_start(task, current_task, gtid);
  }
#endif // OMPT_SUPPORT

  KA_TRACE(10, ("__kmpc_omp_task_begin_if0(exit): T#%d loc=%p task=%p,\n", gtid,
                loc_ref, taskdata));
}

#if OMPT_SUPPORT
OMPT_NOINLINE
static void __kmpc_omp_task_begin_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
                                           kmp_task_t *task,
                                           void *frame_address,
                                           void *return_address) {
  __kmpc_omp_task_begin_if0_template<true>(loc_ref, gtid, task, frame_address,
                                           return_address);
}
#endif // OMPT_SUPPORT

// __kmpc_omp_task_begin_if0: report that a given serialized task has started
// execution
//
// loc_ref: source location information; points to beginning of task block.
// gtid: global thread number.
// task: task thunk for the started task.
void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
                               kmp_task_t *task) {
#if OMPT_SUPPORT
  if (UNLIKELY(ompt_enabled.enabled)) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    __kmpc_omp_task_begin_if0_ompt(loc_ref, gtid, task,
                                   OMPT_GET_FRAME_ADDRESS(1),
                                   OMPT_LOAD_RETURN_ADDRESS(gtid));
    return;
  }
#endif
  __kmpc_omp_task_begin_if0_template<false>(loc_ref, gtid, task, NULL, NULL);
}

#ifdef TASK_UNUSED
// __kmpc_omp_task_begin: report that a given task has started execution
// NEVER GENERATED BY COMPILER, DEPRECATED!!!
void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task) {
  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;

  KA_TRACE(
      10,
      ("__kmpc_omp_task_begin(enter): T#%d loc=%p task=%p current_task=%p\n",
       gtid, loc_ref, KMP_TASK_TO_TASKDATA(task), current_task));

  __kmp_task_start(gtid, task, current_task);

  KA_TRACE(10, ("__kmpc_omp_task_begin(exit): T#%d loc=%p task=%p,\n", gtid,
                loc_ref, KMP_TASK_TO_TASKDATA(task)));
  return;
}
#endif // TASK_UNUSED

// __kmp_free_task: free the current task space and the space for shareds
//
// gtid: Global thread ID of calling thread
// taskdata: task to free
// thread: thread data structure of caller
static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
                            kmp_info_t *thread) {
  KA_TRACE(30, ("__kmp_free_task: T#%d freeing data from task %p\n", gtid,
                taskdata));

  // Check to make sure all flags and counters have the correct values
  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0);
  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 1);
  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
  KMP_DEBUG_ASSERT(taskdata->td_allocated_child_tasks == 0 ||
                   taskdata->td_flags.task_serial == 1);
  KMP_DEBUG_ASSERT(taskdata->td_incomplete_child_tasks == 0);

  taskdata->td_flags.freed = 1;
  ANNOTATE_HAPPENS_BEFORE(taskdata);
// deallocate the taskdata and shared variable blocks associated with this task
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, taskdata);
#else /* ! USE_FAST_MEMORY */
  __kmp_thread_free(thread, taskdata);
#endif
  KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", gtid, taskdata));
}

// __kmp_free_task_and_ancestors: free the current task and ancestors without
// children
//
// gtid: Global thread ID of calling thread
// taskdata: task to free
// thread: thread data structure of caller
static void __kmp_free_task_and_ancestors(kmp_int32 gtid,
                                          kmp_taskdata_t *taskdata,
                                          kmp_info_t *thread) {
  // Proxy tasks must always be allowed to free their parents
  // because they can be run in background even in serial mode.
  kmp_int32 team_serial =
      (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) &&
      !taskdata->td_flags.proxy;
  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);

  kmp_int32 children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
  KMP_DEBUG_ASSERT(children >= 0);

  // Now, go up the ancestor tree to see if any ancestors can now be freed.
  while (children == 0) {
    kmp_taskdata_t *parent_taskdata = taskdata->td_parent;

    KA_TRACE(20, ("__kmp_free_task_and_ancestors(enter): T#%d task %p complete "
                  "and freeing itself\n",
                  gtid, taskdata));

    // --- Deallocate my ancestor task ---
    __kmp_free_task(gtid, taskdata, thread);

    taskdata = parent_taskdata;

    if (team_serial)
      return;
    // Stop checking ancestors at implicit task instead of walking up ancestor
    // tree to avoid premature deallocation of ancestors.
    if (taskdata->td_flags.tasktype == TASK_IMPLICIT) {
      if (taskdata->td_dephash) { // do we need to cleanup dephash?
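        // The flag word is swapped with a CAS (complete: 1 -> 0) so that at
        // most one thread wins the right to free the dephash entries; if the
        // flags changed concurrently, the CAS fails and cleanup is skipped.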
        int children = KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks);
        kmp_tasking_flags_t flags_old = taskdata->td_flags;
        if (children == 0 && flags_old.complete == 1) {
          kmp_tasking_flags_t flags_new = flags_old;
          flags_new.complete = 0;
          if (KMP_COMPARE_AND_STORE_ACQ32(
                  RCAST(kmp_int32 *, &taskdata->td_flags),
                  *RCAST(kmp_int32 *, &flags_old),
                  *RCAST(kmp_int32 *, &flags_new))) {
            KA_TRACE(100, ("__kmp_free_task_and_ancestors: T#%d cleans "
                           "dephash of implicit task %p\n",
                           gtid, taskdata));
            // cleanup dephash of finished implicit task
            __kmp_dephash_free_entries(thread, taskdata->td_dephash);
          }
        }
      }
      return;
    }
    // Predecrement simulated by "- 1" calculation
    children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
    KMP_DEBUG_ASSERT(children >= 0);
  }

  KA_TRACE(
      20, ("__kmp_free_task_and_ancestors(exit): T#%d task %p has %d children; "
           "not freeing it yet\n",
           gtid, taskdata, children));
}

// __kmp_task_finish: bookkeeping to do when a task finishes execution
//
// gtid: global thread ID for calling thread
// task: task to be finished
// resumed_task: task to be resumed. (may be NULL if task is serialized)
//
// template<ompt>: effectively ompt_enabled.enabled!=0
// the version with ompt=false is inlined, allowing to optimize away all ompt
// code in this case
template <bool ompt>
static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
                              kmp_taskdata_t *resumed_task) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_task_team_t *task_team =
      thread->th.th_task_team; // might be NULL for serial teams...
  kmp_int32 children = 0;

  KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming "
                "task %p\n",
                gtid, taskdata, resumed_task));

  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);

// Pop task from stack if tied
#ifdef BUILD_TIED_TASK_STACK
  if (taskdata->td_flags.tiedness == TASK_TIED) {
    __kmp_pop_task_stack(gtid, thread, taskdata);
  }
#endif /* BUILD_TIED_TASK_STACK */

  if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
    // untied task needs to check the counter so that the task structure is not
    // freed prematurely
    kmp_int32 counter = KMP_ATOMIC_DEC(&taskdata->td_untied_count) - 1;
    KA_TRACE(
        20,
        ("__kmp_task_finish: T#%d untied_count (%d) decremented for task %p\n",
         gtid, counter, taskdata));
    if (counter > 0) {
      // untied task is not done, to be continued possibly by other thread, do
      // not free it now
      if (resumed_task == NULL) {
        KMP_DEBUG_ASSERT(taskdata->td_flags.task_serial);
        resumed_task = taskdata->td_parent; // In a serialized task, the resumed
        // task is the parent
      }
      thread->th.th_current_task = resumed_task; // restore current_task
      resumed_task->td_flags.executing = 1; // resume previous task
      KA_TRACE(10, ("__kmp_task_finish(exit): T#%d partially done task %p, "
                    "resuming task %p\n",
                    gtid, taskdata, resumed_task));
      return;
    }
  }

  // bookkeeping for resuming task:
  // GEH - note tasking_ser => task_serial
  KMP_DEBUG_ASSERT(
      (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
      taskdata->td_flags.task_serial);
  if (taskdata->td_flags.task_serial) {
    if (resumed_task == NULL) {
      resumed_task = taskdata->td_parent;
      // In a serialized task, the resumed task is the parent
    }
  } else {
    KMP_DEBUG_ASSERT(resumed_task !=
                     NULL); // verify that resumed task is passed as argument
  }

  /* If the tasks' destructor thunk flag has been set, we need to invoke the
     destructor thunk that has been generated by the compiler. The code is
     placed here, since at this point other tasks might have been released
     hence overlapping the destructor invocations with some other work in the
     released tasks. The OpenMP spec is not specific on when the destructors
     are invoked, so we should be free to choose. */
  if (UNLIKELY(taskdata->td_flags.destructors_thunk)) {
    kmp_routine_entry_t destr_thunk = task->data1.destructors;
    KMP_ASSERT(destr_thunk);
    destr_thunk(gtid, task);
  }

  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
  KMP_DEBUG_ASSERT(taskdata->td_flags.started == 1);
  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);

  bool detach = false;
  if (UNLIKELY(taskdata->td_flags.detachable == TASK_DETACHABLE)) {
    if (taskdata->td_allow_completion_event.type ==
        KMP_EVENT_ALLOW_COMPLETION) {
      // event hasn't been fulfilled yet. Try to detach task.
      __kmp_acquire_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
      if (taskdata->td_allow_completion_event.type ==
          KMP_EVENT_ALLOW_COMPLETION) {
        // task finished execution
        KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
        taskdata->td_flags.executing = 0; // suspend the finishing task

#if OMPT_SUPPORT
        // For a detached task which is not yet completed, we switch back to
        // the resumed task here; the later omp_fulfill_event signals
        // completion. Locking is necessary to avoid a race with
        // ompt_task_late_fulfill.
        if (ompt)
          __ompt_task_finish(task, resumed_task, ompt_task_detach);
#endif

        // no access to taskdata after this point!
        // __kmp_fulfill_event might free taskdata at any time from now

        taskdata->td_flags.proxy = TASK_PROXY; // proxify!
        detach = true;
      }
      __kmp_release_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
    }
  }

  if (!detach) {
    taskdata->td_flags.complete = 1; // mark the task as completed

#if OMPT_SUPPORT
    // This is not a detached task, we are done here
    if (ompt)
      __ompt_task_finish(task, resumed_task, ompt_task_complete);
#endif

    // Only need to keep track of count if team parallel and tasking not
    // serialized, or task is detachable and event has already been fulfilled
    if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) ||
        taskdata->td_flags.detachable == TASK_DETACHABLE ||
        taskdata->td_flags.hidden_helper) {
      // Predecrement simulated by "- 1" calculation
      children =
          KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
      KMP_DEBUG_ASSERT(children >= 0);
      if (taskdata->td_taskgroup)
        KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
      __kmp_release_deps(gtid, taskdata);
    } else if (task_team && task_team->tt.tt_found_proxy_tasks) {
      // if we found proxy tasks there could exist a dependency chain
      // with the proxy task as origin
      __kmp_release_deps(gtid, taskdata);
    }
    // td_flags.executing must be marked as 0 after __kmp_release_deps has been
    // called.
    // Otherwise, if a task is executed immediately from the
    // release_deps code, the flag will be reset to 1 again by this same
    // function
    KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
    taskdata->td_flags.executing = 0; // suspend the finishing task
  }

  KA_TRACE(
      20, ("__kmp_task_finish: T#%d finished task %p, %d incomplete children\n",
           gtid, taskdata, children));

  // Free this task and then ancestor tasks if they have no children.
  // Restore th_current_task first as suggested by John:
  // johnmc: if an asynchronous inquiry peers into the runtime system
  // it doesn't see the freed task as the current task.
  thread->th.th_current_task = resumed_task;
  if (!detach)
    __kmp_free_task_and_ancestors(gtid, taskdata, thread);

  // TODO: GEH - make sure root team implicit task is initialized properly.
  // KMP_DEBUG_ASSERT( resumed_task->td_flags.executing == 0 );
  resumed_task->td_flags.executing = 1; // resume previous task

  KA_TRACE(
      10, ("__kmp_task_finish(exit): T#%d finished task %p, resuming task %p\n",
           gtid, taskdata, resumed_task));

  return;
}

template <bool ompt>
static void __kmpc_omp_task_complete_if0_template(ident_t *loc_ref,
                                                  kmp_int32 gtid,
                                                  kmp_task_t *task) {
  KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n",
                gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
  KMP_DEBUG_ASSERT(gtid >= 0);
  // this routine will provide task to resume
  __kmp_task_finish<ompt>(gtid, task, NULL);

  KA_TRACE(10, ("__kmpc_omp_task_complete_if0(exit): T#%d loc=%p task=%p\n",
                gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));

#if OMPT_SUPPORT
  if (ompt) {
    ompt_frame_t *ompt_frame;
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame = ompt_data_none;
    ompt_frame->enter_frame_flags =
        ompt_frame_runtime | ompt_frame_framepointer;
  }
#endif

  return;
}

#if OMPT_SUPPORT
OMPT_NOINLINE
void __kmpc_omp_task_complete_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
                                       kmp_task_t *task) {
  __kmpc_omp_task_complete_if0_template<true>(loc_ref, gtid, task);
}
#endif // OMPT_SUPPORT

// __kmpc_omp_task_complete_if0: report that a task has completed execution
//
// loc_ref: source location information; points to end of task block.
// gtid: global thread number.
// task: task thunk for the completed task.
void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
                                  kmp_task_t *task) {
#if OMPT_SUPPORT
  if (UNLIKELY(ompt_enabled.enabled)) {
    __kmpc_omp_task_complete_if0_ompt(loc_ref, gtid, task);
    return;
  }
#endif
  __kmpc_omp_task_complete_if0_template<false>(loc_ref, gtid, task);
}

#ifdef TASK_UNUSED
// __kmpc_omp_task_complete: report that a task has completed execution
// NEVER GENERATED BY COMPILER, DEPRECATED!!!
void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
                              kmp_task_t *task) {
  KA_TRACE(10, ("__kmpc_omp_task_complete(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, KMP_TASK_TO_TASKDATA(task)));

  __kmp_task_finish<false>(gtid, task,
                           NULL); // Not sure how to find task to resume

  KA_TRACE(10, ("__kmpc_omp_task_complete(exit): T#%d loc=%p task=%p\n", gtid,
                loc_ref, KMP_TASK_TO_TASKDATA(task)));
  return;
}
#endif // TASK_UNUSED

// __kmp_init_implicit_task: Initialize the appropriate fields in the implicit
// task for a given thread
//
// loc_ref: reference to source location of parallel region
// this_thr: thread data structure corresponding to implicit task
// team: team for this_thr
// tid: thread id of given thread within team
// set_curr_task: TRUE if need to push current task to thread
// NOTE: Routine does not set up the implicit task ICVS. This is assumed to
// have already been done elsewhere.
// TODO: Get better loc_ref. Value passed in may be NULL
void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
                              kmp_team_t *team, int tid, int set_curr_task) {
  kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid];

  KF_TRACE(
      10,
      ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n",
       tid, team, task, set_curr_task ? "TRUE" : "FALSE"));

  task->td_task_id = KMP_GEN_TASK_ID();
  task->td_team = team;
  // task->td_parent = NULL; // fix for CQ230101 (broken parent task info
  // in debugger)
  task->td_ident = loc_ref;
  task->td_taskwait_ident = NULL;
  task->td_taskwait_counter = 0;
  task->td_taskwait_thread = 0;

  task->td_flags.tiedness = TASK_TIED;
  task->td_flags.tasktype = TASK_IMPLICIT;
  task->td_flags.proxy = TASK_FULL;

  // All implicit tasks are executed immediately, not deferred
  task->td_flags.task_serial = 1;
  task->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
  task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;

  task->td_flags.started = 1;
  task->td_flags.executing = 1;
  task->td_flags.complete = 0;
  task->td_flags.freed = 0;

  task->td_depnode = NULL;
  task->td_last_tied = task;
  task->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;

  if (set_curr_task) { // only do this init first time thread is created
    KMP_ATOMIC_ST_REL(&task->td_incomplete_child_tasks, 0);
    // Not used: don't need to deallocate implicit task
    KMP_ATOMIC_ST_REL(&task->td_allocated_child_tasks, 0);
    task->td_taskgroup = NULL; // An implicit task does not have taskgroup
    task->td_dephash = NULL;
    __kmp_push_current_task_to_thread(this_thr, team, tid);
  } else {
    KMP_DEBUG_ASSERT(task->td_incomplete_child_tasks == 0);
    KMP_DEBUG_ASSERT(task->td_allocated_child_tasks == 0);
  }

#if OMPT_SUPPORT
  if (UNLIKELY(ompt_enabled.enabled))
    __ompt_task_init(task, tid);
#endif

  KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n", tid,
                team, task));
}

// __kmp_finish_implicit_task: Release resources associated with implicit tasks
// at the end of parallel regions. Some resources are kept for reuse in the next
// parallel region.
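// (In particular the dephash table itself is kept; only its entries are freed
// below, so a following parallel region can reuse the hash. The whole hash is
// released later in __kmp_free_implicit_task.)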
//
// thread: thread data structure corresponding to implicit task
void __kmp_finish_implicit_task(kmp_info_t *thread) {
  kmp_taskdata_t *task = thread->th.th_current_task;
  if (task->td_dephash) {
    int children;
    task->td_flags.complete = 1;
    children = KMP_ATOMIC_LD_ACQ(&task->td_incomplete_child_tasks);
    kmp_tasking_flags_t flags_old = task->td_flags;
    if (children == 0 && flags_old.complete == 1) {
      kmp_tasking_flags_t flags_new = flags_old;
      flags_new.complete = 0;
      if (KMP_COMPARE_AND_STORE_ACQ32(RCAST(kmp_int32 *, &task->td_flags),
                                      *RCAST(kmp_int32 *, &flags_old),
                                      *RCAST(kmp_int32 *, &flags_new))) {
        KA_TRACE(100, ("__kmp_finish_implicit_task: T#%d cleans "
                       "dephash of implicit task %p\n",
                       thread->th.th_info.ds.ds_gtid, task));
        __kmp_dephash_free_entries(thread, task->td_dephash);
      }
    }
  }
}

// __kmp_free_implicit_task: Release resources associated with implicit tasks
// when these are destroyed
//
// thread: thread data structure corresponding to implicit task
void __kmp_free_implicit_task(kmp_info_t *thread) {
  kmp_taskdata_t *task = thread->th.th_current_task;
  if (task && task->td_dephash) {
    __kmp_dephash_free(thread, task->td_dephash);
    task->td_dephash = NULL;
  }
}

// Round up a size to a power of two specified by val: Used to insert padding
// between structures co-allocated using a single malloc() call
static size_t __kmp_round_up_to_val(size_t size, size_t val) {
  if (size & (val - 1)) {
    size &= ~(val - 1);
    if (size <= KMP_SIZE_T_MAX - val) {
      size += val; // Round up if there is no overflow.
    }
  }
  return size;
} // __kmp_round_up_to_val

// __kmp_task_alloc: Allocate the taskdata and task data structures for a task
//
// loc_ref: source location information
// gtid: global thread number.
// flags: include tiedness & task type (explicit vs. implicit) of the ''new''
// task encountered. Converted from kmp_int32 to kmp_tasking_flags_t in routine.
// sizeof_kmp_task_t: Size in bytes of kmp_task_t data structure including
// private vars accessed in task.
// sizeof_shareds: Size in bytes of array of pointers to shared vars accessed
// in task.
// task_entry: Pointer to task code entry point generated by compiler.
// returns: a pointer to the allocated kmp_task_t structure (task).
kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                             kmp_tasking_flags_t *flags,
                             size_t sizeof_kmp_task_t, size_t sizeof_shareds,
                             kmp_routine_entry_t task_entry) {
  kmp_task_t *task;
  kmp_taskdata_t *taskdata;
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_info_t *encountering_thread = thread;
  kmp_team_t *team = thread->th.th_team;
  kmp_taskdata_t *parent_task = thread->th.th_current_task;
  size_t shareds_offset;

  if (UNLIKELY(!TCR_4(__kmp_init_middle)))
    __kmp_middle_initialize();

  if (flags->hidden_helper) {
    if (__kmp_enable_hidden_helper) {
      if (!TCR_4(__kmp_init_hidden_helper))
        __kmp_hidden_helper_initialize();

      // For a hidden helper task encountered by a regular thread, we will push
      // the task to the (gtid%__kmp_hidden_helper_threads_num)-th hidden helper
      // thread.
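      // For example, with __kmp_hidden_helper_threads_num == 8, a hidden
      // helper task encountered by gtid 11 is pushed to the shadow gtid of the
      // (11 % 8)-th hidden helper thread (see KMP_GTID_TO_SHADOW_GTID).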
      if (!KMP_HIDDEN_HELPER_THREAD(gtid)) {
        thread = __kmp_threads[KMP_GTID_TO_SHADOW_GTID(gtid)];
        // We don't change the parent-child relation for hidden helper task as
        // we need that to do per-task-region synchronization.
      }
    } else {
      // If the hidden helper task is not enabled, reset the flag to FALSE.
      flags->hidden_helper = FALSE;
    }
  }

  KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) "
                "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
                gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t,
                sizeof_shareds, task_entry));

  KMP_DEBUG_ASSERT(parent_task);
  if (parent_task->td_flags.final) {
    if (flags->merged_if0) {
    }
    flags->final = 1;
  }

  if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
    // Untied task encountered causes the TSC algorithm to check entire deque of
    // the victim thread. If no untied task encountered, then checking the head
    // of the deque should be enough.
    KMP_CHECK_UPDATE(
        encountering_thread->th.th_task_team->tt.tt_untied_task_encountered, 1);
  }

  // Detachable tasks are not proxy tasks yet but could be in the future. Doing
  // the tasking setup when that happens is too late.
  if (UNLIKELY(flags->proxy == TASK_PROXY ||
               flags->detachable == TASK_DETACHABLE || flags->hidden_helper)) {
    if (flags->proxy == TASK_PROXY) {
      flags->tiedness = TASK_UNTIED;
      flags->merged_if0 = 1;
    }
    /* are we running in a sequential parallel or tskm_immediate_exec... we need
       tasking support enabled */
    if ((encountering_thread->th.th_task_team) == NULL) {
      /* This should only happen if the team is serialized
         setup a task team and propagate it to the thread */
      KMP_DEBUG_ASSERT(team->t.t_serialized);
      KA_TRACE(30,
               ("T#%d creating task team in __kmp_task_alloc for proxy task\n",
                gtid));
      __kmp_task_team_setup(
          encountering_thread, team,
          1); // 1 indicates setup the current team regardless of nthreads
      encountering_thread->th.th_task_team =
          team->t.t_task_team[encountering_thread->th.th_task_state];
    }
    kmp_task_team_t *task_team = encountering_thread->th.th_task_team;

    /* tasking must be enabled now as the task might not be pushed */
    if (!KMP_TASKING_ENABLED(task_team)) {
      KA_TRACE(
          30,
          ("T#%d enabling tasking in __kmp_task_alloc for proxy task\n", gtid));
      __kmp_enable_tasking(task_team, encountering_thread);
      kmp_int32 tid = encountering_thread->th.th_info.ds.ds_tid;
      kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
      // No lock needed since only owner can allocate
      if (thread_data->td.td_deque == NULL) {
        __kmp_alloc_task_deque(encountering_thread, thread_data);
      }
    }

    if ((flags->proxy == TASK_PROXY || flags->detachable == TASK_DETACHABLE) &&
        task_team->tt.tt_found_proxy_tasks == FALSE)
      TCW_4(task_team->tt.tt_found_proxy_tasks, TRUE);
    if (flags->hidden_helper &&
        task_team->tt.tt_hidden_helper_task_encountered == FALSE)
      TCW_4(task_team->tt.tt_hidden_helper_task_encountered, TRUE);
  }

  // Calculate shared structure offset including padding after kmp_task_t struct
  // to align pointers in shared struct
  shareds_offset = sizeof(kmp_taskdata_t) + sizeof_kmp_task_t;
  shareds_offset = __kmp_round_up_to_val(shareds_offset, sizeof(void *));

  // Allocate a kmp_taskdata_t block and a kmp_task_t block.
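  // Rough layout of the single allocation performed below:
  //   [ kmp_taskdata_t | kmp_task_t + privates | padding | shareds pointers ]
  //   ^ taskdata        ^ task                            ^ task->shareds
  // where task->shareds starts at shareds_offset, rounded up to pointer size.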
  KA_TRACE(30, ("__kmp_task_alloc: T#%d First malloc size: %ld\n", gtid,
                shareds_offset));
  KA_TRACE(30, ("__kmp_task_alloc: T#%d Second malloc size: %ld\n", gtid,
                sizeof_shareds));

// Avoid double allocation here by combining shareds with taskdata
#if USE_FAST_MEMORY
  taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(
      encountering_thread, shareds_offset + sizeof_shareds);
#else /* ! USE_FAST_MEMORY */
  taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(
      encountering_thread, shareds_offset + sizeof_shareds);
#endif /* USE_FAST_MEMORY */
  ANNOTATE_HAPPENS_AFTER(taskdata);

  task = KMP_TASKDATA_TO_TASK(taskdata);

// Make sure task & taskdata are aligned appropriately
#if KMP_ARCH_X86 || KMP_ARCH_PPC64 || !KMP_HAVE_QUAD
  KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(double) - 1)) == 0);
  KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(double) - 1)) == 0);
#else
  KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(_Quad) - 1)) == 0);
  KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(_Quad) - 1)) == 0);
#endif
  if (sizeof_shareds > 0) {
    // Avoid double allocation here by combining shareds with taskdata
    task->shareds = &((char *)taskdata)[shareds_offset];
    // Make sure shareds struct is aligned to pointer size
    KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
                     0);
  } else {
    task->shareds = NULL;
  }
  task->routine = task_entry;
  task->part_id = 0; // AC: Always start with 0 part id

  taskdata->td_task_id = KMP_GEN_TASK_ID();
  taskdata->td_team = thread->th.th_team;
  taskdata->td_alloc_thread = encountering_thread;
  taskdata->td_parent = parent_task;
  taskdata->td_level = parent_task->td_level + 1; // increment nesting level
  KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
  taskdata->td_ident = loc_ref;
  taskdata->td_taskwait_ident = NULL;
  taskdata->td_taskwait_counter = 0;
  taskdata->td_taskwait_thread = 0;
  KMP_DEBUG_ASSERT(taskdata->td_parent != NULL);
  // avoid copying icvs for proxy tasks
  if (flags->proxy == TASK_FULL)
    copy_icvs(&taskdata->td_icvs, &taskdata->td_parent->td_icvs);

  taskdata->td_flags.tiedness = flags->tiedness;
  taskdata->td_flags.final = flags->final;
  taskdata->td_flags.merged_if0 = flags->merged_if0;
  taskdata->td_flags.destructors_thunk = flags->destructors_thunk;
  taskdata->td_flags.proxy = flags->proxy;
  taskdata->td_flags.detachable = flags->detachable;
  taskdata->td_flags.hidden_helper = flags->hidden_helper;
  taskdata->encountering_gtid = gtid;
  taskdata->td_task_team = thread->th.th_task_team;
  taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
  taskdata->td_flags.tasktype = TASK_EXPLICIT;

  // GEH - TODO: fix this to copy parent task's value of tasking_ser flag
  taskdata->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);

  // GEH - TODO: fix this to copy parent task's value of team_serial flag
  taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;

  // GEH - Note we serialize the task if the team is serialized to make sure
  // implicit parallel region tasks are not left until program termination to
  // execute. Also, it helps locality to execute immediately.
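  // The assignment below makes the task "undeferred" (task_serial == 1), i.e.
  // it is executed immediately rather than pushed, whenever the parent is
  // final, the team or tasking is serialized, or the if(0) clause was merged
  // into the task.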

  taskdata->td_flags.task_serial =
      (parent_task->td_flags.final || taskdata->td_flags.team_serial ||
       taskdata->td_flags.tasking_ser || flags->merged_if0);

  taskdata->td_flags.started = 0;
  taskdata->td_flags.executing = 0;
  taskdata->td_flags.complete = 0;
  taskdata->td_flags.freed = 0;

  taskdata->td_flags.native = flags->native;

  KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
  // start at one because counts current task and children
  KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
  taskdata->td_taskgroup =
      parent_task->td_taskgroup; // task inherits taskgroup from the parent task
  taskdata->td_dephash = NULL;
  taskdata->td_depnode = NULL;
  if (flags->tiedness == TASK_UNTIED)
    taskdata->td_last_tied = NULL; // will be set when the task is scheduled
  else
    taskdata->td_last_tied = taskdata;
  taskdata->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;
#if OMPT_SUPPORT
  if (UNLIKELY(ompt_enabled.enabled))
    __ompt_task_init(taskdata, gtid);
#endif
  // Only need to keep track of child task counts if team parallel and tasking
  // not serialized or if it is a proxy or detachable or hidden helper task
  if (flags->proxy == TASK_PROXY || flags->detachable == TASK_DETACHABLE ||
      flags->hidden_helper ||
      !(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
    KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
    if (parent_task->td_taskgroup)
      KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
    // Only need to keep track of allocated child tasks for explicit tasks since
    // implicit not deallocated
    if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) {
      KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
    }
  }

  if (flags->hidden_helper) {
    taskdata->td_flags.task_serial = FALSE;
    // Increment the number of hidden helper tasks to be executed
    KMP_ATOMIC_INC(&__kmp_unexecuted_hidden_helper_tasks);
  }

  KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
                gtid, taskdata, taskdata->td_parent));
  ANNOTATE_HAPPENS_BEFORE(task);

  return task;
}

kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                  kmp_int32 flags, size_t sizeof_kmp_task_t,
                                  size_t sizeof_shareds,
                                  kmp_routine_entry_t task_entry) {
  kmp_task_t *retval;
  kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
  __kmp_assert_valid_gtid(gtid);
  input_flags->native = FALSE;
  // __kmp_task_alloc() sets up all other runtime flags
  KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s %s) "
                "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
                gtid, loc_ref, input_flags->tiedness ? "tied " : "untied",
                input_flags->proxy ? "proxy" : "",
"detachable" : "", sizeof_kmp_task_t, 1436 sizeof_shareds, task_entry)); 1437 1438 retval = __kmp_task_alloc(loc_ref, gtid, input_flags, sizeof_kmp_task_t, 1439 sizeof_shareds, task_entry); 1440 1441 KA_TRACE(20, ("__kmpc_omp_task_alloc(exit): T#%d retval %p\n", gtid, retval)); 1442 1443 return retval; 1444 } 1445 1446 kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *loc_ref, kmp_int32 gtid, 1447 kmp_int32 flags, 1448 size_t sizeof_kmp_task_t, 1449 size_t sizeof_shareds, 1450 kmp_routine_entry_t task_entry, 1451 kmp_int64 device_id) { 1452 if (__kmp_enable_hidden_helper) { 1453 auto &input_flags = reinterpret_cast<kmp_tasking_flags_t &>(flags); 1454 input_flags.hidden_helper = TRUE; 1455 } 1456 1457 return __kmpc_omp_task_alloc(loc_ref, gtid, flags, sizeof_kmp_task_t, 1458 sizeof_shareds, task_entry); 1459 } 1460 1461 /*! 1462 @ingroup TASKING 1463 @param loc_ref location of the original task directive 1464 @param gtid Global Thread ID of encountering thread 1465 @param new_task task thunk allocated by __kmpc_omp_task_alloc() for the ''new 1466 task'' 1467 @param naffins Number of affinity items 1468 @param affin_list List of affinity items 1469 @return Returns non-zero if registering affinity information was not successful. 1470 Returns 0 if registration was successful 1471 This entry registers the affinity information attached to a task with the task 1472 thunk structure kmp_taskdata_t. 1473 */ 1474 kmp_int32 1475 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref, kmp_int32 gtid, 1476 kmp_task_t *new_task, kmp_int32 naffins, 1477 kmp_task_affinity_info_t *affin_list) { 1478 return 0; 1479 } 1480 1481 // __kmp_invoke_task: invoke the specified task 1482 // 1483 // gtid: global thread ID of caller 1484 // task: the task to invoke 1485 // current_task: the task to resume after task invocation 1486 static void __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task, 1487 kmp_taskdata_t *current_task) { 1488 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task); 1489 kmp_info_t *thread; 1490 int discard = 0 /* false */; 1491 KA_TRACE( 1492 30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n", 1493 gtid, taskdata, current_task)); 1494 KMP_DEBUG_ASSERT(task); 1495 if (UNLIKELY(taskdata->td_flags.proxy == TASK_PROXY && 1496 taskdata->td_flags.complete == 1)) { 1497 // This is a proxy task that was already completed but it needs to run 1498 // its bottom-half finish 1499 KA_TRACE( 1500 30, 1501 ("__kmp_invoke_task: T#%d running bottom finish for proxy task %p\n", 1502 gtid, taskdata)); 1503 1504 __kmp_bottom_half_finish_proxy(gtid, task); 1505 1506 KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed bottom finish for " 1507 "proxy task %p, resuming task %p\n", 1508 gtid, taskdata, current_task)); 1509 1510 return; 1511 } 1512 1513 #if OMPT_SUPPORT 1514 // For untied tasks, the first task executed only calls __kmpc_omp_task and 1515 // does not execute code. 1516 ompt_thread_info_t oldInfo; 1517 if (UNLIKELY(ompt_enabled.enabled)) { 1518 // Store the threads states and restore them after the task 1519 thread = __kmp_threads[gtid]; 1520 oldInfo = thread->th.ompt_thread_info; 1521 thread->th.ompt_thread_info.wait_id = 0; 1522 thread->th.ompt_thread_info.state = (thread->th.th_team_serialized) 1523 ? 
ompt_state_work_serial 1524 : ompt_state_work_parallel; 1525 taskdata->ompt_task_info.frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 1526 } 1527 #endif 1528 1529 // Decrement the counter of hidden helper tasks to be executed 1530 if (taskdata->td_flags.hidden_helper) { 1531 // Hidden helper tasks can only be executed by hidden helper threads 1532 KMP_ASSERT(KMP_HIDDEN_HELPER_THREAD(gtid)); 1533 KMP_ATOMIC_DEC(&__kmp_unexecuted_hidden_helper_tasks); 1534 } 1535 1536 // Proxy tasks are not handled by the runtime 1537 if (taskdata->td_flags.proxy != TASK_PROXY) { 1538 ANNOTATE_HAPPENS_AFTER(task); 1539 __kmp_task_start(gtid, task, current_task); // OMPT only if not discarded 1540 } 1541 1542 // TODO: cancel tasks if the parallel region has also been cancelled 1543 // TODO: check if this sequence can be hoisted above __kmp_task_start 1544 // if cancellation has been enabled for this run ... 1545 if (UNLIKELY(__kmp_omp_cancellation)) { 1546 thread = __kmp_threads[gtid]; 1547 kmp_team_t *this_team = thread->th.th_team; 1548 kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup; 1549 if ((taskgroup && taskgroup->cancel_request) || 1550 (this_team->t.t_cancel_request == cancel_parallel)) { 1551 #if OMPT_SUPPORT && OMPT_OPTIONAL 1552 ompt_data_t *task_data; 1553 if (UNLIKELY(ompt_enabled.ompt_callback_cancel)) { 1554 __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL); 1555 ompt_callbacks.ompt_callback(ompt_callback_cancel)( 1556 task_data, 1557 ((taskgroup && taskgroup->cancel_request) ? ompt_cancel_taskgroup 1558 : ompt_cancel_parallel) | 1559 ompt_cancel_discarded_task, 1560 NULL); 1561 } 1562 #endif 1563 KMP_COUNT_BLOCK(TASK_cancelled); 1564 // this task belongs to a task group and we need to cancel it 1565 discard = 1 /* true */; 1566 } 1567 } 1568 1569 // Invoke the task routine and pass in relevant data. 1570 // Thunks generated by gcc take a different argument list.
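  // Added note (shapes inferred from the two dispatch paths below): a native
  // (GOMP-compatible) thunk is called as "void entry(void *shareds)" with
  // task->shareds as its only argument, while a regular KMP thunk is called
  // as roughly "kmp_int32 entry(kmp_int32 gtid, kmp_task_t *task)" and
  // receives the caller's gtid plus the task thunk itself.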
1571 if (!discard) { 1572 if (taskdata->td_flags.tiedness == TASK_UNTIED) { 1573 taskdata->td_last_tied = current_task->td_last_tied; 1574 KMP_DEBUG_ASSERT(taskdata->td_last_tied); 1575 } 1576 #if KMP_STATS_ENABLED 1577 KMP_COUNT_BLOCK(TASK_executed); 1578 switch (KMP_GET_THREAD_STATE()) { 1579 case FORK_JOIN_BARRIER: 1580 KMP_PUSH_PARTITIONED_TIMER(OMP_task_join_bar); 1581 break; 1582 case PLAIN_BARRIER: 1583 KMP_PUSH_PARTITIONED_TIMER(OMP_task_plain_bar); 1584 break; 1585 case TASKYIELD: 1586 KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskyield); 1587 break; 1588 case TASKWAIT: 1589 KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskwait); 1590 break; 1591 case TASKGROUP: 1592 KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskgroup); 1593 break; 1594 default: 1595 KMP_PUSH_PARTITIONED_TIMER(OMP_task_immediate); 1596 break; 1597 } 1598 #endif // KMP_STATS_ENABLED 1599 1600 // OMPT task begin 1601 #if OMPT_SUPPORT 1602 if (UNLIKELY(ompt_enabled.enabled)) 1603 __ompt_task_start(task, current_task, gtid); 1604 #endif 1605 1606 #if USE_ITT_BUILD && USE_ITT_NOTIFY 1607 kmp_uint64 cur_time; 1608 kmp_int32 kmp_itt_count_task = 1609 __kmp_forkjoin_frames_mode == 3 && !taskdata->td_flags.task_serial && 1610 current_task->td_flags.tasktype == TASK_IMPLICIT; 1611 if (kmp_itt_count_task) { 1612 thread = __kmp_threads[gtid]; 1613 // Time outer level explicit task on barrier for adjusting imbalance time 1614 if (thread->th.th_bar_arrive_time) 1615 cur_time = __itt_get_timestamp(); 1616 else 1617 kmp_itt_count_task = 0; // thread is not on a barrier - skip timing 1618 } 1619 KMP_FSYNC_ACQUIRED(taskdata); // acquired self (new task) 1620 #endif 1621 1622 #ifdef KMP_GOMP_COMPAT 1623 if (taskdata->td_flags.native) { 1624 ((void (*)(void *))(*(task->routine)))(task->shareds); 1625 } else 1626 #endif /* KMP_GOMP_COMPAT */ 1627 { 1628 (*(task->routine))(gtid, task); 1629 } 1630 KMP_POP_PARTITIONED_TIMER(); 1631 1632 #if USE_ITT_BUILD && USE_ITT_NOTIFY 1633 if (kmp_itt_count_task) { 1634 // Barrier imbalance - adjust arrive time with the task duration 1635 thread->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time); 1636 } 1637 KMP_FSYNC_CANCEL(taskdata); // destroy self (just executed) 1638 KMP_FSYNC_RELEASING(taskdata->td_parent); // releasing parent 1639 #endif 1640 } 1641 1642 // Proxy tasks are not handled by the runtime 1643 if (taskdata->td_flags.proxy != TASK_PROXY) { 1644 ANNOTATE_HAPPENS_BEFORE(taskdata->td_parent); 1645 #if OMPT_SUPPORT 1646 if (UNLIKELY(ompt_enabled.enabled)) { 1647 thread->th.ompt_thread_info = oldInfo; 1648 if (taskdata->td_flags.tiedness == TASK_TIED) { 1649 taskdata->ompt_task_info.frame.exit_frame = ompt_data_none; 1650 } 1651 __kmp_task_finish<true>(gtid, task, current_task); 1652 } else 1653 #endif 1654 __kmp_task_finish<false>(gtid, task, current_task); 1655 } 1656 1657 KA_TRACE( 1658 30, 1659 ("__kmp_invoke_task(exit): T#%d completed task %p, resuming task %p\n", 1660 gtid, taskdata, current_task)); 1661 return; 1662 } 1663 1664 // __kmpc_omp_task_parts: Schedule a thread-switchable task for execution 1665 // 1666 // loc_ref: location of original task pragma (ignored) 1667 // gtid: Global Thread ID of encountering thread 1668 // new_task: task thunk allocated by __kmp_omp_task_alloc() for the ''new task'' 1669 // Returns: 1670 // TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to 1671 // be resumed later. 1672 // TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be 1673 // resumed later. 
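// Illustrative lowering sketch (hypothetical, not taken from any particular
// compiler): a "#pragma omp task" construct is typically emitted as
//   kmp_task_t *t = __kmpc_omp_task_alloc(&loc, gtid, /*flags=*/1 /*tied*/,
//                                         sizeof_kmp_task_t, sizeof_shareds,
//                                         &outlined_entry /*hypothetical*/);
//   // ...fill t->shareds with the addresses of shared variables...
//   __kmpc_omp_task(&loc, gtid, t);
// __kmpc_omp_task_parts below is the thread-switchable variant of that final
// scheduling call.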
1674 kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid, 1675 kmp_task_t *new_task) { 1676 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task); 1677 1678 KA_TRACE(10, ("__kmpc_omp_task_parts(enter): T#%d loc=%p task=%p\n", gtid, 1679 loc_ref, new_taskdata)); 1680 1681 #if OMPT_SUPPORT 1682 kmp_taskdata_t *parent; 1683 if (UNLIKELY(ompt_enabled.enabled)) { 1684 parent = new_taskdata->td_parent; 1685 if (ompt_enabled.ompt_callback_task_create) { 1686 ompt_callbacks.ompt_callback(ompt_callback_task_create)( 1687 &(parent->ompt_task_info.task_data), &(parent->ompt_task_info.frame), 1688 &(new_taskdata->ompt_task_info.task_data), ompt_task_explicit, 0, 1689 OMPT_GET_RETURN_ADDRESS(0)); 1690 } 1691 } 1692 #endif 1693 1694 /* Should we execute the new task or queue it? For now, let's just always try 1695 to queue it. If the queue fills up, then we'll execute it. */ 1696 1697 if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer 1698 { // Execute this task immediately 1699 kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task; 1700 new_taskdata->td_flags.task_serial = 1; 1701 __kmp_invoke_task(gtid, new_task, current_task); 1702 } 1703 1704 KA_TRACE( 1705 10, 1706 ("__kmpc_omp_task_parts(exit): T#%d returning TASK_CURRENT_NOT_QUEUED: " 1707 "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n", 1708 gtid, loc_ref, new_taskdata)); 1709 1710 ANNOTATE_HAPPENS_BEFORE(new_task); 1711 #if OMPT_SUPPORT 1712 if (UNLIKELY(ompt_enabled.enabled)) { 1713 parent->ompt_task_info.frame.enter_frame = ompt_data_none; 1714 } 1715 #endif 1716 return TASK_CURRENT_NOT_QUEUED; 1717 } 1718 1719 // __kmp_omp_task: Schedule a non-thread-switchable task for execution 1720 // 1721 // gtid: Global Thread ID of encountering thread 1722 // new_task:non-thread-switchable task thunk allocated by __kmp_omp_task_alloc() 1723 // serialize_immediate: if TRUE then if the task is executed immediately its 1724 // execution will be serialized 1725 // Returns: 1726 // TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to 1727 // be resumed later. 1728 // TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be 1729 // resumed later. 1730 kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task, 1731 bool serialize_immediate) { 1732 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task); 1733 1734 /* Should we execute the new task or queue it? For now, let's just always try 1735 to queue it. If the queue fills up, then we'll execute it. */ 1736 if (new_taskdata->td_flags.proxy == TASK_PROXY || 1737 __kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer 1738 { // Execute this task immediately 1739 kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task; 1740 if (serialize_immediate) 1741 new_taskdata->td_flags.task_serial = 1; 1742 __kmp_invoke_task(gtid, new_task, current_task); 1743 } 1744 1745 ANNOTATE_HAPPENS_BEFORE(new_task); 1746 return TASK_CURRENT_NOT_QUEUED; 1747 } 1748 1749 // __kmpc_omp_task: Wrapper around __kmp_omp_task to schedule a 1750 // non-thread-switchable task from the parent thread only! 1751 // 1752 // loc_ref: location of original task pragma (ignored) 1753 // gtid: Global Thread ID of encountering thread 1754 // new_task: non-thread-switchable task thunk allocated by 1755 // __kmp_omp_task_alloc() 1756 // Returns: 1757 // TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to 1758 // be resumed later. 
1759 // TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be 1760 // resumed later. 1761 kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid, 1762 kmp_task_t *new_task) { 1763 kmp_int32 res; 1764 KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK); 1765 1766 #if KMP_DEBUG || OMPT_SUPPORT 1767 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task); 1768 #endif 1769 KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref, 1770 new_taskdata)); 1771 __kmp_assert_valid_gtid(gtid); 1772 1773 #if OMPT_SUPPORT 1774 kmp_taskdata_t *parent = NULL; 1775 if (UNLIKELY(ompt_enabled.enabled)) { 1776 if (!new_taskdata->td_flags.started) { 1777 OMPT_STORE_RETURN_ADDRESS(gtid); 1778 parent = new_taskdata->td_parent; 1779 if (!parent->ompt_task_info.frame.enter_frame.ptr) { 1780 parent->ompt_task_info.frame.enter_frame.ptr = 1781 OMPT_GET_FRAME_ADDRESS(0); 1782 } 1783 if (ompt_enabled.ompt_callback_task_create) { 1784 ompt_callbacks.ompt_callback(ompt_callback_task_create)( 1785 &(parent->ompt_task_info.task_data), 1786 &(parent->ompt_task_info.frame), 1787 &(new_taskdata->ompt_task_info.task_data), 1788 ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0, 1789 OMPT_LOAD_RETURN_ADDRESS(gtid)); 1790 } 1791 } else { 1792 // We are scheduling the continuation of an UNTIED task. 1793 // Scheduling back to the parent task. 1794 __ompt_task_finish(new_task, 1795 new_taskdata->ompt_task_info.scheduling_parent, 1796 ompt_task_switch); 1797 new_taskdata->ompt_task_info.frame.exit_frame = ompt_data_none; 1798 } 1799 } 1800 #endif 1801 1802 res = __kmp_omp_task(gtid, new_task, true); 1803 1804 KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning " 1805 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n", 1806 gtid, loc_ref, new_taskdata)); 1807 #if OMPT_SUPPORT 1808 if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) { 1809 parent->ompt_task_info.frame.enter_frame = ompt_data_none; 1810 } 1811 #endif 1812 return res; 1813 } 1814 1815 // __kmp_omp_taskloop_task: Wrapper around __kmp_omp_task to schedule 1816 // a taskloop task with the correct OMPT return address 1817 // 1818 // loc_ref: location of original task pragma (ignored) 1819 // gtid: Global Thread ID of encountering thread 1820 // new_task: non-thread-switchable task thunk allocated by 1821 // __kmp_omp_task_alloc() 1822 // codeptr_ra: return address for OMPT callback 1823 // Returns: 1824 // TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to 1825 // be resumed later. 1826 // TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be 1827 // resumed later. 
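// Added clarification (inferred from the OMPT block below): taskloop tasks
// are created from inside the runtime rather than directly at a user call
// site, so the return address for ompt_callback_task_create cannot be taken
// with OMPT_LOAD_RETURN_ADDRESS here; the caller captures it once and passes
// it down as codeptr_ra instead.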
1828 kmp_int32 __kmp_omp_taskloop_task(ident_t *loc_ref, kmp_int32 gtid, 1829 kmp_task_t *new_task, void *codeptr_ra) { 1830 kmp_int32 res; 1831 KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK); 1832 1833 #if KMP_DEBUG || OMPT_SUPPORT 1834 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task); 1835 #endif 1836 KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref, 1837 new_taskdata)); 1838 1839 #if OMPT_SUPPORT 1840 kmp_taskdata_t *parent = NULL; 1841 if (UNLIKELY(ompt_enabled.enabled && !new_taskdata->td_flags.started)) { 1842 parent = new_taskdata->td_parent; 1843 if (!parent->ompt_task_info.frame.enter_frame.ptr) 1844 parent->ompt_task_info.frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); 1845 if (ompt_enabled.ompt_callback_task_create) { 1846 ompt_callbacks.ompt_callback(ompt_callback_task_create)( 1847 &(parent->ompt_task_info.task_data), &(parent->ompt_task_info.frame), 1848 &(new_taskdata->ompt_task_info.task_data), 1849 ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0, 1850 codeptr_ra); 1851 } 1852 } 1853 #endif 1854 1855 res = __kmp_omp_task(gtid, new_task, true); 1856 1857 KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning " 1858 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n", 1859 gtid, loc_ref, new_taskdata)); 1860 #if OMPT_SUPPORT 1861 if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) { 1862 parent->ompt_task_info.frame.enter_frame = ompt_data_none; 1863 } 1864 #endif 1865 return res; 1866 } 1867 1868 template <bool ompt> 1869 static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid, 1870 void *frame_address, 1871 void *return_address) { 1872 kmp_taskdata_t *taskdata = nullptr; 1873 kmp_info_t *thread; 1874 int thread_finished = FALSE; 1875 KMP_SET_THREAD_STATE_BLOCK(TASKWAIT); 1876 1877 KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref)); 1878 KMP_DEBUG_ASSERT(gtid >= 0); 1879 1880 if (__kmp_tasking_mode != tskm_immediate_exec) { 1881 thread = __kmp_threads[gtid]; 1882 taskdata = thread->th.th_current_task; 1883 1884 #if OMPT_SUPPORT && OMPT_OPTIONAL 1885 ompt_data_t *my_task_data; 1886 ompt_data_t *my_parallel_data; 1887 1888 if (ompt) { 1889 my_task_data = &(taskdata->ompt_task_info.task_data); 1890 my_parallel_data = OMPT_CUR_TEAM_DATA(thread); 1891 1892 taskdata->ompt_task_info.frame.enter_frame.ptr = frame_address; 1893 1894 if (ompt_enabled.ompt_callback_sync_region) { 1895 ompt_callbacks.ompt_callback(ompt_callback_sync_region)( 1896 ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data, 1897 my_task_data, return_address); 1898 } 1899 1900 if (ompt_enabled.ompt_callback_sync_region_wait) { 1901 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)( 1902 ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data, 1903 my_task_data, return_address); 1904 } 1905 } 1906 #endif // OMPT_SUPPORT && OMPT_OPTIONAL 1907 1908 // Debugger: The taskwait is active. Store location and thread encountered the 1909 // taskwait. 1910 #if USE_ITT_BUILD 1911 // Note: These values are used by ITT events as well. 
1912 #endif /* USE_ITT_BUILD */ 1913 taskdata->td_taskwait_counter += 1; 1914 taskdata->td_taskwait_ident = loc_ref; 1915 taskdata->td_taskwait_thread = gtid + 1; 1916 1917 #if USE_ITT_BUILD 1918 void *itt_sync_obj = NULL; 1919 #if USE_ITT_NOTIFY 1920 KMP_ITT_TASKWAIT_STARTING(itt_sync_obj); 1921 #endif /* USE_ITT_NOTIFY */ 1922 #endif /* USE_ITT_BUILD */ 1923 1924 bool must_wait = 1925 !taskdata->td_flags.team_serial && !taskdata->td_flags.final; 1926 1927 must_wait = must_wait || (thread->th.th_task_team != NULL && 1928 thread->th.th_task_team->tt.tt_found_proxy_tasks); 1929 // If hidden helper thread is encountered, we must enable wait here. 1930 must_wait = 1931 must_wait || 1932 (__kmp_enable_hidden_helper && thread->th.th_task_team != NULL && 1933 thread->th.th_task_team->tt.tt_hidden_helper_task_encountered); 1934 1935 if (must_wait) { 1936 kmp_flag_32<false, false> flag( 1937 RCAST(std::atomic<kmp_uint32> *, 1938 &(taskdata->td_incomplete_child_tasks)), 1939 0U); 1940 while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) != 0) { 1941 flag.execute_tasks(thread, gtid, FALSE, 1942 &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), 1943 __kmp_task_stealing_constraint); 1944 } 1945 } 1946 #if USE_ITT_BUILD 1947 KMP_ITT_TASKWAIT_FINISHED(itt_sync_obj); 1948 KMP_FSYNC_ACQUIRED(taskdata); // acquire self - sync with children 1949 #endif /* USE_ITT_BUILD */ 1950 1951 // Debugger: The taskwait is completed. Location remains, but thread is 1952 // negated. 1953 taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; 1954 1955 #if OMPT_SUPPORT && OMPT_OPTIONAL 1956 if (ompt) { 1957 if (ompt_enabled.ompt_callback_sync_region_wait) { 1958 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)( 1959 ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data, 1960 my_task_data, return_address); 1961 } 1962 if (ompt_enabled.ompt_callback_sync_region) { 1963 ompt_callbacks.ompt_callback(ompt_callback_sync_region)( 1964 ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data, 1965 my_task_data, return_address); 1966 } 1967 taskdata->ompt_task_info.frame.enter_frame = ompt_data_none; 1968 } 1969 #endif // OMPT_SUPPORT && OMPT_OPTIONAL 1970 1971 ANNOTATE_HAPPENS_AFTER(taskdata); 1972 } 1973 1974 KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, " 1975 "returning TASK_CURRENT_NOT_QUEUED\n", 1976 gtid, taskdata)); 1977 1978 return TASK_CURRENT_NOT_QUEUED; 1979 } 1980 1981 #if OMPT_SUPPORT && OMPT_OPTIONAL 1982 OMPT_NOINLINE 1983 static kmp_int32 __kmpc_omp_taskwait_ompt(ident_t *loc_ref, kmp_int32 gtid, 1984 void *frame_address, 1985 void *return_address) { 1986 return __kmpc_omp_taskwait_template<true>(loc_ref, gtid, frame_address, 1987 return_address); 1988 } 1989 #endif // OMPT_SUPPORT && OMPT_OPTIONAL 1990 1991 // __kmpc_omp_taskwait: Wait until all tasks generated by the current task are 1992 // complete 1993 kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid) { 1994 #if OMPT_SUPPORT && OMPT_OPTIONAL 1995 if (UNLIKELY(ompt_enabled.enabled)) { 1996 OMPT_STORE_RETURN_ADDRESS(gtid); 1997 return __kmpc_omp_taskwait_ompt(loc_ref, gtid, OMPT_GET_FRAME_ADDRESS(0), 1998 OMPT_LOAD_RETURN_ADDRESS(gtid)); 1999 } 2000 #endif 2001 return __kmpc_omp_taskwait_template<false>(loc_ref, gtid, NULL, NULL); 2002 } 2003 2004 // __kmpc_omp_taskyield: switch to a different task 2005 kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part) { 2006 kmp_taskdata_t *taskdata = NULL; 2007 kmp_info_t *thread; 2008 int thread_finished = FALSE; 2009 
2010 KMP_COUNT_BLOCK(OMP_TASKYIELD); 2011 KMP_SET_THREAD_STATE_BLOCK(TASKYIELD); 2012 2013 KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n", 2014 gtid, loc_ref, end_part)); 2015 __kmp_assert_valid_gtid(gtid); 2016 2017 if (__kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel) { 2018 thread = __kmp_threads[gtid]; 2019 taskdata = thread->th.th_current_task; 2020 // Should we model this as a task wait or not? 2021 // Debugger: The taskwait is active. Store location and thread encountered the 2022 // taskwait. 2023 #if USE_ITT_BUILD 2024 // Note: These values are used by ITT events as well. 2025 #endif /* USE_ITT_BUILD */ 2026 taskdata->td_taskwait_counter += 1; 2027 taskdata->td_taskwait_ident = loc_ref; 2028 taskdata->td_taskwait_thread = gtid + 1; 2029 2030 #if USE_ITT_BUILD 2031 void *itt_sync_obj = NULL; 2032 #if USE_ITT_NOTIFY 2033 KMP_ITT_TASKWAIT_STARTING(itt_sync_obj); 2034 #endif /* USE_ITT_NOTIFY */ 2035 #endif /* USE_ITT_BUILD */ 2036 if (!taskdata->td_flags.team_serial) { 2037 kmp_task_team_t *task_team = thread->th.th_task_team; 2038 if (task_team != NULL) { 2039 if (KMP_TASKING_ENABLED(task_team)) { 2040 #if OMPT_SUPPORT 2041 if (UNLIKELY(ompt_enabled.enabled)) 2042 thread->th.ompt_thread_info.ompt_task_yielded = 1; 2043 #endif 2044 __kmp_execute_tasks_32( 2045 thread, gtid, (kmp_flag_32<> *)NULL, FALSE, 2046 &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), 2047 __kmp_task_stealing_constraint); 2048 #if OMPT_SUPPORT 2049 if (UNLIKELY(ompt_enabled.enabled)) 2050 thread->th.ompt_thread_info.ompt_task_yielded = 0; 2051 #endif 2052 } 2053 } 2054 } 2055 #if USE_ITT_BUILD 2056 KMP_ITT_TASKWAIT_FINISHED(itt_sync_obj); 2057 #endif /* USE_ITT_BUILD */ 2058 2059 // Debugger: The taskwait is completed. Location remains, but thread is 2060 // negated. 2061 taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; 2062 } 2063 2064 KA_TRACE(10, ("__kmpc_omp_taskyield(exit): T#%d task %p resuming, " 2065 "returning TASK_CURRENT_NOT_QUEUED\n", 2066 gtid, taskdata)); 2067 2068 return TASK_CURRENT_NOT_QUEUED; 2069 } 2070 2071 // Task Reduction implementation 2072 // 2073 // Note: initial implementation didn't take into account the possibility 2074 // to specify omp_orig for initializer of the UDR (user defined reduction). 2075 // Corrected implementation takes into account the omp_orig object. 2076 // Compiler is free to use old implementation if omp_orig is not specified. 2077 2078 /*! 2079 @ingroup BASIC_TYPES 2080 @{ 2081 */ 2082 2083 /*! 2084 Flags for special info per task reduction item. 2085 */ 2086 typedef struct kmp_taskred_flags { 2087 /*! 1 - use lazy alloc/init (e.g. big objects, #tasks < #threads) */ 2088 unsigned lazy_priv : 1; 2089 unsigned reserved31 : 31; 2090 } kmp_taskred_flags_t; 2091 2092 /*! 2093 Internal struct for reduction data item related info set up by compiler. 2094 */ 2095 typedef struct kmp_task_red_input { 2096 void *reduce_shar; /**< shared between tasks item to reduce into */ 2097 size_t reduce_size; /**< size of data item in bytes */ 2098 // three compiler-generated routines (init, fini are optional): 2099 void *reduce_init; /**< data initialization routine (single parameter) */ 2100 void *reduce_fini; /**< data finalization routine */ 2101 void *reduce_comb; /**< data combiner routine */ 2102 kmp_taskred_flags_t flags; /**< flags for additional info from compiler */ 2103 } kmp_task_red_input_t; 2104 2105 /*! 2106 Internal struct for reduction data item related info saved by the library. 
2107 */ 2108 typedef struct kmp_taskred_data { 2109 void *reduce_shar; /**< shared between tasks item to reduce into */ 2110 size_t reduce_size; /**< size of data item */ 2111 kmp_taskred_flags_t flags; /**< flags for additional info from compiler */ 2112 void *reduce_priv; /**< array of thread specific items */ 2113 void *reduce_pend; /**< end of private data for faster comparison op */ 2114 // three compiler-generated routines (init, fini are optional): 2115 void *reduce_comb; /**< data combiner routine */ 2116 void *reduce_init; /**< data initialization routine (two parameters) */ 2117 void *reduce_fini; /**< data finalization routine */ 2118 void *reduce_orig; /**< original item (can be used in UDR initializer) */ 2119 } kmp_taskred_data_t; 2120 2121 /*! 2122 Internal struct for reduction data item related info set up by compiler. 2123 2124 New interface: added reduce_orig field to provide omp_orig for UDR initializer. 2125 */ 2126 typedef struct kmp_taskred_input { 2127 void *reduce_shar; /**< shared between tasks item to reduce into */ 2128 void *reduce_orig; /**< original reduction item used for initialization */ 2129 size_t reduce_size; /**< size of data item */ 2130 // three compiler-generated routines (init, fini are optional): 2131 void *reduce_init; /**< data initialization routine (two parameters) */ 2132 void *reduce_fini; /**< data finalization routine */ 2133 void *reduce_comb; /**< data combiner routine */ 2134 kmp_taskred_flags_t flags; /**< flags for additional info from compiler */ 2135 } kmp_taskred_input_t; 2136 /*! 2137 @} 2138 */ 2139 2140 template <typename T> void __kmp_assign_orig(kmp_taskred_data_t &item, T &src); 2141 template <> 2142 void __kmp_assign_orig<kmp_task_red_input_t>(kmp_taskred_data_t &item, 2143 kmp_task_red_input_t &src) { 2144 item.reduce_orig = NULL; 2145 } 2146 template <> 2147 void __kmp_assign_orig<kmp_taskred_input_t>(kmp_taskred_data_t &item, 2148 kmp_taskred_input_t &src) { 2149 if (src.reduce_orig != NULL) { 2150 item.reduce_orig = src.reduce_orig; 2151 } else { 2152 item.reduce_orig = src.reduce_shar; 2153 } // non-NULL reduce_orig means new interface used 2154 } 2155 2156 template <typename T> void __kmp_call_init(kmp_taskred_data_t &item, size_t j); 2157 template <> 2158 void __kmp_call_init<kmp_task_red_input_t>(kmp_taskred_data_t &item, 2159 size_t offset) { 2160 ((void (*)(void *))item.reduce_init)((char *)(item.reduce_priv) + offset); 2161 } 2162 template <> 2163 void __kmp_call_init<kmp_taskred_input_t>(kmp_taskred_data_t &item, 2164 size_t offset) { 2165 ((void (*)(void *, void *))item.reduce_init)( 2166 (char *)(item.reduce_priv) + offset, item.reduce_orig); 2167 } 2168 2169 template <typename T> 2170 void *__kmp_task_reduction_init(int gtid, int num, T *data) { 2171 __kmp_assert_valid_gtid(gtid); 2172 kmp_info_t *thread = __kmp_threads[gtid]; 2173 kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup; 2174 kmp_uint32 nth = thread->th.th_team_nproc; 2175 kmp_taskred_data_t *arr; 2176 2177 // check input data just in case 2178 KMP_ASSERT(tg != NULL); 2179 KMP_ASSERT(data != NULL); 2180 KMP_ASSERT(num > 0); 2181 if (nth == 1) { 2182 KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, tg %p, exiting nth=1\n", 2183 gtid, tg)); 2184 return (void *)tg; 2185 } 2186 KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, taskgroup %p, #items %d\n", 2187 gtid, tg, num)); 2188 arr = (kmp_taskred_data_t *)__kmp_thread_malloc( 2189 thread, num * sizeof(kmp_taskred_data_t)); 2190 for (int i = 0; i < num; ++i) { 2191 size_t size = 
data[i].reduce_size - 1; 2192 // round the size up to cache line per thread-specific item 2193 size += CACHE_LINE - size % CACHE_LINE; 2194 KMP_ASSERT(data[i].reduce_comb != NULL); // combiner is mandatory 2195 arr[i].reduce_shar = data[i].reduce_shar; 2196 arr[i].reduce_size = size; 2197 arr[i].flags = data[i].flags; 2198 arr[i].reduce_comb = data[i].reduce_comb; 2199 arr[i].reduce_init = data[i].reduce_init; 2200 arr[i].reduce_fini = data[i].reduce_fini; 2201 __kmp_assign_orig<T>(arr[i], data[i]); 2202 if (!arr[i].flags.lazy_priv) { 2203 // allocate cache-line aligned block and fill it with zeros 2204 arr[i].reduce_priv = __kmp_allocate(nth * size); 2205 arr[i].reduce_pend = (char *)(arr[i].reduce_priv) + nth * size; 2206 if (arr[i].reduce_init != NULL) { 2207 // initialize all thread-specific items 2208 for (size_t j = 0; j < nth; ++j) { 2209 __kmp_call_init<T>(arr[i], j * size); 2210 } 2211 } 2212 } else { 2213 // only allocate space for pointers now, 2214 // objects will be lazily allocated/initialized if/when requested 2215 // note that __kmp_allocate zeroes the allocated memory 2216 arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *)); 2217 } 2218 } 2219 tg->reduce_data = (void *)arr; 2220 tg->reduce_num_data = num; 2221 return (void *)tg; 2222 } 2223 2224 /*! 2225 @ingroup TASKING 2226 @param gtid Global thread ID 2227 @param num Number of data items to reduce 2228 @param data Array of data for reduction 2229 @return The taskgroup identifier 2230 2231 Initialize task reduction for the taskgroup. 2232 2233 Note: this entry supposes the optional compiler-generated initializer routine 2234 has single parameter - pointer to object to be initialized. That means 2235 the reduction either does not use omp_orig object, or the omp_orig is accessible 2236 without help of the runtime library. 2237 */ 2238 void *__kmpc_task_reduction_init(int gtid, int num, void *data) { 2239 return __kmp_task_reduction_init(gtid, num, (kmp_task_red_input_t *)data); 2240 } 2241 2242 /*! 2243 @ingroup TASKING 2244 @param gtid Global thread ID 2245 @param num Number of data items to reduce 2246 @param data Array of data for reduction 2247 @return The taskgroup identifier 2248 2249 Initialize task reduction for the taskgroup. 2250 2251 Note: this entry supposes the optional compiler-generated initializer routine 2252 has two parameters, pointer to object to be initialized and pointer to omp_orig 2253 */ 2254 void *__kmpc_taskred_init(int gtid, int num, void *data) { 2255 return __kmp_task_reduction_init(gtid, num, (kmp_taskred_input_t *)data); 2256 } 2257 2258 // Copy task reduction data (except for shared pointers). 2259 template <typename T> 2260 void __kmp_task_reduction_init_copy(kmp_info_t *thr, int num, T *data, 2261 kmp_taskgroup_t *tg, void *reduce_data) { 2262 kmp_taskred_data_t *arr; 2263 KA_TRACE(20, ("__kmp_task_reduction_init_copy: Th %p, init taskgroup %p," 2264 " from data %p\n", 2265 thr, tg, reduce_data)); 2266 arr = (kmp_taskred_data_t *)__kmp_thread_malloc( 2267 thr, num * sizeof(kmp_taskred_data_t)); 2268 // threads will share private copies, thunk routines, sizes, flags, etc.: 2269 KMP_MEMCPY(arr, reduce_data, num * sizeof(kmp_taskred_data_t)); 2270 for (int i = 0; i < num; ++i) { 2271 arr[i].reduce_shar = data[i].reduce_shar; // init unique shared pointers 2272 } 2273 tg->reduce_data = (void *)arr; 2274 tg->reduce_num_data = num; 2275 } 2276 2277 /*! 
2278 @ingroup TASKING 2279 @param gtid Global thread ID 2280 @param tskgrp The taskgroup ID (optional) 2281 @param data Shared location of the item 2282 @return The pointer to per-thread data 2283 2284 Get thread-specific location of data item 2285 */ 2286 void *__kmpc_task_reduction_get_th_data(int gtid, void *tskgrp, void *data) { 2287 __kmp_assert_valid_gtid(gtid); 2288 kmp_info_t *thread = __kmp_threads[gtid]; 2289 kmp_int32 nth = thread->th.th_team_nproc; 2290 if (nth == 1) 2291 return data; // nothing to do 2292 2293 kmp_taskgroup_t *tg = (kmp_taskgroup_t *)tskgrp; 2294 if (tg == NULL) 2295 tg = thread->th.th_current_task->td_taskgroup; 2296 KMP_ASSERT(tg != NULL); 2297 kmp_taskred_data_t *arr = (kmp_taskred_data_t *)(tg->reduce_data); 2298 kmp_int32 num = tg->reduce_num_data; 2299 kmp_int32 tid = thread->th.th_info.ds.ds_tid; 2300 2301 KMP_ASSERT(data != NULL); 2302 while (tg != NULL) { 2303 for (int i = 0; i < num; ++i) { 2304 if (!arr[i].flags.lazy_priv) { 2305 if (data == arr[i].reduce_shar || 2306 (data >= arr[i].reduce_priv && data < arr[i].reduce_pend)) 2307 return (char *)(arr[i].reduce_priv) + tid * arr[i].reduce_size; 2308 } else { 2309 // check shared location first 2310 void **p_priv = (void **)(arr[i].reduce_priv); 2311 if (data == arr[i].reduce_shar) 2312 goto found; 2313 // check if we get some thread specific location as parameter 2314 for (int j = 0; j < nth; ++j) 2315 if (data == p_priv[j]) 2316 goto found; 2317 continue; // not found, continue search 2318 found: 2319 if (p_priv[tid] == NULL) { 2320 // allocate thread specific object lazily 2321 p_priv[tid] = __kmp_allocate(arr[i].reduce_size); 2322 if (arr[i].reduce_init != NULL) { 2323 if (arr[i].reduce_orig != NULL) { // new interface 2324 ((void (*)(void *, void *))arr[i].reduce_init)( 2325 p_priv[tid], arr[i].reduce_orig); 2326 } else { // old interface (single parameter) 2327 ((void (*)(void *))arr[i].reduce_init)(p_priv[tid]); 2328 } 2329 } 2330 } 2331 return p_priv[tid]; 2332 } 2333 } 2334 tg = tg->parent; 2335 arr = (kmp_taskred_data_t *)(tg->reduce_data); 2336 num = tg->reduce_num_data; 2337 } 2338 KMP_ASSERT2(0, "Unknown task reduction item"); 2339 return NULL; // ERROR, this line never executed 2340 } 2341 2342 // Finalize task reduction. 
2343 // Called from __kmpc_end_taskgroup() 2344 static void __kmp_task_reduction_fini(kmp_info_t *th, kmp_taskgroup_t *tg) { 2345 kmp_int32 nth = th->th.th_team_nproc; 2346 KMP_DEBUG_ASSERT(nth > 1); // should not be called if nth == 1 2347 kmp_taskred_data_t *arr = (kmp_taskred_data_t *)tg->reduce_data; 2348 kmp_int32 num = tg->reduce_num_data; 2349 for (int i = 0; i < num; ++i) { 2350 void *sh_data = arr[i].reduce_shar; 2351 void (*f_fini)(void *) = (void (*)(void *))(arr[i].reduce_fini); 2352 void (*f_comb)(void *, void *) = 2353 (void (*)(void *, void *))(arr[i].reduce_comb); 2354 if (!arr[i].flags.lazy_priv) { 2355 void *pr_data = arr[i].reduce_priv; 2356 size_t size = arr[i].reduce_size; 2357 for (int j = 0; j < nth; ++j) { 2358 void *priv_data = (char *)pr_data + j * size; 2359 f_comb(sh_data, priv_data); // combine results 2360 if (f_fini) 2361 f_fini(priv_data); // finalize if needed 2362 } 2363 } else { 2364 void **pr_data = (void **)(arr[i].reduce_priv); 2365 for (int j = 0; j < nth; ++j) { 2366 if (pr_data[j] != NULL) { 2367 f_comb(sh_data, pr_data[j]); // combine results 2368 if (f_fini) 2369 f_fini(pr_data[j]); // finalize if needed 2370 __kmp_free(pr_data[j]); 2371 } 2372 } 2373 } 2374 __kmp_free(arr[i].reduce_priv); 2375 } 2376 __kmp_thread_free(th, arr); 2377 tg->reduce_data = NULL; 2378 tg->reduce_num_data = 0; 2379 } 2380 2381 // Cleanup task reduction data for parallel or worksharing, 2382 // do not touch task private data other threads still working with. 2383 // Called from __kmpc_end_taskgroup() 2384 static void __kmp_task_reduction_clean(kmp_info_t *th, kmp_taskgroup_t *tg) { 2385 __kmp_thread_free(th, tg->reduce_data); 2386 tg->reduce_data = NULL; 2387 tg->reduce_num_data = 0; 2388 } 2389 2390 template <typename T> 2391 void *__kmp_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws, 2392 int num, T *data) { 2393 __kmp_assert_valid_gtid(gtid); 2394 kmp_info_t *thr = __kmp_threads[gtid]; 2395 kmp_int32 nth = thr->th.th_team_nproc; 2396 __kmpc_taskgroup(loc, gtid); // form new taskgroup first 2397 if (nth == 1) { 2398 KA_TRACE(10, 2399 ("__kmpc_reduction_modifier_init: T#%d, tg %p, exiting nth=1\n", 2400 gtid, thr->th.th_current_task->td_taskgroup)); 2401 return (void *)thr->th.th_current_task->td_taskgroup; 2402 } 2403 kmp_team_t *team = thr->th.th_team; 2404 void *reduce_data; 2405 kmp_taskgroup_t *tg; 2406 reduce_data = KMP_ATOMIC_LD_RLX(&team->t.t_tg_reduce_data[is_ws]); 2407 if (reduce_data == NULL && 2408 __kmp_atomic_compare_store(&team->t.t_tg_reduce_data[is_ws], reduce_data, 2409 (void *)1)) { 2410 // single thread enters this block to initialize common reduction data 2411 KMP_DEBUG_ASSERT(reduce_data == NULL); 2412 // first initialize own data, then make a copy other threads can use 2413 tg = (kmp_taskgroup_t *)__kmp_task_reduction_init<T>(gtid, num, data); 2414 reduce_data = __kmp_thread_malloc(thr, num * sizeof(kmp_taskred_data_t)); 2415 KMP_MEMCPY(reduce_data, tg->reduce_data, num * sizeof(kmp_taskred_data_t)); 2416 // fini counters should be 0 at this point 2417 KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[0]) == 0); 2418 KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[1]) == 0); 2419 KMP_ATOMIC_ST_REL(&team->t.t_tg_reduce_data[is_ws], reduce_data); 2420 } else { 2421 while ( 2422 (reduce_data = KMP_ATOMIC_LD_ACQ(&team->t.t_tg_reduce_data[is_ws])) == 2423 (void *)1) { // wait for task reduction initialization 2424 KMP_CPU_PAUSE(); 2425 } 2426 KMP_DEBUG_ASSERT(reduce_data > (void *)1); // should be valid pointer 
here 2427 tg = thr->th.th_current_task->td_taskgroup; 2428 __kmp_task_reduction_init_copy<T>(thr, num, data, tg, reduce_data); 2429 } 2430 return tg; 2431 } 2432 2433 /*! 2434 @ingroup TASKING 2435 @param loc Source location info 2436 @param gtid Global thread ID 2437 @param is_ws Is 1 if the reduction is for worksharing, 0 otherwise 2438 @param num Number of data items to reduce 2439 @param data Array of data for reduction 2440 @return The taskgroup identifier 2441 2442 Initialize task reduction for a parallel or worksharing. 2443 2444 Note: this entry supposes the optional compiler-generated initializer routine 2445 has single parameter - pointer to object to be initialized. That means 2446 the reduction either does not use omp_orig object, or the omp_orig is accessible 2447 without help of the runtime library. 2448 */ 2449 void *__kmpc_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws, 2450 int num, void *data) { 2451 return __kmp_task_reduction_modifier_init(loc, gtid, is_ws, num, 2452 (kmp_task_red_input_t *)data); 2453 } 2454 2455 /*! 2456 @ingroup TASKING 2457 @param loc Source location info 2458 @param gtid Global thread ID 2459 @param is_ws Is 1 if the reduction is for worksharing, 0 otherwise 2460 @param num Number of data items to reduce 2461 @param data Array of data for reduction 2462 @return The taskgroup identifier 2463 2464 Initialize task reduction for a parallel or worksharing. 2465 2466 Note: this entry supposes the optional compiler-generated initializer routine 2467 has two parameters, pointer to object to be initialized and pointer to omp_orig 2468 */ 2469 void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int is_ws, int num, 2470 void *data) { 2471 return __kmp_task_reduction_modifier_init(loc, gtid, is_ws, num, 2472 (kmp_taskred_input_t *)data); 2473 } 2474 2475 /*! 2476 @ingroup TASKING 2477 @param loc Source location info 2478 @param gtid Global thread ID 2479 @param is_ws Is 1 if the reduction is for worksharing, 0 otherwise 2480 2481 Finalize task reduction for a parallel or worksharing. 2482 */ 2483 void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid, int is_ws) { 2484 __kmpc_end_taskgroup(loc, gtid); 2485 } 2486 2487 // __kmpc_taskgroup: Start a new taskgroup 2488 void __kmpc_taskgroup(ident_t *loc, int gtid) { 2489 __kmp_assert_valid_gtid(gtid); 2490 kmp_info_t *thread = __kmp_threads[gtid]; 2491 kmp_taskdata_t *taskdata = thread->th.th_current_task; 2492 kmp_taskgroup_t *tg_new = 2493 (kmp_taskgroup_t *)__kmp_thread_malloc(thread, sizeof(kmp_taskgroup_t)); 2494 KA_TRACE(10, ("__kmpc_taskgroup: T#%d loc=%p group=%p\n", gtid, loc, tg_new)); 2495 KMP_ATOMIC_ST_RLX(&tg_new->count, 0); 2496 KMP_ATOMIC_ST_RLX(&tg_new->cancel_request, cancel_noreq); 2497 tg_new->parent = taskdata->td_taskgroup; 2498 tg_new->reduce_data = NULL; 2499 tg_new->reduce_num_data = 0; 2500 taskdata->td_taskgroup = tg_new; 2501 2502 #if OMPT_SUPPORT && OMPT_OPTIONAL 2503 if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) { 2504 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); 2505 if (!codeptr) 2506 codeptr = OMPT_GET_RETURN_ADDRESS(0); 2507 kmp_team_t *team = thread->th.th_team; 2508 ompt_data_t my_task_data = taskdata->ompt_task_info.task_data; 2509 // FIXME: I think this is wrong for lwt! 
2510 ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data; 2511 2512 ompt_callbacks.ompt_callback(ompt_callback_sync_region)( 2513 ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data), 2514 &(my_task_data), codeptr); 2515 } 2516 #endif 2517 } 2518 2519 // __kmpc_end_taskgroup: Wait until all tasks generated by the current task 2520 // and its descendants are complete 2521 void __kmpc_end_taskgroup(ident_t *loc, int gtid) { 2522 __kmp_assert_valid_gtid(gtid); 2523 kmp_info_t *thread = __kmp_threads[gtid]; 2524 kmp_taskdata_t *taskdata = thread->th.th_current_task; 2525 kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup; 2526 int thread_finished = FALSE; 2527 2528 #if OMPT_SUPPORT && OMPT_OPTIONAL 2529 kmp_team_t *team; 2530 ompt_data_t my_task_data; 2531 ompt_data_t my_parallel_data; 2532 void *codeptr = nullptr; 2533 if (UNLIKELY(ompt_enabled.enabled)) { 2534 team = thread->th.th_team; 2535 my_task_data = taskdata->ompt_task_info.task_data; 2536 // FIXME: I think this is wrong for lwt! 2537 my_parallel_data = team->t.ompt_team_info.parallel_data; 2538 codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid); 2539 if (!codeptr) 2540 codeptr = OMPT_GET_RETURN_ADDRESS(0); 2541 } 2542 #endif 2543 2544 KA_TRACE(10, ("__kmpc_end_taskgroup(enter): T#%d loc=%p\n", gtid, loc)); 2545 KMP_DEBUG_ASSERT(taskgroup != NULL); 2546 KMP_SET_THREAD_STATE_BLOCK(TASKGROUP); 2547 2548 if (__kmp_tasking_mode != tskm_immediate_exec) { 2549 // mark task as waiting not on a barrier 2550 taskdata->td_taskwait_counter += 1; 2551 taskdata->td_taskwait_ident = loc; 2552 taskdata->td_taskwait_thread = gtid + 1; 2553 #if USE_ITT_BUILD 2554 // For ITT the taskgroup wait is similar to taskwait until we need to 2555 // distinguish them 2556 void *itt_sync_obj = NULL; 2557 #if USE_ITT_NOTIFY 2558 KMP_ITT_TASKWAIT_STARTING(itt_sync_obj); 2559 #endif /* USE_ITT_NOTIFY */ 2560 #endif /* USE_ITT_BUILD */ 2561 2562 #if OMPT_SUPPORT && OMPT_OPTIONAL 2563 if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) { 2564 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)( 2565 ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data), 2566 &(my_task_data), codeptr); 2567 } 2568 #endif 2569 2570 if (!taskdata->td_flags.team_serial || 2571 (thread->th.th_task_team != NULL && 2572 thread->th.th_task_team->tt.tt_found_proxy_tasks)) { 2573 kmp_flag_32<false, false> flag( 2574 RCAST(std::atomic<kmp_uint32> *, &(taskgroup->count)), 0U); 2575 while (KMP_ATOMIC_LD_ACQ(&taskgroup->count) != 0) { 2576 flag.execute_tasks(thread, gtid, FALSE, 2577 &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), 2578 __kmp_task_stealing_constraint); 2579 } 2580 } 2581 taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; // end waiting 2582 2583 #if OMPT_SUPPORT && OMPT_OPTIONAL 2584 if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) { 2585 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)( 2586 ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data), 2587 &(my_task_data), codeptr); 2588 } 2589 #endif 2590 2591 #if USE_ITT_BUILD 2592 KMP_ITT_TASKWAIT_FINISHED(itt_sync_obj); 2593 KMP_FSYNC_ACQUIRED(taskdata); // acquire self - sync with descendants 2594 #endif /* USE_ITT_BUILD */ 2595 } 2596 KMP_DEBUG_ASSERT(taskgroup->count == 0); 2597 2598 if (taskgroup->reduce_data != NULL) { // need to reduce? 
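    // Added summary (derived from the three branches below): reduce_data may
    // belong to a plain taskgroup, or be the team-wide copy installed by
    // __kmpc_task_reduction_modifier_init for a parallel (slot 0) or
    // worksharing (slot 1) modifier. The cases are told apart by comparing
    // the first item's private block against the team copies, and only the
    // last thread to finish frees the shared copy and resets the counters.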
2599 int cnt; 2600 void *reduce_data; 2601 kmp_team_t *t = thread->th.th_team; 2602 kmp_taskred_data_t *arr = (kmp_taskred_data_t *)taskgroup->reduce_data; 2603 // check if <priv> data of the first reduction variable shared for the team 2604 void *priv0 = arr[0].reduce_priv; 2605 if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[0])) != NULL && 2606 ((kmp_taskred_data_t *)reduce_data)[0].reduce_priv == priv0) { 2607 // finishing task reduction on parallel 2608 cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[0]); 2609 if (cnt == thread->th.th_team_nproc - 1) { 2610 // we are the last thread passing __kmpc_reduction_modifier_fini() 2611 // finalize task reduction: 2612 __kmp_task_reduction_fini(thread, taskgroup); 2613 // cleanup fields in the team structure: 2614 // TODO: is relaxed store enough here (whole barrier should follow)? 2615 __kmp_thread_free(thread, reduce_data); 2616 KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[0], NULL); 2617 KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[0], 0); 2618 } else { 2619 // we are not the last thread passing __kmpc_reduction_modifier_fini(), 2620 // so do not finalize reduction, just clean own copy of the data 2621 __kmp_task_reduction_clean(thread, taskgroup); 2622 } 2623 } else if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[1])) != 2624 NULL && 2625 ((kmp_taskred_data_t *)reduce_data)[0].reduce_priv == priv0) { 2626 // finishing task reduction on worksharing 2627 cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[1]); 2628 if (cnt == thread->th.th_team_nproc - 1) { 2629 // we are the last thread passing __kmpc_reduction_modifier_fini() 2630 __kmp_task_reduction_fini(thread, taskgroup); 2631 // cleanup fields in team structure: 2632 // TODO: is relaxed store enough here (whole barrier should follow)? 2633 __kmp_thread_free(thread, reduce_data); 2634 KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[1], NULL); 2635 KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[1], 0); 2636 } else { 2637 // we are not the last thread passing __kmpc_reduction_modifier_fini(), 2638 // so do not finalize reduction, just clean own copy of the data 2639 __kmp_task_reduction_clean(thread, taskgroup); 2640 } 2641 } else { 2642 // finishing task reduction on taskgroup 2643 __kmp_task_reduction_fini(thread, taskgroup); 2644 } 2645 } 2646 // Restore parent taskgroup for the current task 2647 taskdata->td_taskgroup = taskgroup->parent; 2648 __kmp_thread_free(thread, taskgroup); 2649 2650 KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n", 2651 gtid, taskdata)); 2652 ANNOTATE_HAPPENS_AFTER(taskdata); 2653 2654 #if OMPT_SUPPORT && OMPT_OPTIONAL 2655 if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) { 2656 ompt_callbacks.ompt_callback(ompt_callback_sync_region)( 2657 ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data), 2658 &(my_task_data), codeptr); 2659 } 2660 #endif 2661 } 2662 2663 // __kmp_remove_my_task: remove a task from my own deque 2664 static kmp_task_t *__kmp_remove_my_task(kmp_info_t *thread, kmp_int32 gtid, 2665 kmp_task_team_t *task_team, 2666 kmp_int32 is_constrained) { 2667 kmp_task_t *task; 2668 kmp_taskdata_t *taskdata; 2669 kmp_thread_data_t *thread_data; 2670 kmp_uint32 tail; 2671 2672 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec); 2673 KMP_DEBUG_ASSERT(task_team->tt.tt_threads_data != 2674 NULL); // Caller should check this condition 2675 2676 thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)]; 2677 2678 KA_TRACE(10, ("__kmp_remove_my_task(enter): T#%d ntasks=%d head=%u tail=%u\n", 2679 gtid, 
thread_data->td.td_deque_ntasks, 2680 thread_data->td.td_deque_head, thread_data->td.td_deque_tail)); 2681 2682 if (TCR_4(thread_data->td.td_deque_ntasks) == 0) { 2683 KA_TRACE(10, 2684 ("__kmp_remove_my_task(exit #1): T#%d No tasks to remove: " 2685 "ntasks=%d head=%u tail=%u\n", 2686 gtid, thread_data->td.td_deque_ntasks, 2687 thread_data->td.td_deque_head, thread_data->td.td_deque_tail)); 2688 return NULL; 2689 } 2690 2691 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock); 2692 2693 if (TCR_4(thread_data->td.td_deque_ntasks) == 0) { 2694 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock); 2695 KA_TRACE(10, 2696 ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: " 2697 "ntasks=%d head=%u tail=%u\n", 2698 gtid, thread_data->td.td_deque_ntasks, 2699 thread_data->td.td_deque_head, thread_data->td.td_deque_tail)); 2700 return NULL; 2701 } 2702 2703 tail = (thread_data->td.td_deque_tail - 1) & 2704 TASK_DEQUE_MASK(thread_data->td); // Wrap index. 2705 taskdata = thread_data->td.td_deque[tail]; 2706 2707 if (!__kmp_task_is_allowed(gtid, is_constrained, taskdata, 2708 thread->th.th_current_task)) { 2709 // The TSC does not allow to steal victim task 2710 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock); 2711 KA_TRACE(10, 2712 ("__kmp_remove_my_task(exit #3): T#%d TSC blocks tail task: " 2713 "ntasks=%d head=%u tail=%u\n", 2714 gtid, thread_data->td.td_deque_ntasks, 2715 thread_data->td.td_deque_head, thread_data->td.td_deque_tail)); 2716 return NULL; 2717 } 2718 2719 thread_data->td.td_deque_tail = tail; 2720 TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1); 2721 2722 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock); 2723 2724 KA_TRACE(10, ("__kmp_remove_my_task(exit #4): T#%d task %p removed: " 2725 "ntasks=%d head=%u tail=%u\n", 2726 gtid, taskdata, thread_data->td.td_deque_ntasks, 2727 thread_data->td.td_deque_head, thread_data->td.td_deque_tail)); 2728 2729 task = KMP_TASKDATA_TO_TASK(taskdata); 2730 return task; 2731 } 2732 2733 // __kmp_steal_task: remove a task from another thread's deque 2734 // Assume that calling thread has already checked existence of 2735 // task_team thread_data before calling this routine. 
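// Added note (summarizing the deque discipline used in this file): owners pop
// work from the tail of their own deque (LIFO, __kmp_remove_my_task above)
// while thieves take from the victim's head (FIFO, this routine), both under
// the deque's bootstrap lock. Indices wrap via TASK_DEQUE_MASK; e.g. with a
// 256-entry deque a head of 255 advances to (255 + 1) & 255 == 0.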
2736 static kmp_task_t *__kmp_steal_task(kmp_info_t *victim_thr, kmp_int32 gtid, 2737 kmp_task_team_t *task_team, 2738 std::atomic<kmp_int32> *unfinished_threads, 2739 int *thread_finished, 2740 kmp_int32 is_constrained) { 2741 kmp_task_t *task; 2742 kmp_taskdata_t *taskdata; 2743 kmp_taskdata_t *current; 2744 kmp_thread_data_t *victim_td, *threads_data; 2745 kmp_int32 target; 2746 kmp_int32 victim_tid; 2747 2748 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec); 2749 2750 threads_data = task_team->tt.tt_threads_data; 2751 KMP_DEBUG_ASSERT(threads_data != NULL); // Caller should check this condition 2752 2753 victim_tid = victim_thr->th.th_info.ds.ds_tid; 2754 victim_td = &threads_data[victim_tid]; 2755 2756 KA_TRACE(10, ("__kmp_steal_task(enter): T#%d try to steal from T#%d: " 2757 "task_team=%p ntasks=%d head=%u tail=%u\n", 2758 gtid, __kmp_gtid_from_thread(victim_thr), task_team, 2759 victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head, 2760 victim_td->td.td_deque_tail)); 2761 2762 if (TCR_4(victim_td->td.td_deque_ntasks) == 0) { 2763 KA_TRACE(10, ("__kmp_steal_task(exit #1): T#%d could not steal from T#%d: " 2764 "task_team=%p ntasks=%d head=%u tail=%u\n", 2765 gtid, __kmp_gtid_from_thread(victim_thr), task_team, 2766 victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head, 2767 victim_td->td.td_deque_tail)); 2768 return NULL; 2769 } 2770 2771 __kmp_acquire_bootstrap_lock(&victim_td->td.td_deque_lock); 2772 2773 int ntasks = TCR_4(victim_td->td.td_deque_ntasks); 2774 // Check again after we acquire the lock 2775 if (ntasks == 0) { 2776 __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock); 2777 KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: " 2778 "task_team=%p ntasks=%d head=%u tail=%u\n", 2779 gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks, 2780 victim_td->td.td_deque_head, victim_td->td.td_deque_tail)); 2781 return NULL; 2782 } 2783 2784 KMP_DEBUG_ASSERT(victim_td->td.td_deque != NULL); 2785 current = __kmp_threads[gtid]->th.th_current_task; 2786 taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head]; 2787 if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) { 2788 // Bump head pointer and Wrap. 
2789 victim_td->td.td_deque_head = 2790 (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td); 2791 } else { 2792 if (!task_team->tt.tt_untied_task_encountered) { 2793 // The TSC does not allow to steal victim task 2794 __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock); 2795 KA_TRACE(10, ("__kmp_steal_task(exit #3): T#%d could not steal from " 2796 "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n", 2797 gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks, 2798 victim_td->td.td_deque_head, victim_td->td.td_deque_tail)); 2799 return NULL; 2800 } 2801 int i; 2802 // walk through victim's deque trying to steal any task 2803 target = victim_td->td.td_deque_head; 2804 taskdata = NULL; 2805 for (i = 1; i < ntasks; ++i) { 2806 target = (target + 1) & TASK_DEQUE_MASK(victim_td->td); 2807 taskdata = victim_td->td.td_deque[target]; 2808 if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) { 2809 break; // found victim task 2810 } else { 2811 taskdata = NULL; 2812 } 2813 } 2814 if (taskdata == NULL) { 2815 // No appropriate candidate to steal found 2816 __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock); 2817 KA_TRACE(10, ("__kmp_steal_task(exit #4): T#%d could not steal from " 2818 "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n", 2819 gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks, 2820 victim_td->td.td_deque_head, victim_td->td.td_deque_tail)); 2821 return NULL; 2822 } 2823 int prev = target; 2824 for (i = i + 1; i < ntasks; ++i) { 2825 // shift remaining tasks in the deque left by 1 2826 target = (target + 1) & TASK_DEQUE_MASK(victim_td->td); 2827 victim_td->td.td_deque[prev] = victim_td->td.td_deque[target]; 2828 prev = target; 2829 } 2830 KMP_DEBUG_ASSERT( 2831 victim_td->td.td_deque_tail == 2832 (kmp_uint32)((target + 1) & TASK_DEQUE_MASK(victim_td->td))); 2833 victim_td->td.td_deque_tail = target; // tail -= 1 (wrapped)) 2834 } 2835 if (*thread_finished) { 2836 // We need to un-mark this victim as a finished victim. This must be done 2837 // before releasing the lock, or else other threads (starting with the 2838 // primary thread victim) might be prematurely released from the barrier!!! 2839 kmp_int32 count; 2840 2841 count = KMP_ATOMIC_INC(unfinished_threads); 2842 2843 KA_TRACE( 2844 20, 2845 ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n", 2846 gtid, count + 1, task_team)); 2847 2848 *thread_finished = FALSE; 2849 } 2850 TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1); 2851 2852 __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock); 2853 2854 KMP_COUNT_BLOCK(TASK_stolen); 2855 KA_TRACE(10, 2856 ("__kmp_steal_task(exit #5): T#%d stole task %p from T#%d: " 2857 "task_team=%p ntasks=%d head=%u tail=%u\n", 2858 gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team, 2859 ntasks, victim_td->td.td_deque_head, victim_td->td.td_deque_tail)); 2860 2861 task = KMP_TASKDATA_TO_TASK(taskdata); 2862 return task; 2863 } 2864 2865 // __kmp_execute_tasks_template: Choose and execute tasks until either the 2866 // condition is statisfied (return true) or there are none left (return false). 2867 // 2868 // final_spin is TRUE if this is the spin at the release barrier. 2869 // thread_finished indicates whether the thread is finished executing all 2870 // the tasks it has on its deque, and is at the release barrier. 2871 // spinner is the location on which to spin. 2872 // spinner == NULL means only execute a single task and return. 2873 // checker is the value to check to terminate the spin. 
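// Hedged usage sketch (mirrors the taskwait path earlier in this file): a
// waiting thread wraps the counter it needs to see reach zero in a flag and
// drives this loop through it, e.g.
//   kmp_flag_32<false, false> flag(
//       RCAST(std::atomic<kmp_uint32> *,
//             &(taskdata->td_incomplete_child_tasks)), 0U);
//   flag.execute_tasks(thread, gtid, FALSE,
//                      &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
//                      __kmp_task_stealing_constraint);
// which ends up in the template below with final_spin == FALSE.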
2874 template <class C> 2875 static inline int __kmp_execute_tasks_template( 2876 kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin, 2877 int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj), 2878 kmp_int32 is_constrained) { 2879 kmp_task_team_t *task_team = thread->th.th_task_team; 2880 kmp_thread_data_t *threads_data; 2881 kmp_task_t *task; 2882 kmp_info_t *other_thread; 2883 kmp_taskdata_t *current_task = thread->th.th_current_task; 2884 std::atomic<kmp_int32> *unfinished_threads; 2885 kmp_int32 nthreads, victim_tid = -2, use_own_tasks = 1, new_victim = 0, 2886 tid = thread->th.th_info.ds.ds_tid; 2887 2888 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec); 2889 KMP_DEBUG_ASSERT(thread == __kmp_threads[gtid]); 2890 2891 if (task_team == NULL || current_task == NULL) 2892 return FALSE; 2893 2894 KA_TRACE(15, ("__kmp_execute_tasks_template(enter): T#%d final_spin=%d " 2895 "*thread_finished=%d\n", 2896 gtid, final_spin, *thread_finished)); 2897 2898 thread->th.th_reap_state = KMP_NOT_SAFE_TO_REAP; 2899 threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data); 2900 2901 KMP_DEBUG_ASSERT(threads_data != NULL); 2902 2903 nthreads = task_team->tt.tt_nproc; 2904 unfinished_threads = &(task_team->tt.tt_unfinished_threads); 2905 KMP_DEBUG_ASSERT(nthreads > 1 || task_team->tt.tt_found_proxy_tasks || 2906 task_team->tt.tt_hidden_helper_task_encountered); 2907 KMP_DEBUG_ASSERT(*unfinished_threads >= 0); 2908 2909 while (1) { // Outer loop keeps trying to find tasks in case of single thread 2910 // getting tasks from target constructs 2911 while (1) { // Inner loop to find a task and execute it 2912 task = NULL; 2913 if (use_own_tasks) { // check on own queue first 2914 task = __kmp_remove_my_task(thread, gtid, task_team, is_constrained); 2915 } 2916 if ((task == NULL) && (nthreads > 1)) { // Steal a task 2917 int asleep = 1; 2918 use_own_tasks = 0; 2919 // Try to steal from the last place I stole from successfully. 2920 if (victim_tid == -2) { // haven't stolen anything yet 2921 victim_tid = threads_data[tid].td.td_deque_last_stolen; 2922 if (victim_tid != 2923 -1) // if we have a last stolen from victim, get the thread 2924 other_thread = threads_data[victim_tid].td.td_thr; 2925 } 2926 if (victim_tid != -1) { // found last victim 2927 asleep = 0; 2928 } else if (!new_victim) { // no recent steals and we haven't already 2929 // used a new victim; select a random thread 2930 do { // Find a different thread to steal work from. 2931 // Pick a random thread. Initial plan was to cycle through all the 2932 // threads, and only return if we tried to steal from every thread, 2933 // and failed. Arch says that's not such a great idea. 2934 victim_tid = __kmp_get_random(thread) % (nthreads - 1); 2935 if (victim_tid >= tid) { 2936 ++victim_tid; // Adjusts random distribution to exclude self 2937 } 2938 // Found a potential victim 2939 other_thread = threads_data[victim_tid].td.td_thr; 2940 // There is a slight chance that __kmp_enable_tasking() did not wake 2941 // up all threads waiting at the barrier. If victim is sleeping, 2942 // then wake it up. Since we were going to pay the cache miss 2943 // penalty for referencing another thread's kmp_info_t struct 2944 // anyway, 2945 // the check shouldn't cost too much performance at this point. In 2946 // extra barrier mode, tasks do not sleep at the separate tasking 2947 // barrier, so this isn't a problem. 
2948 asleep = 0;
2949 if ((__kmp_tasking_mode == tskm_task_teams) &&
2950 (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) &&
2951 (TCR_PTR(CCAST(void *, other_thread->th.th_sleep_loc)) !=
2952 NULL)) {
2953 asleep = 1;
2954 __kmp_null_resume_wrapper(__kmp_gtid_from_thread(other_thread),
2955 other_thread->th.th_sleep_loc);
2956 // A sleeping thread should not have any tasks on its queue.
2957 // There is a slight possibility that it resumes, steals a task
2958 // from another thread, which spawns more tasks, all in the time
2959 // that it takes this thread to check => don't write an assertion
2960 // that the victim's queue is empty. Try stealing from a
2961 // different thread.
2962 }
2963 } while (asleep);
2964 }
2965
2966 if (!asleep) {
2967 // We have a victim to try to steal from
2968 task = __kmp_steal_task(other_thread, gtid, task_team,
2969 unfinished_threads, thread_finished,
2970 is_constrained);
2971 }
2972 if (task != NULL) { // set last stolen to victim
2973 if (threads_data[tid].td.td_deque_last_stolen != victim_tid) {
2974 threads_data[tid].td.td_deque_last_stolen = victim_tid;
2975 // The pre-refactored code did not try more than 1 successful new
2976 // victim, unless the last one generated more local tasks;
2977 // new_victim keeps track of this
2978 new_victim = 1;
2979 }
2980 } else { // No tasks found; unset last_stolen
2981 KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
2982 victim_tid = -2; // no successful victim found
2983 }
2984 }
2985
2986 if (task == NULL)
2987 break; // break out of tasking loop
2988
2989 // Found a task; execute it
2990 #if USE_ITT_BUILD && USE_ITT_NOTIFY
2991 if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
2992 if (itt_sync_obj == NULL) { // we are at fork barrier where we could not
2993 // get the object reliably
2994 itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
2995 }
2996 __kmp_itt_task_starting(itt_sync_obj);
2997 }
2998 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
2999 __kmp_invoke_task(gtid, task, current_task);
3000 #if USE_ITT_BUILD
3001 if (itt_sync_obj != NULL)
3002 __kmp_itt_task_finished(itt_sync_obj);
3003 #endif /* USE_ITT_BUILD */
3004 // If this thread is only partway through the barrier and the condition is
3005 // met, then return now, so that the barrier gather/release pattern can
3006 // proceed. If this thread is in the last spin loop in the barrier,
3007 // waiting to be released, we know that the termination condition will not
3008 // be satisfied, so don't waste any cycles checking it.
3009 if (flag == NULL || (!final_spin && flag->done_check())) {
3010 KA_TRACE(
3011 15,
3012 ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
3013 gtid));
3014 return TRUE;
3015 }
3016 if (thread->th.th_task_team == NULL) {
3017 break;
3018 }
3019 KMP_YIELD(__kmp_library == library_throughput); // Yield before next task
3020 // If execution of a stolen task results in more tasks being placed on our
3021 // run queue, reset use_own_tasks
3022 if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
3023 KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d stolen task spawned "
3024 "other tasks, restart\n",
3025 gtid));
3026 use_own_tasks = 1;
3027 new_victim = 0;
3028 }
3029 }
3030
3031 // The task source has been exhausted. If in final spin loop of barrier,
3032 // check if termination condition is satisfied. The work queue may be empty
3033 // but there might be proxy tasks still executing.
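// (For example, asynchronous offload regions such as "target nowait" are
// typically implemented as proxy tasks whose completion is reported later via
// __kmpc_proxy_task_completed*() below, so td_incomplete_child_tasks can stay
// non-zero even though every deque is already empty.)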
3034 if (final_spin &&
3035 KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks) == 0) {
3036 // First, decrement the #unfinished threads, if that has not already been
3037 // done. This decrement might be to the spin location, and result in the
3038 // termination condition being satisfied.
3039 if (!*thread_finished) {
3040 kmp_int32 count;
3041
3042 count = KMP_ATOMIC_DEC(unfinished_threads) - 1;
3043 KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec "
3044 "unfinished_threads to %d task_team=%p\n",
3045 gtid, count, task_team));
3046 *thread_finished = TRUE;
3047 }
3048
3049 // It is now unsafe to reference thread->th.th_team !!!
3050 // Decrementing task_team->tt.tt_unfinished_threads can allow the primary
3051 // thread to pass through the barrier, where it might reset each thread's
3052 // th.th_team field for the next parallel region. If we can steal more
3053 // work, we know that this has not happened yet.
3054 if (flag != NULL && flag->done_check()) {
3055 KA_TRACE(
3056 15,
3057 ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
3058 gtid));
3059 return TRUE;
3060 }
3061 }
3062
3063 // If this thread's task team is NULL, primary thread has recognized that
3064 // there are no more tasks; bail out
3065 if (thread->th.th_task_team == NULL) {
3066 KA_TRACE(15,
3067 ("__kmp_execute_tasks_template: T#%d no more tasks\n", gtid));
3068 return FALSE;
3069 }
3070
3071 // We could be getting tasks from target constructs; if this is the only
3072 // thread, keep trying to execute tasks from own queue
3073 if (nthreads == 1 &&
3074 KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks))
3075 use_own_tasks = 1;
3076 else {
3077 KA_TRACE(15,
3078 ("__kmp_execute_tasks_template: T#%d can't find work\n", gtid));
3079 return FALSE;
3080 }
3081 }
3082 }
3083
3084 template <bool C, bool S>
3085 int __kmp_execute_tasks_32(
3086 kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32<C, S> *flag, int final_spin,
3087 int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3088 kmp_int32 is_constrained) {
3089 return __kmp_execute_tasks_template(
3090 thread, gtid, flag, final_spin,
3091 thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3092 }
3093
3094 template <bool C, bool S>
3095 int __kmp_execute_tasks_64(
3096 kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64<C, S> *flag, int final_spin,
3097 int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3098 kmp_int32 is_constrained) {
3099 return __kmp_execute_tasks_template(
3100 thread, gtid, flag, final_spin,
3101 thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3102 }
3103
3104 int __kmp_execute_tasks_oncore(
3105 kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin,
3106 int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3107 kmp_int32 is_constrained) {
3108 return __kmp_execute_tasks_template(
3109 thread, gtid, flag, final_spin,
3110 thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3111 }
3112
3113 template int
3114 __kmp_execute_tasks_32<false, false>(kmp_info_t *, kmp_int32,
3115 kmp_flag_32<false, false> *, int,
3116 int *USE_ITT_BUILD_ARG(void *), kmp_int32);
3117
3118 template int __kmp_execute_tasks_64<false, true>(kmp_info_t *, kmp_int32,
3119 kmp_flag_64<false, true> *,
3120 int,
3121 int *USE_ITT_BUILD_ARG(void *),
3122 kmp_int32);
3123
3124 template int __kmp_execute_tasks_64<true, false>(kmp_info_t *, kmp_int32,
3125 kmp_flag_64<true, false> *,
3126 int,
3127 int *USE_ITT_BUILD_ARG(void *),
3128 kmp_int32);
3129
3130 //
__kmp_enable_tasking: Allocate task team and resume threads sleeping at the 3131 // next barrier so they can assist in executing enqueued tasks. 3132 // First thread in allocates the task team atomically. 3133 static void __kmp_enable_tasking(kmp_task_team_t *task_team, 3134 kmp_info_t *this_thr) { 3135 kmp_thread_data_t *threads_data; 3136 int nthreads, i, is_init_thread; 3137 3138 KA_TRACE(10, ("__kmp_enable_tasking(enter): T#%d\n", 3139 __kmp_gtid_from_thread(this_thr))); 3140 3141 KMP_DEBUG_ASSERT(task_team != NULL); 3142 KMP_DEBUG_ASSERT(this_thr->th.th_team != NULL); 3143 3144 nthreads = task_team->tt.tt_nproc; 3145 KMP_DEBUG_ASSERT(nthreads > 0); 3146 KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc); 3147 3148 // Allocate or increase the size of threads_data if necessary 3149 is_init_thread = __kmp_realloc_task_threads_data(this_thr, task_team); 3150 3151 if (!is_init_thread) { 3152 // Some other thread already set up the array. 3153 KA_TRACE( 3154 20, 3155 ("__kmp_enable_tasking(exit): T#%d: threads array already set up.\n", 3156 __kmp_gtid_from_thread(this_thr))); 3157 return; 3158 } 3159 threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data); 3160 KMP_DEBUG_ASSERT(threads_data != NULL); 3161 3162 if (__kmp_tasking_mode == tskm_task_teams && 3163 (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME)) { 3164 // Release any threads sleeping at the barrier, so that they can steal 3165 // tasks and execute them. In extra barrier mode, tasks do not sleep 3166 // at the separate tasking barrier, so this isn't a problem. 3167 for (i = 0; i < nthreads; i++) { 3168 volatile void *sleep_loc; 3169 kmp_info_t *thread = threads_data[i].td.td_thr; 3170 3171 if (i == this_thr->th.th_info.ds.ds_tid) { 3172 continue; 3173 } 3174 // Since we haven't locked the thread's suspend mutex lock at this 3175 // point, there is a small window where a thread might be putting 3176 // itself to sleep, but hasn't set the th_sleep_loc field yet. 3177 // To work around this, __kmp_execute_tasks_template() periodically checks 3178 // see if other threads are sleeping (using the same random mechanism that 3179 // is used for task stealing) and awakens them if they are. 3180 if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) != 3181 NULL) { 3182 KF_TRACE(50, ("__kmp_enable_tasking: T#%d waking up thread T#%d\n", 3183 __kmp_gtid_from_thread(this_thr), 3184 __kmp_gtid_from_thread(thread))); 3185 __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc); 3186 } else { 3187 KF_TRACE(50, ("__kmp_enable_tasking: T#%d don't wake up thread T#%d\n", 3188 __kmp_gtid_from_thread(this_thr), 3189 __kmp_gtid_from_thread(thread))); 3190 } 3191 } 3192 } 3193 3194 KA_TRACE(10, ("__kmp_enable_tasking(exit): T#%d\n", 3195 __kmp_gtid_from_thread(this_thr))); 3196 } 3197 3198 /* // TODO: Check the comment consistency 3199 * Utility routines for "task teams". A task team (kmp_task_t) is kind of 3200 * like a shadow of the kmp_team_t data struct, with a different lifetime. 3201 * After a child * thread checks into a barrier and calls __kmp_release() from 3202 * the particular variant of __kmp_<barrier_kind>_barrier_gather(), it can no 3203 * longer assume that the kmp_team_t structure is intact (at any moment, the 3204 * primary thread may exit the barrier code and free the team data structure, 3205 * and return the threads to the thread pool). 
3206 *
3207 * This does not work with the tasking code, as the thread is still
3208 * expected to participate in the execution of any tasks that may have been
3209 * spawned by a member of the team, and the thread still needs access to
3210 * each thread in the team, so that it can steal work from them.
3211 *
3212 * Enter the existence of the kmp_task_team_t struct. It employs a reference
3213 * counting mechanism, and is allocated by the primary thread before calling
3214 * __kmp_<barrier_kind>_release, and then is released by the last thread to
3215 * exit __kmp_<barrier_kind>_release at the next barrier. I.e. the lifetimes
3216 * of the kmp_task_team_t structs for consecutive barriers can overlap
3217 * (and will, unless the primary thread is the last thread to exit the barrier
3218 * release phase, which is not typical). The existence of such a struct is
3219 * useful outside the context of tasking.
3220 *
3221 * We currently use the existence of the threads array as an indicator that
3222 * tasks were spawned since the last barrier. If the structure is to be
3223 * useful outside the context of tasking, then this will have to change, but
3224 * not setting the field minimizes the performance impact of tasking on
3225 * barriers, when no explicit tasks were spawned (pushed, actually).
3226 */
3227
3228 static kmp_task_team_t *__kmp_free_task_teams =
3229 NULL; // Free list for task_team data structures
3230 // Lock for task team data structures
3231 kmp_bootstrap_lock_t __kmp_task_team_lock =
3232 KMP_BOOTSTRAP_LOCK_INITIALIZER(__kmp_task_team_lock);
3233
3234 // __kmp_alloc_task_deque:
3235 // Allocates a task deque for a particular thread, and initializes the
3236 // necessary data structures relating to the deque. This only happens once
3237 // per thread per task team since task teams are recycled. No lock is needed
3238 // during allocation since each thread allocates its own deque.
3239 static void __kmp_alloc_task_deque(kmp_info_t *thread,
3240 kmp_thread_data_t *thread_data) {
3241 __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
3242 KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
3243
3244 // Initialize last stolen task field to "none"
3245 thread_data->td.td_deque_last_stolen = -1;
3246
3247 KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
3248 KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
3249 KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
3250
3251 KE_TRACE(
3252 10,
3253 ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
3254 __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
3255 // Allocate space for task deque, and zero the deque
3256 // Cannot use __kmp_thread_calloc() because threads not around for
3257 // kmp_reap_task_team( ).
3258 thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
3259 INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
3260 thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
3261 }
3262
3263 // __kmp_free_task_deque:
3264 // Deallocates a task deque for a particular thread. Happens at library
3265 // deallocation so don't need to reset all thread data fields.
3266 static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) { 3267 if (thread_data->td.td_deque != NULL) { 3268 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock); 3269 TCW_4(thread_data->td.td_deque_ntasks, 0); 3270 __kmp_free(thread_data->td.td_deque); 3271 thread_data->td.td_deque = NULL; 3272 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock); 3273 } 3274 3275 #ifdef BUILD_TIED_TASK_STACK 3276 // GEH: Figure out what to do here for td_susp_tied_tasks 3277 if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) { 3278 __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data); 3279 } 3280 #endif // BUILD_TIED_TASK_STACK 3281 } 3282 3283 // __kmp_realloc_task_threads_data: 3284 // Allocates a threads_data array for a task team, either by allocating an 3285 // initial array or enlarging an existing array. Only the first thread to get 3286 // the lock allocs or enlarges the array and re-initializes the array elements. 3287 // That thread returns "TRUE", the rest return "FALSE". 3288 // Assumes that the new array size is given by task_team -> tt.tt_nproc. 3289 // The current size is given by task_team -> tt.tt_max_threads. 3290 static int __kmp_realloc_task_threads_data(kmp_info_t *thread, 3291 kmp_task_team_t *task_team) { 3292 kmp_thread_data_t **threads_data_p; 3293 kmp_int32 nthreads, maxthreads; 3294 int is_init_thread = FALSE; 3295 3296 if (TCR_4(task_team->tt.tt_found_tasks)) { 3297 // Already reallocated and initialized. 3298 return FALSE; 3299 } 3300 3301 threads_data_p = &task_team->tt.tt_threads_data; 3302 nthreads = task_team->tt.tt_nproc; 3303 maxthreads = task_team->tt.tt_max_threads; 3304 3305 // All threads must lock when they encounter the first task of the implicit 3306 // task region to make sure threads_data fields are (re)initialized before 3307 // used. 3308 __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock); 3309 3310 if (!TCR_4(task_team->tt.tt_found_tasks)) { 3311 // first thread to enable tasking 3312 kmp_team_t *team = thread->th.th_team; 3313 int i; 3314 3315 is_init_thread = TRUE; 3316 if (maxthreads < nthreads) { 3317 3318 if (*threads_data_p != NULL) { 3319 kmp_thread_data_t *old_data = *threads_data_p; 3320 kmp_thread_data_t *new_data = NULL; 3321 3322 KE_TRACE( 3323 10, 3324 ("__kmp_realloc_task_threads_data: T#%d reallocating " 3325 "threads data for task_team %p, new_size = %d, old_size = %d\n", 3326 __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads)); 3327 // Reallocate threads_data to have more elements than current array 3328 // Cannot use __kmp_thread_realloc() because threads not around for 3329 // kmp_reap_task_team( ). Note all new array entries are initialized 3330 // to zero by __kmp_allocate(). 
3331 new_data = (kmp_thread_data_t *)__kmp_allocate( 3332 nthreads * sizeof(kmp_thread_data_t)); 3333 // copy old data to new data 3334 KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t), 3335 (void *)old_data, maxthreads * sizeof(kmp_thread_data_t)); 3336 3337 #ifdef BUILD_TIED_TASK_STACK 3338 // GEH: Figure out if this is the right thing to do 3339 for (i = maxthreads; i < nthreads; i++) { 3340 kmp_thread_data_t *thread_data = &(*threads_data_p)[i]; 3341 __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data); 3342 } 3343 #endif // BUILD_TIED_TASK_STACK 3344 // Install the new data and free the old data 3345 (*threads_data_p) = new_data; 3346 __kmp_free(old_data); 3347 } else { 3348 KE_TRACE(10, ("__kmp_realloc_task_threads_data: T#%d allocating " 3349 "threads data for task_team %p, size = %d\n", 3350 __kmp_gtid_from_thread(thread), task_team, nthreads)); 3351 // Make the initial allocate for threads_data array, and zero entries 3352 // Cannot use __kmp_thread_calloc() because threads not around for 3353 // kmp_reap_task_team( ). 3354 ANNOTATE_IGNORE_WRITES_BEGIN(); 3355 *threads_data_p = (kmp_thread_data_t *)__kmp_allocate( 3356 nthreads * sizeof(kmp_thread_data_t)); 3357 ANNOTATE_IGNORE_WRITES_END(); 3358 #ifdef BUILD_TIED_TASK_STACK 3359 // GEH: Figure out if this is the right thing to do 3360 for (i = 0; i < nthreads; i++) { 3361 kmp_thread_data_t *thread_data = &(*threads_data_p)[i]; 3362 __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data); 3363 } 3364 #endif // BUILD_TIED_TASK_STACK 3365 } 3366 task_team->tt.tt_max_threads = nthreads; 3367 } else { 3368 // If array has (more than) enough elements, go ahead and use it 3369 KMP_DEBUG_ASSERT(*threads_data_p != NULL); 3370 } 3371 3372 // initialize threads_data pointers back to thread_info structures 3373 for (i = 0; i < nthreads; i++) { 3374 kmp_thread_data_t *thread_data = &(*threads_data_p)[i]; 3375 thread_data->td.td_thr = team->t.t_threads[i]; 3376 3377 if (thread_data->td.td_deque_last_stolen >= nthreads) { 3378 // The last stolen field survives across teams / barrier, and the number 3379 // of threads may have changed. It's possible (likely?) that a new 3380 // parallel region will exhibit the same behavior as previous region. 3381 thread_data->td.td_deque_last_stolen = -1; 3382 } 3383 } 3384 3385 KMP_MB(); 3386 TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE); 3387 } 3388 3389 __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock); 3390 return is_init_thread; 3391 } 3392 3393 // __kmp_free_task_threads_data: 3394 // Deallocates a threads_data array for a task team, including any attached 3395 // tasking deques. Only occurs at library shutdown. 3396 static void __kmp_free_task_threads_data(kmp_task_team_t *task_team) { 3397 __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock); 3398 if (task_team->tt.tt_threads_data != NULL) { 3399 int i; 3400 for (i = 0; i < task_team->tt.tt_max_threads; i++) { 3401 __kmp_free_task_deque(&task_team->tt.tt_threads_data[i]); 3402 } 3403 __kmp_free(task_team->tt.tt_threads_data); 3404 task_team->tt.tt_threads_data = NULL; 3405 } 3406 __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock); 3407 } 3408 3409 // __kmp_allocate_task_team: 3410 // Allocates a task team associated with a specific team, taking it from 3411 // the global task team free list if possible. Also initializes data 3412 // structures. 
3413 static kmp_task_team_t *__kmp_allocate_task_team(kmp_info_t *thread, 3414 kmp_team_t *team) { 3415 kmp_task_team_t *task_team = NULL; 3416 int nthreads; 3417 3418 KA_TRACE(20, ("__kmp_allocate_task_team: T#%d entering; team = %p\n", 3419 (thread ? __kmp_gtid_from_thread(thread) : -1), team)); 3420 3421 if (TCR_PTR(__kmp_free_task_teams) != NULL) { 3422 // Take a task team from the task team pool 3423 __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock); 3424 if (__kmp_free_task_teams != NULL) { 3425 task_team = __kmp_free_task_teams; 3426 TCW_PTR(__kmp_free_task_teams, task_team->tt.tt_next); 3427 task_team->tt.tt_next = NULL; 3428 } 3429 __kmp_release_bootstrap_lock(&__kmp_task_team_lock); 3430 } 3431 3432 if (task_team == NULL) { 3433 KE_TRACE(10, ("__kmp_allocate_task_team: T#%d allocating " 3434 "task team for team %p\n", 3435 __kmp_gtid_from_thread(thread), team)); 3436 // Allocate a new task team if one is not available. Cannot use 3437 // __kmp_thread_malloc because threads not around for kmp_reap_task_team. 3438 task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t)); 3439 __kmp_init_bootstrap_lock(&task_team->tt.tt_threads_lock); 3440 #if USE_ITT_BUILD && USE_ITT_NOTIFY && KMP_DEBUG 3441 // suppress race conditions detection on synchronization flags in debug mode 3442 // this helps to analyze library internals eliminating false positives 3443 __itt_suppress_mark_range( 3444 __itt_suppress_range, __itt_suppress_threading_errors, 3445 &task_team->tt.tt_found_tasks, sizeof(task_team->tt.tt_found_tasks)); 3446 __itt_suppress_mark_range(__itt_suppress_range, 3447 __itt_suppress_threading_errors, 3448 CCAST(kmp_uint32 *, &task_team->tt.tt_active), 3449 sizeof(task_team->tt.tt_active)); 3450 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY && KMP_DEBUG */ 3451 // Note: __kmp_allocate zeroes returned memory, othewise we would need: 3452 // task_team->tt.tt_threads_data = NULL; 3453 // task_team->tt.tt_max_threads = 0; 3454 // task_team->tt.tt_next = NULL; 3455 } 3456 3457 TCW_4(task_team->tt.tt_found_tasks, FALSE); 3458 TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE); 3459 task_team->tt.tt_nproc = nthreads = team->t.t_nproc; 3460 3461 KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, nthreads); 3462 TCW_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE); 3463 TCW_4(task_team->tt.tt_active, TRUE); 3464 3465 KA_TRACE(20, ("__kmp_allocate_task_team: T#%d exiting; task_team = %p " 3466 "unfinished_threads init'd to %d\n", 3467 (thread ? __kmp_gtid_from_thread(thread) : -1), task_team, 3468 KMP_ATOMIC_LD_RLX(&task_team->tt.tt_unfinished_threads))); 3469 return task_team; 3470 } 3471 3472 // __kmp_free_task_team: 3473 // Frees the task team associated with a specific thread, and adds it 3474 // to the global task team free list. 3475 void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team) { 3476 KA_TRACE(20, ("__kmp_free_task_team: T#%d task_team = %p\n", 3477 thread ? __kmp_gtid_from_thread(thread) : -1, task_team)); 3478 3479 // Put task team back on free list 3480 __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock); 3481 3482 KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL); 3483 task_team->tt.tt_next = __kmp_free_task_teams; 3484 TCW_PTR(__kmp_free_task_teams, task_team); 3485 3486 __kmp_release_bootstrap_lock(&__kmp_task_team_lock); 3487 } 3488 3489 // __kmp_reap_task_teams: 3490 // Free all the task teams on the task team free list. 3491 // Should only be done during library shutdown. 
3492 // Cannot do anything that needs a thread structure or gtid since they are 3493 // already gone. 3494 void __kmp_reap_task_teams(void) { 3495 kmp_task_team_t *task_team; 3496 3497 if (TCR_PTR(__kmp_free_task_teams) != NULL) { 3498 // Free all task_teams on the free list 3499 __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock); 3500 while ((task_team = __kmp_free_task_teams) != NULL) { 3501 __kmp_free_task_teams = task_team->tt.tt_next; 3502 task_team->tt.tt_next = NULL; 3503 3504 // Free threads_data if necessary 3505 if (task_team->tt.tt_threads_data != NULL) { 3506 __kmp_free_task_threads_data(task_team); 3507 } 3508 __kmp_free(task_team); 3509 } 3510 __kmp_release_bootstrap_lock(&__kmp_task_team_lock); 3511 } 3512 } 3513 3514 // __kmp_wait_to_unref_task_teams: 3515 // Some threads could still be in the fork barrier release code, possibly 3516 // trying to steal tasks. Wait for each thread to unreference its task team. 3517 void __kmp_wait_to_unref_task_teams(void) { 3518 kmp_info_t *thread; 3519 kmp_uint32 spins; 3520 int done; 3521 3522 KMP_INIT_YIELD(spins); 3523 3524 for (;;) { 3525 done = TRUE; 3526 3527 // TODO: GEH - this may be is wrong because some sync would be necessary 3528 // in case threads are added to the pool during the traversal. Need to 3529 // verify that lock for thread pool is held when calling this routine. 3530 for (thread = CCAST(kmp_info_t *, __kmp_thread_pool); thread != NULL; 3531 thread = thread->th.th_next_pool) { 3532 #if KMP_OS_WINDOWS 3533 DWORD exit_val; 3534 #endif 3535 if (TCR_PTR(thread->th.th_task_team) == NULL) { 3536 KA_TRACE(10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n", 3537 __kmp_gtid_from_thread(thread))); 3538 continue; 3539 } 3540 #if KMP_OS_WINDOWS 3541 // TODO: GEH - add this check for Linux* OS / OS X* as well? 3542 if (!__kmp_is_thread_alive(thread, &exit_val)) { 3543 thread->th.th_task_team = NULL; 3544 continue; 3545 } 3546 #endif 3547 3548 done = FALSE; // Because th_task_team pointer is not NULL for this thread 3549 3550 KA_TRACE(10, ("__kmp_wait_to_unref_task_team: Waiting for T#%d to " 3551 "unreference task_team\n", 3552 __kmp_gtid_from_thread(thread))); 3553 3554 if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) { 3555 volatile void *sleep_loc; 3556 // If the thread is sleeping, awaken it. 3557 if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) != 3558 NULL) { 3559 KA_TRACE( 3560 10, 3561 ("__kmp_wait_to_unref_task_team: T#%d waking up thread T#%d\n", 3562 __kmp_gtid_from_thread(thread), __kmp_gtid_from_thread(thread))); 3563 __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc); 3564 } 3565 } 3566 } 3567 if (done) { 3568 break; 3569 } 3570 3571 // If oversubscribed or have waited a bit, yield. 3572 KMP_YIELD_OVERSUB_ELSE_SPIN(spins); 3573 } 3574 } 3575 3576 // __kmp_task_team_setup: Create a task_team for the current team, but use 3577 // an already created, unused one if it already exists. 3578 void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) { 3579 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec); 3580 3581 // If this task_team hasn't been created yet, allocate it. It will be used in 3582 // the region after the next. 3583 // If it exists, it is the current task team and shouldn't be touched yet as 3584 // it may still be in use. 
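// (Sketch of the double-buffering scheme, assuming th_task_state starts at 0:
//  parallel region N uses team->t.t_task_team[0] while t_task_team[1] is set
//  up below for region N+1; region N+1 then uses t_task_team[1] while
//  t_task_team[0] is recycled or reset. A task team is therefore never
//  reinitialized while late threads might still be spinning on it from the
//  previous barrier.)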
3585 if (team->t.t_task_team[this_thr->th.th_task_state] == NULL && 3586 (always || team->t.t_nproc > 1)) { 3587 team->t.t_task_team[this_thr->th.th_task_state] = 3588 __kmp_allocate_task_team(this_thr, team); 3589 KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d created new task_team %p" 3590 " for team %d at parity=%d\n", 3591 __kmp_gtid_from_thread(this_thr), 3592 team->t.t_task_team[this_thr->th.th_task_state], team->t.t_id, 3593 this_thr->th.th_task_state)); 3594 } 3595 3596 // After threads exit the release, they will call sync, and then point to this 3597 // other task_team; make sure it is allocated and properly initialized. As 3598 // threads spin in the barrier release phase, they will continue to use the 3599 // previous task_team struct(above), until they receive the signal to stop 3600 // checking for tasks (they can't safely reference the kmp_team_t struct, 3601 // which could be reallocated by the primary thread). No task teams are formed 3602 // for serialized teams. 3603 if (team->t.t_nproc > 1) { 3604 int other_team = 1 - this_thr->th.th_task_state; 3605 KMP_DEBUG_ASSERT(other_team >= 0 && other_team < 2); 3606 if (team->t.t_task_team[other_team] == NULL) { // setup other team as well 3607 team->t.t_task_team[other_team] = 3608 __kmp_allocate_task_team(this_thr, team); 3609 KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d created second new " 3610 "task_team %p for team %d at parity=%d\n", 3611 __kmp_gtid_from_thread(this_thr), 3612 team->t.t_task_team[other_team], team->t.t_id, other_team)); 3613 } else { // Leave the old task team struct in place for the upcoming region; 3614 // adjust as needed 3615 kmp_task_team_t *task_team = team->t.t_task_team[other_team]; 3616 if (!task_team->tt.tt_active || 3617 team->t.t_nproc != task_team->tt.tt_nproc) { 3618 TCW_4(task_team->tt.tt_nproc, team->t.t_nproc); 3619 TCW_4(task_team->tt.tt_found_tasks, FALSE); 3620 TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE); 3621 KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, 3622 team->t.t_nproc); 3623 TCW_4(task_team->tt.tt_active, TRUE); 3624 } 3625 // if team size has changed, the first thread to enable tasking will 3626 // realloc threads_data if necessary 3627 KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d reset next task_team " 3628 "%p for team %d at parity=%d\n", 3629 __kmp_gtid_from_thread(this_thr), 3630 team->t.t_task_team[other_team], team->t.t_id, other_team)); 3631 } 3632 } 3633 3634 // For regular thread, task enabling should be called when the task is going 3635 // to be pushed to a dequeue. However, for the hidden helper thread, we need 3636 // it ahead of time so that some operations can be performed without race 3637 // condition. 3638 if (this_thr == __kmp_hidden_helper_main_thread) { 3639 for (int i = 0; i < 2; ++i) { 3640 kmp_task_team_t *task_team = team->t.t_task_team[i]; 3641 if (KMP_TASKING_ENABLED(task_team)) { 3642 continue; 3643 } 3644 __kmp_enable_tasking(task_team, this_thr); 3645 for (int j = 0; j < task_team->tt.tt_nproc; ++j) { 3646 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[j]; 3647 if (thread_data->td.td_deque == NULL) { 3648 __kmp_alloc_task_deque(__kmp_hidden_helper_threads[j], thread_data); 3649 } 3650 } 3651 } 3652 } 3653 } 3654 3655 // __kmp_task_team_sync: Propagation of task team data from team to threads 3656 // which happens just after the release phase of a team barrier. This may be 3657 // called by any thread, but only for teams with # threads > 1. 
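//
// For example, a worker whose th_task_state was 0 for the current region
// switches here to team->t.t_task_team[1] for the next region, and back to
// t_task_team[0] after the following barrier; roughly speaking, the pointer
// it stops using is the one the primary thread deactivates in
// __kmp_task_team_wait().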
3658 void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team) { 3659 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec); 3660 3661 // Toggle the th_task_state field, to switch which task_team this thread 3662 // refers to 3663 this_thr->th.th_task_state = (kmp_uint8)(1 - this_thr->th.th_task_state); 3664 3665 // It is now safe to propagate the task team pointer from the team struct to 3666 // the current thread. 3667 TCW_PTR(this_thr->th.th_task_team, 3668 team->t.t_task_team[this_thr->th.th_task_state]); 3669 KA_TRACE(20, 3670 ("__kmp_task_team_sync: Thread T#%d task team switched to task_team " 3671 "%p from Team #%d (parity=%d)\n", 3672 __kmp_gtid_from_thread(this_thr), this_thr->th.th_task_team, 3673 team->t.t_id, this_thr->th.th_task_state)); 3674 } 3675 3676 // __kmp_task_team_wait: Primary thread waits for outstanding tasks after the 3677 // barrier gather phase. Only called by primary thread if #threads in team > 1 3678 // or if proxy tasks were created. 3679 // 3680 // wait is a flag that defaults to 1 (see kmp.h), but waiting can be turned off 3681 // by passing in 0 optionally as the last argument. When wait is zero, primary 3682 // thread does not wait for unfinished_threads to reach 0. 3683 void __kmp_task_team_wait( 3684 kmp_info_t *this_thr, 3685 kmp_team_t *team USE_ITT_BUILD_ARG(void *itt_sync_obj), int wait) { 3686 kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state]; 3687 3688 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec); 3689 KMP_DEBUG_ASSERT(task_team == this_thr->th.th_task_team); 3690 3691 if ((task_team != NULL) && KMP_TASKING_ENABLED(task_team)) { 3692 if (wait) { 3693 KA_TRACE(20, ("__kmp_task_team_wait: Primary T#%d waiting for all tasks " 3694 "(for unfinished_threads to reach 0) on task_team = %p\n", 3695 __kmp_gtid_from_thread(this_thr), task_team)); 3696 // Worker threads may have dropped through to release phase, but could 3697 // still be executing tasks. Wait here for tasks to complete. To avoid 3698 // memory contention, only primary thread checks termination condition. 3699 kmp_flag_32<false, false> flag( 3700 RCAST(std::atomic<kmp_uint32> *, 3701 &task_team->tt.tt_unfinished_threads), 3702 0U); 3703 flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj)); 3704 } 3705 // Deactivate the old task team, so that the worker threads will stop 3706 // referencing it while spinning. 3707 KA_TRACE( 3708 20, 3709 ("__kmp_task_team_wait: Primary T#%d deactivating task_team %p: " 3710 "setting active to false, setting local and team's pointer to NULL\n", 3711 __kmp_gtid_from_thread(this_thr), task_team)); 3712 KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1 || 3713 task_team->tt.tt_found_proxy_tasks == TRUE); 3714 TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE); 3715 KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0); 3716 TCW_SYNC_4(task_team->tt.tt_active, FALSE); 3717 KMP_MB(); 3718 3719 TCW_PTR(this_thr->th.th_task_team, NULL); 3720 } 3721 } 3722 3723 // __kmp_tasking_barrier: 3724 // This routine is called only when __kmp_tasking_mode == tskm_extra_barrier. 3725 // Internal function to execute all tasks prior to a regular barrier or a join 3726 // barrier. It is a full barrier itself, which unfortunately turns regular 3727 // barriers into double barriers and join barriers into 1 1/2 barriers. 
3728 void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid) { 3729 std::atomic<kmp_uint32> *spin = RCAST( 3730 std::atomic<kmp_uint32> *, 3731 &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads); 3732 int flag = FALSE; 3733 KMP_DEBUG_ASSERT(__kmp_tasking_mode == tskm_extra_barrier); 3734 3735 #if USE_ITT_BUILD 3736 KMP_FSYNC_SPIN_INIT(spin, NULL); 3737 #endif /* USE_ITT_BUILD */ 3738 kmp_flag_32<false, false> spin_flag(spin, 0U); 3739 while (!spin_flag.execute_tasks(thread, gtid, TRUE, 3740 &flag USE_ITT_BUILD_ARG(NULL), 0)) { 3741 #if USE_ITT_BUILD 3742 // TODO: What about itt_sync_obj?? 3743 KMP_FSYNC_SPIN_PREPARE(RCAST(void *, spin)); 3744 #endif /* USE_ITT_BUILD */ 3745 3746 if (TCR_4(__kmp_global.g.g_done)) { 3747 if (__kmp_global.g.g_abort) 3748 __kmp_abort_thread(); 3749 break; 3750 } 3751 KMP_YIELD(TRUE); 3752 } 3753 #if USE_ITT_BUILD 3754 KMP_FSYNC_SPIN_ACQUIRED(RCAST(void *, spin)); 3755 #endif /* USE_ITT_BUILD */ 3756 } 3757 3758 // __kmp_give_task puts a task into a given thread queue if: 3759 // - the queue for that thread was created 3760 // - there's space in that queue 3761 // Because of this, __kmp_push_task needs to check if there's space after 3762 // getting the lock 3763 static bool __kmp_give_task(kmp_info_t *thread, kmp_int32 tid, kmp_task_t *task, 3764 kmp_int32 pass) { 3765 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task); 3766 kmp_task_team_t *task_team = taskdata->td_task_team; 3767 3768 KA_TRACE(20, ("__kmp_give_task: trying to give task %p to thread %d.\n", 3769 taskdata, tid)); 3770 3771 // If task_team is NULL something went really bad... 3772 KMP_DEBUG_ASSERT(task_team != NULL); 3773 3774 bool result = false; 3775 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid]; 3776 3777 if (thread_data->td.td_deque == NULL) { 3778 // There's no queue in this thread, go find another one 3779 // We're guaranteed that at least one thread has a queue 3780 KA_TRACE(30, 3781 ("__kmp_give_task: thread %d has no queue while giving task %p.\n", 3782 tid, taskdata)); 3783 return result; 3784 } 3785 3786 if (TCR_4(thread_data->td.td_deque_ntasks) >= 3787 TASK_DEQUE_SIZE(thread_data->td)) { 3788 KA_TRACE( 3789 30, 3790 ("__kmp_give_task: queue is full while giving task %p to thread %d.\n", 3791 taskdata, tid)); 3792 3793 // if this deque is bigger than the pass ratio give a chance to another 3794 // thread 3795 if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass) 3796 return result; 3797 3798 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock); 3799 if (TCR_4(thread_data->td.td_deque_ntasks) >= 3800 TASK_DEQUE_SIZE(thread_data->td)) { 3801 // expand deque to push the task which is not allowed to execute 3802 __kmp_realloc_task_deque(thread, thread_data); 3803 } 3804 3805 } else { 3806 3807 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock); 3808 3809 if (TCR_4(thread_data->td.td_deque_ntasks) >= 3810 TASK_DEQUE_SIZE(thread_data->td)) { 3811 KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to " 3812 "thread %d.\n", 3813 taskdata, tid)); 3814 3815 // if this deque is bigger than the pass ratio give a chance to another 3816 // thread 3817 if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass) 3818 goto release_and_exit; 3819 3820 __kmp_realloc_task_deque(thread, thread_data); 3821 } 3822 } 3823 3824 // lock is held here, and there is space in the deque 3825 3826 thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata; 3827 // Wrap index. 
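// (Assuming the deque size is kept a power of two, TASK_DEQUE_MASK is
// size - 1, so e.g. with size 256 a tail of 255 advances to
// (255 + 1) & 255 == 0, giving a cheap wrap-around for the ring buffer.)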
3828 thread_data->td.td_deque_tail =
3829 (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
3830 TCW_4(thread_data->td.td_deque_ntasks,
3831 TCR_4(thread_data->td.td_deque_ntasks) + 1);
3832
3833 result = true;
3834 KA_TRACE(30, ("__kmp_give_task: successfully gave task %p to thread %d.\n",
3835 taskdata, tid));
3836
3837 release_and_exit:
3838 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3839
3840 return result;
3841 }
3842
3843 /* The finish of the proxy tasks is divided into two pieces:
3844 - the top half is the one that can be done from a thread outside the team
3845 - the bottom half must be run from a thread within the team
3846
3847 In order to run the bottom half the task gets queued back into one of the
3848 threads of the team. Once the td_incomplete_child_tasks counter of the parent
3849 is decremented the threads can leave the barriers. So, the bottom half needs
3850 to be queued before the counter is decremented. The top half is therefore
3851 divided into two parts:
3852 - things that can be run before queuing the bottom half
3853 - things that must be run after queuing the bottom half
3854
3855 This creates a second race as the bottom half can free the task before the
3856 second top half is executed. To avoid this we use the
3857 td_incomplete_child_tasks counter of the proxy task to synchronize the top
3858 and bottom half. */
3859 static void __kmp_first_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3860 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
3861 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3862 KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
3863 KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
3864
3865 taskdata->td_flags.complete = 1; // mark the task as completed
3866
3867 if (taskdata->td_taskgroup)
3868 KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
3869
3870 // Create an imaginary child for this task so the bottom half cannot
3871 // release the task before we have completed the second top half
3872 KMP_ATOMIC_INC(&taskdata->td_incomplete_child_tasks);
3873 }
3874
3875 static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3876 kmp_int32 children = 0;
3877
3878 // Predecrement simulated by "- 1" calculation
3879 children =
3880 KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
3881 KMP_DEBUG_ASSERT(children >= 0);
3882
3883 // Remove the imaginary child
3884 KMP_ATOMIC_DEC(&taskdata->td_incomplete_child_tasks);
3885 }
3886
3887 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask) {
3888 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3889 kmp_info_t *thread = __kmp_threads[gtid];
3890
3891 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3892 KMP_DEBUG_ASSERT(taskdata->td_flags.complete ==
3893 1); // top half must run before bottom half
3894
3895 // We need to wait to make sure the top half is finished
3896 // Spinning here should be ok as this should happen quickly
3897 while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) > 0)
3898 ;
3899
3900 __kmp_release_deps(gtid, taskdata);
3901 __kmp_free_task_and_ancestors(gtid, taskdata, thread);
3902 }
3903
3904 /*!
3905 @ingroup TASKING
3906 @param gtid Global Thread ID of encountering thread
3907 @param ptask Task whose execution is completed
3908
3909 Execute the completion of a proxy task from a thread that is part of the
3910 team. Runs the top and bottom halves directly.
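
As an illustration only (the user-level names below are application code and
assumptions, not part of this runtime): a detached task created with the
OpenMP 5.0 detach clause typically reaches this path when omp_fulfill_event()
is called from a thread of the same team, via __kmp_fulfill_event() below.

@code
omp_event_handle_t ev;
#pragma omp task detach(ev)
{ start_async_work(&ev); } // hypothetical user function; completes later
// ... later, a thread of the team fulfills the event:
omp_fulfill_event(ev); // -> __kmp_fulfill_event() -> __kmpc_proxy_task_completed()
@endcode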
3911 */ 3912 void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask) { 3913 KMP_DEBUG_ASSERT(ptask != NULL); 3914 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask); 3915 KA_TRACE( 3916 10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n", 3917 gtid, taskdata)); 3918 __kmp_assert_valid_gtid(gtid); 3919 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY); 3920 3921 __kmp_first_top_half_finish_proxy(taskdata); 3922 __kmp_second_top_half_finish_proxy(taskdata); 3923 __kmp_bottom_half_finish_proxy(gtid, ptask); 3924 3925 KA_TRACE(10, 3926 ("__kmp_proxy_task_completed(exit): T#%d proxy task %p completing\n", 3927 gtid, taskdata)); 3928 } 3929 3930 /*! 3931 @ingroup TASKING 3932 @param ptask Task which execution is completed 3933 3934 Execute the completion of a proxy task from a thread that could not belong to 3935 the team. 3936 */ 3937 void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask) { 3938 KMP_DEBUG_ASSERT(ptask != NULL); 3939 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask); 3940 3941 KA_TRACE( 3942 10, 3943 ("__kmp_proxy_task_completed_ooo(enter): proxy task completing ooo %p\n", 3944 taskdata)); 3945 3946 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY); 3947 3948 __kmp_first_top_half_finish_proxy(taskdata); 3949 3950 // Enqueue task to complete bottom half completion from a thread within the 3951 // corresponding team 3952 kmp_team_t *team = taskdata->td_team; 3953 kmp_int32 nthreads = team->t.t_nproc; 3954 kmp_info_t *thread; 3955 3956 // This should be similar to start_k = __kmp_get_random( thread ) % nthreads 3957 // but we cannot use __kmp_get_random here 3958 kmp_int32 start_k = 0; 3959 kmp_int32 pass = 1; 3960 kmp_int32 k = start_k; 3961 3962 do { 3963 // For now we're just linearly trying to find a thread 3964 thread = team->t.t_threads[k]; 3965 k = (k + 1) % nthreads; 3966 3967 // we did a full pass through all the threads 3968 if (k == start_k) 3969 pass = pass << 1; 3970 3971 } while (!__kmp_give_task(thread, k, ptask, pass)); 3972 3973 __kmp_second_top_half_finish_proxy(taskdata); 3974 3975 KA_TRACE( 3976 10, 3977 ("__kmp_proxy_task_completed_ooo(exit): proxy task completing ooo %p\n", 3978 taskdata)); 3979 } 3980 3981 kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref, int gtid, 3982 kmp_task_t *task) { 3983 kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task); 3984 if (td->td_allow_completion_event.type == KMP_EVENT_UNINITIALIZED) { 3985 td->td_allow_completion_event.type = KMP_EVENT_ALLOW_COMPLETION; 3986 td->td_allow_completion_event.ed.task = task; 3987 __kmp_init_tas_lock(&td->td_allow_completion_event.lock); 3988 } 3989 return &td->td_allow_completion_event; 3990 } 3991 3992 void __kmp_fulfill_event(kmp_event_t *event) { 3993 if (event->type == KMP_EVENT_ALLOW_COMPLETION) { 3994 kmp_task_t *ptask = event->ed.task; 3995 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask); 3996 bool detached = false; 3997 int gtid = __kmp_get_gtid(); 3998 3999 // The associated task might have completed or could be completing at this 4000 // point. 
4001 // We need to take the lock to avoid races 4002 __kmp_acquire_tas_lock(&event->lock, gtid); 4003 if (taskdata->td_flags.proxy == TASK_PROXY) { 4004 detached = true; 4005 } else { 4006 #if OMPT_SUPPORT 4007 // The OMPT event must occur under mutual exclusion, 4008 // otherwise the tool might access ptask after free 4009 if (UNLIKELY(ompt_enabled.enabled)) 4010 __ompt_task_finish(ptask, NULL, ompt_task_early_fulfill); 4011 #endif 4012 } 4013 event->type = KMP_EVENT_UNINITIALIZED; 4014 __kmp_release_tas_lock(&event->lock, gtid); 4015 4016 if (detached) { 4017 #if OMPT_SUPPORT 4018 // We free ptask afterwards and know the task is finished, 4019 // so locking is not necessary 4020 if (UNLIKELY(ompt_enabled.enabled)) 4021 __ompt_task_finish(ptask, NULL, ompt_task_late_fulfill); 4022 #endif 4023 // If the task detached complete the proxy task 4024 if (gtid >= 0) { 4025 kmp_team_t *team = taskdata->td_team; 4026 kmp_info_t *thread = __kmp_get_thread(); 4027 if (thread->th.th_team == team) { 4028 __kmpc_proxy_task_completed(gtid, ptask); 4029 return; 4030 } 4031 } 4032 4033 // fallback 4034 __kmpc_proxy_task_completed_ooo(ptask); 4035 } 4036 } 4037 } 4038 4039 // __kmp_task_dup_alloc: Allocate the taskdata and make a copy of source task 4040 // for taskloop 4041 // 4042 // thread: allocating thread 4043 // task_src: pointer to source task to be duplicated 4044 // returns: a pointer to the allocated kmp_task_t structure (task). 4045 kmp_task_t *__kmp_task_dup_alloc(kmp_info_t *thread, kmp_task_t *task_src) { 4046 kmp_task_t *task; 4047 kmp_taskdata_t *taskdata; 4048 kmp_taskdata_t *taskdata_src = KMP_TASK_TO_TASKDATA(task_src); 4049 kmp_taskdata_t *parent_task = taskdata_src->td_parent; // same parent task 4050 size_t shareds_offset; 4051 size_t task_size; 4052 4053 KA_TRACE(10, ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n", thread, 4054 task_src)); 4055 KMP_DEBUG_ASSERT(taskdata_src->td_flags.proxy == 4056 TASK_FULL); // it should not be proxy task 4057 KMP_DEBUG_ASSERT(taskdata_src->td_flags.tasktype == TASK_EXPLICIT); 4058 task_size = taskdata_src->td_size_alloc; 4059 4060 // Allocate a kmp_taskdata_t block and a kmp_task_t block. 
4061 KA_TRACE(30, ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n", thread, 4062 task_size)); 4063 #if USE_FAST_MEMORY 4064 taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, task_size); 4065 #else 4066 taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, task_size); 4067 #endif /* USE_FAST_MEMORY */ 4068 KMP_MEMCPY(taskdata, taskdata_src, task_size); 4069 4070 task = KMP_TASKDATA_TO_TASK(taskdata); 4071 4072 // Initialize new task (only specific fields not affected by memcpy) 4073 taskdata->td_task_id = KMP_GEN_TASK_ID(); 4074 if (task->shareds != NULL) { // need setup shareds pointer 4075 shareds_offset = (char *)task_src->shareds - (char *)taskdata_src; 4076 task->shareds = &((char *)taskdata)[shareds_offset]; 4077 KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) == 4078 0); 4079 } 4080 taskdata->td_alloc_thread = thread; 4081 taskdata->td_parent = parent_task; 4082 // task inherits the taskgroup from the parent task 4083 taskdata->td_taskgroup = parent_task->td_taskgroup; 4084 // tied task needs to initialize the td_last_tied at creation, 4085 // untied one does this when it is scheduled for execution 4086 if (taskdata->td_flags.tiedness == TASK_TIED) 4087 taskdata->td_last_tied = taskdata; 4088 4089 // Only need to keep track of child task counts if team parallel and tasking 4090 // not serialized 4091 if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) { 4092 KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks); 4093 if (parent_task->td_taskgroup) 4094 KMP_ATOMIC_INC(&parent_task->td_taskgroup->count); 4095 // Only need to keep track of allocated child tasks for explicit tasks since 4096 // implicit not deallocated 4097 if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) 4098 KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks); 4099 } 4100 4101 KA_TRACE(20, 4102 ("__kmp_task_dup_alloc(exit): Th %p, created task %p, parent=%p\n", 4103 thread, taskdata, taskdata->td_parent)); 4104 #if OMPT_SUPPORT 4105 if (UNLIKELY(ompt_enabled.enabled)) 4106 __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid); 4107 #endif 4108 return task; 4109 } 4110 4111 // Routine optionally generated by the compiler for setting the lastprivate flag 4112 // and calling needed constructors for private/firstprivate objects 4113 // (used to form taskloop tasks from pattern task) 4114 // Parameters: dest task, src task, lastprivate flag. 4115 typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32); 4116 4117 KMP_BUILD_ASSERT(sizeof(long) == 4 || sizeof(long) == 8); 4118 4119 // class to encapsulate manipulating loop bounds in a taskloop task. 4120 // this abstracts away the Intel vs GOMP taskloop interface for setting/getting 4121 // the loop bound variables. 
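// For example (matching the accessors below): for an Intel-style task the
// 64-bit lb/ub live at fixed byte offsets inside the task and are accessed
// directly, while for a GOMP ("native") task they are the first two elements
// of task->shareds, each sizeof(long) bytes wide (td_size_loop_bounds is 4 or
// 8), so get_lb()/get_ub()/set_lb()/set_ub() dispatch on td_flags.native.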
4122 class kmp_taskloop_bounds_t { 4123 kmp_task_t *task; 4124 const kmp_taskdata_t *taskdata; 4125 size_t lower_offset; 4126 size_t upper_offset; 4127 4128 public: 4129 kmp_taskloop_bounds_t(kmp_task_t *_task, kmp_uint64 *lb, kmp_uint64 *ub) 4130 : task(_task), taskdata(KMP_TASK_TO_TASKDATA(task)), 4131 lower_offset((char *)lb - (char *)task), 4132 upper_offset((char *)ub - (char *)task) { 4133 KMP_DEBUG_ASSERT((char *)lb > (char *)_task); 4134 KMP_DEBUG_ASSERT((char *)ub > (char *)_task); 4135 } 4136 kmp_taskloop_bounds_t(kmp_task_t *_task, const kmp_taskloop_bounds_t &bounds) 4137 : task(_task), taskdata(KMP_TASK_TO_TASKDATA(_task)), 4138 lower_offset(bounds.lower_offset), upper_offset(bounds.upper_offset) {} 4139 size_t get_lower_offset() const { return lower_offset; } 4140 size_t get_upper_offset() const { return upper_offset; } 4141 kmp_uint64 get_lb() const { 4142 kmp_int64 retval; 4143 #if defined(KMP_GOMP_COMPAT) 4144 // Intel task just returns the lower bound normally 4145 if (!taskdata->td_flags.native) { 4146 retval = *(kmp_int64 *)((char *)task + lower_offset); 4147 } else { 4148 // GOMP task has to take into account the sizeof(long) 4149 if (taskdata->td_size_loop_bounds == 4) { 4150 kmp_int32 *lb = RCAST(kmp_int32 *, task->shareds); 4151 retval = (kmp_int64)*lb; 4152 } else { 4153 kmp_int64 *lb = RCAST(kmp_int64 *, task->shareds); 4154 retval = (kmp_int64)*lb; 4155 } 4156 } 4157 #else 4158 (void)taskdata; 4159 retval = *(kmp_int64 *)((char *)task + lower_offset); 4160 #endif // defined(KMP_GOMP_COMPAT) 4161 return retval; 4162 } 4163 kmp_uint64 get_ub() const { 4164 kmp_int64 retval; 4165 #if defined(KMP_GOMP_COMPAT) 4166 // Intel task just returns the upper bound normally 4167 if (!taskdata->td_flags.native) { 4168 retval = *(kmp_int64 *)((char *)task + upper_offset); 4169 } else { 4170 // GOMP task has to take into account the sizeof(long) 4171 if (taskdata->td_size_loop_bounds == 4) { 4172 kmp_int32 *ub = RCAST(kmp_int32 *, task->shareds) + 1; 4173 retval = (kmp_int64)*ub; 4174 } else { 4175 kmp_int64 *ub = RCAST(kmp_int64 *, task->shareds) + 1; 4176 retval = (kmp_int64)*ub; 4177 } 4178 } 4179 #else 4180 retval = *(kmp_int64 *)((char *)task + upper_offset); 4181 #endif // defined(KMP_GOMP_COMPAT) 4182 return retval; 4183 } 4184 void set_lb(kmp_uint64 lb) { 4185 #if defined(KMP_GOMP_COMPAT) 4186 // Intel task just sets the lower bound normally 4187 if (!taskdata->td_flags.native) { 4188 *(kmp_uint64 *)((char *)task + lower_offset) = lb; 4189 } else { 4190 // GOMP task has to take into account the sizeof(long) 4191 if (taskdata->td_size_loop_bounds == 4) { 4192 kmp_uint32 *lower = RCAST(kmp_uint32 *, task->shareds); 4193 *lower = (kmp_uint32)lb; 4194 } else { 4195 kmp_uint64 *lower = RCAST(kmp_uint64 *, task->shareds); 4196 *lower = (kmp_uint64)lb; 4197 } 4198 } 4199 #else 4200 *(kmp_uint64 *)((char *)task + lower_offset) = lb; 4201 #endif // defined(KMP_GOMP_COMPAT) 4202 } 4203 void set_ub(kmp_uint64 ub) { 4204 #if defined(KMP_GOMP_COMPAT) 4205 // Intel task just sets the upper bound normally 4206 if (!taskdata->td_flags.native) { 4207 *(kmp_uint64 *)((char *)task + upper_offset) = ub; 4208 } else { 4209 // GOMP task has to take into account the sizeof(long) 4210 if (taskdata->td_size_loop_bounds == 4) { 4211 kmp_uint32 *upper = RCAST(kmp_uint32 *, task->shareds) + 1; 4212 *upper = (kmp_uint32)ub; 4213 } else { 4214 kmp_uint64 *upper = RCAST(kmp_uint64 *, task->shareds) + 1; 4215 *upper = (kmp_uint64)ub; 4216 } 4217 } 4218 #else 4219 *(kmp_uint64 *)((char *)task + 
upper_offset) = ub; 4220 #endif // defined(KMP_GOMP_COMPAT) 4221 } 4222 }; 4223 4224 // __kmp_taskloop_linear: Start tasks of the taskloop linearly 4225 // 4226 // loc Source location information 4227 // gtid Global thread ID 4228 // task Pattern task, exposes the loop iteration range 4229 // lb Pointer to loop lower bound in task structure 4230 // ub Pointer to loop upper bound in task structure 4231 // st Loop stride 4232 // ub_glob Global upper bound (used for lastprivate check) 4233 // num_tasks Number of tasks to execute 4234 // grainsize Number of loop iterations per task 4235 // extras Number of chunks with grainsize+1 iterations 4236 // last_chunk Reduction of grainsize for last task 4237 // tc Iterations count 4238 // task_dup Tasks duplication routine 4239 // codeptr_ra Return address for OMPT events 4240 void __kmp_taskloop_linear(ident_t *loc, int gtid, kmp_task_t *task, 4241 kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, 4242 kmp_uint64 ub_glob, kmp_uint64 num_tasks, 4243 kmp_uint64 grainsize, kmp_uint64 extras, 4244 kmp_int64 last_chunk, kmp_uint64 tc, 4245 #if OMPT_SUPPORT 4246 void *codeptr_ra, 4247 #endif 4248 void *task_dup) { 4249 KMP_COUNT_BLOCK(OMP_TASKLOOP); 4250 KMP_TIME_PARTITIONED_BLOCK(OMP_taskloop_scheduling); 4251 p_task_dup_t ptask_dup = (p_task_dup_t)task_dup; 4252 // compiler provides global bounds here 4253 kmp_taskloop_bounds_t task_bounds(task, lb, ub); 4254 kmp_uint64 lower = task_bounds.get_lb(); 4255 kmp_uint64 upper = task_bounds.get_ub(); 4256 kmp_uint64 i; 4257 kmp_info_t *thread = __kmp_threads[gtid]; 4258 kmp_taskdata_t *current_task = thread->th.th_current_task; 4259 kmp_task_t *next_task; 4260 kmp_int32 lastpriv = 0; 4261 4262 KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + 4263 (last_chunk < 0 ? last_chunk : extras)); 4264 KMP_DEBUG_ASSERT(num_tasks > extras); 4265 KMP_DEBUG_ASSERT(num_tasks > 0); 4266 KA_TRACE(20, ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, " 4267 "extras %lld, last_chunk %lld, i=%lld,%lld(%d)%lld, dup %p\n", 4268 gtid, num_tasks, grainsize, extras, last_chunk, lower, upper, 4269 ub_glob, st, task_dup)); 4270 4271 // Launch num_tasks tasks, assign grainsize iterations each task 4272 for (i = 0; i < num_tasks; ++i) { 4273 kmp_uint64 chunk_minus_1; 4274 if (extras == 0) { 4275 chunk_minus_1 = grainsize - 1; 4276 } else { 4277 chunk_minus_1 = grainsize; 4278 --extras; // first extras iterations get bigger chunk (grainsize+1) 4279 } 4280 upper = lower + st * chunk_minus_1; 4281 if (upper > *ub) { 4282 upper = *ub; 4283 } 4284 if (i == num_tasks - 1) { 4285 // schedule the last task, set lastprivate flag if needed 4286 if (st == 1) { // most common case 4287 KMP_DEBUG_ASSERT(upper == *ub); 4288 if (upper == ub_glob) 4289 lastpriv = 1; 4290 } else if (st > 0) { // positive loop stride 4291 KMP_DEBUG_ASSERT((kmp_uint64)st > *ub - upper); 4292 if ((kmp_uint64)st > ub_glob - upper) 4293 lastpriv = 1; 4294 } else { // negative loop stride 4295 KMP_DEBUG_ASSERT(upper + st < *ub); 4296 if (upper - ub_glob < (kmp_uint64)(-st)) 4297 lastpriv = 1; 4298 } 4299 } 4300 next_task = __kmp_task_dup_alloc(thread, task); // allocate new task 4301 kmp_taskdata_t *next_taskdata = KMP_TASK_TO_TASKDATA(next_task); 4302 kmp_taskloop_bounds_t next_task_bounds = 4303 kmp_taskloop_bounds_t(next_task, task_bounds); 4304 4305 // adjust task-specific bounds 4306 next_task_bounds.set_lb(lower); 4307 if (next_taskdata->td_flags.native) { 4308 next_task_bounds.set_ub(upper + (st > 0 ? 
1 : -1)); 4309 } else { 4310 next_task_bounds.set_ub(upper); 4311 } 4312 if (ptask_dup != NULL) // set lastprivate flag, construct firstprivates, 4313 // etc. 4314 ptask_dup(next_task, task, lastpriv); 4315 KA_TRACE(40, 4316 ("__kmp_taskloop_linear: T#%d; task #%llu: task %p: lower %lld, " 4317 "upper %lld stride %lld, (offsets %p %p)\n", 4318 gtid, i, next_task, lower, upper, st, 4319 next_task_bounds.get_lower_offset(), 4320 next_task_bounds.get_upper_offset())); 4321 #if OMPT_SUPPORT 4322 __kmp_omp_taskloop_task(NULL, gtid, next_task, 4323 codeptr_ra); // schedule new task 4324 #else 4325 __kmp_omp_task(gtid, next_task, true); // schedule new task 4326 #endif 4327 lower = upper + st; // adjust lower bound for the next iteration 4328 } 4329 // free the pattern task and exit 4330 __kmp_task_start(gtid, task, current_task); // make internal bookkeeping 4331 // do not execute the pattern task, just do internal bookkeeping 4332 __kmp_task_finish<false>(gtid, task, current_task); 4333 } 4334 4335 // Structure to keep taskloop parameters for auxiliary task 4336 // kept in the shareds of the task structure. 4337 typedef struct __taskloop_params { 4338 kmp_task_t *task; 4339 kmp_uint64 *lb; 4340 kmp_uint64 *ub; 4341 void *task_dup; 4342 kmp_int64 st; 4343 kmp_uint64 ub_glob; 4344 kmp_uint64 num_tasks; 4345 kmp_uint64 grainsize; 4346 kmp_uint64 extras; 4347 kmp_int64 last_chunk; 4348 kmp_uint64 tc; 4349 kmp_uint64 num_t_min; 4350 #if OMPT_SUPPORT 4351 void *codeptr_ra; 4352 #endif 4353 } __taskloop_params_t; 4354 4355 void __kmp_taskloop_recur(ident_t *, int, kmp_task_t *, kmp_uint64 *, 4356 kmp_uint64 *, kmp_int64, kmp_uint64, kmp_uint64, 4357 kmp_uint64, kmp_uint64, kmp_int64, kmp_uint64, 4358 kmp_uint64, 4359 #if OMPT_SUPPORT 4360 void *, 4361 #endif 4362 void *); 4363 4364 // Execute part of the taskloop submitted as a task. 
// Execute part of the taskloop submitted as a task.
int __kmp_taskloop_task(int gtid, void *ptask) {
  __taskloop_params_t *p =
      (__taskloop_params_t *)((kmp_task_t *)ptask)->shareds;
  kmp_task_t *task = p->task;
  kmp_uint64 *lb = p->lb;
  kmp_uint64 *ub = p->ub;
  void *task_dup = p->task_dup;
  // p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
  kmp_int64 st = p->st;
  kmp_uint64 ub_glob = p->ub_glob;
  kmp_uint64 num_tasks = p->num_tasks;
  kmp_uint64 grainsize = p->grainsize;
  kmp_uint64 extras = p->extras;
  kmp_int64 last_chunk = p->last_chunk;
  kmp_uint64 tc = p->tc;
  kmp_uint64 num_t_min = p->num_t_min;
#if OMPT_SUPPORT
  void *codeptr_ra = p->codeptr_ra;
#endif
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  KMP_DEBUG_ASSERT(task != NULL);
  KA_TRACE(20,
           ("__kmp_taskloop_task: T#%d, task %p: %lld tasks, grainsize"
            " %lld, extras %lld, last_chunk %lld, i=%lld,%lld(%d), dup %p\n",
            gtid, taskdata, num_tasks, grainsize, extras, last_chunk, *lb, *ub,
            st, task_dup));
#endif
  KMP_DEBUG_ASSERT(num_tasks * 2 + 1 > num_t_min);
  if (num_tasks > num_t_min)
    __kmp_taskloop_recur(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
                         grainsize, extras, last_chunk, tc, num_t_min,
#if OMPT_SUPPORT
                         codeptr_ra,
#endif
                         task_dup);
  else
    __kmp_taskloop_linear(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
                          grainsize, extras, last_chunk, tc,
#if OMPT_SUPPORT
                          codeptr_ra,
#endif
                          task_dup);

  KA_TRACE(40, ("__kmp_taskloop_task(exit): T#%d\n", gtid));
  return 0;
}

// Schedule part of the taskloop as a task,
// execute the rest of the taskloop.
//
// loc        Source location information
// gtid       Global thread ID
// task       Pattern task, exposes the loop iteration range
// lb         Pointer to loop lower bound in task structure
// ub         Pointer to loop upper bound in task structure
// st         Loop stride
// ub_glob    Global upper bound (used for lastprivate check)
// num_tasks  Number of tasks to execute
// grainsize  Number of loop iterations per task
// extras     Number of chunks with grainsize+1 iterations
// last_chunk Reduction of grainsize for last task
// tc         Iterations count
// num_t_min  Threshold to launch tasks recursively
// task_dup   Tasks duplication routine
// codeptr_ra Return address for OMPT events
void __kmp_taskloop_recur(ident_t *loc, int gtid, kmp_task_t *task,
                          kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
                          kmp_uint64 ub_glob, kmp_uint64 num_tasks,
                          kmp_uint64 grainsize, kmp_uint64 extras,
                          kmp_int64 last_chunk, kmp_uint64 tc,
                          kmp_uint64 num_t_min,
#if OMPT_SUPPORT
                          void *codeptr_ra,
#endif
                          void *task_dup) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  KMP_DEBUG_ASSERT(task != NULL);
  KMP_DEBUG_ASSERT(num_tasks > num_t_min);
  KA_TRACE(20,
           ("__kmp_taskloop_recur: T#%d, task %p: %lld tasks, grainsize"
            " %lld, extras %lld, last_chunk %lld, i=%lld,%lld(%d), dup %p\n",
            gtid, taskdata, num_tasks, grainsize, extras, last_chunk, *lb, *ub,
            st, task_dup));
  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
  kmp_uint64 lower = *lb;
  kmp_info_t *thread = __kmp_threads[gtid];
  // kmp_taskdata_t *current_task = thread->th.th_current_task;
  kmp_task_t *next_task;
  size_t lower_offset =
      (char *)lb - (char *)task; // remember offset of lb in the task structure
  size_t upper_offset =
      (char *)ub - (char *)task; // remember offset of ub in the task structure

  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize +
                             (last_chunk < 0 ? last_chunk : extras));
  KMP_DEBUG_ASSERT(num_tasks > extras);
  KMP_DEBUG_ASSERT(num_tasks > 0);

  // split the loop in two halves
  kmp_uint64 lb1, ub0, tc0, tc1, ext0, ext1;
  kmp_int64 last_chunk0 = 0, last_chunk1 = 0;
  kmp_uint64 gr_size0 = grainsize;
  kmp_uint64 n_tsk0 = num_tasks >> 1; // num_tasks/2 to execute
  kmp_uint64 n_tsk1 = num_tasks - n_tsk0; // to schedule as a task
  if (last_chunk < 0) {
    ext0 = ext1 = 0;
    last_chunk1 = last_chunk;
    tc0 = grainsize * n_tsk0;
    tc1 = tc - tc0;
  } else if (n_tsk0 <= extras) {
    gr_size0++; // integrate extras into grainsize
    ext0 = 0; // no extra iters in 1st half
    ext1 = extras - n_tsk0; // remaining extras
    tc0 = gr_size0 * n_tsk0;
    tc1 = tc - tc0;
  } else { // n_tsk0 > extras
    ext1 = 0; // no extra iters in 2nd half
    ext0 = extras;
    tc1 = grainsize * n_tsk1;
    tc0 = tc - tc1;
  }
  ub0 = lower + st * (tc0 - 1);
  lb1 = ub0 + st;

  // create pattern task for 2nd half of the loop
  next_task = __kmp_task_dup_alloc(thread, task); // duplicate the task
  // adjust lower bound (upper bound is not changed) for the 2nd half
  *(kmp_uint64 *)((char *)next_task + lower_offset) = lb1;
  if (ptask_dup != NULL) // construct firstprivates, etc.
    ptask_dup(next_task, task, 0);
  *ub = ub0; // adjust upper bound for the 1st half

  // create auxiliary task for 2nd half of the loop
  // make sure new task has same parent task as the pattern task
  kmp_taskdata_t *current_task = thread->th.th_current_task;
  thread->th.th_current_task = taskdata->td_parent;
  kmp_task_t *new_task =
      __kmpc_omp_task_alloc(loc, gtid, 1, 3 * sizeof(void *),
                            sizeof(__taskloop_params_t), &__kmp_taskloop_task);
  // restore current task
  thread->th.th_current_task = current_task;
  __taskloop_params_t *p = (__taskloop_params_t *)new_task->shareds;
  p->task = next_task;
  p->lb = (kmp_uint64 *)((char *)next_task + lower_offset);
  p->ub = (kmp_uint64 *)((char *)next_task + upper_offset);
  p->task_dup = task_dup;
  p->st = st;
  p->ub_glob = ub_glob;
  p->num_tasks = n_tsk1;
  p->grainsize = grainsize;
  p->extras = ext1;
  p->last_chunk = last_chunk1;
  p->tc = tc1;
  p->num_t_min = num_t_min;
#if OMPT_SUPPORT
  p->codeptr_ra = codeptr_ra;
#endif

#if OMPT_SUPPORT
  // schedule new task with correct return address for OMPT events
  __kmp_omp_taskloop_task(NULL, gtid, new_task, codeptr_ra);
#else
  __kmp_omp_task(gtid, new_task, true); // schedule new task
#endif

  // execute the 1st half of current subrange
  if (n_tsk0 > num_t_min)
    __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0, gr_size0,
                         ext0, last_chunk0, tc0, num_t_min,
#if OMPT_SUPPORT
                         codeptr_ra,
#endif
                         task_dup);
  else
    __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0,
                          gr_size0, ext0, last_chunk0, tc0,
#if OMPT_SUPPORT
                          codeptr_ra,
#endif
                          task_dup);

  KA_TRACE(40, ("__kmp_taskloop_recur(exit): T#%d\n", gtid));
}
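// Worked sketch of the recursive split above (illustrative numbers only):
// with num_tasks = 8 and num_t_min = 3, __kmp_taskloop_recur keeps
// n_tsk0 = 4 chunks to execute itself and packs the remaining n_tsk1 = 4
// chunks into an auxiliary task that re-enters the machinery through
// __kmp_taskloop_task. Since 4 > num_t_min, each half splits once more into
// 2 + 2, and those final quarters are spawned by __kmp_taskloop_linear.
// Extra iterations follow the halves: if n_tsk0 <= extras, gr_size0 is bumped
// so every chunk of the first half is one iteration larger and only
// ext1 = extras - n_tsk0 extras remain for the second half; otherwise all
// extras stay in the first half.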
static void __kmp_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
                           kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
                           int nogroup, int sched, kmp_uint64 grainsize,
                           int modifier, void *task_dup) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  KMP_DEBUG_ASSERT(task != NULL);
  if (nogroup == 0) {
#if OMPT_SUPPORT && OMPT_OPTIONAL
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    __kmpc_taskgroup(loc, gtid);
  }

  // =========================================================================
  // calculate loop parameters
  kmp_taskloop_bounds_t task_bounds(task, lb, ub);
  kmp_uint64 tc;
  // compiler provides global bounds here
  kmp_uint64 lower = task_bounds.get_lb();
  kmp_uint64 upper = task_bounds.get_ub();
  kmp_uint64 ub_glob = upper; // global upper used to calc lastprivate flag
  kmp_uint64 num_tasks = 0, extras = 0;
  kmp_int64 last_chunk =
      0; // reduce grainsize of last task by last_chunk in strict mode
  kmp_uint64 num_tasks_min = __kmp_taskloop_min_tasks;
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

  KA_TRACE(20, ("__kmp_taskloop: T#%d, task %p, lb %lld, ub %lld, st %lld, "
                "grain %llu(%d, %d), dup %p\n",
                gtid, taskdata, lower, upper, st, grainsize, sched, modifier,
                task_dup));

  // compute trip count
  if (st == 1) { // most common case
    tc = upper - lower + 1;
  } else if (st < 0) {
    tc = (lower - upper) / (-st) + 1;
  } else { // st > 0
    tc = (upper - lower) / st + 1;
  }
  if (tc == 0) {
    KA_TRACE(20, ("__kmp_taskloop(exit): T#%d zero-trip loop\n", gtid));
    // free the pattern task and exit
    __kmp_task_start(gtid, task, current_task);
    // do not execute anything for zero-trip loop
    __kmp_task_finish<false>(gtid, task, current_task);
    return;
  }

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
  ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_taskloop, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
  }
#endif
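  // Example of what the schedule computation below produces (values follow
  // directly from the switch, shown for illustration): for tc = 100
  // iterations,
  //   grainsize(30), no 'strict' modifier (sched == 1):
  //     num_tasks = 100 / 30 = 3, grainsize rebalanced to 100 / 3 = 33,
  //     extras = 100 % 3 = 1, giving chunks of 34, 33, 33;
  //   grainsize(30) with 'strict' (modifier != 0):
  //     num_tasks = (100 + 29) / 30 = 4, last_chunk = 100 - 4 * 30 = -20,
  //     giving chunks of 30, 30, 30, 10;
  //   num_tasks(8) (sched == 2):
  //     grainsize = 100 / 8 = 12, extras = 100 % 8 = 4, giving four chunks
  //     of 13 followed by four chunks of 12.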
  if (num_tasks_min == 0)
    // TODO: can we choose better default heuristic?
    num_tasks_min =
        KMP_MIN(thread->th.th_team_nproc * 10, INITIAL_TASK_DEQUE_SIZE);

  // compute num_tasks/grainsize based on the input provided
  switch (sched) {
  case 0: // no schedule clause specified, we can choose the default
    // let's try to schedule (team_size*10) tasks
    grainsize = thread->th.th_team_nproc * 10;
    KMP_FALLTHROUGH();
  case 2: // num_tasks provided
    if (grainsize > tc) {
      num_tasks = tc; // too big num_tasks requested, adjust values
      grainsize = 1;
      extras = 0;
    } else {
      num_tasks = grainsize;
      grainsize = tc / num_tasks;
      extras = tc % num_tasks;
    }
    break;
  case 1: // grainsize provided
    if (grainsize > tc) {
      num_tasks = 1;
      grainsize = tc; // too big grainsize requested, adjust values
      extras = 0;
    } else {
      if (modifier) {
        num_tasks = (tc + grainsize - 1) / grainsize;
        last_chunk = tc - (num_tasks * grainsize);
        extras = 0;
      } else {
        num_tasks = tc / grainsize;
        // adjust grainsize for balanced distribution of iterations
        grainsize = tc / num_tasks;
        extras = tc % num_tasks;
      }
    }
    break;
  default:
    KMP_ASSERT2(0, "unknown scheduling of taskloop");
  }

  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize +
                             (last_chunk < 0 ? last_chunk : extras));
  KMP_DEBUG_ASSERT(num_tasks > extras);
  KMP_DEBUG_ASSERT(num_tasks > 0);
  // =========================================================================

  // check the value of the if clause first
  // Also require GOMP_taskloop to reduce to linear (taskdata->td_flags.native)
  if (if_val == 0) { // if(0) specified, mark task as serial
    taskdata->td_flags.task_serial = 1;
    taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied
    // always start serial tasks linearly
    __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
                          grainsize, extras, last_chunk, tc,
#if OMPT_SUPPORT
                          OMPT_GET_RETURN_ADDRESS(0),
#endif
                          task_dup);
    // !taskdata->td_flags.native => currently force linear spawning of tasks
    // for GOMP_taskloop
  } else if (num_tasks > num_tasks_min && !taskdata->td_flags.native) {
    KA_TRACE(20, ("__kmp_taskloop: T#%d, go recursive: tc %llu, #tasks %llu"
                  "(%lld), grain %llu, extras %llu, last_chunk %lld\n",
                  gtid, tc, num_tasks, num_tasks_min, grainsize, extras,
                  last_chunk));
    __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
                         grainsize, extras, last_chunk, tc, num_tasks_min,
#if OMPT_SUPPORT
                         OMPT_GET_RETURN_ADDRESS(0),
#endif
                         task_dup);
  } else {
    KA_TRACE(20, ("__kmp_taskloop: T#%d, go linear: tc %llu, #tasks %llu"
                  "(%lld), grain %llu, extras %llu, last_chunk %lld\n",
                  gtid, tc, num_tasks, num_tasks_min, grainsize, extras,
                  last_chunk));
    __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
                          grainsize, extras, last_chunk, tc,
#if OMPT_SUPPORT
                          OMPT_GET_RETURN_ADDRESS(0),
#endif
                          task_dup);
  }

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_taskloop, ompt_scope_end, &(team_info->parallel_data),
        &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
  }
#endif
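  // Dispatch note for the if/else chain above (illustrative numbers only):
  // when __kmp_taskloop_min_tasks is zero, num_tasks_min falls back to
  // KMP_MIN(nproc * 10, INITIAL_TASK_DEQUE_SIZE); on a 4-thread team that is
  // 40 (assuming INITIAL_TASK_DEQUE_SIZE, defined elsewhere in the runtime,
  // is at least that large). A taskloop producing 100 chunks then takes the
  // __kmp_taskloop_recur path, while one producing 30 chunks, or any task
  // flagged as GOMP-style native, is spawned linearly by
  // __kmp_taskloop_linear.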
  if (nogroup == 0) {
#if OMPT_SUPPORT && OMPT_OPTIONAL
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    __kmpc_end_taskgroup(loc, gtid);
  }
  KA_TRACE(20, ("__kmp_taskloop(exit): T#%d\n", gtid));
}

/*!
@ingroup TASKING
@param loc Source location information
@param gtid Global thread ID
@param task Task structure
@param if_val Value of the if clause
@param lb Pointer to loop lower bound in task structure
@param ub Pointer to loop upper bound in task structure
@param st Loop stride
@param nogroup Flag, 1 if nogroup clause specified, 0 otherwise
@param sched Schedule specified 0/1/2 for none/grainsize/num_tasks
@param grainsize Schedule value if specified
@param task_dup Tasks duplication routine

Execute the taskloop construct.
*/
void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
                     kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup,
                     int sched, kmp_uint64 grainsize, void *task_dup) {
  __kmp_assert_valid_gtid(gtid);
  KA_TRACE(20, ("__kmpc_taskloop(enter): T#%d\n", gtid));
  __kmp_taskloop(loc, gtid, task, if_val, lb, ub, st, nogroup, sched, grainsize,
                 0, task_dup);
  KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d\n", gtid));
}

/*!
@ingroup TASKING
@param loc Source location information
@param gtid Global thread ID
@param task Task structure
@param if_val Value of the if clause
@param lb Pointer to loop lower bound in task structure
@param ub Pointer to loop upper bound in task structure
@param st Loop stride
@param nogroup Flag, 1 if nogroup clause specified, 0 otherwise
@param sched Schedule specified 0/1/2 for none/grainsize/num_tasks
@param grainsize Schedule value if specified
@param modifier Modifier 'strict' for sched, 1 if present, 0 otherwise
@param task_dup Tasks duplication routine

Execute the taskloop construct.
*/
void __kmpc_taskloop_5(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
                       kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
                       int nogroup, int sched, kmp_uint64 grainsize,
                       int modifier, void *task_dup) {
  __kmp_assert_valid_gtid(gtid);
  KA_TRACE(20, ("__kmpc_taskloop_5(enter): T#%d\n", gtid));
  __kmp_taskloop(loc, gtid, task, if_val, lb, ub, st, nogroup, sched, grainsize,
                 modifier, task_dup);
  KA_TRACE(20, ("__kmpc_taskloop_5(exit): T#%d\n", gtid));
}
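// Usage sketch (hypothetical lowering, for illustration only; the exact code
// a compiler emits is outside this file): for a construct such as
//   #pragma omp taskloop grainsize(4)
//   for (kmp_uint64 i = lo; i <= hi; ++i) body(i);
// the compiler is expected to allocate the pattern task, store the loop
// bounds where the lb/ub pointers passed in will find them, and call the
// entry point, roughly:
//   kmp_task_t *task = __kmpc_omp_task_alloc(loc, gtid, flags,
//                                            sizeof_kmp_task_t, sizeof_shareds,
//                                            task_entry);
//   /* write lo and hi into the task's lower/upper bound fields */
//   __kmpc_taskloop(loc, gtid, task, /*if_val=*/1, /*lb=*/&lb_field,
//                   /*ub=*/&ub_field, /*st=*/1, /*nogroup=*/0, /*sched=*/1,
//                   /*grainsize=*/4, /*task_dup=*/dup_fn);
// Here task_entry, dup_fn, lb_field and ub_field are placeholders for
// compiler-generated pieces; __kmpc_taskloop_5 takes the same arguments plus
// the 'strict' modifier flag.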