1 /*
2  * kmp_gsupport.cpp
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 //                     The LLVM Compiler Infrastructure
8 //
9 // This file is dual licensed under the MIT and the University of Illinois Open
10 // Source Licenses. See LICENSE.txt for details.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "kmp.h"
15 #include "kmp_atomic.h"
16 
17 #if OMPT_SUPPORT
18 #include "ompt-specific.h"
19 #endif
20 
21 #ifdef __cplusplus
22 extern "C" {
23 #endif // __cplusplus
24 
25 #define MKLOC(loc, routine)                                                    \
26   static ident_t(loc) = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};
27 
28 #include "kmp_ftn_os.h"
29 
30 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER)(void) {
31   int gtid = __kmp_entry_gtid();
32   MKLOC(loc, "GOMP_barrier");
33   KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
34 #if OMPT_SUPPORT && OMPT_OPTIONAL
35   ompt_frame_t *ompt_frame;
36   if (ompt_enabled.enabled) {
37     __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
38     ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
39     OMPT_STORE_RETURN_ADDRESS(gtid);
40   }
41 #endif
42   __kmpc_barrier(&loc, gtid);
43 #if OMPT_SUPPORT && OMPT_OPTIONAL
44   if (ompt_enabled.enabled) {
45     ompt_frame->enter_frame = NULL;
46   }
47 #endif
48 }
49 
50 // Mutual exclusion
51 
52 // The symbol that icc/ifort generates for unnamed for unnamed critical sections
53 // - .gomp_critical_user_ - is defined using .comm in any objects reference it.
54 // We can't reference it directly here in C code, as the symbol contains a ".".
55 //
56 // The RTL contains an assembly language definition of .gomp_critical_user_
57 // with another symbol __kmp_unnamed_critical_addr initialized with it's
58 // address.
59 extern kmp_critical_name *__kmp_unnamed_critical_addr;
60 
61 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_START)(void) {
62   int gtid = __kmp_entry_gtid();
63   MKLOC(loc, "GOMP_critical_start");
64   KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
65 #if OMPT_SUPPORT && OMPT_OPTIONAL
66   OMPT_STORE_RETURN_ADDRESS(gtid);
67 #endif
68   __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
69 }
70 
71 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_END)(void) {
72   int gtid = __kmp_get_gtid();
73   MKLOC(loc, "GOMP_critical_end");
74   KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
75 #if OMPT_SUPPORT && OMPT_OPTIONAL
76   OMPT_STORE_RETURN_ADDRESS(gtid);
77 #endif
78   __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
79 }
80 
81 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr) {
82   int gtid = __kmp_entry_gtid();
83   MKLOC(loc, "GOMP_critical_name_start");
84   KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
85   __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
86 }
87 
88 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr) {
89   int gtid = __kmp_get_gtid();
90   MKLOC(loc, "GOMP_critical_name_end");
91   KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
92   __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
93 }
94 
95 // The Gnu codegen tries to use locked operations to perform atomic updates
96 // inline.  If it can't, then it calls GOMP_atomic_start() before performing
97 // the update and GOMP_atomic_end() afterward, regardless of the data type.
98 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_START)(void) {
99   int gtid = __kmp_entry_gtid();
100   KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));
101 
102 #if OMPT_SUPPORT
103   __ompt_thread_assign_wait_id(0);
104 #endif
105 
106   __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
107 }
108 
109 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_END)(void) {
110   int gtid = __kmp_get_gtid();
111   KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));
112   __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
113 }
114 
115 int KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_START)(void) {
116   int gtid = __kmp_entry_gtid();
117   MKLOC(loc, "GOMP_single_start");
118   KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));
119 
120   if (!TCR_4(__kmp_init_parallel))
121     __kmp_parallel_initialize();
122 
123   // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
124   // workshare when USE_CHECKS is defined.  We need to avoid the push,
125   // as there is no corresponding GOMP_single_end() call.
126   kmp_int32 rc = __kmp_enter_single(gtid, &loc, FALSE);
127 
128 #if OMPT_SUPPORT && OMPT_OPTIONAL
129   kmp_info_t *this_thr = __kmp_threads[gtid];
130   kmp_team_t *team = this_thr->th.th_team;
131   int tid = __kmp_tid_from_gtid(gtid);
132 
133   if (ompt_enabled.enabled) {
134     if (rc) {
135       if (ompt_enabled.ompt_callback_work) {
136         ompt_callbacks.ompt_callback(ompt_callback_work)(
137             ompt_work_single_executor, ompt_scope_begin,
138             &(team->t.ompt_team_info.parallel_data),
139             &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
140             1, OMPT_GET_RETURN_ADDRESS(0));
141       }
142     } else {
143       if (ompt_enabled.ompt_callback_work) {
144         ompt_callbacks.ompt_callback(ompt_callback_work)(
145             ompt_work_single_other, ompt_scope_begin,
146             &(team->t.ompt_team_info.parallel_data),
147             &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
148             1, OMPT_GET_RETURN_ADDRESS(0));
149         ompt_callbacks.ompt_callback(ompt_callback_work)(
150             ompt_work_single_other, ompt_scope_end,
151             &(team->t.ompt_team_info.parallel_data),
152             &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
153             1, OMPT_GET_RETURN_ADDRESS(0));
154       }
155     }
156   }
157 #endif
158 
159   return rc;
160 }
161 
162 void *KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void) {
163   void *retval;
164   int gtid = __kmp_entry_gtid();
165   MKLOC(loc, "GOMP_single_copy_start");
166   KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));
167 
168   if (!TCR_4(__kmp_init_parallel))
169     __kmp_parallel_initialize();
170 
171   // If this is the first thread to enter, return NULL.  The generated code will
172   // then call GOMP_single_copy_end() for this thread only, with the
173   // copyprivate data pointer as an argument.
174   if (__kmp_enter_single(gtid, &loc, FALSE))
175     return NULL;
176 
177 // Wait for the first thread to set the copyprivate data pointer,
178 // and for all other threads to reach this point.
179 
180 #if OMPT_SUPPORT && OMPT_OPTIONAL
181   ompt_frame_t *ompt_frame;
182   if (ompt_enabled.enabled) {
183     __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
184     ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
185     OMPT_STORE_RETURN_ADDRESS(gtid);
186   }
187 #endif
188   __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
189 
190   // Retrieve the value of the copyprivate data point, and wait for all
191   // threads to do likewise, then return.
192   retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
193 #if OMPT_SUPPORT && OMPT_OPTIONAL
194   if (ompt_enabled.enabled) {
195     OMPT_STORE_RETURN_ADDRESS(gtid);
196   }
197 #endif
198   __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
199 #if OMPT_SUPPORT && OMPT_OPTIONAL
200   if (ompt_enabled.enabled) {
201     ompt_frame->enter_frame = NULL;
202   }
203 #endif
204   return retval;
205 }
206 
207 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data) {
208   int gtid = __kmp_get_gtid();
209   KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));
210 
211   // Set the copyprivate data pointer fo the team, then hit the barrier so that
212   // the other threads will continue on and read it.  Hit another barrier before
213   // continuing, so that the know that the copyprivate data pointer has been
214   // propagated to all threads before trying to reuse the t_copypriv_data field.
215   __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
216 #if OMPT_SUPPORT && OMPT_OPTIONAL
217   ompt_frame_t *ompt_frame;
218   if (ompt_enabled.enabled) {
219     __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
220     ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
221     OMPT_STORE_RETURN_ADDRESS(gtid);
222   }
223 #endif
224   __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
225 #if OMPT_SUPPORT && OMPT_OPTIONAL
226   if (ompt_enabled.enabled) {
227     OMPT_STORE_RETURN_ADDRESS(gtid);
228   }
229 #endif
230   __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
231 #if OMPT_SUPPORT && OMPT_OPTIONAL
232   if (ompt_enabled.enabled) {
233     ompt_frame->enter_frame = NULL;
234   }
235 #endif
236 }
237 
238 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_START)(void) {
239   int gtid = __kmp_entry_gtid();
240   MKLOC(loc, "GOMP_ordered_start");
241   KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
242 #if OMPT_SUPPORT && OMPT_OPTIONAL
243   OMPT_STORE_RETURN_ADDRESS(gtid);
244 #endif
245   __kmpc_ordered(&loc, gtid);
246 }
247 
248 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_END)(void) {
249   int gtid = __kmp_get_gtid();
250   MKLOC(loc, "GOMP_ordered_end");
251   KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
252 #if OMPT_SUPPORT && OMPT_OPTIONAL
253   OMPT_STORE_RETURN_ADDRESS(gtid);
254 #endif
255   __kmpc_end_ordered(&loc, gtid);
256 }
257 
258 // Dispatch macro defs
259 //
260 // They come in two flavors: 64-bit unsigned, and either 32-bit signed
261 // (IA-32 architecture) or 64-bit signed (Intel(R) 64).
262 
263 #if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
264 #define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4
265 #define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4
266 #define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4
267 #else
268 #define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_8
269 #define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_8
270 #define KMP_DISPATCH_NEXT __kmpc_dispatch_next_8
271 #endif /* KMP_ARCH_X86 */
272 
273 #define KMP_DISPATCH_INIT_ULL __kmp_aux_dispatch_init_8u
274 #define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u
275 #define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u
276 
277 // The parallel contruct
278 
279 #ifndef KMP_DEBUG
280 static
281 #endif /* KMP_DEBUG */
282     void
283     __kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
284                                  void *data) {
285 #if OMPT_SUPPORT
286   kmp_info_t *thr;
287   ompt_frame_t *ompt_frame;
288   omp_state_t enclosing_state;
289 
290   if (ompt_enabled.enabled) {
291     // get pointer to thread data structure
292     thr = __kmp_threads[*gtid];
293 
294     // save enclosing task state; set current state for task
295     enclosing_state = thr->th.ompt_thread_info.state;
296     thr->th.ompt_thread_info.state = omp_state_work_parallel;
297 
298     // set task frame
299     __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
300     ompt_frame->exit_frame = OMPT_GET_FRAME_ADDRESS(0);
301   }
302 #endif
303 
304   task(data);
305 
306 #if OMPT_SUPPORT
307   if (ompt_enabled.enabled) {
308     // clear task frame
309     ompt_frame->exit_frame = NULL;
310 
311     // restore enclosing state
312     thr->th.ompt_thread_info.state = enclosing_state;
313   }
314 #endif
315 }
316 
317 #ifndef KMP_DEBUG
318 static
319 #endif /* KMP_DEBUG */
320     void
321     __kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
322                                           void (*task)(void *), void *data,
323                                           unsigned num_threads, ident_t *loc,
324                                           enum sched_type schedule, long start,
325                                           long end, long incr,
326                                           long chunk_size) {
327 // Intialize the loop worksharing construct.
328 
329 #if OMPT_SUPPORT
330   if (ompt_enabled.enabled)
331     OMPT_STORE_RETURN_ADDRESS(*gtid);
332 #endif
333   KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
334                     schedule != kmp_sch_static);
335 
336 #if OMPT_SUPPORT
337   kmp_info_t *thr;
338   ompt_frame_t *ompt_frame;
339   omp_state_t enclosing_state;
340 
341   if (ompt_enabled.enabled) {
342     thr = __kmp_threads[*gtid];
343     // save enclosing task state; set current state for task
344     enclosing_state = thr->th.ompt_thread_info.state;
345     thr->th.ompt_thread_info.state = omp_state_work_parallel;
346 
347     // set task frame
348     __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
349     ompt_frame->exit_frame = OMPT_GET_FRAME_ADDRESS(0);
350   }
351 #endif
352 
353   // Now invoke the microtask.
354   task(data);
355 
356 #if OMPT_SUPPORT
357   if (ompt_enabled.enabled) {
358     // clear task frame
359     ompt_frame->exit_frame = NULL;
360 
361     // reset enclosing state
362     thr->th.ompt_thread_info.state = enclosing_state;
363   }
364 #endif
365 }
366 
367 #ifndef KMP_DEBUG
368 static
369 #endif /* KMP_DEBUG */
370     void
371     __kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *),
372                          microtask_t wrapper, int argc, ...) {
373   int rc;
374   kmp_info_t *thr = __kmp_threads[gtid];
375   kmp_team_t *team = thr->th.th_team;
376   int tid = __kmp_tid_from_gtid(gtid);
377 
378   va_list ap;
379   va_start(ap, argc);
380 
381   rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc, wrapper,
382                        __kmp_invoke_task_func,
383 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
384                        &ap
385 #else
386                        ap
387 #endif
388                        );
389 
390   va_end(ap);
391 
392   if (rc) {
393     __kmp_run_before_invoked_task(gtid, tid, thr, team);
394   }
395 
396 #if OMPT_SUPPORT
397   int ompt_team_size;
398   if (ompt_enabled.enabled) {
399     ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
400     ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
401 
402     // implicit task callback
403     if (ompt_enabled.ompt_callback_implicit_task) {
404       ompt_team_size = __kmp_team_from_gtid(gtid)->t.t_nproc;
405       ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
406           ompt_scope_begin, &(team_info->parallel_data),
407           &(task_info->task_data), ompt_team_size, __kmp_tid_from_gtid(gtid));
408     }
409     thr->th.ompt_thread_info.state = omp_state_work_parallel;
410   }
411 #endif
412 }
413 
414 static void __kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid,
415                                            void (*task)(void *)) {
416 #if OMPT_SUPPORT
417   OMPT_STORE_RETURN_ADDRESS(gtid);
418 #endif
419   __kmp_serialized_parallel(loc, gtid);
420 }
421 
422 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *),
423                                                        void *data,
424                                                        unsigned num_threads) {
425   int gtid = __kmp_entry_gtid();
426 
427 #if OMPT_SUPPORT
428   ompt_frame_t *parent_frame, *frame;
429 
430   if (ompt_enabled.enabled) {
431     __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
432     parent_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
433     OMPT_STORE_RETURN_ADDRESS(gtid);
434   }
435 #endif
436 
437   MKLOC(loc, "GOMP_parallel_start");
438   KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));
439 
440   if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
441     if (num_threads != 0) {
442       __kmp_push_num_threads(&loc, gtid, num_threads);
443     }
444     __kmp_GOMP_fork_call(&loc, gtid, task,
445                          (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task,
446                          data);
447   } else {
448     __kmp_GOMP_serialized_parallel(&loc, gtid, task);
449   }
450 
451 #if OMPT_SUPPORT
452   if (ompt_enabled.enabled) {
453     __ompt_get_task_info_internal(0, NULL, NULL, &frame, NULL, NULL);
454     frame->exit_frame = OMPT_GET_FRAME_ADDRESS(1);
455   }
456 #endif
457 }
458 
459 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(void) {
460   int gtid = __kmp_get_gtid();
461   kmp_info_t *thr;
462   int ompt_team_size = __kmp_team_from_gtid(gtid)->t.t_nproc;
463 
464   thr = __kmp_threads[gtid];
465 
466   MKLOC(loc, "GOMP_parallel_end");
467   KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));
468 
469   if (!thr->th.th_team->t.t_serialized) {
470     __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
471                                  thr->th.th_team);
472 
473 #if OMPT_SUPPORT
474     if (ompt_enabled.enabled) {
475       // Implicit task is finished here, in the barrier we might schedule
476       // deferred tasks,
477       // these don't see the implicit task on the stack
478       OMPT_CUR_TASK_INFO(thr)->frame.exit_frame = NULL;
479     }
480 #endif
481 
482     __kmp_join_call(&loc, gtid
483 #if OMPT_SUPPORT
484                     ,
485                     fork_context_gnu
486 #endif
487                     );
488   } else {
489     __kmpc_end_serialized_parallel(&loc, gtid);
490   }
491 }
492 
493 // Loop worksharing constructs
494 
495 // The Gnu codegen passes in an exclusive upper bound for the overall range,
496 // but the libguide dispatch code expects an inclusive upper bound, hence the
497 // "end - incr" 5th argument to KMP_DISPATCH_INIT (and the " ub - str" 11th
498 // argument to __kmp_GOMP_fork_call).
499 //
500 // Conversely, KMP_DISPATCH_NEXT returns and inclusive upper bound in *p_ub,
501 // but the Gnu codegen expects an excluside upper bound, so the adjustment
502 // "*p_ub += stride" compenstates for the discrepancy.
503 //
504 // Correction: the gnu codegen always adjusts the upper bound by +-1, not the
505 // stride value.  We adjust the dispatch parameters accordingly (by +-1), but
506 // we still adjust p_ub by the actual stride value.
507 //
508 // The "runtime" versions do not take a chunk_sz parameter.
509 //
510 // The profile lib cannot support construct checking of unordered loops that
511 // are predetermined by the compiler to be statically scheduled, as the gcc
512 // codegen will not always emit calls to GOMP_loop_static_next() to get the
513 // next iteration.  Instead, it emits inline code to call omp_get_thread_num()
514 // num and calculate the iteration space using the result.  It doesn't do this
515 // with ordered static loop, so they can be checked.
516 
517 #if OMPT_SUPPORT
518 #define IF_OMPT_SUPPORT(code) code
519 #else
520 #define IF_OMPT_SUPPORT(code)
521 #endif
522 
523 #define LOOP_START(func, schedule)                                             \
524   int func(long lb, long ub, long str, long chunk_sz, long *p_lb,              \
525            long *p_ub) {                                                       \
526     int status;                                                                \
527     long stride;                                                               \
528     int gtid = __kmp_entry_gtid();                                             \
529     MKLOC(loc, #func);                                                         \
530     KA_TRACE(20,                                                               \
531              (#func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
532               gtid, lb, ub, str, chunk_sz));                                   \
533                                                                                \
534     if ((str > 0) ? (lb < ub) : (lb > ub)) {                                   \
535       IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);)                        \
536       KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb,                            \
537                         (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz,        \
538                         (schedule) != kmp_sch_static);                         \
539       IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);)                        \
540       status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb,            \
541                                  (kmp_int *)p_ub, (kmp_int *)&stride);         \
542       if (status) {                                                            \
543         KMP_DEBUG_ASSERT(stride == str);                                       \
544         *p_ub += (str > 0) ? 1 : -1;                                           \
545       }                                                                        \
546     } else {                                                                   \
547       status = 0;                                                              \
548     }                                                                          \
549                                                                                \
550     KA_TRACE(20,                                                               \
551              (#func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n",   \
552               gtid, *p_lb, *p_ub, status));                                    \
553     return status;                                                             \
554   }
555 
556 #define LOOP_RUNTIME_START(func, schedule)                                     \
557   int func(long lb, long ub, long str, long *p_lb, long *p_ub) {               \
558     int status;                                                                \
559     long stride;                                                               \
560     long chunk_sz = 0;                                                         \
561     int gtid = __kmp_entry_gtid();                                             \
562     MKLOC(loc, #func);                                                         \
563     KA_TRACE(20,                                                               \
564              (#func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n",    \
565               gtid, lb, ub, str, chunk_sz));                                   \
566                                                                                \
567     if ((str > 0) ? (lb < ub) : (lb > ub)) {                                   \
568       IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);)                        \
569       KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb,                            \
570                         (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
571       IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);)                        \
572       status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb,            \
573                                  (kmp_int *)p_ub, (kmp_int *)&stride);         \
574       if (status) {                                                            \
575         KMP_DEBUG_ASSERT(stride == str);                                       \
576         *p_ub += (str > 0) ? 1 : -1;                                           \
577       }                                                                        \
578     } else {                                                                   \
579       status = 0;                                                              \
580     }                                                                          \
581                                                                                \
582     KA_TRACE(20,                                                               \
583              (#func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n",   \
584               gtid, *p_lb, *p_ub, status));                                    \
585     return status;                                                             \
586   }
587 
588 #define LOOP_NEXT(func, fini_code)                                             \
589   int func(long *p_lb, long *p_ub) {                                           \
590     int status;                                                                \
591     long stride;                                                               \
592     int gtid = __kmp_get_gtid();                                               \
593     MKLOC(loc, #func);                                                         \
594     KA_TRACE(20, (#func ": T#%d\n", gtid));                                    \
595                                                                                \
596     IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);)                          \
597     fini_code status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb,    \
598                                          (kmp_int *)p_ub, (kmp_int *)&stride); \
599     if (status) {                                                              \
600       *p_ub += (stride > 0) ? 1 : -1;                                          \
601     }                                                                          \
602                                                                                \
603     KA_TRACE(20,                                                               \
604              (#func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, "    \
605                     "returning %d\n",                                          \
606               gtid, *p_lb, *p_ub, stride, status));                            \
607     return status;                                                             \
608   }
609 
610 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
611 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
612 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START),
613            kmp_sch_dynamic_chunked)
614 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
615 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_START),
616            kmp_sch_guided_chunked)
617 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
618 LOOP_RUNTIME_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_START),
619                    kmp_sch_runtime)
620 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})
621 
622 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START),
623            kmp_ord_static)
624 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT),
625           { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
626 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START),
627            kmp_ord_dynamic_chunked)
628 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT),
629           { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
630 LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START),
631            kmp_ord_guided_chunked)
632 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT),
633           { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
634 LOOP_RUNTIME_START(
635     KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START),
636     kmp_ord_runtime)
637 LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT),
638           { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
639 
640 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END)(void) {
641   int gtid = __kmp_get_gtid();
642   KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))
643 
644 #if OMPT_SUPPORT && OMPT_OPTIONAL
645   ompt_frame_t *ompt_frame;
646   if (ompt_enabled.enabled) {
647     __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
648     ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
649     OMPT_STORE_RETURN_ADDRESS(gtid);
650   }
651 #endif
652   __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
653 #if OMPT_SUPPORT && OMPT_OPTIONAL
654   if (ompt_enabled.enabled) {
655     ompt_frame->enter_frame = NULL;
656   }
657 #endif
658 
659   KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
660 }
661 
662 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void) {
663   KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
664 }
665 
666 // Unsigned long long loop worksharing constructs
667 //
668 // These are new with gcc 4.4
669 
670 #define LOOP_START_ULL(func, schedule)                                         \
671   int func(int up, unsigned long long lb, unsigned long long ub,               \
672            unsigned long long str, unsigned long long chunk_sz,                \
673            unsigned long long *p_lb, unsigned long long *p_ub) {               \
674     int status;                                                                \
675     long long str2 = up ? ((long long)str) : -((long long)str);                \
676     long long stride;                                                          \
677     int gtid = __kmp_entry_gtid();                                             \
678     MKLOC(loc, #func);                                                         \
679                                                                                \
680     KA_TRACE(                                                                  \
681         20,                                                                    \
682         (#func                                                                 \
683          ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
684          gtid, up, lb, ub, str, chunk_sz));                                    \
685                                                                                \
686     if ((str > 0) ? (lb < ub) : (lb > ub)) {                                   \
687       KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb,                        \
688                             (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz,  \
689                             (schedule) != kmp_sch_static);                     \
690       status =                                                                 \
691           KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb,          \
692                                 (kmp_uint64 *)p_ub, (kmp_int64 *)&stride);     \
693       if (status) {                                                            \
694         KMP_DEBUG_ASSERT(stride == str2);                                      \
695         *p_ub += (str > 0) ? 1 : -1;                                           \
696       }                                                                        \
697     } else {                                                                   \
698       status = 0;                                                              \
699     }                                                                          \
700                                                                                \
701     KA_TRACE(20,                                                               \
702              (#func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
703               gtid, *p_lb, *p_ub, status));                                    \
704     return status;                                                             \
705   }
706 
707 #define LOOP_RUNTIME_START_ULL(func, schedule)                                 \
708   int func(int up, unsigned long long lb, unsigned long long ub,               \
709            unsigned long long str, unsigned long long *p_lb,                   \
710            unsigned long long *p_ub) {                                         \
711     int status;                                                                \
712     long long str2 = up ? ((long long)str) : -((long long)str);                \
713     unsigned long long stride;                                                 \
714     unsigned long long chunk_sz = 0;                                           \
715     int gtid = __kmp_entry_gtid();                                             \
716     MKLOC(loc, #func);                                                         \
717                                                                                \
718     KA_TRACE(                                                                  \
719         20,                                                                    \
720         (#func                                                                 \
721          ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
722          gtid, up, lb, ub, str, chunk_sz));                                    \
723                                                                                \
724     if ((str > 0) ? (lb < ub) : (lb > ub)) {                                   \
725       KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb,                        \
726                             (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz,  \
727                             TRUE);                                             \
728       status =                                                                 \
729           KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb,          \
730                                 (kmp_uint64 *)p_ub, (kmp_int64 *)&stride);     \
731       if (status) {                                                            \
732         KMP_DEBUG_ASSERT((long long)stride == str2);                           \
733         *p_ub += (str > 0) ? 1 : -1;                                           \
734       }                                                                        \
735     } else {                                                                   \
736       status = 0;                                                              \
737     }                                                                          \
738                                                                                \
739     KA_TRACE(20,                                                               \
740              (#func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
741               gtid, *p_lb, *p_ub, status));                                    \
742     return status;                                                             \
743   }
744 
745 #define LOOP_NEXT_ULL(func, fini_code)                                         \
746   int func(unsigned long long *p_lb, unsigned long long *p_ub) {               \
747     int status;                                                                \
748     long long stride;                                                          \
749     int gtid = __kmp_get_gtid();                                               \
750     MKLOC(loc, #func);                                                         \
751     KA_TRACE(20, (#func ": T#%d\n", gtid));                                    \
752                                                                                \
753     fini_code status =                                                         \
754         KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb,            \
755                               (kmp_uint64 *)p_ub, (kmp_int64 *)&stride);       \
756     if (status) {                                                              \
757       *p_ub += (stride > 0) ? 1 : -1;                                          \
758     }                                                                          \
759                                                                                \
760     KA_TRACE(20,                                                               \
761              (#func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
762                     "returning %d\n",                                          \
763               gtid, *p_lb, *p_ub, stride, status));                            \
764     return status;                                                             \
765   }
766 
767 LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START),
768                kmp_sch_static)
769 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
770 LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START),
771                kmp_sch_dynamic_chunked)
772 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
773 LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START),
774                kmp_sch_guided_chunked)
775 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
776 LOOP_RUNTIME_START_ULL(
777     KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
778 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})
779 
780 LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START),
781                kmp_ord_static)
782 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT),
783               { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
784 LOOP_START_ULL(
785     KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START),
786     kmp_ord_dynamic_chunked)
787 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT),
788               { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
789 LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START),
790                kmp_ord_guided_chunked)
791 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT),
792               { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
793 LOOP_RUNTIME_START_ULL(
794     KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START),
795     kmp_ord_runtime)
796 LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT),
797               { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
798 
799 // Combined parallel / loop worksharing constructs
800 //
801 // There are no ull versions (yet).
802 
803 #define PARALLEL_LOOP_START(func, schedule, ompt_pre, ompt_post)               \
804   void func(void (*task)(void *), void *data, unsigned num_threads, long lb,   \
805             long ub, long str, long chunk_sz) {                                \
806     int gtid = __kmp_entry_gtid();                                             \
807     MKLOC(loc, #func);                                                         \
808     KA_TRACE(20,                                                               \
809              (#func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
810               gtid, lb, ub, str, chunk_sz));                                   \
811                                                                                \
812     ompt_pre();                                                                \
813                                                                                \
814     if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {                       \
815       if (num_threads != 0) {                                                  \
816         __kmp_push_num_threads(&loc, gtid, num_threads);                       \
817       }                                                                        \
818       __kmp_GOMP_fork_call(&loc, gtid, task,                                   \
819                            (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \
820                            9, task, data, num_threads, &loc, (schedule), lb,   \
821                            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz);    \
822     } else {                                                                   \
823       __kmp_GOMP_serialized_parallel(&loc, gtid, task);                        \
824     }                                                                          \
825                                                                                \
826     KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb,                              \
827                       (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz,          \
828                       (schedule) != kmp_sch_static);                           \
829                                                                                \
830     ompt_post();                                                               \
831                                                                                \
832     KA_TRACE(20, (#func " exit: T#%d\n", gtid));                               \
833   }
834 
835 #if OMPT_SUPPORT && OMPT_OPTIONAL
836 
837 #define OMPT_LOOP_PRE()                                                        \
838   ompt_frame_t *parent_frame;                                                  \
839   if (ompt_enabled.enabled) {                                                  \
840     __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);   \
841     parent_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);                     \
842     OMPT_STORE_RETURN_ADDRESS(gtid);                                           \
843   }
844 
845 #define OMPT_LOOP_POST()                                                       \
846   if (ompt_enabled.enabled) {                                                  \
847     parent_frame->enter_frame = NULL;                                          \
848   }
849 
850 #else
851 
852 #define OMPT_LOOP_PRE()
853 
854 #define OMPT_LOOP_POST()
855 
856 #endif
857 
858 PARALLEL_LOOP_START(
859     KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START),
860     kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
861 PARALLEL_LOOP_START(
862     KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START),
863     kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
864 PARALLEL_LOOP_START(
865     KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START),
866     kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
867 PARALLEL_LOOP_START(
868     KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START),
869     kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
870 
871 // Tasking constructs
872 
873 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data,
874                                              void (*copy_func)(void *, void *),
875                                              long arg_size, long arg_align,
876                                              bool if_cond, unsigned gomp_flags
877 #if OMP_40_ENABLED
878                                              ,
879                                              void **depend
880 #endif
881                                              ) {
882   MKLOC(loc, "GOMP_task");
883   int gtid = __kmp_entry_gtid();
884   kmp_int32 flags = 0;
885   kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
886 
887   KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));
888 
889   // The low-order bit is the "untied" flag
890   if (!(gomp_flags & 1)) {
891     input_flags->tiedness = 1;
892   }
893   // The second low-order bit is the "final" flag
894   if (gomp_flags & 2) {
895     input_flags->final = 1;
896   }
897   input_flags->native = 1;
898   // __kmp_task_alloc() sets up all other flags
899 
900   if (!if_cond) {
901     arg_size = 0;
902   }
903 
904   kmp_task_t *task = __kmp_task_alloc(
905       &loc, gtid, input_flags, sizeof(kmp_task_t),
906       arg_size ? arg_size + arg_align - 1 : 0, (kmp_routine_entry_t)func);
907 
908   if (arg_size > 0) {
909     if (arg_align > 0) {
910       task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) /
911                                arg_align * arg_align);
912     }
913     // else error??
914 
915     if (copy_func) {
916       (*copy_func)(task->shareds, data);
917     } else {
918       KMP_MEMCPY(task->shareds, data, arg_size);
919     }
920   }
921 
922 #if OMPT_SUPPORT
923   kmp_taskdata_t *current_task;
924   if (ompt_enabled.enabled) {
925     OMPT_STORE_RETURN_ADDRESS(gtid);
926     current_task = __kmp_threads[gtid]->th.th_current_task;
927     current_task->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
928   }
929 #endif
930 
931   if (if_cond) {
932 #if OMP_40_ENABLED
933     if (gomp_flags & 8) {
934       KMP_ASSERT(depend);
935       const size_t ndeps = (kmp_intptr_t)depend[0];
936       const size_t nout = (kmp_intptr_t)depend[1];
937       kmp_depend_info_t dep_list[ndeps];
938 
939       for (size_t i = 0U; i < ndeps; i++) {
940         dep_list[i].base_addr = (kmp_intptr_t)depend[2U + i];
941         dep_list[i].len = 0U;
942         dep_list[i].flags.in = 1;
943         dep_list[i].flags.out = (i < nout);
944       }
945       __kmpc_omp_task_with_deps(&loc, gtid, task, ndeps, dep_list, 0, NULL);
946     } else {
947 #endif
948       __kmpc_omp_task(&loc, gtid, task);
949     }
950   } else {
951 #if OMPT_SUPPORT
952     ompt_thread_info_t oldInfo;
953     kmp_info_t *thread;
954     kmp_taskdata_t *taskdata;
955     kmp_taskdata_t *current_task;
956     if (ompt_enabled.enabled) {
957       // Store the threads states and restore them after the task
958       thread = __kmp_threads[gtid];
959       taskdata = KMP_TASK_TO_TASKDATA(task);
960       oldInfo = thread->th.ompt_thread_info;
961       thread->th.ompt_thread_info.wait_id = 0;
962       thread->th.ompt_thread_info.state = omp_state_work_parallel;
963       taskdata->ompt_task_info.frame.exit_frame = OMPT_GET_FRAME_ADDRESS(0);
964       OMPT_STORE_RETURN_ADDRESS(gtid);
965     }
966 #endif
967 
968     __kmpc_omp_task_begin_if0(&loc, gtid, task);
969     func(data);
970     __kmpc_omp_task_complete_if0(&loc, gtid, task);
971 
972 #if OMPT_SUPPORT
973     if (ompt_enabled.enabled) {
974       thread->th.ompt_thread_info = oldInfo;
975       taskdata->ompt_task_info.frame.exit_frame = NULL;
976     }
977 #endif
978   }
979 #if OMPT_SUPPORT
980   if (ompt_enabled.enabled) {
981     current_task->ompt_task_info.frame.enter_frame = NULL;
982   }
983 #endif
984 
985   KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
986 }
987 
988 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKWAIT)(void) {
989   MKLOC(loc, "GOMP_taskwait");
990   int gtid = __kmp_entry_gtid();
991 
992 #if OMPT_SUPPORT
993   if (ompt_enabled.enabled)
994     OMPT_STORE_RETURN_ADDRESS(gtid);
995 #endif
996 
997   KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));
998 
999   __kmpc_omp_taskwait(&loc, gtid);
1000 
1001   KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
1002 }
1003 
1004 // Sections worksharing constructs
1005 //
1006 // For the sections construct, we initialize a dynamically scheduled loop
1007 // worksharing construct with lb 1 and stride 1, and use the iteration #'s
1008 // that its returns as sections ids.
1009 //
1010 // There are no special entry points for ordered sections, so we always use
1011 // the dynamically scheduled workshare, even if the sections aren't ordered.
1012 
1013 unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count) {
1014   int status;
1015   kmp_int lb, ub, stride;
1016   int gtid = __kmp_entry_gtid();
1017   MKLOC(loc, "GOMP_sections_start");
1018   KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));
1019 
1020   KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);
1021 
1022   status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
1023   if (status) {
1024     KMP_DEBUG_ASSERT(stride == 1);
1025     KMP_DEBUG_ASSERT(lb > 0);
1026     KMP_ASSERT(lb == ub);
1027   } else {
1028     lb = 0;
1029   }
1030 
1031   KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
1032                 (unsigned)lb));
1033   return (unsigned)lb;
1034 }
1035 
1036 unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void) {
1037   int status;
1038   kmp_int lb, ub, stride;
1039   int gtid = __kmp_get_gtid();
1040   MKLOC(loc, "GOMP_sections_next");
1041   KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));
1042 
1043   status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
1044   if (status) {
1045     KMP_DEBUG_ASSERT(stride == 1);
1046     KMP_DEBUG_ASSERT(lb > 0);
1047     KMP_ASSERT(lb == ub);
1048   } else {
1049     lb = 0;
1050   }
1051 
1052   KA_TRACE(
1053       20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid, (unsigned)lb));
1054   return (unsigned)lb;
1055 }
1056 
1057 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(
1058     void (*task)(void *), void *data, unsigned num_threads, unsigned count) {
1059   int gtid = __kmp_entry_gtid();
1060 
1061 #if OMPT_SUPPORT
1062   ompt_frame_t *parent_frame;
1063 
1064   if (ompt_enabled.enabled) {
1065     __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
1066     parent_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
1067     OMPT_STORE_RETURN_ADDRESS(gtid);
1068   }
1069 #endif
1070 
1071   MKLOC(loc, "GOMP_parallel_sections_start");
1072   KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));
1073 
1074   if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
1075     if (num_threads != 0) {
1076       __kmp_push_num_threads(&loc, gtid, num_threads);
1077     }
1078     __kmp_GOMP_fork_call(&loc, gtid, task,
1079                          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
1080                          task, data, num_threads, &loc, kmp_nm_dynamic_chunked,
1081                          (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1);
1082   } else {
1083     __kmp_GOMP_serialized_parallel(&loc, gtid, task);
1084   }
1085 
1086 #if OMPT_SUPPORT
1087   if (ompt_enabled.enabled) {
1088     parent_frame->enter_frame = NULL;
1089   }
1090 #endif
1091 
1092   KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);
1093 
1094   KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
1095 }
1096 
1097 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END)(void) {
1098   int gtid = __kmp_get_gtid();
1099   KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))
1100 
1101 #if OMPT_SUPPORT
1102   ompt_frame_t *ompt_frame;
1103   if (ompt_enabled.enabled) {
1104     __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
1105     ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
1106     OMPT_STORE_RETURN_ADDRESS(gtid);
1107   }
1108 #endif
1109   __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
1110 #if OMPT_SUPPORT
1111   if (ompt_enabled.enabled) {
1112     ompt_frame->enter_frame = NULL;
1113   }
1114 #endif
1115 
1116   KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
1117 }
1118 
1119 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void) {
1120   KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
1121 }
1122 
1123 // libgomp has an empty function for GOMP_taskyield as of 2013-10-10
1124 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKYIELD)(void) {
1125   KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid()))
1126   return;
1127 }
1128 
1129 #if OMP_40_ENABLED // these are new GOMP_4.0 entry points
1130 
1131 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *),
1132                                                  void *data,
1133                                                  unsigned num_threads,
1134                                                  unsigned int flags) {
1135   int gtid = __kmp_entry_gtid();
1136   MKLOC(loc, "GOMP_parallel");
1137   KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid));
1138 
1139 #if OMPT_SUPPORT
1140   ompt_task_info_t *parent_task_info, *task_info;
1141   if (ompt_enabled.enabled) {
1142     parent_task_info = __ompt_get_task_info_object(0);
1143     parent_task_info->frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
1144     OMPT_STORE_RETURN_ADDRESS(gtid);
1145   }
1146 #endif
1147   if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
1148     if (num_threads != 0) {
1149       __kmp_push_num_threads(&loc, gtid, num_threads);
1150     }
1151     if (flags != 0) {
1152       __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
1153     }
1154     __kmp_GOMP_fork_call(&loc, gtid, task,
1155                          (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task,
1156                          data);
1157   } else {
1158     __kmp_GOMP_serialized_parallel(&loc, gtid, task);
1159   }
1160 #if OMPT_SUPPORT
1161   if (ompt_enabled.enabled) {
1162     task_info = __ompt_get_task_info_object(0);
1163     task_info->frame.exit_frame = OMPT_GET_FRAME_ADDRESS(0);
1164   }
1165 #endif
1166   task(data);
1167 #if OMPT_SUPPORT
1168   if (ompt_enabled.enabled) {
1169     OMPT_STORE_RETURN_ADDRESS(gtid);
1170   }
1171 #endif
1172   KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)();
1173 #if OMPT_SUPPORT
1174   if (ompt_enabled.enabled) {
1175     task_info->frame.exit_frame = NULL;
1176     parent_task_info->frame.enter_frame = NULL;
1177   }
1178 #endif
1179 }
1180 
1181 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task)(void *),
1182                                                           void *data,
1183                                                           unsigned num_threads,
1184                                                           unsigned count,
1185                                                           unsigned flags) {
1186   int gtid = __kmp_entry_gtid();
1187   MKLOC(loc, "GOMP_parallel_sections");
1188   KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid));
1189 
1190 #if OMPT_SUPPORT
1191   OMPT_STORE_RETURN_ADDRESS(gtid);
1192 #endif
1193 
1194   if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
1195     if (num_threads != 0) {
1196       __kmp_push_num_threads(&loc, gtid, num_threads);
1197     }
1198     if (flags != 0) {
1199       __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
1200     }
1201     __kmp_GOMP_fork_call(&loc, gtid, task,
1202                          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
1203                          task, data, num_threads, &loc, kmp_nm_dynamic_chunked,
1204                          (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1);
1205   } else {
1206     __kmp_GOMP_serialized_parallel(&loc, gtid, task);
1207   }
1208 
1209   KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);
1210 
1211   task(data);
1212   KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)();
1213   KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid));
1214 }
1215 
1216 #define PARALLEL_LOOP(func, schedule, ompt_pre, ompt_post)                     \
1217   void func(void (*task)(void *), void *data, unsigned num_threads, long lb,   \
1218             long ub, long str, long chunk_sz, unsigned flags) {                \
1219     int gtid = __kmp_entry_gtid();                                             \
1220     MKLOC(loc, #func);                                                         \
1221     KA_TRACE(20,                                                               \
1222              (#func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
1223               gtid, lb, ub, str, chunk_sz));                                   \
1224                                                                                \
1225     ompt_pre();                                                                \
1226     if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {                       \
1227       if (num_threads != 0) {                                                  \
1228         __kmp_push_num_threads(&loc, gtid, num_threads);                       \
1229       }                                                                        \
1230       if (flags != 0) {                                                        \
1231         __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);              \
1232       }                                                                        \
1233       __kmp_GOMP_fork_call(&loc, gtid, task,                                   \
1234                            (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \
1235                            9, task, data, num_threads, &loc, (schedule), lb,   \
1236                            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz);    \
1237     } else {                                                                   \
1238       __kmp_GOMP_serialized_parallel(&loc, gtid, task);                        \
1239     }                                                                          \
1240                                                                                \
1241     IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);)                          \
1242     KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb,                              \
1243                       (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz,          \
1244                       (schedule) != kmp_sch_static);                           \
1245     task(data);                                                                \
1246     KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)();                         \
1247     ompt_post();                                                               \
1248                                                                                \
1249     KA_TRACE(20, (#func " exit: T#%d\n", gtid));                               \
1250   }
1251 
1252 PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC),
1253               kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
1254 PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC),
1255               kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
1256 PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED),
1257               kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
1258 PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME),
1259               kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
1260 
1261 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_START)(void) {
1262   int gtid = __kmp_entry_gtid();
1263   MKLOC(loc, "GOMP_taskgroup_start");
1264   KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid));
1265 
1266 #if OMPT_SUPPORT
1267   if (ompt_enabled.enabled)
1268     OMPT_STORE_RETURN_ADDRESS(gtid);
1269 #endif
1270 
1271   __kmpc_taskgroup(&loc, gtid);
1272 
1273   return;
1274 }
1275 
1276 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_END)(void) {
1277   int gtid = __kmp_get_gtid();
1278   MKLOC(loc, "GOMP_taskgroup_end");
1279   KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid));
1280 
1281 #if OMPT_SUPPORT
1282   if (ompt_enabled.enabled)
1283     OMPT_STORE_RETURN_ADDRESS(gtid);
1284 #endif
1285 
1286   __kmpc_end_taskgroup(&loc, gtid);
1287 
1288   return;
1289 }
1290 
1291 #ifndef KMP_DEBUG
1292 static
1293 #endif /* KMP_DEBUG */
1294     kmp_int32
1295     __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) {
1296   kmp_int32 cncl_kind = 0;
1297   switch (gomp_kind) {
1298   case 1:
1299     cncl_kind = cancel_parallel;
1300     break;
1301   case 2:
1302     cncl_kind = cancel_loop;
1303     break;
1304   case 4:
1305     cncl_kind = cancel_sections;
1306     break;
1307   case 8:
1308     cncl_kind = cancel_taskgroup;
1309     break;
1310   }
1311   return cncl_kind;
1312 }
1313 
1314 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which) {
1315   if (__kmp_omp_cancellation) {
1316     KMP_FATAL(NoGompCancellation);
1317   }
1318   int gtid = __kmp_get_gtid();
1319   MKLOC(loc, "GOMP_cancellation_point");
1320   KA_TRACE(20, ("GOMP_cancellation_point: T#%d\n", gtid));
1321 
1322   kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);
1323 
1324   return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
1325 }
1326 
1327 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void) {
1328   if (__kmp_omp_cancellation) {
1329     KMP_FATAL(NoGompCancellation);
1330   }
1331   KMP_FATAL(NoGompCancellation);
1332   int gtid = __kmp_get_gtid();
1333   MKLOC(loc, "GOMP_barrier_cancel");
1334   KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid));
1335 
1336   return __kmpc_cancel_barrier(&loc, gtid);
1337 }
1338 
1339 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel) {
1340   if (__kmp_omp_cancellation) {
1341     KMP_FATAL(NoGompCancellation);
1342   } else {
1343     return FALSE;
1344   }
1345 
1346   int gtid = __kmp_get_gtid();
1347   MKLOC(loc, "GOMP_cancel");
1348   KA_TRACE(20, ("GOMP_cancel: T#%d\n", gtid));
1349 
1350   kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);
1351 
1352   if (do_cancel == FALSE) {
1353     return KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCELLATION_POINT)(which);
1354   } else {
1355     return __kmpc_cancel(&loc, gtid, cncl_kind);
1356   }
1357 }
1358 
1359 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void) {
1360   if (__kmp_omp_cancellation) {
1361     KMP_FATAL(NoGompCancellation);
1362   }
1363   int gtid = __kmp_get_gtid();
1364   MKLOC(loc, "GOMP_sections_end_cancel");
1365   KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid));
1366 
1367   return __kmpc_cancel_barrier(&loc, gtid);
1368 }
1369 
1370 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void) {
1371   if (__kmp_omp_cancellation) {
1372     KMP_FATAL(NoGompCancellation);
1373   }
1374   int gtid = __kmp_get_gtid();
1375   MKLOC(loc, "GOMP_loop_end_cancel");
1376   KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid));
1377 
1378   return __kmpc_cancel_barrier(&loc, gtid);
1379 }
1380 
1381 // All target functions are empty as of 2014-05-29
1382 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn)(void *),
1383                                                const void *openmp_target,
1384                                                size_t mapnum, void **hostaddrs,
1385                                                size_t *sizes,
1386                                                unsigned char *kinds) {
1387   return;
1388 }
1389 
1390 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_DATA)(
1391     int device, const void *openmp_target, size_t mapnum, void **hostaddrs,
1392     size_t *sizes, unsigned char *kinds) {
1393   return;
1394 }
1395 
1396 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_END_DATA)(void) { return; }
1397 
1398 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_UPDATE)(
1399     int device, const void *openmp_target, size_t mapnum, void **hostaddrs,
1400     size_t *sizes, unsigned char *kinds) {
1401   return;
1402 }
1403 
1404 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams,
1405                                               unsigned int thread_limit) {
1406   return;
1407 }
1408 #endif // OMP_40_ENABLED
1409 
1410 /* The following sections of code create aliases for the GOMP_* functions, then
1411    create versioned symbols using the assembler directive .symver. This is only
1412    pertinent for ELF .so library. The KMP_VERSION_SYMBOL macro is defined in
1413    kmp_os.h  */
1414 
1415 #ifdef KMP_USE_VERSION_SYMBOLS
1416 // GOMP_1.0 versioned symbols
1417 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
1418 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
1419 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
1420 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
1421 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
1422 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
1423 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
1424 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
1425 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
1426 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
1427 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
1428 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
1429 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
1430 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
1431 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10,
1432                    "GOMP_1.0");
1433 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
1434 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
1435 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
1436 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10,
1437                    "GOMP_1.0");
1438 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
1439 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
1440 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
1441 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
1442 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
1443 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
1444 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
1445 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
1446 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
1447 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10,
1448                    "GOMP_1.0");
1449 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10,
1450                    "GOMP_1.0");
1451 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10,
1452                    "GOMP_1.0");
1453 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10,
1454                    "GOMP_1.0");
1455 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
1456 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
1457 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
1458 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
1459 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
1460 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
1461 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
1462 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
1463 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");
1464 
1465 // GOMP_2.0 versioned symbols
1466 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
1467 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
1468 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
1469 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
1470 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
1471 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
1472 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20,
1473                    "GOMP_2.0");
1474 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20,
1475                    "GOMP_2.0");
1476 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20,
1477                    "GOMP_2.0");
1478 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20,
1479                    "GOMP_2.0");
1480 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20,
1481                    "GOMP_2.0");
1482 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20,
1483                    "GOMP_2.0");
1484 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20,
1485                    "GOMP_2.0");
1486 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20,
1487                    "GOMP_2.0");
1488 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
1489 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
1490 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
1491 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");
1492 
1493 // GOMP_3.0 versioned symbols
1494 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");
1495 
1496 // GOMP_4.0 versioned symbols
1497 #if OMP_40_ENABLED
1498 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
1499 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
1500 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
1501 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
1502 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
1503 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
1504 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
1505 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
1506 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
1507 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
1508 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
1509 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
1510 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
1511 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
1512 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
1513 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0");
1514 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0");
1515 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0");
1516 #endif
1517 
1518 #endif // KMP_USE_VERSION_SYMBOLS
1519 
1520 #ifdef __cplusplus
1521 } // extern "C"
1522 #endif // __cplusplus
1523