1 /*
2  * kmp_gsupport.cpp
3  */
4 
5 
6 //===----------------------------------------------------------------------===//
7 //
8 //                     The LLVM Compiler Infrastructure
9 //
10 // This file is dual licensed under the MIT and the University of Illinois Open
11 // Source Licenses. See LICENSE.txt for details.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 
16 #include "kmp.h"
17 #include "kmp_atomic.h"
18 
19 #if OMPT_SUPPORT
20 #include "ompt-specific.h"
21 #endif
22 
23 #ifdef __cplusplus
24     extern "C" {
25 #endif // __cplusplus
26 
//
// Build a per-call-site static ident_t with a placeholder ";unknown;..."
// psource string; the GOMP entry points receive no source-location
// argument, so each one fabricates its own ident_t this way.
//
#define MKLOC(loc,routine) \
    static ident_t (loc) = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;" };
29 
30 #include "kmp_ftn_os.h"
31 
//
// GOMP barrier entry point: maps the GOMP ABI onto __kmpc_barrier().
//
void
xexpand(KMP_API_NAME_GOMP_BARRIER)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_barrier");
    KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_TRACE
    ompt_frame_t * ompt_frame;
    if (ompt_enabled ) {
        // Record the application frame that re-entered the runtime so OMPT
        // tools can attribute the barrier to the caller.
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->reenter_runtime_frame = __builtin_frame_address(1);
    }
#endif
    __kmpc_barrier(&loc, gtid);
}
47 
48 
49 //
50 // Mutual exclusion
51 //
52 
53 //
// The symbol that icc/ifort generates for unnamed critical
// sections - .gomp_critical_user_ - is defined using .comm in any objects
// that reference it.  We can't reference it directly here in C code, as the
// symbol contains a ".".
//
// The RTL contains an assembly language definition of .gomp_critical_user_
// with another symbol __kmp_unnamed_critical_addr initialized with its
// address.
62 //
63 extern kmp_critical_name *__kmp_unnamed_critical_addr;
64 
65 
66 void
67 xexpand(KMP_API_NAME_GOMP_CRITICAL_START)(void)
68 {
69     int gtid = __kmp_entry_gtid();
70     MKLOC(loc, "GOMP_critical_start");
71     KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
72     __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
73 }
74 
75 
76 void
77 xexpand(KMP_API_NAME_GOMP_CRITICAL_END)(void)
78 {
79     int gtid = __kmp_get_gtid();
80     MKLOC(loc, "GOMP_critical_end");
81     KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
82     __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
83 }
84 
85 
86 void
87 xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr)
88 {
89     int gtid = __kmp_entry_gtid();
90     MKLOC(loc, "GOMP_critical_name_start");
91     KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
92     __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
93 }
94 
95 
96 void
97 xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr)
98 {
99     int gtid = __kmp_get_gtid();
100     MKLOC(loc, "GOMP_critical_name_end");
101     KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
102     __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
103 }
104 
105 
106 //
107 // The Gnu codegen tries to use locked operations to perform atomic updates
108 // inline.  If it can't, then it calls GOMP_atomic_start() before performing
109 // the update and GOMP_atomic_end() afterward, regardless of the data type.
110 //
111 
112 void
113 xexpand(KMP_API_NAME_GOMP_ATOMIC_START)(void)
114 {
115     int gtid = __kmp_entry_gtid();
116     KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));
117 
118 #if OMPT_SUPPORT
119     __ompt_thread_assign_wait_id(0);
120 #endif
121 
122     __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
123 }
124 
125 
126 void
127 xexpand(KMP_API_NAME_GOMP_ATOMIC_END)(void)
128 {
129     int gtid = __kmp_get_gtid();
130     KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));
131     __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
132 }
133 
134 
//
// GOMP single entry point.  Returns the result of __kmp_enter_single():
// nonzero for the thread chosen to execute the single region, zero for
// the others (cf. the use in GOMP_single_copy_start below).
//
int
xexpand(KMP_API_NAME_GOMP_SINGLE_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_start");
    KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
    // workshare when USE_CHECKS is defined.  We need to avoid the push,
    // as there is no corresponding GOMP_single_end() call.
    //
    return __kmp_enter_single(gtid, &loc, FALSE);
}
152 
153 
//
// GOMP single-copyprivate entry point.  The thread chosen to execute the
// region gets NULL back and later broadcasts its data pointer through
// GOMP_single_copy_end(); every other thread waits here and returns the
// broadcast pointer.
//
void *
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void)
{
    void *retval;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_copy_start");
    KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // If this is the first thread to enter, return NULL.  The generated
    // code will then call GOMP_single_copy_end() for this thread only,
    // with the copyprivate data pointer as an argument.
    //
    if (__kmp_enter_single(gtid, &loc, FALSE))
        return NULL;

    //
    // Wait for the first thread to set the copyprivate data pointer,
    // and for all other threads to reach this point.
    //
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    //
    // Retrieve the value of the copyprivate data pointer, and wait for all
    // threads to do likewise, then return.
    //
    retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    return retval;
}
187 
188 
//
// GOMP single-copyprivate exit point, called only by the thread that
// executed the single region.
//
void
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

    //
    // Set the copyprivate data pointer for the team, then hit the barrier
    // so that the other threads will continue on and read it.  Hit another
    // barrier before continuing, so that we know that the copyprivate
    // data pointer has been propagated to all threads before trying to
    // reuse the t_copypriv_data field.
    //
    __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
}
206 
207 
208 void
209 xexpand(KMP_API_NAME_GOMP_ORDERED_START)(void)
210 {
211     int gtid = __kmp_entry_gtid();
212     MKLOC(loc, "GOMP_ordered_start");
213     KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
214     __kmpc_ordered(&loc, gtid);
215 }
216 
217 
218 void
219 xexpand(KMP_API_NAME_GOMP_ORDERED_END)(void)
220 {
221     int gtid = __kmp_get_gtid();
222     MKLOC(loc, "GOMP_ordered_end");
223     KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
224     __kmpc_end_ordered(&loc, gtid);
225 }
226 
227 
228 //
229 // Dispatch macro defs
230 //
231 // They come in two flavors: 64-bit unsigned, and either 32-bit signed
232 // (IA-32 architecture) or 64-bit signed (Intel(R) 64).
233 //
234 
// Select 4-byte dispatch entry points on 32-bit architectures and 8-byte
// elsewhere; the unsigned-long-long (_ULL) variants are always 8-byte.
#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
# define KMP_DISPATCH_INIT              __kmp_aux_dispatch_init_4
# define KMP_DISPATCH_FINI_CHUNK        __kmp_aux_dispatch_fini_chunk_4
# define KMP_DISPATCH_NEXT              __kmpc_dispatch_next_4
#else
# define KMP_DISPATCH_INIT              __kmp_aux_dispatch_init_8
# define KMP_DISPATCH_FINI_CHUNK        __kmp_aux_dispatch_fini_chunk_8
# define KMP_DISPATCH_NEXT              __kmpc_dispatch_next_8
#endif /* KMP_ARCH_X86 */

# define KMP_DISPATCH_INIT_ULL          __kmp_aux_dispatch_init_8u
# define KMP_DISPATCH_FINI_CHUNK_ULL    __kmp_aux_dispatch_fini_chunk_8u
# define KMP_DISPATCH_NEXT_ULL          __kmpc_dispatch_next_8u
248 
249 
250 //
// The parallel construct
252 //
253 
//
// Adapter invoked by the KMP runtime for each team member: unpacks the
// GOMP-style outlined routine (void (*)(void *)) and its data argument,
// and runs it.  When OMPT is enabled, the thread's tool-visible state and
// exit frame are set around the user code and restored afterward.
//
#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
  void *data)
{
#if OMPT_SUPPORT
    kmp_info_t *thr;
    ompt_frame_t *ompt_frame;
    ompt_state_t enclosing_state;

    if (ompt_enabled) {
        // get pointer to thread data structure
        thr = __kmp_threads[*gtid];

        // save enclosing task state; set current state for task
        enclosing_state = thr->th.ompt_thread_info.state;
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;

        // set task frame
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
    }
#endif

    task(data);

#if OMPT_SUPPORT
    if (ompt_enabled) {
        // clear task frame
        ompt_frame->exit_runtime_frame = NULL;

        // restore enclosing state
        thr->th.ompt_thread_info.state = enclosing_state;
    }
#endif
}
292 
293 
//
// Adapter for the combined parallel-loop entry points: initializes the
// loop worksharing construct for this thread, then invokes the outlined
// routine.  OMPT state/frame handling mirrors __kmp_GOMP_microtask_wrapper.
//
#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
  void (*task)(void *), void *data, unsigned num_threads, ident_t *loc,
  enum sched_type schedule, long start, long end, long incr, long chunk_size)
{
    //
    // Initialize the loop worksharing construct.
    //
    KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
      schedule != kmp_sch_static);

#if OMPT_SUPPORT
    kmp_info_t *thr;
    ompt_frame_t *ompt_frame;
    ompt_state_t enclosing_state;

    if (ompt_enabled) {
        thr = __kmp_threads[*gtid];
        // save enclosing task state; set current state for task
        enclosing_state = thr->th.ompt_thread_info.state;
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;

        // set task frame
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
    }
#endif

    //
    // Now invoke the microtask.
    //
    task(data);

#if OMPT_SUPPORT
    if (ompt_enabled) {
        // clear task frame
        ompt_frame->exit_runtime_frame = NULL;

        // reset enclosing state
        thr->th.ompt_thread_info.state = enclosing_state;
    }
#endif
}
340 
341 
//
// Common fork helper for the GOMP parallel entry points.  Forwards the
// wrapper microtask (plus its varargs) to __kmp_fork_call() and, when the
// fork actually created a team (rc != 0), performs the master thread's
// pre-invocation bookkeeping so it can run its share of the region.
//
#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *), microtask_t wrapper, int argc,...)
{
    int rc;
    kmp_info_t *thr = __kmp_threads[gtid];
    kmp_team_t *team = thr->th.th_team;
    int tid = __kmp_tid_from_gtid(gtid);

    va_list ap;
    va_start(ap, argc);

    rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc,
#if OMPT_SUPPORT
      VOLATILE_CAST(void *) unwrapped_task,
#endif
      wrapper, __kmp_invoke_task_func,
// NOTE(review): on these targets the va_list is passed by address,
// presumably because of the platform va_list ABI — confirm before changing.
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
      &ap
#else
      ap
#endif
      );

    va_end(ap);

    if (rc) {
        __kmp_run_before_invoked_task(gtid, tid, thr, team);
    }

#if OMPT_SUPPORT
    if (ompt_enabled) {
#if OMPT_TRACE
        ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
        ompt_task_info_t *task_info = __ompt_get_taskinfo(0);

        // implicit task callback
        if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
                team_info->parallel_id, task_info->task_id);
        }
#endif
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;
    }
#endif
}
390 
//
// Serialized (team size 1) execution path for a GOMP parallel region.
// Emits the OMPT parallel-begin and implicit-task-begin callbacks and
// links a lightweight task team when a tool is attached.
//
static void
__kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid, void (*task)(void *))
{
#if OMPT_SUPPORT
    ompt_parallel_id_t ompt_parallel_id;
    if (ompt_enabled) {
        ompt_task_info_t *task_info = __ompt_get_taskinfo(0);

        ompt_parallel_id = __ompt_parallel_id_new(gtid);

        // parallel region callback
        if (ompt_callbacks.ompt_callback(ompt_event_parallel_begin)) {
            int team_size = 1;
            ompt_callbacks.ompt_callback(ompt_event_parallel_begin)(
                task_info->task_id, &task_info->frame, ompt_parallel_id,
                team_size, (void *) task,
                OMPT_INVOKER(fork_context_gnu));
        }
    }
#endif

    __kmp_serialized_parallel(loc, gtid);

#if OMPT_SUPPORT
    if (ompt_enabled) {
        kmp_info_t *thr = __kmp_threads[gtid];

        ompt_task_id_t my_ompt_task_id = __ompt_task_id_new(gtid);

        // set up lightweight task; heap-allocated because it must outlive
        // this frame (freed in GOMP_parallel_end via __ompt_lw_taskteam_unlink)
        ompt_lw_taskteam_t *lwt = (ompt_lw_taskteam_t *)
            __kmp_allocate(sizeof(ompt_lw_taskteam_t));
        __ompt_lw_taskteam_init(lwt, thr, gtid, (void *) task, ompt_parallel_id);
        lwt->ompt_task_info.task_id = my_ompt_task_id;
        __ompt_lw_taskteam_link(lwt, thr);

#if OMPT_TRACE
        // implicit task callback
        if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
                ompt_parallel_id, my_ompt_task_id);
        }
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;
#endif
    }
#endif
}
438 
439 
//
// GOMP_parallel_start: fork a team to execute 'task'.  num_threads == 0
// means use the runtime default; num_threads == 1 (or a failed fork check)
// takes the serialized path.
//
void
xexpand(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *), void *data, unsigned num_threads)
{
    int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
    ompt_frame_t *parent_frame, *frame;

    if (ompt_enabled) {
        // record the application frame that re-entered the runtime
        parent_frame = __ompt_get_task_frame_internal(0);
        parent_frame->reenter_runtime_frame = __builtin_frame_address(1);
    }
#endif

    MKLOC(loc, "GOMP_parallel_start");
    KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

#if OMPT_SUPPORT
    if (ompt_enabled) {
        frame = __ompt_get_task_frame_internal(0);
        frame->exit_runtime_frame = __builtin_frame_address(1);
    }
#endif
}
475 
476 
//
// GOMP_parallel_end: join the team forked by GOMP_parallel_start(), or
// end the serialized region, emitting the matching OMPT end callbacks
// and releasing the heap-allocated lightweight task team if present.
//
void
xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
{
    int gtid = __kmp_get_gtid();
    kmp_info_t *thr;

    thr = __kmp_threads[gtid];

    MKLOC(loc, "GOMP_parallel_end");
    KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));


#if OMPT_SUPPORT
    ompt_parallel_id_t parallel_id;
    ompt_task_id_t serialized_task_id;
    ompt_frame_t *ompt_frame = NULL;

    if (ompt_enabled) {
        // capture the ids before the team/task are torn down below
        ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
        parallel_id = team_info->parallel_id;

        ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
        serialized_task_id = task_info->task_id;

        // unlink if necessary. no-op if there is not a lightweight task.
        ompt_lw_taskteam_t *lwt = __ompt_lw_taskteam_unlink(thr);
        // GOMP allocates/frees lwt since it can't be kept on the stack
        if (lwt) {
           __kmp_free(lwt);

        }
    }
#endif

    if (! thr->th.th_team->t.t_serialized) {
        __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
          thr->th.th_team);

#if OMPT_SUPPORT
        if (ompt_enabled) {
          // Implicit task is finished here, in the barrier we might schedule deferred tasks,
          // these don't see the implicit task on the stack
          ompt_frame = __ompt_get_task_frame_internal(0);
          ompt_frame->exit_runtime_frame = NULL;
        }
#endif

        __kmp_join_call(&loc, gtid
#if OMPT_SUPPORT
            , fork_context_gnu
#endif
        );
    }
    else {
#if OMPT_SUPPORT && OMPT_TRACE
        if (ompt_enabled &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
                parallel_id, serialized_task_id);
        }
#endif

        __kmpc_end_serialized_parallel(&loc, gtid);

#if OMPT_SUPPORT
        if (ompt_enabled) {
            // Record that we re-entered the runtime system in the frame that
            // created the parallel region.
            ompt_task_info_t *parent_task_info = __ompt_get_taskinfo(0);

            if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
                ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
                    parallel_id, parent_task_info->task_id,
                    OMPT_INVOKER(fork_context_gnu));
            }

            parent_task_info->frame.reenter_runtime_frame = NULL;

            thr->th.ompt_thread_info.state =
                (((thr->th.th_team)->t.t_serialized) ?
                ompt_state_work_serial : ompt_state_work_parallel);
        }
#endif
    }
}
562 
563 
564 //
565 // Loop worksharing constructs
566 //
567 
568 //
569 // The Gnu codegen passes in an exclusive upper bound for the overall range,
570 // but the libguide dispatch code expects an inclusive upper bound, hence the
571 // "end - incr" 5th argument to KMP_DISPATCH_INIT (and the " ub - str" 11th
572 // argument to __kmp_GOMP_fork_call).
573 //
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
// but the Gnu codegen expects an exclusive upper bound, so the adjustment
// "*p_ub += stride" compensates for the discrepancy.
577 //
578 // Correction: the gnu codegen always adjusts the upper bound by +-1, not the
579 // stride value.  We adjust the dispatch parameters accordingly (by +-1), but
580 // we still adjust p_ub by the actual stride value.
581 //
582 // The "runtime" versions do not take a chunk_sz parameter.
583 //
584 // The profile lib cannot support construct checking of unordered loops that
585 // are predetermined by the compiler to be statically scheduled, as the gcc
586 // codegen will not always emit calls to GOMP_loop_static_next() to get the
// next iteration.  Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result.  It doesn't do this
// with ordered static loops, so they can be checked.
590 //
591 
//
// LOOP_START(func, schedule): emit a GOMP_loop_<sched>_start entry point.
// Converts GOMP's exclusive upper bound to the inclusive bound the KMP
// dispatcher expects (ub -/+ 1 per the sign of str), fetches the first
// chunk, then converts the returned inclusive bound back to exclusive
// (*p_ub +/- 1).  Returns nonzero iff a chunk was assigned; an empty
// iteration range short-circuits to status 0.
//
#define LOOP_START(func,schedule) \
    int func (long lb, long ub, long str, long chunk_sz, long *p_lb,         \
      long *p_ub)                                                            \
    {                                                                        \
        int status;                                                          \
        long stride;                                                         \
        int gtid = __kmp_entry_gtid();                                       \
        MKLOC(loc, #func);                                                   \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n",  \
          gtid, lb, ub, str, chunk_sz ));                                    \
                                                                             \
        if ((str > 0) ? (lb < ub) : (lb > ub)) {                             \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb,                    \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz,                \
              (schedule) != kmp_sch_static);                                 \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb,    \
              (kmp_int *)p_ub, (kmp_int *)&stride);                          \
            if (status) {                                                    \
                KMP_DEBUG_ASSERT(stride == str);                             \
                *p_ub += (str > 0) ? 1 : -1;                                 \
            }                                                                \
        }                                                                    \
        else {                                                               \
            status = 0;                                                      \
        }                                                                    \
                                                                             \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
          gtid, *p_lb, *p_ub, status));                                      \
        return status;                                                       \
    }
622 
623 
//
// LOOP_RUNTIME_START(func, schedule): like LOOP_START but for the
// "runtime" schedule, which has no chunk_sz parameter in the GOMP ABI;
// chunk_sz is fixed at 0 and the dispatcher is always told push_ws=TRUE.
//
#define LOOP_RUNTIME_START(func,schedule) \
    int func (long lb, long ub, long str, long *p_lb, long *p_ub)            \
    {                                                                        \
        int status;                                                          \
        long stride;                                                         \
        long chunk_sz = 0;                                                   \
        int gtid = __kmp_entry_gtid();                                       \
        MKLOC(loc, #func);                                                   \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n",  \
          gtid, lb, ub, str, chunk_sz ));                                    \
                                                                             \
        if ((str > 0) ? (lb < ub) : (lb > ub)) {                             \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb,                    \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE);         \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb,    \
              (kmp_int *)p_ub, (kmp_int *)&stride);                          \
            if (status) {                                                    \
                KMP_DEBUG_ASSERT(stride == str);                             \
                *p_ub += (str > 0) ? 1 : -1;                                 \
            }                                                                \
        }                                                                    \
        else {                                                               \
            status = 0;                                                      \
        }                                                                    \
                                                                             \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
          gtid, *p_lb, *p_ub, status));                                      \
        return status;                                                       \
    }
653 
654 
//
// LOOP_NEXT(func, fini_code): emit a GOMP_loop_<sched>_next entry point.
// 'fini_code' runs first (the ordered variants use it to release the
// previous chunk via KMP_DISPATCH_FINI_CHUNK); then the next chunk is
// fetched and the inclusive *p_ub converted back to exclusive.
//
#define LOOP_NEXT(func,fini_code) \
    int func(long *p_lb, long *p_ub)                                         \
    {                                                                        \
        int status;                                                          \
        long stride;                                                         \
        int gtid = __kmp_get_gtid();                                         \
        MKLOC(loc, #func);                                                   \
        KA_TRACE(20, ( #func ": T#%d\n", gtid));                             \
                                                                             \
        fini_code                                                            \
        status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb,        \
          (kmp_int *)p_ub, (kmp_int *)&stride);                              \
        if (status) {                                                        \
            *p_ub += (stride > 0) ? 1 : -1;                                  \
        }                                                                    \
                                                                             \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, "  \
          "returning %d\n", gtid, *p_lb, *p_ub, stride, status));            \
        return status;                                                       \
    }
675 
676 
//
// Instantiate the GOMP loop worksharing entry points for each schedule
// kind.  The ordered variants pass KMP_DISPATCH_FINI_CHUNK as the
// fini_code hook so each _next call first releases the prior chunk.
//
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})

LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
698 
699 
700 void
701 xexpand(KMP_API_NAME_GOMP_LOOP_END)(void)
702 {
703     int gtid = __kmp_get_gtid();
704     KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))
705 
706     __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
707 
708     KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
709 }
710 
711 
//
// End of a nowait loop: no barrier, only a debug trace.
//
void
xexpand(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void)
{
    KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}
717 
718 
719 //
720 // Unsigned long long loop worksharing constructs
721 //
722 // These are new with gcc 4.4
723 //
724 
//
// LOOP_START_ULL(func, schedule): unsigned-long-long analogue of
// LOOP_START (gcc 4.4+).  The iteration direction comes from 'up'; str2
// is the signed stride derived from it and passed to the dispatcher.
// NOTE(review): 'str' is unsigned, so the (str > 0) tests below reduce to
// (str != 0) — the down-counting case is presumably encoded entirely via
// 'up'/str2; confirm against the GOMP caller before changing.
//
#define LOOP_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub,          \
      unsigned long long str, unsigned long long chunk_sz,                   \
      unsigned long long *p_lb, unsigned long long *p_ub)                    \
    {                                                                        \
        int status;                                                          \
        long long str2 = up ? ((long long)str) : -((long long)str);          \
        long long stride;                                                    \
        int gtid = __kmp_entry_gtid();                                       \
        MKLOC(loc, #func);                                                   \
                                                                             \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
          gtid, up, lb, ub, str, chunk_sz ));                                \
                                                                             \
        if ((str > 0) ? (lb < ub) : (lb > ub)) {                             \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb,                \
              (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz,              \
              (schedule) != kmp_sch_static);                                 \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL,                 \
              (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) {                                                    \
                KMP_DEBUG_ASSERT(stride == str2);                            \
                *p_ub += (str > 0) ? 1 : -1;                                 \
            }                                                                \
        }                                                                    \
        else {                                                               \
            status = 0;                                                      \
        }                                                                    \
                                                                             \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
          gtid, *p_lb, *p_ub, status));                                      \
        return status;                                                       \
    }
758 
759 
//
// LOOP_RUNTIME_START_ULL(func, schedule): unsigned-long-long analogue of
// LOOP_RUNTIME_START — "runtime" schedule, so no chunk_sz in the GOMP ABI
// (fixed at 0) and push_ws is always TRUE.
//
#define LOOP_RUNTIME_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub,          \
      unsigned long long str, unsigned long long *p_lb,                      \
      unsigned long long *p_ub)                                              \
    {                                                                        \
        int status;                                                          \
        long long str2 = up ? ((long long)str) : -((long long)str);          \
        unsigned long long stride;                                           \
        unsigned long long chunk_sz = 0;                                     \
        int gtid = __kmp_entry_gtid();                                       \
        MKLOC(loc, #func);                                                   \
                                                                             \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
          gtid, up, lb, ub, str, chunk_sz ));                                \
                                                                             \
        if ((str > 0) ? (lb < ub) : (lb > ub)) {                             \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb,                \
              (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, TRUE);       \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL,                 \
              (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) {                                                    \
                KMP_DEBUG_ASSERT((long long)stride == str2);                 \
                *p_ub += (str > 0) ? 1 : -1;                                 \
            }                                                                \
        }                                                                    \
        else {                                                               \
            status = 0;                                                      \
        }                                                                    \
                                                                             \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
          gtid, *p_lb, *p_ub, status));                                      \
        return status;                                                       \
    }
793 
794 
// Generate a GOMP_loop_ull_*_next-style entry point: fetch the next chunk of
// the current ull loop workshare into *p_lb/*p_ub, returning nonzero while
// chunks remain.  'fini_code' is spliced in ahead of the dispatch call so the
// ordered variants can first release the previous chunk
// (KMP_DISPATCH_FINI_CHUNK_ULL).
#define LOOP_NEXT_ULL(func,fini_code) \
    int func(unsigned long long *p_lb, unsigned long long *p_ub)             \
    {                                                                        \
        int status;                                                          \
        long long stride;                                                    \
        int gtid = __kmp_get_gtid();                                         \
        MKLOC(loc, #func);                                                   \
        KA_TRACE(20, ( #func ": T#%d\n", gtid));                             \
                                                                             \
        fini_code                                                            \
        status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
          (kmp_uint64 *)p_ub, (kmp_int64 *)&stride);                         \
        if (status) {                                                        \
            *p_ub += (stride > 0) ? 1 : -1;                                  \
        }                                                                    \
                                                                             \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
          "returning %d\n", gtid, *p_lb, *p_ub, stride, status));            \
        return status;                                                       \
    }
815 
816 
// GOMP_2.0 unsigned long long loop worksharing entry points, generated from
// the macros above.
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START), kmp_sch_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})

// Ordered variants: the *_NEXT entry points release the previous chunk
// (KMP_DISPATCH_FINI_CHUNK_ULL) before asking the dispatcher for another.
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
838 
839 
840 //
841 // Combined parallel / loop worksharing constructs
842 //
843 // There are no ull versions (yet).
844 //
845 
// Generate a GOMP_parallel_loop_*_start-style entry point: fork the team
// (num_threads == 0 keeps the current default; 1 serializes) and initialize
// the loop workshare on the encountering thread.  ompt_pre()/ompt_post()
// expand to the OMPT frame bookkeeping when OMPT_SUPPORT is enabled and to
// nothing otherwise.  The GOMP-style exclusive bound 'ub' is converted to
// the inclusive bound KMP_DISPATCH_INIT expects via (str > 0) ? ub-1 : ub+1.
// Note KMP_DISPATCH_INIT runs on the encountering thread in both the forked
// and the serialized case.
#define PARALLEL_LOOP_START(func, schedule, ompt_pre, ompt_post) \
    void func (void (*task) (void *), void *data, unsigned num_threads,      \
      long lb, long ub, long str, long chunk_sz)                             \
    {                                                                        \
        int gtid = __kmp_entry_gtid();                                       \
        MKLOC(loc, #func);                                                   \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n",        \
          gtid, lb, ub, str, chunk_sz ));                                    \
                                                                             \
        ompt_pre();                                                          \
                                                                             \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {                 \
            if (num_threads != 0) {                                          \
                __kmp_push_num_threads(&loc, gtid, num_threads);             \
            }                                                                \
            __kmp_GOMP_fork_call(&loc, gtid, task,                           \
              (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,         \
              task, data, num_threads, &loc, (schedule), lb,                 \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz);               \
        }                                                                    \
        else {                                                               \
            __kmp_GOMP_serialized_parallel(&loc, gtid, task);                \
        }                                                                    \
                                                                             \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb,                        \
          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz,                    \
          (schedule) != kmp_sch_static);                                     \
                                                                             \
        ompt_post();                                                         \
                                                                             \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid));                        \
    }
878 
879 
880 
#if OMPT_SUPPORT

// Record the parent task's reenter frame before forking, so an OMPT tool
// can unwind across the runtime entry point.
#define OMPT_LOOP_PRE() \
    ompt_frame_t *parent_frame; \
    if (ompt_enabled) { \
        parent_frame = __ompt_get_task_frame_internal(0); \
        parent_frame->reenter_runtime_frame = __builtin_frame_address(1); \
    }


// Clear the frame recorded by OMPT_LOOP_PRE().
#define OMPT_LOOP_POST() \
    if (ompt_enabled) { \
        parent_frame->reenter_runtime_frame = NULL; \
    }

#else

// OMPT disabled: both hooks compile away to nothing.
#define OMPT_LOOP_PRE()

#define OMPT_LOOP_POST()

#endif
903 
904 
// GOMP_1.0 combined parallel/loop start entry points, one per schedule kind.
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START),
                    kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START),
                    kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START),
                    kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START),
                    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
913 
914 
915 //
916 // Tasking constructs
917 //
918 
// GOMP_2.0 explicit task entry point.
//
//   func/data:  the outlined task body and its argument block.
//   copy_func:  optional callback used to copy 'data' into the task's
//               private storage; when NULL a plain memcpy is used.
//   arg_size:   size in bytes of the argument block (0 => nothing to copy).
//   arg_align:  required alignment of the argument block.
//   if_cond:    value of the 'if' clause; when false the task is executed
//               immediately by the encountering thread instead of deferred.
//   gomp_flags: libgomp's flag encoding -- bit 0 = tied, bit 1 = final.
void
xexpand(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data, void (*copy_func)(void *, void *),
  long arg_size, long arg_align, bool if_cond, unsigned gomp_flags)
{
    MKLOC(loc, "GOMP_task");
    int gtid = __kmp_entry_gtid();
    kmp_int32 flags = 0;
    // Reinterpret the flat flags word through the runtime's bitfield view.
    kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *) & flags;

    KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));

    // The low-order bit is the "tied" flag
    if (gomp_flags & 1) {
        input_flags->tiedness = 1;
    }
    // The second low-order bit is the "final" flag
    if (gomp_flags & 2) {
        input_flags->final = 1;
    }
    input_flags->native = 1;
    // __kmp_task_alloc() sets up all other flags

    // An undeferred task runs in place, so no private copy of the argument
    // block is needed.
    if (! if_cond) {
        arg_size = 0;
    }

    // Over-allocate by (arg_align - 1) bytes so 'shareds' can be rounded up
    // to the requested alignment below.
    kmp_task_t *task = __kmp_task_alloc(&loc, gtid, input_flags,
      sizeof(kmp_task_t), arg_size ? arg_size + arg_align - 1 : 0,
      (kmp_routine_entry_t)func);

    if (arg_size > 0) {
        if (arg_align > 0) {
            // Round 'shareds' up to the next multiple of arg_align.
            task->shareds = (void *)((((size_t)task->shareds)
              + arg_align - 1) / arg_align * arg_align);
        }
        //else error??

        // Copy the argument block into the task's private storage.
        if (copy_func) {
            (*copy_func)(task->shareds, data);
        }
        else {
            KMP_MEMCPY(task->shareds, data, arg_size);
        }
    }

    if (if_cond) {
        // Deferred task: hand it to the runtime's tasking machinery.
        __kmpc_omp_task(&loc, gtid, task);
    }
    else {
#if OMPT_SUPPORT
        ompt_thread_info_t oldInfo;
        kmp_info_t *thread;
        kmp_taskdata_t *taskdata;
        if (ompt_enabled) {
            // Store the threads states and restore them after the task
            thread = __kmp_threads[ gtid ];
            taskdata = KMP_TASK_TO_TASKDATA(task);
            oldInfo = thread->th.ompt_thread_info;
            thread->th.ompt_thread_info.wait_id = 0;
            thread->th.ompt_thread_info.state = ompt_state_work_parallel;
            taskdata->ompt_task_info.frame.exit_runtime_frame =
                __builtin_frame_address(0);
        }
#endif

        // Undeferred task: execute inline between the begin/complete markers.
        __kmpc_omp_task_begin_if0(&loc, gtid, task);
        func(data);
        __kmpc_omp_task_complete_if0(&loc, gtid, task);

#if OMPT_SUPPORT
        if (ompt_enabled) {
            thread->th.ompt_thread_info = oldInfo;
            taskdata->ompt_task_info.frame.exit_runtime_frame = NULL;
        }
#endif
    }

    KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
}
998 
999 
1000 void
1001 xexpand(KMP_API_NAME_GOMP_TASKWAIT)(void)
1002 {
1003     MKLOC(loc, "GOMP_taskwait");
1004     int gtid = __kmp_entry_gtid();
1005 
1006     KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));
1007 
1008     __kmpc_omp_taskwait(&loc, gtid);
1009 
1010     KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
1011 }
1012 
1013 
1014 //
1015 // Sections worksharing constructs
1016 //
1017 
1018 //
1019 // For the sections construct, we initialize a dynamically scheduled loop
1020 // worksharing construct with lb 1 and stride 1, and use the iteration #'s
1021 // that its returns as sections ids.
1022 //
1023 // There are no special entry points for ordered sections, so we always use
1024 // the dynamically scheduled workshare, even if the sections aren't ordered.
1025 //
1026 
1027 unsigned
1028 xexpand(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count)
1029 {
1030     int status;
1031     kmp_int lb, ub, stride;
1032     int gtid = __kmp_entry_gtid();
1033     MKLOC(loc, "GOMP_sections_start");
1034     KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));
1035 
1036     KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);
1037 
1038     status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
1039     if (status) {
1040         KMP_DEBUG_ASSERT(stride == 1);
1041         KMP_DEBUG_ASSERT(lb > 0);
1042         KMP_ASSERT(lb == ub);
1043     }
1044     else {
1045         lb = 0;
1046     }
1047 
1048     KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
1049       (unsigned)lb));
1050     return (unsigned)lb;
1051 }
1052 
1053 
1054 unsigned
1055 xexpand(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void)
1056 {
1057     int status;
1058     kmp_int lb, ub, stride;
1059     int gtid = __kmp_get_gtid();
1060     MKLOC(loc, "GOMP_sections_next");
1061     KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));
1062 
1063     status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
1064     if (status) {
1065         KMP_DEBUG_ASSERT(stride == 1);
1066         KMP_DEBUG_ASSERT(lb > 0);
1067         KMP_ASSERT(lb == ub);
1068     }
1069     else {
1070         lb = 0;
1071     }
1072 
1073     KA_TRACE(20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid,
1074       (unsigned)lb));
1075     return (unsigned)lb;
1076 }
1077 
1078 
// GOMP_1.0 combined parallel/sections start: fork the team, then initialize
// the encountering thread's share of the sections workshare (dynamically
// scheduled over ids 1..count -- see the comment block above
// GOMP_sections_start).  num_threads == 0 leaves the team size at the
// current default; num_threads == 1 serializes.
void
xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(void (*task) (void *), void *data,
  unsigned num_threads, unsigned count)
{
    int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
    ompt_frame_t *parent_frame;

    if (ompt_enabled) {
        // Publish the parent's reenter frame so an OMPT tool can unwind
        // across the fork; cleared again below.
        parent_frame = __ompt_get_task_frame_internal(0);
        parent_frame->reenter_runtime_frame = __builtin_frame_address(1);
    }
#endif

    MKLOC(loc, "GOMP_parallel_sections_start");
    KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
          num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
          (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

#if OMPT_SUPPORT
    if (ompt_enabled) {
        parent_frame->reenter_runtime_frame = NULL;
    }
#endif

    // Initialize this thread's view of the sections workshare.
    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
}
1120 
1121 
1122 void
1123 xexpand(KMP_API_NAME_GOMP_SECTIONS_END)(void)
1124 {
1125     int gtid = __kmp_get_gtid();
1126     KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))
1127 
1128     __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
1129 
1130     KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
1131 }
1132 
1133 
void
xexpand(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void)
{
    // Nowait form: unlike GOMP_sections_end there is no barrier here,
    // so this only emits a trace record.
    KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
}
1139 
1140 // libgomp has an empty function for GOMP_taskyield as of 2013-10-10
void
xexpand(KMP_API_NAME_GOMP_TASKYIELD)(void)
{
    // Taskyield is a hint; matching libgomp (see note above) this is a
    // trace-only no-op.
    KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid()))
    return;
}
1147 
1148 #if OMP_40_ENABLED // these are new GOMP_4.0 entry points
1149 
// GOMP_4.0 combined parallel: fork the team, run the outlined body on the
// encountering thread as well, then join via GOMP_parallel_end.
//   num_threads: requested team size (0 => current default, 1 => serialize).
//   flags:       nonzero values are pushed as a proc_bind policy.
void
xexpand(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *), void *data, unsigned num_threads, unsigned int flags)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_parallel");
    KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid));

#if OMPT_SUPPORT
    ompt_task_info_t *parent_task_info, *task_info;
    if (ompt_enabled) {
        // Publish the parent's reenter frame for the duration of the region.
        parent_task_info = __ompt_get_taskinfo(0);
        parent_task_info->frame.reenter_runtime_frame = __builtin_frame_address(1);
    }
#endif
    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        if(flags != 0) {
            __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }
#if OMPT_SUPPORT
    if (ompt_enabled) {
        // Mark this thread's exit frame while it executes the region body.
        task_info = __ompt_get_taskinfo(0);
        task_info->frame.exit_runtime_frame = __builtin_frame_address(0);
    }
#endif
    // The encountering thread executes the region body, then joins the team.
    task(data);
    xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();
#if OMPT_SUPPORT
    if (ompt_enabled) {
        task_info->frame.exit_runtime_frame = NULL;
        parent_task_info->frame.reenter_runtime_frame = NULL;
    }
#endif
}
1192 
// GOMP_4.0 combined parallel/sections: fork the team (honoring num_threads
// and the proc_bind policy in 'flags'), initialize the encountering thread's
// sections workshare, run the body on it too, then join via
// GOMP_parallel_end.
void
xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task) (void *), void *data,
  unsigned num_threads, unsigned count, unsigned flags)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_parallel_sections");
    KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        if(flags != 0) {
            __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
          num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
          (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

    // Initialize this thread's view of the sections workshare
    // (dynamically scheduled over ids 1..count).
    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    task(data);
    xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();
    KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid));
}
1223 
// Generate a GOMP_4.0 combined parallel loop entry point: like
// PARALLEL_LOOP_START, but 'flags' carries a proc_bind policy, the region
// body is also executed on the encountering thread (task(data)), and the
// team is joined via GOMP_parallel_end before returning.
#define PARALLEL_LOOP(func, schedule, ompt_pre, ompt_post) \
    void func (void (*task) (void *), void *data, unsigned num_threads,      \
      long lb, long ub, long str, long chunk_sz, unsigned flags)             \
    {                                                                        \
        int gtid = __kmp_entry_gtid();                                       \
        MKLOC(loc, #func);                                                   \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n",        \
          gtid, lb, ub, str, chunk_sz ));                                    \
                                                                             \
        ompt_pre();                                                          \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {                 \
            if (num_threads != 0) {                                          \
                __kmp_push_num_threads(&loc, gtid, num_threads);             \
            }                                                                \
            if (flags != 0) {                                                \
                __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);    \
            }                                                                \
            __kmp_GOMP_fork_call(&loc, gtid, task,                           \
              (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,         \
              task, data, num_threads, &loc, (schedule), lb,                 \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz);               \
        }                                                                    \
        else {                                                               \
            __kmp_GOMP_serialized_parallel(&loc, gtid, task);                \
        }                                                                    \
                                                                             \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb,                        \
          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz,                    \
          (schedule) != kmp_sch_static);                                     \
        task(data);                                                          \
        xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();                           \
        ompt_post();                                                          \
                                                                             \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid));                        \
    }
1259 
// GOMP_4.0 combined parallel loop entry points, one per schedule kind.
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC), kmp_sch_static,
                OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC), kmp_sch_dynamic_chunked,
                OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED), kmp_sch_guided_chunked,
                OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME), kmp_sch_runtime,
                OMPT_LOOP_PRE, OMPT_LOOP_POST)
1268 
1269 
1270 void
1271 xexpand(KMP_API_NAME_GOMP_TASKGROUP_START)(void)
1272 {
1273     int gtid = __kmp_entry_gtid();
1274     MKLOC(loc, "GOMP_taskgroup_start");
1275     KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid));
1276 
1277     __kmpc_taskgroup(&loc, gtid);
1278 
1279     return;
1280 }
1281 
1282 void
1283 xexpand(KMP_API_NAME_GOMP_TASKGROUP_END)(void)
1284 {
1285     int gtid = __kmp_get_gtid();
1286     MKLOC(loc, "GOMP_taskgroup_end");
1287     KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid));
1288 
1289     __kmpc_end_taskgroup(&loc, gtid);
1290 
1291     return;
1292 }
1293 
1294 #ifndef KMP_DEBUG
1295 static
1296 #endif /* KMP_DEBUG */
1297 kmp_int32 __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) {
1298     kmp_int32 cncl_kind = 0;
1299     switch(gomp_kind) {
1300       case 1:
1301         cncl_kind = cancel_parallel;
1302         break;
1303       case 2:
1304         cncl_kind = cancel_loop;
1305         break;
1306       case 4:
1307         cncl_kind = cancel_sections;
1308         break;
1309       case 8:
1310         cncl_kind = cancel_taskgroup;
1311         break;
1312     }
1313     return cncl_kind;
1314 }
1315 
1316 bool
1317 xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which)
1318 {
1319     if(__kmp_omp_cancellation) {
1320         KMP_FATAL(NoGompCancellation);
1321     }
1322     int gtid = __kmp_get_gtid();
1323     MKLOC(loc, "GOMP_cancellation_point");
1324     KA_TRACE(20, ("GOMP_cancellation_point: T#%d\n", gtid));
1325 
1326     kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);
1327 
1328     return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
1329 }
1330 
1331 bool
1332 xexpand(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void)
1333 {
1334     if(__kmp_omp_cancellation) {
1335         KMP_FATAL(NoGompCancellation);
1336     }
1337     KMP_FATAL(NoGompCancellation);
1338     int gtid = __kmp_get_gtid();
1339     MKLOC(loc, "GOMP_barrier_cancel");
1340     KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid));
1341 
1342     return __kmpc_cancel_barrier(&loc, gtid);
1343 }
1344 
// GOMP_4.0 cancel.  Cancellation through the GOMP interface is not
// supported: if OMP_CANCELLATION is enabled we abort with a fatal error,
// otherwise we report that nothing was cancelled.
bool
xexpand(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    } else {
        return FALSE;
    }

    // NOTE(review): everything below is unreachable -- the branch above
    // either aborts or returns FALSE.  Presumably kept for when
    // cancellation becomes supported through this interface; confirm
    // intent before removing.
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_cancel");
    KA_TRACE(20, ("GOMP_cancel: T#%d\n", gtid));

    kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);

    if(do_cancel == FALSE) {
        // Without an actual cancel request, degenerate to a plain
        // cancellation point.
        return xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(which);
    } else {
        return __kmpc_cancel(&loc, gtid, cncl_kind);
    }
}
1366 
1367 bool
1368 xexpand(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void)
1369 {
1370     if(__kmp_omp_cancellation) {
1371         KMP_FATAL(NoGompCancellation);
1372     }
1373     int gtid = __kmp_get_gtid();
1374     MKLOC(loc, "GOMP_sections_end_cancel");
1375     KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid));
1376 
1377     return __kmpc_cancel_barrier(&loc, gtid);
1378 }
1379 
1380 bool
1381 xexpand(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void)
1382 {
1383     if(__kmp_omp_cancellation) {
1384         KMP_FATAL(NoGompCancellation);
1385     }
1386     int gtid = __kmp_get_gtid();
1387     MKLOC(loc, "GOMP_loop_end_cancel");
1388     KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid));
1389 
1390     return __kmpc_cancel_barrier(&loc, gtid);
1391 }
1392 
1393 // All target functions are empty as of 2014-05-29
1394 void
1395 xexpand(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn) (void *), const void *openmp_target,
1396              size_t mapnum, void **hostaddrs, size_t *sizes, unsigned char *kinds)
1397 {
1398     return;
1399 }
1400 
1401 void
1402 xexpand(KMP_API_NAME_GOMP_TARGET_DATA)(int device, const void *openmp_target, size_t mapnum,
1403                   void **hostaddrs, size_t *sizes, unsigned char *kinds)
1404 {
1405     return;
1406 }
1407 
1408 void
1409 xexpand(KMP_API_NAME_GOMP_TARGET_END_DATA)(void)
1410 {
1411     return;
1412 }
1413 
1414 void
1415 xexpand(KMP_API_NAME_GOMP_TARGET_UPDATE)(int device, const void *openmp_target, size_t mapnum,
1416                     void **hostaddrs, size_t *sizes, unsigned char *kinds)
1417 {
1418     return;
1419 }
1420 
1421 void
1422 xexpand(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams, unsigned int thread_limit)
1423 {
1424     return;
1425 }
1426 #endif // OMP_40_ENABLED
1427 
1428 
1429 /*
1430     The following sections of code create aliases for the GOMP_* functions,
1431     then create versioned symbols using the assembler directive .symver.
1432     This is only pertinent for ELF .so library
1433     xaliasify and xversionify are defined in kmp_ftn_os.h
1434 */
1435 
1436 #ifdef KMP_USE_VERSION_SYMBOLS
1437 
1438 // GOMP_1.0 aliases
1439 xaliasify(KMP_API_NAME_GOMP_ATOMIC_END, 10);
1440 xaliasify(KMP_API_NAME_GOMP_ATOMIC_START, 10);
1441 xaliasify(KMP_API_NAME_GOMP_BARRIER, 10);
1442 xaliasify(KMP_API_NAME_GOMP_CRITICAL_END, 10);
1443 xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10);
1444 xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10);
1445 xaliasify(KMP_API_NAME_GOMP_CRITICAL_START, 10);
1446 xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10);
1447 xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10);
1448 xaliasify(KMP_API_NAME_GOMP_LOOP_END, 10);
1449 xaliasify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10);
1450 xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10);
1451 xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10);
1452 xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10);
1453 xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10);
1454 xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10);
1455 xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10);
1456 xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10);
1457 xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10);
1458 xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10);
1459 xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10);
1460 xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10);
1461 xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10);
1462 xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10);
1463 xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10);
1464 xaliasify(KMP_API_NAME_GOMP_ORDERED_END, 10);
1465 xaliasify(KMP_API_NAME_GOMP_ORDERED_START, 10);
1466 xaliasify(KMP_API_NAME_GOMP_PARALLEL_END, 10);
1467 xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10);
1468 xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10);
1469 xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10);
1470 xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10);
1471 xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10);
1472 xaliasify(KMP_API_NAME_GOMP_PARALLEL_START, 10);
1473 xaliasify(KMP_API_NAME_GOMP_SECTIONS_END, 10);
1474 xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10);
1475 xaliasify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10);
1476 xaliasify(KMP_API_NAME_GOMP_SECTIONS_START, 10);
1477 xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10);
1478 xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10);
1479 xaliasify(KMP_API_NAME_GOMP_SINGLE_START, 10);
1480 
1481 // GOMP_2.0 aliases
1482 xaliasify(KMP_API_NAME_GOMP_TASK, 20);
1483 xaliasify(KMP_API_NAME_GOMP_TASKWAIT, 20);
1484 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20);
1485 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20);
1486 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20);
1487 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20);
1488 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20);
1489 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20);
1490 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20);
1491 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20);
1492 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20);
1493 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20);
1494 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20);
1495 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20);
1496 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20);
1497 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20);
1498 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20);
1499 xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20);
1500 
1501 // GOMP_3.0 aliases
1502 xaliasify(KMP_API_NAME_GOMP_TASKYIELD, 30);
1503 
1504 // GOMP_4.0 aliases
1505 // The GOMP_parallel* entry points below aren't OpenMP 4.0 related.
1506 #if OMP_40_ENABLED
1507 xaliasify(KMP_API_NAME_GOMP_PARALLEL, 40);
1508 xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40);
1509 xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40);
1510 xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40);
1511 xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40);
1512 xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40);
1513 xaliasify(KMP_API_NAME_GOMP_TASKGROUP_START, 40);
1514 xaliasify(KMP_API_NAME_GOMP_TASKGROUP_END, 40);
1515 xaliasify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40);
1516 xaliasify(KMP_API_NAME_GOMP_CANCEL, 40);
1517 xaliasify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40);
1518 xaliasify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40);
1519 xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40);
1520 xaliasify(KMP_API_NAME_GOMP_TARGET, 40);
1521 xaliasify(KMP_API_NAME_GOMP_TARGET_DATA, 40);
1522 xaliasify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40);
1523 xaliasify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40);
1524 xaliasify(KMP_API_NAME_GOMP_TEAMS, 40);
1525 #endif
1526 
// GOMP_1.0 versioned symbols.
// Each xversionify(name, ver, str) invocation attaches the version string
// (here "GOMP_1.0") to the corresponding KMP_API_NAME_GOMP_* entry point so
// that applications linked against GCC's libgomp resolve these symbols in
// this runtime. NOTE(review): xversionify expands outside this view
// (kmp_ftn_os.h / KMP_USE_VERSION_SYMBOLS machinery) — presumably an ELF
// .symver directive; confirm there before changing any entry.
xversionify(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");
1569 
// GOMP_2.0 versioned symbols: tasking (GOMP_task/GOMP_taskwait) and the
// unsigned-long-long ("ULL") loop-scheduling entry points introduced with
// GCC's OpenMP 3.0 support. The numeric tag 20 pairs with version string
// "GOMP_2.0" consistently across all entries below.
xversionify(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");
1589 
// GOMP_3.0 versioned symbols: a single entry — GOMP_taskyield, added with
// GCC's OpenMP 3.1 support.
xversionify(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");
1592 
// GOMP_4.0 versioned symbols: combined parallel constructs, taskgroups,
// cancellation, target/teams entry points. Guarded by OMP_40_ENABLED so
// builds configured without OpenMP 4.0 support do not reference (or
// version) entry points that were never compiled in.
#if OMP_40_ENABLED
xversionify(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0");
#endif
1614 
1615 #endif // KMP_USE_VERSION_SYMBOLS
1616 
1617 #ifdef __cplusplus
1618     } //extern "C"
1619 #endif // __cplusplus
1620 
1621 
1622