1 /*
2  * kmp_gsupport.cpp
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 //                     The LLVM Compiler Infrastructure
8 //
9 // This file is dual licensed under the MIT and the University of Illinois Open
10 // Source Licenses. See LICENSE.txt for details.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "kmp.h"
15 #include "kmp_atomic.h"
16 
17 #if OMPT_SUPPORT
18 #include "ompt-specific.h"
19 #endif
20 
21 #ifdef __cplusplus
22 extern "C" {
23 #endif // __cplusplus
24 
// MKLOC(loc, routine): declares a function-local static ident_t named (loc)
// carrying a placeholder source-location string, used as the loc argument to
// the kmpc entry points.  NOTE(review): the `routine` argument is unused by
// the expansion — presumably kept for symmetry with other builds.
#define MKLOC(loc, routine)                                                    \
  static ident_t(loc) = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};
27 
28 #include "kmp_ftn_os.h"
29 
// GOMP_barrier: explicit team barrier, mapped onto __kmpc_barrier.
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_barrier");
  KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    // Publish the caller's frame and return address before entering the
    // runtime so tool callbacks fired inside the barrier can attribute it
    // to user code.
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmpc_barrier(&loc, gtid);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    // Clear the frame once the barrier has completed.
    ompt_frame->enter_frame = NULL;
  }
#endif
}
49 
50 // Mutual exclusion
51 
// The symbol that icc/ifort generates for unnamed critical sections
// - .gomp_critical_user_ - is defined using .comm in any objects that
// reference it. We can't reference it directly here in C code, as the symbol
// contains a ".".
//
// The RTL contains an assembly language definition of .gomp_critical_user_
// with another symbol __kmp_unnamed_critical_addr initialized with its
// address.
59 extern kmp_critical_name *__kmp_unnamed_critical_addr;
60 
61 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_START)(void) {
62   int gtid = __kmp_entry_gtid();
63   MKLOC(loc, "GOMP_critical_start");
64   KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
65 #if OMPT_SUPPORT && OMPT_OPTIONAL
66   OMPT_STORE_RETURN_ADDRESS(gtid);
67 #endif
68   __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
69 }
70 
71 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_END)(void) {
72   int gtid = __kmp_get_gtid();
73   MKLOC(loc, "GOMP_critical_end");
74   KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
75 #if OMPT_SUPPORT && OMPT_OPTIONAL
76   OMPT_STORE_RETURN_ADDRESS(gtid);
77 #endif
78   __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
79 }
80 
81 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr) {
82   int gtid = __kmp_entry_gtid();
83   MKLOC(loc, "GOMP_critical_name_start");
84   KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
85   __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
86 }
87 
88 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr) {
89   int gtid = __kmp_get_gtid();
90   MKLOC(loc, "GOMP_critical_name_end");
91   KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
92   __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
93 }
94 
95 // The Gnu codegen tries to use locked operations to perform atomic updates
96 // inline.  If it can't, then it calls GOMP_atomic_start() before performing
97 // the update and GOMP_atomic_end() afterward, regardless of the data type.
98 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_START)(void) {
99   int gtid = __kmp_entry_gtid();
100   KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));
101 
102 #if OMPT_SUPPORT
103   __ompt_thread_assign_wait_id(0);
104 #endif
105 
106   __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
107 }
108 
109 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_END)(void) {
110   int gtid = __kmp_get_gtid();
111   KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));
112   __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
113 }
114 
// GOMP_single_start: begin a single construct.  Returns nonzero on the one
// thread selected to execute the single block and zero on the others.
// There is no corresponding GOMP_single_end call, hence no workshare push
// (see comment below).
int KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_single_start");
  KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

  // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
  // workshare when USE_CHECKS is defined.  We need to avoid the push,
  // as there is no corresponding GOMP_single_end() call.
  kmp_int32 rc = __kmp_enter_single(gtid, &loc, FALSE);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  kmp_info_t *this_thr = __kmp_threads[gtid];
  kmp_team_t *team = this_thr->th.th_team;
  int tid = __kmp_tid_from_gtid(gtid);

  if (ompt_enabled.enabled) {
    if (rc) {
      // Selected thread: report only scope_begin for the executor role;
      // the matching scope_end is not reported from this routine.
      if (ompt_enabled.ompt_callback_work) {
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_executor, ompt_scope_begin,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
      }
    } else {
      // Non-selected threads skip the single block entirely, so their
      // "other" role begins and ends right here, back to back.
      if (ompt_enabled.ompt_callback_work) {
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_other, ompt_scope_begin,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_other, ompt_scope_end,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
      }
    }
  }
#endif

  return rc;
}
161 
// GOMP_single_copy_start: begin a single construct with copyprivate.  The
// thread selected to execute the block returns NULL and later broadcasts the
// data pointer via GOMP_single_copy_end(); every other thread waits at the
// first barrier and returns that pointer.
void *KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void) {
  void *retval;
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_single_copy_start");
  KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

  // If this is the first thread to enter, return NULL.  The generated code will
  // then call GOMP_single_copy_end() for this thread only, with the
  // copyprivate data pointer as an argument.
  if (__kmp_enter_single(gtid, &loc, FALSE))
    return NULL;

// Wait for the first thread to set the copyprivate data pointer,
// and for all other threads to reach this point.

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

  // Retrieve the value of the copyprivate data pointer, and wait for all
  // threads to do likewise, then return.
  retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  // Second barrier: ensures every thread has read t_copypriv_data before
  // the field can be reused.
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = NULL;
  }
#endif
  return retval;
}
206 
// GOMP_single_copy_end: called only by the thread that executed the single
// block, to broadcast the copyprivate data pointer to the rest of the team.
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

  // Set the copyprivate data pointer for the team, then hit the barrier so
  // that the other threads will continue on and read it.  Hit another barrier
  // before continuing, so that they know the copyprivate data pointer has been
  // propagated to all threads before trying to reuse the t_copypriv_data
  // field.
  __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = NULL;
  }
#endif
}
237 
238 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_START)(void) {
239   int gtid = __kmp_entry_gtid();
240   MKLOC(loc, "GOMP_ordered_start");
241   KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
242 #if OMPT_SUPPORT && OMPT_OPTIONAL
243   OMPT_STORE_RETURN_ADDRESS(gtid);
244 #endif
245   __kmpc_ordered(&loc, gtid);
246 }
247 
248 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_END)(void) {
249   int gtid = __kmp_get_gtid();
250   MKLOC(loc, "GOMP_ordered_end");
251   KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
252 #if OMPT_SUPPORT && OMPT_OPTIONAL
253   OMPT_STORE_RETURN_ADDRESS(gtid);
254 #endif
255   __kmpc_end_ordered(&loc, gtid);
256 }
257 
258 // Dispatch macro defs
259 //
260 // They come in two flavors: 64-bit unsigned, and either 32-bit signed
261 // (IA-32 architecture) or 64-bit signed (Intel(R) 64).
262 
// Select the signed dispatch helpers: 32-bit variants on ILP32
// architectures, 64-bit variants everywhere else.
#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
#define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4
#define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4
#define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4
#else
#define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_8
#define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_8
#define KMP_DISPATCH_NEXT __kmpc_dispatch_next_8
#endif /* KMP_ARCH_X86 */

// 64-bit unsigned dispatch helpers, used by the _ull loop entry points.
#define KMP_DISPATCH_INIT_ULL __kmp_aux_dispatch_init_8u
#define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u
#define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u
276 
// The parallel construct
278 
// Microtask wrapper for GOMP parallel regions: invokes the GNU outlined
// function, bracketed by OMPT state/frame bookkeeping when tools support is
// compiled in.
#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
                                 void *data) {
#if OMPT_SUPPORT
  kmp_info_t *thr;
  ompt_frame_t *ompt_frame;
  omp_state_t enclosing_state;

  if (ompt_enabled.enabled) {
    // get pointer to thread data structure
    thr = __kmp_threads[*gtid];

    // save enclosing task state; set current state for task
    enclosing_state = thr->th.ompt_thread_info.state;
    thr->th.ompt_thread_info.state = omp_state_work_parallel;

    // set task frame
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->exit_frame = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  // Run the user's outlined region.
  task(data);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // clear task frame
    ompt_frame->exit_frame = NULL;

    // restore enclosing state
    thr->th.ompt_thread_info.state = enclosing_state;
  }
#endif
}
316 
// Microtask wrapper for GOMP combined parallel+loop regions: initializes the
// loop dispatch for this thread, then invokes the outlined function, with
// OMPT state/frame bookkeeping around the call.
#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
                                          void (*task)(void *), void *data,
                                          unsigned num_threads, ident_t *loc,
                                          enum sched_type schedule, long start,
                                          long end, long incr,
                                          long chunk_size) {
  // Initialize the loop worksharing construct.

  KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
                    schedule != kmp_sch_static);

#if OMPT_SUPPORT
  kmp_info_t *thr;
  ompt_frame_t *ompt_frame;
  omp_state_t enclosing_state;

  if (ompt_enabled.enabled) {
    thr = __kmp_threads[*gtid];
    // save enclosing task state; set current state for task
    enclosing_state = thr->th.ompt_thread_info.state;
    thr->th.ompt_thread_info.state = omp_state_work_parallel;

    // set task frame
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->exit_frame = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  // Now invoke the microtask.
  task(data);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // clear task frame
    ompt_frame->exit_frame = NULL;

    // reset enclosing state
    thr->th.ompt_thread_info.state = enclosing_state;
  }
#endif
}
362 
// Common fork path for the GOMP parallel entry points.  Forks a team whose
// workers run `wrapper`, forwarding `argc` trailing varargs to it.  If the
// fork succeeds (rc != 0), the pre-invocation hook is run here for the
// master; the caller then invokes the task body itself.
// NOTE(review): `unwrapped_task` is not referenced in this body — presumably
// kept for signature symmetry with other builds; confirm before removing.
#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *),
                         microtask_t wrapper, int argc, ...) {
  int rc;
  kmp_info_t *thr = __kmp_threads[gtid];
  kmp_team_t *team = thr->th.th_team;
  int tid = __kmp_tid_from_gtid(gtid);

  va_list ap;
  va_start(ap, argc);

  // Some ABIs require the va_list to be passed by address; see the
  // arch-specific branch below.
  rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc, wrapper,
                       __kmp_invoke_task_func,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
                       &ap
#else
                       ap
#endif
                       );

  va_end(ap);

  if (rc) {
    __kmp_run_before_invoked_task(gtid, tid, thr, team);
  }

#if OMPT_SUPPORT
  int ompt_team_size;
  if (ompt_enabled.enabled) {
    ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
    ompt_task_info_t *task_info = __ompt_get_task_info_object(0);

    // implicit task callback
    if (ompt_enabled.ompt_callback_implicit_task) {
      ompt_team_size = __kmp_team_from_gtid(gtid)->t.t_nproc;
      ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
          ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), ompt_team_size, __kmp_tid_from_gtid(gtid));
    }
    thr->th.ompt_thread_info.state = omp_state_work_parallel;
  }
#endif
}
409 
// Begin a serialized (single-thread) parallel region for the GOMP entry
// points.  The OMPT return address must be stored before entering the
// runtime so tool callbacks attribute to the caller.
// NOTE(review): `task` is unused here; the caller invokes it after return.
static void __kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid,
                                           void (*task)(void *)) {
#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmp_serialized_parallel(loc, gtid);
}
417 
// GOMP_parallel_start: fork a team to run `task`.  In the GOMP scheme the
// master returns from this call and invokes the task body itself;
// GOMP_parallel_end performs the join.
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *),
                                                       void *data,
                                                       unsigned num_threads) {
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  ompt_frame_t *parent_frame, *frame;

  if (ompt_enabled.enabled) {
    // Expose the caller's frame/return address before forking.
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
    parent_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif

  MKLOC(loc, "GOMP_parallel_start");
  KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));

  // num_threads == 1 serializes the region; num_threads == 0 means no
  // explicit request, so nothing is pushed.
  if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
    if (num_threads != 0) {
      __kmp_push_num_threads(&loc, gtid, num_threads);
    }
    __kmp_GOMP_fork_call(&loc, gtid, task,
                         (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task,
                         data);
  } else {
    __kmp_GOMP_serialized_parallel(&loc, gtid, task);
  }

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // The master runs the task body after returning; set the implicit
    // task's exit frame accordingly.
    __ompt_get_task_info_internal(0, NULL, NULL, &frame, NULL, NULL);
    frame->exit_frame = OMPT_GET_FRAME_ADDRESS(1);
  }
#endif
}
454 
455 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(void) {
456   int gtid = __kmp_get_gtid();
457   kmp_info_t *thr;
458   int ompt_team_size = __kmp_team_from_gtid(gtid)->t.t_nproc;
459 
460   thr = __kmp_threads[gtid];
461 
462   MKLOC(loc, "GOMP_parallel_end");
463   KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));
464 
465   if (!thr->th.th_team->t.t_serialized) {
466     __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
467                                  thr->th.th_team);
468 
469 #if OMPT_SUPPORT
470     if (ompt_enabled.enabled) {
471       // Implicit task is finished here, in the barrier we might schedule
472       // deferred tasks,
473       // these don't see the implicit task on the stack
474       OMPT_CUR_TASK_INFO(thr)->frame.exit_frame = NULL;
475     }
476 #endif
477 
478     __kmp_join_call(&loc, gtid
479 #if OMPT_SUPPORT
480                     ,
481                     fork_context_gnu
482 #endif
483                     );
484   } else {
485     __kmpc_end_serialized_parallel(&loc, gtid);
486   }
487 }
488 
489 // Loop worksharing constructs
490 
// The Gnu codegen passes in an exclusive upper bound for the overall range,
// but the libguide dispatch code expects an inclusive upper bound, hence the
// "end - incr" 5th argument to KMP_DISPATCH_INIT (and the " ub - str" 11th
// argument to __kmp_GOMP_fork_call).
//
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
// but the Gnu codegen expects an exclusive upper bound, so the adjustment
// "*p_ub += stride" compensates for the discrepancy.
//
// Correction: the gnu codegen always adjusts the upper bound by +-1, not the
// stride value.  We adjust the dispatch parameters accordingly (by +-1), but
// we still adjust p_ub by the actual stride value.
//
// The "runtime" versions do not take a chunk_sz parameter.
//
// The profile lib cannot support construct checking of unordered loops that
// are predetermined by the compiler to be statically scheduled, as the gcc
// codegen will not always emit calls to GOMP_loop_static_next() to get the
// next iteration.  Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result.  It doesn't do this
// with ordered static loops, so they can be checked.
512 
// IF_OMPT_SUPPORT(code): expands to `code` only when OMPT support is
// compiled in, keeping the loop macros below free of #if clutter.
#if OMPT_SUPPORT
#define IF_OMPT_SUPPORT(code) code
#else
#define IF_OMPT_SUPPORT(code)
#endif
518 
// LOOP_START(func, schedule): defines a GOMP_loop_*_start entry point for
// signed long iteration spaces.  Converts GOMP's exclusive upper bound to
// the runtime's inclusive one (ub -/+ 1), initializes dispatch, and fetches
// the first chunk into *p_lb/*p_ub.  Returns nonzero iff a chunk was
// assigned.  (Interior intentionally unchanged: // comments would break the
// line continuations.)
#define LOOP_START(func, schedule)                                             \
  int func(long lb, long ub, long str, long chunk_sz, long *p_lb,              \
           long *p_ub) {                                                       \
    int status;                                                                \
    long stride;                                                               \
    int gtid = __kmp_entry_gtid();                                             \
    MKLOC(loc, #func);                                                         \
    KA_TRACE(20,                                                               \
             (#func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
              gtid, lb, ub, str, chunk_sz));                                   \
                                                                               \
    if ((str > 0) ? (lb < ub) : (lb > ub)) {                                   \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);)                        \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb,                            \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz,        \
                        (schedule) != kmp_sch_static);                         \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);)                        \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb,            \
                                 (kmp_int *)p_ub, (kmp_int *)&stride);         \
      if (status) {                                                            \
        KMP_DEBUG_ASSERT(stride == str);                                       \
        *p_ub += (str > 0) ? 1 : -1;                                           \
      }                                                                        \
    } else {                                                                   \
      status = 0;                                                              \
    }                                                                          \
                                                                               \
    KA_TRACE(20,                                                               \
             (#func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n",   \
              gtid, *p_lb, *p_ub, status));                                    \
    return status;                                                             \
  }
551 
// LOOP_RUNTIME_START(func, schedule): like LOOP_START but for the "runtime"
// schedules, which take no chunk_sz parameter (chunk_sz is fixed at 0 and
// the dispatcher's push flag is TRUE).
#define LOOP_RUNTIME_START(func, schedule)                                     \
  int func(long lb, long ub, long str, long *p_lb, long *p_ub) {               \
    int status;                                                                \
    long stride;                                                               \
    long chunk_sz = 0;                                                         \
    int gtid = __kmp_entry_gtid();                                             \
    MKLOC(loc, #func);                                                         \
    KA_TRACE(20,                                                               \
             (#func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n",    \
              gtid, lb, ub, str, chunk_sz));                                   \
                                                                               \
    if ((str > 0) ? (lb < ub) : (lb > ub)) {                                   \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);)                        \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb,                            \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);)                        \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb,            \
                                 (kmp_int *)p_ub, (kmp_int *)&stride);         \
      if (status) {                                                            \
        KMP_DEBUG_ASSERT(stride == str);                                       \
        *p_ub += (str > 0) ? 1 : -1;                                           \
      }                                                                        \
    } else {                                                                   \
      status = 0;                                                              \
    }                                                                          \
                                                                               \
    KA_TRACE(20,                                                               \
             (#func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n",   \
              gtid, *p_lb, *p_ub, status));                                    \
    return status;                                                             \
  }
583 
// LOOP_NEXT(func, fini_code): defines a GOMP_loop_*_next entry point that
// fetches the next chunk into *p_lb/*p_ub, returning nonzero on success.
// `fini_code` runs first — the ordered variants use it to call
// KMP_DISPATCH_FINI_CHUNK for the previous chunk; others pass {}.
#define LOOP_NEXT(func, fini_code)                                             \
  int func(long *p_lb, long *p_ub) {                                           \
    int status;                                                                \
    long stride;                                                               \
    int gtid = __kmp_get_gtid();                                               \
    MKLOC(loc, #func);                                                         \
    KA_TRACE(20, (#func ": T#%d\n", gtid));                                    \
                                                                               \
    IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);)                          \
    fini_code status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb,    \
                                         (kmp_int *)p_ub, (kmp_int *)&stride); \
    if (status) {                                                              \
      *p_ub += (stride > 0) ? 1 : -1;                                          \
    }                                                                          \
                                                                               \
    KA_TRACE(20,                                                               \
             (#func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, "    \
                    "returning %d\n",                                          \
              gtid, *p_lb, *p_ub, stride, status));                            \
    return status;                                                             \
  }
605 
// Instantiate the GOMP loop start/next entry points, one pair per schedule.
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START),
           kmp_sch_dynamic_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_START),
           kmp_sch_guided_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
LOOP_RUNTIME_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_START),
                   kmp_sch_runtime)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})

// Ordered variants: the next routines also invoke KMP_DISPATCH_FINI_CHUNK
// before fetching the following chunk.
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START),
           kmp_ord_static)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START),
           kmp_ord_dynamic_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START),
           kmp_ord_guided_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START),
    kmp_ord_runtime)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
635 
// GOMP_loop_end: barrier at the end of a loop worksharing construct.
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    // Expose the caller's frame/return address for the duration of the
    // barrier.
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = NULL;
  }
#endif

  KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
}
657 
// GOMP_loop_end_nowait: end of a nowait loop workshare — no barrier is
// required, so this is trace-only.
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void) {
  KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}
661 
662 // Unsigned long long loop worksharing constructs
663 //
664 // These are new with gcc 4.4
665 
// LOOP_START_ULL(func, schedule): defines a GOMP_loop_ull_*_start entry
// point for unsigned long long iteration spaces.  `up` selects the
// iteration direction; str2 is the signed stride handed to the dispatcher.
// Same exclusive->inclusive upper-bound conversion as LOOP_START.
#define LOOP_START_ULL(func, schedule)                                         \
  int func(int up, unsigned long long lb, unsigned long long ub,               \
           unsigned long long str, unsigned long long chunk_sz,                \
           unsigned long long *p_lb, unsigned long long *p_ub) {               \
    int status;                                                                \
    long long str2 = up ? ((long long)str) : -((long long)str);                \
    long long stride;                                                          \
    int gtid = __kmp_entry_gtid();                                             \
    MKLOC(loc, #func);                                                         \
                                                                               \
    KA_TRACE(                                                                  \
        20,                                                                    \
        (#func                                                                 \
         ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
         gtid, up, lb, ub, str, chunk_sz));                                    \
                                                                               \
    if ((str > 0) ? (lb < ub) : (lb > ub)) {                                   \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb,                        \
                            (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz,  \
                            (schedule) != kmp_sch_static);                     \
      status =                                                                 \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb,          \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride);     \
      if (status) {                                                            \
        KMP_DEBUG_ASSERT(stride == str2);                                      \
        *p_ub += (str > 0) ? 1 : -1;                                           \
      }                                                                        \
    } else {                                                                   \
      status = 0;                                                              \
    }                                                                          \
                                                                               \
    KA_TRACE(20,                                                               \
             (#func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
              gtid, *p_lb, *p_ub, status));                                    \
    return status;                                                             \
  }
702 
// Generate a "start" entry point for a GOMP worksharing loop with 64-bit
// (unsigned long long) bounds whose schedule is chosen at run time.  Unlike
// the compile-time-schedule variant, GOMP supplies no chunk size here, so
// chunk_sz is fixed at 0, and the direction of iteration arrives separately
// in 'up' (the stride 'str' itself is unsigned).  The generated function
// initializes dispatch for the half-open range and immediately requests the
// first chunk; on success *p_ub is adjusted by +/-1 to convert the runtime's
// inclusive upper bound back to GOMP's exclusive convention.  Returns
// nonzero iff a first chunk was obtained.
#define LOOP_RUNTIME_START_ULL(func, schedule)                                 \
  int func(int up, unsigned long long lb, unsigned long long ub,               \
           unsigned long long str, unsigned long long *p_lb,                   \
           unsigned long long *p_ub) {                                         \
    int status;                                                                \
    long long str2 = up ? ((long long)str) : -((long long)str);                \
    unsigned long long stride;                                                 \
    unsigned long long chunk_sz = 0;                                           \
    int gtid = __kmp_entry_gtid();                                             \
    MKLOC(loc, #func);                                                         \
                                                                               \
    KA_TRACE(                                                                  \
        20,                                                                    \
        (#func                                                                 \
         ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
         gtid, up, lb, ub, str, chunk_sz));                                    \
                                                                               \
    if ((str > 0) ? (lb < ub) : (lb > ub)) {                                   \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb,                        \
                            (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz,  \
                            TRUE);                                             \
      status =                                                                 \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb,          \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride);     \
      if (status) {                                                            \
        KMP_DEBUG_ASSERT((long long)stride == str2);                           \
        *p_ub += (str > 0) ? 1 : -1;                                           \
      }                                                                        \
    } else {                                                                   \
      status = 0;                                                              \
    }                                                                          \
                                                                               \
    KA_TRACE(20,                                                               \
             (#func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
              gtid, *p_lb, *p_ub, status));                                    \
    return status;                                                             \
  }
740 
// Generate a "next" entry point for a GOMP ull worksharing loop.  fini_code
// is a statement block (possibly empty, note it is spliced in directly before
// the 'status = ...' assignment) executed before asking the dispatcher for
// the next chunk; the ordered variants pass KMP_DISPATCH_FINI_CHUNK_ULL here
// to retire the previous chunk.  On success *p_ub is adjusted by +/-1 to
// convert the runtime's inclusive bound back to GOMP's exclusive convention.
#define LOOP_NEXT_ULL(func, fini_code)                                         \
  int func(unsigned long long *p_lb, unsigned long long *p_ub) {               \
    int status;                                                                \
    long long stride;                                                          \
    int gtid = __kmp_get_gtid();                                               \
    MKLOC(loc, #func);                                                         \
    KA_TRACE(20, (#func ": T#%d\n", gtid));                                    \
                                                                               \
    fini_code status =                                                         \
        KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb,            \
                              (kmp_uint64 *)p_ub, (kmp_int64 *)&stride);       \
    if (status) {                                                              \
      *p_ub += (stride > 0) ? 1 : -1;                                          \
    }                                                                          \
                                                                               \
    KA_TRACE(20,                                                               \
             (#func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
                    "returning %d\n",                                          \
              gtid, *p_lb, *p_ub, stride, status));                            \
    return status;                                                             \
  }
762 
// Unordered ull loop worksharing entry points (GOMP_1.0/2.0 split form:
// the compiler emits separate start/next/end calls).
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START),
               kmp_sch_static)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START),
               kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START),
               kmp_sch_guided_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})

// Ordered ull variants: use the kmp_ord_* schedules, and the "next" entry
// points must retire the previous chunk before fetching another one.
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START),
               kmp_ord_static)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START),
    kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START),
               kmp_ord_guided_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START),
    kmp_ord_runtime)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
794 
795 // Combined parallel / loop worksharing constructs
796 //
797 // There are no ull versions (yet).
798 
// Generate a GOMP_parallel_loop_*_start entry point (GOMP_1.0 split form:
// the compiler emits start / iterate / GOMP_parallel_end separately).  The
// generated function forks a team running
// __kmp_GOMP_parallel_microtask_wrapper (which performs dispatch init in the
// workers), then performs KMP_DISPATCH_INIT for the calling thread itself
// before returning.  The exclusive GOMP upper bound is converted to the
// runtime's inclusive one via (str > 0) ? (ub - 1) : (ub + 1).
// ompt_pre()/ompt_post() expand to OMPT frame bookkeeping, or to nothing
// when OMPT is disabled.  num_threads == 0 means "use the default".
#define PARALLEL_LOOP_START(func, schedule, ompt_pre, ompt_post)               \
  void func(void (*task)(void *), void *data, unsigned num_threads, long lb,   \
            long ub, long str, long chunk_sz) {                                \
    int gtid = __kmp_entry_gtid();                                             \
    MKLOC(loc, #func);                                                         \
    KA_TRACE(20,                                                               \
             (#func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
              gtid, lb, ub, str, chunk_sz));                                   \
                                                                               \
    ompt_pre();                                                                \
                                                                               \
    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {                       \
      if (num_threads != 0) {                                                  \
        __kmp_push_num_threads(&loc, gtid, num_threads);                       \
      }                                                                        \
      __kmp_GOMP_fork_call(&loc, gtid, task,                                   \
                           (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \
                           9, task, data, num_threads, &loc, (schedule), lb,   \
                           (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz);    \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid));                        \
    } else {                                                                   \
      __kmp_GOMP_serialized_parallel(&loc, gtid, task);                        \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid));                        \
    }                                                                          \
                                                                               \
    KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb,                              \
                      (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz,          \
                      (schedule) != kmp_sch_static);                           \
                                                                               \
    ompt_post();                                                               \
                                                                               \
    KA_TRACE(20, (#func " exit: T#%d\n", gtid));                               \
  }
832 
#if OMPT_SUPPORT && OMPT_OPTIONAL

// Publish the parent task's enter_frame and the return address before the
// fork, so OMPT tools can walk from runtime frames back to user code.
// Expands in a scope where 'gtid' is already defined (see PARALLEL_LOOP_START).
#define OMPT_LOOP_PRE()                                                        \
  ompt_frame_t *parent_frame;                                                  \
  if (ompt_enabled.enabled) {                                                  \
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);   \
    parent_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);                     \
    OMPT_STORE_RETURN_ADDRESS(gtid);                                           \
  }

// Clear the frame pointer recorded by OMPT_LOOP_PRE once the region is set up.
#define OMPT_LOOP_POST()                                                       \
  if (ompt_enabled.enabled) {                                                  \
    parent_frame->enter_frame = NULL;                                          \
  }

#else

// OMPT disabled: the hooks compile away to nothing.
#define OMPT_LOOP_PRE()

#define OMPT_LOOP_POST()

#endif
855 
// Combined parallel + loop "start" entry points, one per schedule kind.
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START),
    kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START),
    kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START),
    kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
868 
869 // Tasking constructs
870 
871 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data,
872                                              void (*copy_func)(void *, void *),
873                                              long arg_size, long arg_align,
874                                              bool if_cond, unsigned gomp_flags
875 #if OMP_40_ENABLED
876                                              ,
877                                              void **depend
878 #endif
879                                              ) {
880   MKLOC(loc, "GOMP_task");
881   int gtid = __kmp_entry_gtid();
882   kmp_int32 flags = 0;
883   kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
884 
885   KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));
886 
887   // The low-order bit is the "untied" flag
888   if (!(gomp_flags & 1)) {
889     input_flags->tiedness = 1;
890   }
891   // The second low-order bit is the "final" flag
892   if (gomp_flags & 2) {
893     input_flags->final = 1;
894   }
895   input_flags->native = 1;
896   // __kmp_task_alloc() sets up all other flags
897 
898   if (!if_cond) {
899     arg_size = 0;
900   }
901 
902   kmp_task_t *task = __kmp_task_alloc(
903       &loc, gtid, input_flags, sizeof(kmp_task_t),
904       arg_size ? arg_size + arg_align - 1 : 0, (kmp_routine_entry_t)func);
905 
906   if (arg_size > 0) {
907     if (arg_align > 0) {
908       task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) /
909                                arg_align * arg_align);
910     }
911     // else error??
912 
913     if (copy_func) {
914       (*copy_func)(task->shareds, data);
915     } else {
916       KMP_MEMCPY(task->shareds, data, arg_size);
917     }
918   }
919 
920 #if OMPT_SUPPORT
921   kmp_taskdata_t *current_task;
922   if (ompt_enabled.enabled) {
923     OMPT_STORE_RETURN_ADDRESS(gtid);
924     current_task = __kmp_threads[gtid]->th.th_current_task;
925     current_task->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
926   }
927 #endif
928 
929   if (if_cond) {
930 #if OMP_40_ENABLED
931     if (gomp_flags & 8) {
932       KMP_ASSERT(depend);
933       const size_t ndeps = (kmp_intptr_t)depend[0];
934       const size_t nout = (kmp_intptr_t)depend[1];
935       kmp_depend_info_t dep_list[ndeps];
936 
937       for (size_t i = 0U; i < ndeps; i++) {
938         dep_list[i].base_addr = (kmp_intptr_t)depend[2U + i];
939         dep_list[i].len = 0U;
940         dep_list[i].flags.in = 1;
941         dep_list[i].flags.out = (i < nout);
942       }
943       __kmpc_omp_task_with_deps(&loc, gtid, task, ndeps, dep_list, 0, NULL);
944     } else {
945 #endif
946       __kmpc_omp_task(&loc, gtid, task);
947     }
948   } else {
949 #if OMPT_SUPPORT
950     ompt_thread_info_t oldInfo;
951     kmp_info_t *thread;
952     kmp_taskdata_t *taskdata;
953     kmp_taskdata_t *current_task;
954     if (ompt_enabled.enabled) {
955       // Store the threads states and restore them after the task
956       thread = __kmp_threads[gtid];
957       taskdata = KMP_TASK_TO_TASKDATA(task);
958       oldInfo = thread->th.ompt_thread_info;
959       thread->th.ompt_thread_info.wait_id = 0;
960       thread->th.ompt_thread_info.state = omp_state_work_parallel;
961       taskdata->ompt_task_info.frame.exit_frame = OMPT_GET_FRAME_ADDRESS(0);
962       OMPT_STORE_RETURN_ADDRESS(gtid);
963     }
964 #endif
965 
966     __kmpc_omp_task_begin_if0(&loc, gtid, task);
967     func(data);
968     __kmpc_omp_task_complete_if0(&loc, gtid, task);
969 
970 #if OMPT_SUPPORT
971     if (ompt_enabled.enabled) {
972       thread->th.ompt_thread_info = oldInfo;
973       taskdata->ompt_task_info.frame.exit_frame = NULL;
974     }
975 #endif
976   }
977 #if OMPT_SUPPORT
978   if (ompt_enabled.enabled) {
979     current_task->ompt_task_info.frame.enter_frame = NULL;
980   }
981 #endif
982 
983   KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
984 }
985 
986 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKWAIT)(void) {
987   MKLOC(loc, "GOMP_taskwait");
988   int gtid = __kmp_entry_gtid();
989 
990 #if OMPT_SUPPORT
991   if (ompt_enabled.enabled)
992     OMPT_STORE_RETURN_ADDRESS(gtid);
993 #endif
994 
995   KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));
996 
997   __kmpc_omp_taskwait(&loc, gtid);
998 
999   KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
1000 }
1001 
1002 // Sections worksharing constructs
1003 //
// For the sections construct, we initialize a dynamically scheduled loop
// worksharing construct with lb 1 and stride 1, and use the iteration #'s
// that it returns as section ids.
1007 //
1008 // There are no special entry points for ordered sections, so we always use
1009 // the dynamically scheduled workshare, even if the sections aren't ordered.
1010 
1011 unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count) {
1012   int status;
1013   kmp_int lb, ub, stride;
1014   int gtid = __kmp_entry_gtid();
1015   MKLOC(loc, "GOMP_sections_start");
1016   KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));
1017 
1018   KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);
1019 
1020   status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
1021   if (status) {
1022     KMP_DEBUG_ASSERT(stride == 1);
1023     KMP_DEBUG_ASSERT(lb > 0);
1024     KMP_ASSERT(lb == ub);
1025   } else {
1026     lb = 0;
1027   }
1028 
1029   KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
1030                 (unsigned)lb));
1031   return (unsigned)lb;
1032 }
1033 
1034 unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void) {
1035   int status;
1036   kmp_int lb, ub, stride;
1037   int gtid = __kmp_get_gtid();
1038   MKLOC(loc, "GOMP_sections_next");
1039   KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));
1040 
1041 #if OMPT_SUPPORT
1042   OMPT_STORE_RETURN_ADDRESS(gtid);
1043 #endif
1044 
1045   status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
1046   if (status) {
1047     KMP_DEBUG_ASSERT(stride == 1);
1048     KMP_DEBUG_ASSERT(lb > 0);
1049     KMP_ASSERT(lb == ub);
1050   } else {
1051     lb = 0;
1052   }
1053 
1054   KA_TRACE(
1055       20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid, (unsigned)lb));
1056   return (unsigned)lb;
1057 }
1058 
// GOMP_parallel_sections_start (GOMP_1.0 split form): fork a team for a
// sections construct.  Sections are modeled as a dynamic loop over 1..count
// with chunk 1; workers initialize their own dispatch inside the microtask
// wrapper, and the calling thread runs KMP_DISPATCH_INIT for itself below.
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(
    void (*task)(void *), void *data, unsigned num_threads, unsigned count) {
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  ompt_frame_t *parent_frame;

  // Publish the parent frame and return address before the fork so tools
  // can attribute the region to this call site.
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
    parent_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif

  MKLOC(loc, "GOMP_parallel_sections_start");
  KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));

  if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
    // num_threads == 0 means "use the default team size".
    if (num_threads != 0) {
      __kmp_push_num_threads(&loc, gtid, num_threads);
    }
    __kmp_GOMP_fork_call(&loc, gtid, task,
                         (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
                         task, data, num_threads, &loc, kmp_nm_dynamic_chunked,
                         (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1);
  } else {
    __kmp_GOMP_serialized_parallel(&loc, gtid, task);
  }

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    parent_frame->enter_frame = NULL;
  }
#endif

  // Dispatch init for the calling thread, which participates in the team.
  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

  KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
}
1098 
// GOMP_sections_end: close a sections construct with the implicit join
// barrier.  The OMPT frame is published around the wait so tools can
// attribute barrier time to this call site.
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))

#if OMPT_SUPPORT
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  // Plain join barrier.  NOTE(review): the trailing arguments appear to be
  // unused reduction hooks here — confirm against __kmp_barrier's signature.
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = NULL;
  }
#endif

  KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
}
1120 
// GOMP_sections_end_nowait: with the barrier skipped there is nothing to
// tear down, so this only traces.  (__kmp_get_gtid() stays inside KA_TRACE
// on purpose: the whole expression compiles away in non-trace builds.)
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void) {
  KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
}
1124 
1125 // libgomp has an empty function for GOMP_taskyield as of 2013-10-10
1126 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKYIELD)(void) {
1127   KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid()))
1128   return;
1129 }
1130 
1131 #if OMP_40_ENABLED // these are new GOMP_4.0 entry points
1132 
// GOMP_parallel (GOMP_4.0 combined form): fork a team, run task(data) on the
// calling thread as well, then join via the GOMP_parallel_end entry point.
// flags carries the proc_bind policy (0 = none); num_threads == 0 means
// "use the default team size".
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *),
                                                 void *data,
                                                 unsigned num_threads,
                                                 unsigned int flags) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_parallel");
  KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid));

#if OMPT_SUPPORT
  ompt_task_info_t *parent_task_info, *task_info;
  // Publish the parent frame and return address before the fork.
  if (ompt_enabled.enabled) {
    parent_task_info = __ompt_get_task_info_object(0);
    parent_task_info->frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
    if (num_threads != 0) {
      __kmp_push_num_threads(&loc, gtid, num_threads);
    }
    if (flags != 0) {
      __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
    }
    __kmp_GOMP_fork_call(&loc, gtid, task,
                         (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task,
                         data);
  } else {
    // Cannot fork (or team of 1): execute the region serialized.
    __kmp_GOMP_serialized_parallel(&loc, gtid, task);
  }
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    task_info = __ompt_get_task_info_object(0);
    task_info->frame.exit_frame = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif
  // The calling thread participates in the parallel region directly.
  task(data);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)();
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    task_info->frame.exit_frame = NULL;
    parent_task_info->frame.enter_frame = NULL;
  }
#endif
}
1182 
// GOMP_parallel_sections (GOMP_4.0 combined form): fork, run the sections
// body on the calling thread too, then join via GOMP_parallel_end.  Sections
// are modeled as a dynamic loop over 1..count with chunk 1; flags carries
// the proc_bind policy.
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task)(void *),
                                                          void *data,
                                                          unsigned num_threads,
                                                          unsigned count,
                                                          unsigned flags) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_parallel_sections");
  KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid));

#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
    if (num_threads != 0) {
      __kmp_push_num_threads(&loc, gtid, num_threads);
    }
    if (flags != 0) {
      __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
    }
    __kmp_GOMP_fork_call(&loc, gtid, task,
                         (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
                         task, data, num_threads, &loc, kmp_nm_dynamic_chunked,
                         (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1);
  } else {
    __kmp_GOMP_serialized_parallel(&loc, gtid, task);
  }

#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  // Dispatch init for the calling thread, which participates in the team.
  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

  task(data);
  KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)();
  KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid));
}
1221 
// Generate an OMP 4.0 combined parallel-loop entry point
// (GOMP_parallel_loop_*): fork the team, initialize dispatch for the calling
// thread, run the loop body here as well, then join via GOMP_parallel_end.
// Unlike PARALLEL_LOOP_START this takes the proc_bind policy in 'flags'.
// The exclusive GOMP upper bound is converted to the runtime's inclusive
// one via (str > 0) ? (ub - 1) : (ub + 1).
#define PARALLEL_LOOP(func, schedule, ompt_pre, ompt_post)                     \
  void func(void (*task)(void *), void *data, unsigned num_threads, long lb,   \
            long ub, long str, long chunk_sz, unsigned flags) {                \
    int gtid = __kmp_entry_gtid();                                             \
    MKLOC(loc, #func);                                                         \
    KA_TRACE(20,                                                               \
             (#func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
              gtid, lb, ub, str, chunk_sz));                                   \
                                                                               \
    ompt_pre();                                                                \
    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {                       \
      if (num_threads != 0) {                                                  \
        __kmp_push_num_threads(&loc, gtid, num_threads);                       \
      }                                                                        \
      if (flags != 0) {                                                        \
        __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);              \
      }                                                                        \
      __kmp_GOMP_fork_call(&loc, gtid, task,                                   \
                           (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \
                           9, task, data, num_threads, &loc, (schedule), lb,   \
                           (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz);    \
    } else {                                                                   \
      __kmp_GOMP_serialized_parallel(&loc, gtid, task);                        \
    }                                                                          \
                                                                               \
    IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);)                          \
    KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb,                              \
                      (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz,          \
                      (schedule) != kmp_sch_static);                           \
    task(data);                                                                \
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)();                         \
    ompt_post();                                                               \
                                                                               \
    KA_TRACE(20, (#func " exit: T#%d\n", gtid));                               \
  }
1257 
// OMP 4.0 combined parallel-loop entry points, one per schedule kind.
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC),
              kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC),
              kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED),
              kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME),
              kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
1266 
1267 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_START)(void) {
1268   int gtid = __kmp_entry_gtid();
1269   MKLOC(loc, "GOMP_taskgroup_start");
1270   KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid));
1271 
1272 #if OMPT_SUPPORT
1273   if (ompt_enabled.enabled)
1274     OMPT_STORE_RETURN_ADDRESS(gtid);
1275 #endif
1276 
1277   __kmpc_taskgroup(&loc, gtid);
1278 
1279   return;
1280 }
1281 
1282 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_END)(void) {
1283   int gtid = __kmp_get_gtid();
1284   MKLOC(loc, "GOMP_taskgroup_end");
1285   KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid));
1286 
1287 #if OMPT_SUPPORT
1288   if (ompt_enabled.enabled)
1289     OMPT_STORE_RETURN_ADDRESS(gtid);
1290 #endif
1291 
1292   __kmpc_end_taskgroup(&loc, gtid);
1293 
1294   return;
1295 }
1296 
1297 #ifndef KMP_DEBUG
1298 static
1299 #endif /* KMP_DEBUG */
1300     kmp_int32
1301     __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) {
1302   kmp_int32 cncl_kind = 0;
1303   switch (gomp_kind) {
1304   case 1:
1305     cncl_kind = cancel_parallel;
1306     break;
1307   case 2:
1308     cncl_kind = cancel_loop;
1309     break;
1310   case 4:
1311     cncl_kind = cancel_sections;
1312     break;
1313   case 8:
1314     cncl_kind = cancel_taskgroup;
1315     break;
1316   }
1317   return cncl_kind;
1318 }
1319 
// GOMP_cancellation_point: GOMP-side cancellation interop is unsupported by
// this runtime, so abort with a diagnostic when the user has enabled OMP
// cancellation (KMP_FATAL presumably does not return — same pattern as the
// other *_cancel entry points below); otherwise fall through and query the
// kmpc cancellation point.
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which) {
  if (__kmp_omp_cancellation) {
    KMP_FATAL(NoGompCancellation);
  }
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_cancellation_point");
  KA_TRACE(20, ("GOMP_cancellation_point: T#%d\n", gtid));

  // Translate the GOMP construct bit (1/2/4/8) into the kmp cancel kind.
  kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);

  return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
}
1332 
1333 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void) {
1334   if (__kmp_omp_cancellation) {
1335     KMP_FATAL(NoGompCancellation);
1336   }
1337   KMP_FATAL(NoGompCancellation);
1338   int gtid = __kmp_get_gtid();
1339   MKLOC(loc, "GOMP_barrier_cancel");
1340   KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid));
1341 
1342   return __kmpc_cancel_barrier(&loc, gtid);
1343 }
1344 
// GOMP_cancel: request (do_cancel != FALSE) or merely check (do_cancel ==
// FALSE) cancellation of the construct identified by 'which'.
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel) {
  // NOTE(review): every path through this if/else either aborts (KMP_FATAL
  // presumably does not return) or returns FALSE, so all code below the
  // else is unreachable as written.  Compare GOMP_cancellation_point, which
  // falls through after its fatal check — confirm which behavior is
  // intended before relying on the logic below.
  if (__kmp_omp_cancellation) {
    KMP_FATAL(NoGompCancellation);
  } else {
    return FALSE;
  }

  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_cancel");
  KA_TRACE(20, ("GOMP_cancel: T#%d\n", gtid));

  kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);

  // do_cancel == FALSE degenerates to a plain cancellation point.
  if (do_cancel == FALSE) {
    return KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCELLATION_POINT)(which);
  } else {
    return __kmpc_cancel(&loc, gtid, cncl_kind);
  }
}
1364 
// GOMP_sections_end_cancel: cancellable join barrier closing a sections
// construct.  Aborts if OMP cancellation was enabled (GOMP interop with
// cancellation is unsupported); otherwise returns whether the construct
// was cancelled.
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void) {
  if (__kmp_omp_cancellation) {
    KMP_FATAL(NoGompCancellation);
  }
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_sections_end_cancel");
  KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid));

  return __kmpc_cancel_barrier(&loc, gtid);
}
1375 
// GOMP_loop_end_cancel: cancellable join barrier closing a loop workshare.
// Aborts if OMP cancellation was enabled (GOMP interop with cancellation is
// unsupported); otherwise returns whether the construct was cancelled.
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void) {
  if (__kmp_omp_cancellation) {
    KMP_FATAL(NoGompCancellation);
  }
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_loop_end_cancel");
  KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid));

  return __kmpc_cancel_barrier(&loc, gtid);
}
1386 
1387 // All target functions are empty as of 2014-05-29
1388 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn)(void *),
1389                                                const void *openmp_target,
1390                                                size_t mapnum, void **hostaddrs,
1391                                                size_t *sizes,
1392                                                unsigned char *kinds) {
1393   return;
1394 }
1395 
1396 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_DATA)(
1397     int device, const void *openmp_target, size_t mapnum, void **hostaddrs,
1398     size_t *sizes, unsigned char *kinds) {
1399   return;
1400 }
1401 
1402 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_END_DATA)(void) { return; }
1403 
1404 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_UPDATE)(
1405     int device, const void *openmp_target, size_t mapnum, void **hostaddrs,
1406     size_t *sizes, unsigned char *kinds) {
1407   return;
1408 }
1409 
1410 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams,
1411                                               unsigned int thread_limit) {
1412   return;
1413 }
1414 #endif // OMP_40_ENABLED
1415 
1416 /* The following sections of code create aliases for the GOMP_* functions, then
1417    create versioned symbols using the assembler directive .symver. This is only
1418    pertinent for ELF .so library. The KMP_VERSION_SYMBOL macro is defined in
1419    kmp_os.h  */
1420 
#ifdef KMP_USE_VERSION_SYMBOLS
// Each KMP_VERSION_SYMBOL(api_name, version_num, version_str) invocation
// creates a versioned alias of the corresponding GOMP_* entry point, binding
// it to the named GOMP_x.y ELF symbol version (see the comment above and the
// macro definition in kmp_os.h). The groups below mirror the libgomp version
// nodes in which each entry point first appeared.

// GOMP_1.0 versioned symbols
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");

// GOMP_2.0 versioned symbols (tasking and unsigned-long-long loop variants)
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");

// GOMP_3.0 versioned symbols
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");

// GOMP_4.0 versioned symbols (cancellation, taskgroup, target, teams)
#if OMP_40_ENABLED
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0");
#endif

#endif // KMP_USE_VERSION_SYMBOLS
1525 
1526 #ifdef __cplusplus
1527 } // extern "C"
1528 #endif // __cplusplus
1529