/*
 * kmp_error.cpp -- KPTS functions for error checking at runtime
 */

//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_str.h"

/* ------------------------------------------------------------------------ */
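/* A rough sketch of how these entry points fit together (illustrative only;
   in the runtime the stack lives in __kmp_threads[gtid]->th.th_cons and the
   calls are generated by the compiler, not written by hand):

     struct cons_header *stack = __kmp_allocate_cons_stack(gtid);
     __kmp_push_parallel(gtid, loc);           // enter "parallel"
     __kmp_push_workshare(gtid, ct_pdo, loc);  // enter a worksharing loop
     ...
     __kmp_pop_workshare(gtid, ct_pdo, loc);   // leave the loop
     __kmp_pop_parallel(gtid, loc);            // leave "parallel"
     __kmp_free_cons_stack(stack);

   Each push first validates the new construct against the current stack, and
   each pop checks that constructs are exited in strict LIFO order. */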

#define MIN_STACK 100

static char const *cons_text_c[] = {
    "(none)", "\"parallel\"", "work-sharing", /* this is not called "for"
                                                 because of lowering of
                                                 "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of
                                   lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing", /* this is not called "single" because of lowering of
                       "sections" pragmas */
    "\"taskq\"", "\"taskq\"", "\"taskq ordered\"", "\"critical\"",
    "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"ordered\"", /* in TASKQ */
    "\"master\"", "\"reduce\"", "\"barrier\""};

#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);

/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */

static void __kmp_check_null_func(void) { /* nothing to do */
}

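/* Grow the construct stack (new size = 2 * old + 100) and copy the live
   entries into the new buffer. As the TODO/NOTE below record, the old
   stack_data buffer is currently leaked rather than freed. */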
static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  /* TODO for monitor perhaps? */
  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  /* TODO free the old data */
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}

// NOTE: Function returns allocated memory, caller must free it!
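// The ident->psource string is a ';'-separated record; the splitting below
// discards the leading (empty) field and then extracts the file name,
// function name, and line number in that order.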
static char *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s",
                        ident->psource); // Copy source to buffer.
    // Split string in buffer to file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  }
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma

/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */

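// Report a fatal error for an illegally used construct. The message is built
// from the i18n message id plus the construct's type and source location
// (and, for the two-construct variant, the conflicting construct as well).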
void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
                           ) {
  char *construct = __kmp_pragma(ct, ident);
  __kmp_fatal(__kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct);
}

void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
                            ) {
  char *construct1 = __kmp_pragma(ct, ident);
  char *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_fatal(__kmp_msg_format(id, construct1, construct2), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct1);
  KMP_INTERNAL_FREE(construct2);
}

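// Allocate and initialize the per-thread construct stack. Entry 0 is a
// sentinel (ct_none), so p_top/w_top/s_top == 0 means "no construct of that
// kind is currently open".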
struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  /* TODO for monitor perhaps? */
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}

void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    }
    __kmp_free(p);
  }
}

#if KMP_DEBUG
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  }
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif

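// Record entry into a "parallel" region. The new top-of-stack entry links
// back to the previous innermost parallel construct through its prev field,
// so p_top always indexes the innermost open "parallel".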
void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

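// Verify that a worksharing construct may legally begin here: it must not be
// nested inside another worksharing or synchronization construct of the
// current parallel region. The taskq-in-taskq case is exempted below.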
void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  if (p->w_top > p->p_top &&
      !(IS_CONS_TYPE_TASKQ(p->stack_data[p->w_top].type) &&
        IS_CONS_TYPE_TASKQ(ct))) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}

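// Check, then record, entry into a worksharing construct; w_top is updated to
// index the new innermost worksharing entry.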
void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

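// Verify that a synchronization construct may legally begin here. Three cases
// are checked: "ordered" must be bound to a worksharing construct that has an
// ordered clause and must not sit inside another "ordered" or "critical";
// "critical" must not be re-entered under a lock this thread already owns
// (self-deadlock on the same name); "master" and "reduce" must not appear
// inside a worksharing construct of the current parallel region.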
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync(int gtid, enum cons_type ct, ident_t const *ident,
                 kmp_user_lock_p lck, kmp_uint32 seq)
#else
__kmp_check_sync(int gtid, enum cons_type ct, ident_t const *ident,
                 kmp_user_lock_p lck)
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo ||
      ct == ct_ordered_in_taskq) {
    if (p->w_top <= p->p_top) {
/* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        if (p->stack_data[p->w_top].type == ct_taskq) {
          __kmp_error_construct2(kmp_i18n_msg_CnsNotInTaskConstruct, ct, ident,
                                 &p->stack_data[p->w_top]);
        } else {
          __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                                 &p->stack_data[p->w_top]);
        }
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo ||
            stack_type ==
                ct_ordered_in_taskq) && /* C doesn't allow named ordered;
                                           ordered in ordered gets error */
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already owns the lock for this critical
                       section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already owns the lock for this critical
                       section */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up the construct stack and try to find a critical with a
         matching name */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found a match on the stack (there may not always be one, because of
           interleaved critical constructs in Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in a CRITICAL which is inside a CRITICAL construct of the same
         name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    }
  }
}

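// Check, then record, entry into a synchronization construct. For "critical"
// the lock pointer is stored in the entry's name field, which is what lets
// __kmp_check_sync detect nested entries under the same name.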
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync(int gtid, enum cons_type ct, ident_t const *ident,
                kmp_user_lock_p lck, kmp_uint32 seq)
#else
__kmp_push_sync(int gtid, enum cons_type ct, ident_t const *ident,
                kmp_user_lock_p lck)
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

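// Record exit from a "parallel" region, diagnosing a stray end-of-parallel
// (empty stack) or an end that does not match the innermost open construct.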
void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

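// Record exit from a worksharing construct and return the type of the
// enclosing worksharing construct (ct_none if there is none). Two ordered
// variants are accepted as matches for their base construct types.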
enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below are two exceptions to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo) &&
       !(p->stack_data[tos].type == ct_task_ordered && ct == ct_task))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}

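// Record exit from a synchronization construct; unlike __kmp_pop_workshare,
// the construct types must match exactly.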
void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

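// Verify that a barrier is legal at this point: it must not be nested inside
// a worksharing or synchronization construct of the current parallel region.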
void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != 0) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}