/*
 * kmp_error.cpp -- KPTS functions for error checking at runtime
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_str.h"

/* ------------------------------------------------------------------------ */

#define MIN_STACK 100

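// Human-readable names for construct types, indexed by enum cons_type; the
// order must stay in sync with that enumeration. Several entries share the
// generic "work-sharing" text because "sections" and "single" pragmas are
// lowered to the same worksharing form (see the comments below).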
static char const *cons_text_c[] = {
    "(none)",
    "\"parallel\"",
    "work-sharing", /* this is not called "for"
                       because of lowering of
                       "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of
                                   lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing", /* this is not called "single" because of lowering of
                       "sections" pragmas */
    "\"critical\"",
    "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"master\"",
    "\"reduce\"",
    "\"barrier\""};

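// get_src() extracts the raw source-location string from an ident_t. The
// psource string has the semicolon-separated form ";file;func;line;line;"
// (see the ident_t documentation in kmp.h); __kmp_pragma() below splits it
// into its individual fields.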
#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);

/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */

static void __kmp_check_null_func(void) { /* nothing to do */ }

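// Grow the per-thread construct stack geometrically (2x + 100 entries) and
// copy the live entries across. The old buffer is deliberately not freed
// here (see the TODO/NOTE markers in the body).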
static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  /* TODO for monitor perhaps? */
  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  /* TODO free the old data */
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}

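// Format a description of a construct for error messages, e.g. something
// like "\"critical\" pragma (at file:func():line)"; the exact wording comes
// from the kmp_i18n_fmt_Pragma entry in the message catalog.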
// NOTE: Function returns allocated memory, caller must free it!
static char *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s",
                        ident->psource); // Copy source to buffer.
    // Split string in buffer to file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  }
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma

/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */

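// Report a fatal consistency error involving one construct (or, in the
// two-construct variant below, a conflicting pair). The construct
// descriptions are heap-allocated by __kmp_pragma() and released here.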
void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
) {
  char *construct = __kmp_pragma(ct, ident);
  __kmp_fatal(__kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct);
}

void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
) {
  char *construct1 = __kmp_pragma(ct, ident);
  char *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_fatal(__kmp_msg_format(id, construct1, construct2), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct1);
  KMP_INTERNAL_FREE(construct2);
}

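// The construct stack is a single array of cons_data entries. Three separate
// chains are threaded through the .prev fields: p_top (innermost "parallel"),
// w_top (innermost worksharing construct), and s_top (innermost sync
// construct). Comparing these indices tells us, for example, whether the
// current worksharing construct was entered inside the current parallel
// region (w_top > p_top).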
struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  /* TODO for monitor perhaps? */
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}

void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    }
    __kmp_free(p);
  }
}

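// Debug-only helper: pretty-print the whole construct stack for one thread,
// triggered via KE_DUMP at high trace levels.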
#if KMP_DEBUG
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  }
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif

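// Push a "parallel" entry: grow the stack if needed, link the new entry to
// the previous p_top through .prev, and make it the new head of the parallel
// chain. The other push routines below follow the same pattern for their
// respective chains.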
void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

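// A worksharing construct may not be nested inside another worksharing or
// sync construct of the same parallel region. Both conditions are detected
// by comparing the chain heads: w_top (or s_top) above p_top means that
// construct was entered after the current "parallel".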
void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  if (p->w_top > p->p_top) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}

void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

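// Validate a sync construct before it is pushed. Three cases are handled:
//   - "ordered" must be bound to an enclosing worksharing construct that was
//     declared with an ordered clause, and may not appear inside "critical"
//     or another "ordered";
//   - "critical" detects self-deadlock: if this thread already owns the
//     lock, it is nesting critical sections of the same name;
//   - "master" and "reduce" may not appear inside a worksharing construct of
//     the same parallel region ("reduce" also not inside another sync).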
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync(int gtid, enum cons_type ct, ident_t const *ident,
                 kmp_user_lock_p lck, kmp_uint32 seq)
#else
__kmp_check_sync(int gtid, enum cons_type ct, ident_t const *ident,
                 kmp_user_lock_p lck)
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo) {
    if (p->w_top <= p->p_top) {
      /* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                               &p->stack_data[p->w_top]);
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo) &&
           /* C doesn't allow named ordered; ordered in ordered gets error */
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already has lock for this critical section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already has lock for this critical section */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up construct stack and try to find critical with matching name */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found match on the stack (may not always because of interleaved
         * critical for Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in CRITICAL which is inside a CRITICAL construct of same name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    }
  }
}

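// Record a sync construct on the s_top chain; "name" carries the lock so
// nested critical sections of the same name can be detected later. Unlike
// the other push routines, this one asserts it runs on the thread named by
// gtid. A hedged usage sketch, assuming KMP_USE_DYNAMIC_LOCK is off and a
// hypothetical lock variable lck:
//
//   __kmp_push_sync(gtid, ct_critical, loc, lck); // entering "critical"
//   ...                                           // guarded user code
//   __kmp_pop_sync(gtid, ct_critical, loc);       // leaving "critical"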
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync(int gtid, enum cons_type ct, ident_t const *ident,
                kmp_user_lock_p lck, kmp_uint32 seq)
#else
__kmp_push_sync(int gtid, enum cons_type ct, ident_t const *ident,
                kmp_user_lock_p lck)
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

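// The pop routines mirror the pushes: the entry being closed must be both
// the top of the overall stack (tos) and the head of its own chain, and its
// type must match the construct being ended. Violations produce
// CnsDetectedEnd (an end with nothing open) or CnsExpectedEnd (ending a
// different construct than the one on top).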
void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

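// Pop a worksharing construct. A ct_pdo end is also accepted for a
// ct_pdo_ordered entry (the one allowed type mismatch). Returns the type of
// the worksharing construct that becomes the new w_top after the pop.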
enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below is the exception to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}

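// Pop a sync construct; here the type on top of the stack must match
// exactly.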
void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

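// Barriers are only checked, never pushed: an explicit barrier is illegal
// anywhere inside an open worksharing or sync construct of the current
// parallel region.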
void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != NULL) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}