/*
 * kmp_error.cpp -- KPTS functions for error checking at runtime
 */

//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_str.h"

/* ------------------------------------------------------------------------ */

#define MIN_STACK 100

static char const *cons_text_c[] = {
    "(none)", "\"parallel\"", "work-sharing", /* this is not called "for"
                                                 because of lowering of
                                                 "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of
                                   lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing", /* this is not called "single" because of lowering of
                       "sections" pragmas */
    "\"taskq\"", "\"taskq\"", "\"taskq ordered\"", "\"critical\"",
    "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"ordered\"", /* in TASKQ */
    "\"master\"", "\"reduce\"", "\"barrier\""};

#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);

/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */

static void __kmp_check_null_func(void) { /* nothing to do */
}

static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  /* TODO for monitor perhaps? */
  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  /* TODO free the old data */
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}

// NOTE: Function returns allocated memory, caller must free it!
static char const *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s",
                        ident->psource); // Copy source to buffer.
    // Split string in buffer to file, func, and line.
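    // The psource string is a semicolon-separated record (see ident_t in
    // kmp.h), assumed here to look like ";file;func;line;...": the empty
    // field before the first ';' is discarded, then file, func, and line
    // are peeled off in order.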
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  } // if
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma

/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */

void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
                           ) {
  char const *construct = __kmp_pragma(ct, ident);
  __kmp_fatal(__kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(CCAST(char *, construct));
}

void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
                            ) {
  char const *construct1 = __kmp_pragma(ct, ident);
  char const *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_fatal(__kmp_msg_format(id, construct1, construct2), __kmp_msg_null);
  KMP_INTERNAL_FREE(CCAST(char *, construct1));
  KMP_INTERNAL_FREE(CCAST(char *, construct2));
}

struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  /* TODO for monitor perhaps? */
  if (gtid < 0) {
    __kmp_check_null_func();
  } // if
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}

void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    } // if
    __kmp_free(p);
  } // if
}

#if KMP_DEBUG
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  } // for i
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif
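
/* Push/pop maintenance of the construct stack. The stack holds one entry per
   open construct; p_top, w_top, and s_top index the most recent PARALLEL,
   WORKSHARING, and SYNC entry respectively, and entries of the same kind are
   chained through their prev fields, so three LIFO lists are overlaid on a
   single stack. */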

void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  } // if
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  } // if
  if (p->w_top > p->p_top &&
      !(IS_CONS_TYPE_TASKQ(p->stack_data[p->w_top].type) &&
        IS_CONS_TYPE_TASKQ(ct))) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  } // if
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  } // if
}

void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
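
/* __kmp_check_sync flags illegal nesting of synchronization constructs:
   "ordered" must bind to an enclosing worksharing construct carrying an
   ordered clause, "critical" must not be re-entered while this thread
   already owns the lock of the same name, and "master"/"reduce" must not
   appear inside a worksharing construct of the current parallel region. */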

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo ||
      ct == ct_ordered_in_taskq) {
    if (p->w_top <= p->p_top) {
      /* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        if (p->stack_data[p->w_top].type == ct_taskq) {
          __kmp_error_construct2(kmp_i18n_msg_CnsNotInTaskConstruct, ct, ident,
                                 &p->stack_data[p->w_top]);
        } else {
          __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                                 &p->stack_data[p->w_top]);
        }
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo ||
            stack_type ==
                ct_ordered_in_taskq) && /* C doesn't allow named ordered;
                                           ordered in ordered gets error */
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already has lock for this critical section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already has lock for this critical section */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up construct stack and try to find critical with matching name */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found match on the stack (may not always because of interleaved
         * critical for Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in CRITICAL which is inside a CRITICAL construct of same name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    } // if
  } // if
}

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */
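
/* The pop routines check strict LIFO unwinding: the entry on top of the
   stack must be the most recent construct of the kind being closed, and its
   type must match the construct named by the end directive; otherwise an
   "expected end" diagnostic is produced. */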

void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below are two exceptions to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo) &&
       !(p->stack_data[tos].type == ct_task_ordered && ct == ct_task))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}

void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != 0) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}
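
/* For reference, a sketch of how runtime entry points are expected to pair
   these calls (assuming a build without KMP_USE_DYNAMIC_LOCK; the actual
   call sites live elsewhere in the runtime, e.g. kmp_csupport.cpp, and the
   names gtid/loc/lck below are illustrative):

     __kmp_push_sync(gtid, ct_critical, loc, lck); // on entering "critical"
     // ... body of the critical section ...
     __kmp_pop_sync(gtid, ct_critical, loc);       // on leaving "critical"
*/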