/*
 * kmp_error.c -- KPTS functions for error checking at runtime
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_str.h"
#include "kmp_error.h"

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#define MIN_STACK       100


static char const * cons_text_c[] = {
    "(none)",
    "\"parallel\"",
    "work-sharing",             /* this is not called "for" because of lowering of "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing",             /* this is not called "single" because of lowering of "sections" pragmas */
    "\"taskq\"",
    "\"taskq\"",
    "\"taskq ordered\"",
    "\"critical\"",
    "\"ordered\"",              /* in PARALLEL */
    "\"ordered\"",              /* in PDO */
    "\"ordered\"",              /* in TASKQ */
    "\"master\"",
    "\"reduce\"",
    "\"barrier\""
};

#define get_src( ident )   ( (ident) == NULL ? NULL : (ident)->psource )

#define PUSH_MSG( ct, ident ) \
    "\tpushing on stack: %s (%s)\n", cons_text_c[ (ct) ], get_src( (ident) )
#define POP_MSG( p )                                  \
    "\tpopping off stack: %s (%s)\n",                 \
    cons_text_c[ (p)->stack_data[ tos ].type ],       \
    get_src( (p)->stack_data[ tos ].ident )

static int const cons_text_c_num = sizeof( cons_text_c ) / sizeof( char const * );

/* ------------------------------------------------------------------------ */
/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */
/* ------------------------------------------------------------------------ */

static void
__kmp_check_null_func( void )
{
    /* nothing to do */
}

static void
__kmp_expand_cons_stack( int gtid, struct cons_header *p )
{
    int    i;
    struct cons_data *d;

    /* TODO for monitor perhaps? */
    if ( gtid < 0 )
        __kmp_check_null_func();

    KE_TRACE( 10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid() ) );

    d = p->stack_data;

    p->stack_size = (p->stack_size * 2) + 100;

    /* TODO free the old data */
    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (p->stack_size + 1) );

    for ( i = p->stack_top; i >= 0; --i )
        p->stack_data[ i ] = d[ i ];

    /* NOTE: we do not free the old stack_data */
}

// NOTE: Function returns allocated memory, caller must free it!
static char const *
__kmp_pragma(
    int             ct,
    ident_t const * ident
) {
    char const * cons = NULL;  // Construct name.
    char * file = NULL;        // File name.
    char * func = NULL;        // Function (routine) name.
    char * line = NULL;        // Line number.
    kmp_str_buf_t buffer;
    kmp_msg_t     prgm;
    __kmp_str_buf_init( & buffer );
    if ( 0 < ct && ct < cons_text_c_num ) {
        cons = cons_text_c[ ct ];
    } else {
        KMP_DEBUG_ASSERT( 0 );
    };
    if ( ident != NULL && ident->psource != NULL ) {
        char * tail = NULL;
        __kmp_str_buf_print( & buffer, "%s", ident->psource ); // Copy source to buffer.
        // Split string in buffer to file, func, and line.
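        // NOTE: ident->psource is expected to have the form ";file;func;line;col;;"
        // (semicolon-separated fields; an assumption based on the ident_t
        // description in kmp.h). The first split below therefore discards the
        // empty field produced by the leading ';'.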
        tail = buffer.str;
        __kmp_str_split( tail, ';', NULL,   & tail );
        __kmp_str_split( tail, ';', & file, & tail );
        __kmp_str_split( tail, ';', & func, & tail );
        __kmp_str_split( tail, ';', & line, & tail );
    }; // if
    prgm = __kmp_msg_format( kmp_i18n_fmt_Pragma, cons, file, func, line );
    __kmp_str_buf_free( & buffer );
    return prgm.str;
} // __kmp_pragma

/* ------------------------------------------------------------------------ */
/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */
/* ------------------------------------------------------------------------ */


void
__kmp_error_construct(
    kmp_i18n_id_t   id,     // Message identifier.
    enum cons_type  ct,     // Construct type.
    ident_t const * ident   // Construct ident.
) {
    char const * construct = __kmp_pragma( ct, ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct );
}

void
__kmp_error_construct2(
    kmp_i18n_id_t   id,     // Message identifier.
    enum cons_type  ct,     // First construct type.
    ident_t const * ident,  // First construct ident.
    struct cons_data const * cons  // Second construct.
) {
    char const * construct1 = __kmp_pragma( ct, ident );
    char const * construct2 = __kmp_pragma( cons->type, cons->ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct1, construct2 ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct1 );
    KMP_INTERNAL_FREE( (void *) construct2 );
}


struct cons_header *
__kmp_allocate_cons_stack( int gtid )
{
    struct cons_header *p;

    /* TODO for monitor perhaps? */
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    }; // if
    KE_TRACE( 10, ("allocate cons_stack (%d)\n", gtid ) );
    p = (struct cons_header *) __kmp_allocate( sizeof( struct cons_header ) );
    p->p_top = p->w_top = p->s_top = 0;
    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (MIN_STACK + 1) );
    p->stack_size = MIN_STACK;
    p->stack_top  = 0;
    p->stack_data[ 0 ].type  = ct_none;
    p->stack_data[ 0 ].prev  = 0;
    p->stack_data[ 0 ].ident = NULL;
    return p;
}

void
__kmp_free_cons_stack( void * ptr ) {
    struct cons_header * p = (struct cons_header *) ptr;
    if ( p != NULL ) {
        if ( p->stack_data != NULL ) {
            __kmp_free( p->stack_data );
            p->stack_data = NULL;
        }; // if
        __kmp_free( p );
    }; // if
}


#if KMP_DEBUG
static void
dump_cons_stack( int gtid, struct cons_header * p ) {
    int i;
    int tos = p->stack_top;
    kmp_str_buf_t buffer;
    __kmp_str_buf_init( & buffer );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_str_buf_print( & buffer, "Begin construct stack with %d items for thread %d\n", tos, gtid );
    __kmp_str_buf_print( & buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos, p->p_top, p->w_top, p->s_top );
    for ( i = tos; i > 0; i-- ) {
        struct cons_data * c = & ( p->stack_data[ i ] );
        __kmp_str_buf_print( & buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i, cons_text_c[ c->type ], get_src( c->ident ), c->prev, c->name );
    }; // for i
    __kmp_str_buf_print( & buffer, "End construct stack for thread %d\n", gtid );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_debug_printf( "%s", buffer.str );
    __kmp_str_buf_free( & buffer );
}
#endif
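
/* ------------------------------------------------------------------------ */
/* The routines below maintain the per-thread construct stack used for
 * runtime consistency checking.  Roughly (hypothetical user code shown only
 * for illustration), the runtime entry points invoke them like this:
 *
 *     #pragma omp parallel          // __kmp_push_parallel / __kmp_pop_parallel
 *     {
 *         #pragma omp for           // __kmp_push_workshare / __kmp_pop_workshare
 *         for ( ... ) {
 *             #pragma omp critical  // __kmp_push_sync / __kmp_pop_sync
 *             { ... }
 *         }
 *     }
 *
 * Each push first validates the requested nesting against the current tops
 * (p_top, w_top, s_top) and reports a fatal diagnostic on a violation.
 */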

void
__kmp_push_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    KE_TRACE( 100, ( PUSH_MSG( ct_parallel, ident ) ) );
    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }; // if
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct_parallel;
    p->stack_data[ tos ].prev  = p->p_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->p_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

void
__kmp_check_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );

    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }; // if
    if ( p->w_top > p->p_top &&
         !(IS_CONS_TYPE_TASKQ( p->stack_data[ p->w_top ].type ) && IS_CONS_TYPE_TASKQ( ct )) ) {
        // We are already in a WORKSHARE construct for this PARALLEL region.
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->w_top ] );
    }; // if
    if ( p->s_top > p->p_top ) {
        // We are already in a SYNC construct for this PARALLEL region.
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->s_top ] );
    }; // if
}

void
__kmp_push_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    KE_TRACE( 10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    __kmp_check_workshare( gtid, ct, ident );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->w_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->w_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
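
/* Verify that a sync construct (ordered, critical, master, or reduce) may be
 * entered at this point.  For example (hypothetical user code), re-entering a
 * "critical" region whose lock this thread already owns would deadlock:
 *
 *     #pragma omp critical (xyz)
 *     {
 *         #pragma omp critical (xyz)   // reported as CnsNestingSameName
 *         { ... }
 *     }
 *
 * The check detects this by walking the sync stack for an entry whose lock
 * name matches lck.
 */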
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) );

    if ( p->stack_top >= p->stack_size )
        __kmp_expand_cons_stack( gtid, p );

    if ( ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) {
        if ( p->w_top <= p->p_top ) {
            /* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
            /* do not report error messages for PARALLEL ORDERED */
            KMP_ASSERT( ct == ct_ordered_in_parallel );
#else
            __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident );
#endif /* BUILD_PARALLEL_ORDERED */
        } else {
            /* inside a WORKSHARING construct for this PARALLEL region */
            if ( !IS_CONS_TYPE_ORDERED( p->stack_data[ p->w_top ].type ) ) {
                if ( p->stack_data[ p->w_top ].type == ct_taskq ) {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNotInTaskConstruct,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                } else {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNoOrderedClause,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                }
            }
        }
        if ( p->s_top > p->p_top && p->s_top > p->w_top ) {
            /* inside a sync construct which is inside a worksharing construct */
            int index = p->s_top;
            enum cons_type stack_type;

            stack_type = p->stack_data[ index ].type;

            if ( stack_type == ct_critical ||
                 ( ( stack_type == ct_ordered_in_parallel ||
                     stack_type == ct_ordered_in_pdo      ||
                     stack_type == ct_ordered_in_taskq ) &&   /* C doesn't allow named ordered; ordered in ordered gets error */
                   p->stack_data[ index ].ident != NULL &&
                   ( p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC ) ) ) {
                /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
                __kmp_error_construct2(
                    kmp_i18n_msg_CnsInvalidNesting,
                    ct, ident,
                    & p->stack_data[ index ]
                );
            }
        }
    } else if ( ct == ct_critical ) {
#if KMP_USE_DYNAMIC_LOCK
        if ( lck != NULL && __kmp_get_user_lock_owner( lck, seq ) == gtid ) { /* this thread already holds the lock for this critical section */
#else
        if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) { /* this thread already holds the lock for this critical section */
#endif
            int index = p->s_top;
            struct cons_data cons = { NULL, ct_critical, 0, NULL };
            /* walk up construct stack and try to find critical with matching name */
            while ( index != 0 && p->stack_data[ index ].name != lck ) {
                index = p->stack_data[ index ].prev;
            }
            if ( index != 0 ) {
                /* found a match on the stack (may not always find one because of interleaved critical for Fortran) */
                cons = p->stack_data[ index ];
            }
            /* we are in CRITICAL which is inside a CRITICAL construct of the same name */
            __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons );
        }
    } else if ( ct == ct_master || ct == ct_reduce ) {
        if ( p->w_top > p->p_top ) {
            /* inside a WORKSHARING construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->w_top ]
            );
        }
        if ( ct == ct_reduce && p->s_top > p->p_top ) {
            /* inside another SYNC construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->s_top ]
            );
        }; // if
    }; // if
}

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
#if KMP_USE_DYNAMIC_LOCK
    __kmp_check_sync( gtid, ct, ident, lck, seq );
#else
    __kmp_check_sync( gtid, ct, ident, lck );
#endif
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

/* ------------------------------------------------------------------------ */
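
/* The pop routines below undo the corresponding pushes.  Each one verifies
 * that the construct being closed is the innermost open construct of its
 * kind (tos must equal p_top/w_top/s_top and the construct types must
 * match); otherwise CnsDetectedEnd or CnsExpectedEnd is reported.  On
 * success the stack slot is cleared and the top pointers are rewound.
 */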
void
__kmp_pop_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->p_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident );
    }
    if ( tos != p->p_top || p->stack_data[ tos ].type != ct_parallel ) {
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct_parallel, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->p_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

enum cons_type
__kmp_pop_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->w_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }

    if ( tos != p->w_top ||
         ( p->stack_data[ tos ].type != ct &&
           /* below are two exceptions to the rule that construct types must match */
           ! ( p->stack_data[ tos ].type == ct_pdo_ordered  && ct == ct_pdo  ) &&
           ! ( p->stack_data[ tos ].type == ct_task_ordered && ct == ct_task )
         )
       ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->w_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
    return p->stack_data[ p->w_top ].type;
}

void
__kmp_pop_sync( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->s_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    };
    if ( tos != p->s_top || p->stack_data[ tos ].type != ct ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    };
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    };
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->s_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

/* ------------------------------------------------------------------------ */
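
/* A barrier is only legal when no worksharing or sync construct is open in
 * the current parallel region, e.g. (hypothetical user code):
 *
 *     #pragma omp for
 *     for ( i = 0; i < n; i++ ) {
 *         #pragma omp barrier    // invalid nesting, reported below
 *     }
 */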
void
__kmp_check_barrier( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    KE_TRACE( 10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid, __kmp_get_gtid() ) );
    if ( ident != 0 ) {
        __kmp_check_null_func();
    }
    if ( p->w_top > p->p_top ) {
        /* we are already in a WORKSHARING construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->w_top ]
        );
    }
    if ( p->s_top > p->p_top ) {
        /* we are already in a SYNC construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->s_top ]
        );
    }
}

/* ------------------------------------------------------------------------ */


/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */