/*
 * kmp_threadprivate.cpp -- OpenMP threadprivate support library
 */

//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1

void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);

struct shared_table __kmp_threadprivate_d_table;

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct private_common *
    __kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid,
                                         void *pc_addr) {
  struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, called with "
                "address %p\n",
                gtid, pc_addr));
  dump_list();
#endif

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, found "
                    "node %p on list\n",
                    gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct shared_common *
    __kmp_find_shared_task_common(struct shared_table *tbl, int gtid,
                                  void *pc_addr) {
  struct shared_common *tn;

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(
          10,
          ("__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
           gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

// Create a template for the data initialized storage. Either the template is
// NULL indicating zero fill, or the template is a copy of the original data.
static struct private_data *__kmp_init_common_data(void *pc_addr,
                                                   size_t pc_size) {
  struct private_data *d;
  size_t i;
  char *p;

  d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));
  /*
  d->data = 0;  // AC: commented out because __kmp_allocate zeroes the memory
  d->next = 0;
  */
  d->size = pc_size;
  d->more = 1;

  p = (char *)pc_addr;

  for (i = pc_size; i > 0; --i) {
    if (*p++ != '\0') {
      d->data = __kmp_allocate(pc_size);
      KMP_MEMCPY(d->data, pc_addr, pc_size);
      break;
    }
  }

  return d;
}

// Initialize the data area from the template.
static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
  char *addr = (char *)pc_addr;
  int i, offset;

  for (offset = 0; d != 0; d = d->next) {
    for (i = d->more; i > 0; --i) {
      if (d->data == 0)
        memset(&addr[offset], '\0', d->size);
      else
        KMP_MEMCPY(&addr[offset], d->data, d->size);
      offset += d->size;
    }
  }
}
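/* Illustrative note, not part of the library: __kmp_init_common_data() and
   __kmp_copy_common_data() implement the POD "template" scheme used below.
   For two hypothetical threadprivate globals

       int zeroed;       // initial image is all zero bytes
       int seeded = 42;  // initial image contains a non-zero byte

   the template built for "zeroed" keeps d->data == NULL, so each thread's
   private copy is simply memset to zero, while the template for "seeded"
   holds a heap copy of the original four bytes that is memcpy'd into every
   thread's private copy. */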
/* we are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void __kmp_common_initialize(void) {
  if (!TCR_4(__kmp_init_common)) {
    int q;
#ifdef KMP_DEBUG
    int gtid;
#endif

    __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
    /* verify the uber masters were initialized */
    for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
      if (__kmp_root[gtid]) {
        KMP_DEBUG_ASSERT(__kmp_root[gtid]->r.r_uber_thread);
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
          KMP_DEBUG_ASSERT(
              !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q]);
        /* __kmp_root[ gtid ]-> r.r_uber_thread ->
         * th.th_pri_common -> data[ q ] = 0;*/
      }
#endif /* KMP_DEBUG */

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
      __kmp_threadprivate_d_table.data[q] = 0;

    TCW_4(__kmp_init_common, TRUE);
  }
}

/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void __kmp_common_destroy(void) {
  if (TCR_4(__kmp_init_common)) {
    int q;

    TCW_4(__kmp_init_common, FALSE);

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      int gtid;
      struct private_common *tn;
      struct shared_common *d_tn;

      /* C++ destructors need to be called once per thread before exiting.
         Don't call destructors for master thread though unless we used copy
         constructor */

      for (d_tn = __kmp_threadprivate_d_table.data[q]; d_tn;
           d_tn = d_tn->next) {
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
            }
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtor)(tn->par_addr);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtor)(d_tn->obj_init);
            }
          }
        }
      }
      __kmp_threadprivate_d_table.data[q] = 0;
    }
  }
}

/* Call all destructors for threadprivate data belonging to this thread */
void __kmp_common_destroy_gtid(int gtid) {
  struct private_common *tn;
  struct shared_common *d_tn;

  KC_TRACE(10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid));
  if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                         : (!KMP_UBER_GTID(gtid))) {
    if (TCR_4(__kmp_init_common)) {

      /* Cannot do this here since not all threads have destroyed their data */
      /* TCW_4(__kmp_init_common, FALSE); */

      for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {

        d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table,
                                             gtid, tn->gbl_addr);

        KMP_DEBUG_ASSERT(d_tn);

        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            (void)(*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            (void)(*d_tn->dt.dtor)(tn->par_addr);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtor)(d_tn->obj_init);
          }
        }
      }
      KC_TRACE(30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors "
                    "complete\n",
                    gtid));
    }
  }
}

#ifdef KMP_TASK_COMMON_DEBUG
static void dump_list(void) {
  int p, q;

  for (p = 0; p < __kmp_all_nth; ++p) {
    if (!__kmp_threads[p])
      continue;
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      if (__kmp_threads[p]->th.th_pri_common->data[q]) {
        struct private_common *tn;

        KC_TRACE(10, ("\tdump_list: gtid:%d addresses\n", p));

        for (tn = __kmp_threads[p]->th.th_pri_common->data[q]; tn;
             tn = tn->next) {
          KC_TRACE(10,
                   ("\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                    tn->gbl_addr, tn->par_addr));
        }
      }
    }
  }
}
#endif /* KMP_TASK_COMMON_DEBUG */

// NOTE: this routine is to be called only from the serial part of the program.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size) {
  struct shared_common **lnk_tn, *d_tn;
  KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
                   __kmp_threads[gtid]->th.th_root->r.r_active == 0);

  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                       pc_addr);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));

    d_tn->gbl_addr = pc_addr;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
    d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate
                            zeroes the memory
    d_tn->ct.ctor = 0;
    d_tn->cct.cctor = 0;
    d_tn->dt.dtor = 0;
    d_tn->is_vec = FALSE;
    d_tn->vec_len = 0L;
    */
    d_tn->cmn_size = pc_size;

    __kmp_acquire_lock(&__kmp_global_lock, gtid);

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;

    __kmp_release_lock(&__kmp_global_lock, gtid);
  }
}

struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size) {
  struct private_common *tn, **tt;
  struct shared_common *d_tn;

  /* +++++++++ START OF CRITICAL SECTION +++++++++ */
  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));

  tn->gbl_addr = pc_addr;

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, gtid,
      pc_addr); /* Only the MASTER data table exists. */

  if (d_tn != 0) {
    /* This threadprivate variable has already been seen. */
    if (d_tn->pod_init == 0 && d_tn->obj_init == 0) {
      d_tn->cmn_size = pc_size;

      if (d_tn->is_vec) {
        if (d_tn->ct.ctorv != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctorv != 0) {
          /* Now data initialize the prototype since it was previously
           * registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctorv)(d_tn->obj_init, pc_addr, d_tn->vec_len);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      } else {
        if (d_tn->ct.ctor != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctor != 0) {
          /* Now data initialize the prototype since it was previously
             registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctor)(d_tn->obj_init, pc_addr);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      }
    }
  } else {
    struct shared_common **lnk_tn;

    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = pc_addr;
    d_tn->cmn_size = pc_size;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
    d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate
                            zeroes the memory
    d_tn->ct.ctor = 0;
    d_tn->cct.cctor = 0;
    d_tn->dt.dtor = 0;
    d_tn->is_vec = FALSE;
    d_tn->vec_len = 0L;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }

  tn->cmn_size = d_tn->cmn_size;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid))) {
    tn->par_addr = (void *)pc_addr;
  } else {
    tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
  /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
  if (pc_size > d_tn->cmn_size) {
    KC_TRACE(
        10, ("__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
             " ,%" KMP_UINTPTR_SPEC ")\n",
             pc_addr, pc_size, d_tn->cmn_size));
    KMP_FATAL(TPCommonBlocksInconsist);
  }
#endif /* USE_CHECKS_COMMON */

  tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);

#ifdef KMP_TASK_COMMON_DEBUG
  if (*tt != 0) {
    KC_TRACE(
        10,
        ("__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
         gtid, pc_addr));
  }
#endif
  tn->next = *tt;
  *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10,
           ("__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
            gtid, pc_addr));
  dump_list();
#endif

  /* Link the node into a simple list */

  tn->link = __kmp_threads[gtid]->th.th_pri_head;
  __kmp_threads[gtid]->th.th_pri_head = tn;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid)))
    return tn;

  /* if C++ object with copy constructor, use it;
   * else if C++ object with constructor, use it for the non-master copies only;
   * else use pod_init and memcpy
   *
   * C++ constructors need to be called once for each non-master thread on
   * allocate
   * C++ copy constructors need to be called once for each thread on allocate */

  /* C++ object with constructors/destructors; don't call constructors for
     master thread though */
  if (d_tn->is_vec) {
    if (d_tn->ct.ctorv != 0) {
      (void)(*d_tn->ct.ctorv)(tn->par_addr, d_tn->vec_len);
    } else if (d_tn->cct.cctorv != 0) {
      (void)(*d_tn->cct.cctorv)(tn->par_addr, d_tn->obj_init, d_tn->vec_len);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  } else {
    if (d_tn->ct.ctor != 0) {
      (void)(*d_tn->ct.ctor)(tn->par_addr);
    } else if (d_tn->cct.cctor != 0) {
      (void)(*d_tn->cct.cctor)(tn->par_addr, d_tn->obj_init);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  }
  /* !BUILD_OPENMP_C
  if (tn->par_addr != tn->gbl_addr)
    __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

  return tn;
}
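/* Illustrative summary, not part of the library: for a non-master thread the
   freshly allocated copy at tn->par_addr is initialized with the following
   precedence: a registered constructor, else a registered copy constructor
   applied to the prototype obj_init, else a byte-wise replay of the POD
   template.  For a hypothetical "int seeded = 42;" with no registered
   constructors, the last branch runs and every worker's copy starts at 42;
   for a C++ object registered via __kmpc_threadprivate_register() below, the
   constructor thunk is invoked on the worker's copy instead. */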
/* ------------------------------------------------------------------------ */
/* We are currently parallel, and we know the thread id. */
/* ------------------------------------------------------------------------ */

/*!
 @ingroup THREADPRIVATE

 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data

 Register constructors and destructors for thread private data.
 This function is called when executing in parallel, when we know the thread
 id.
*/
void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
                                   kmpc_cctor cctor, kmpc_dtor dtor) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  /* Only the global data table exists. */
  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, -1, data);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;
    /*
    d_tn->is_vec = FALSE;  // AC: commented out because __kmp_allocate
                              zeroes the memory
    d_tn->vec_len = 0L;
    d_tn->obj_init = 0;
    d_tn->pod_init = 0;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}
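/* Illustrative sketch, not part of the library: a compiler targeting this
   entry point would typically emit one registration call per threadprivate
   C++ object, usually from a per-module initializer.  The names my_obj,
   __obj_ctor, __obj_dtor and loc below are hypothetical:

       static MyType my_obj;  // #pragma omp threadprivate(my_obj)

       static void *__obj_ctor(void *p) { return new (p) MyType(); }
       static void __obj_dtor(void *p) { static_cast<MyType *>(p)->~MyType(); }

       __kmpc_threadprivate_register(&loc, &my_obj, __obj_ctor,
                                     NULL,  // cctor must be NULL here
                                     __obj_dtor);

   With USE_CHECKS_COMMON defined (as above), the copy-constructor argument is
   expected to be NULL, matching the KMP_ASSERT in the function. */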
void *__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data,
                           size_t size) {
  void *ret;
  struct private_common *tn;

  KC_TRACE(10, ("__kmpc_threadprivate: T#%d called\n", global_tid));

#ifdef USE_CHECKS_COMMON
  if (!__kmp_init_serial)
    KMP_FATAL(RTLNotInitialized);
#endif /* USE_CHECKS_COMMON */

  if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
    /* The parallel address will NEVER overlap with the data_address */
    /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the
     * data_address; use data_address = data */

    KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting private data\n",
                  global_tid));
    kmp_threadprivate_insert_private_data(global_tid, data, data, size);

    ret = data;
  } else {
    KC_TRACE(
        50,
        ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
         global_tid, data));
    tn = __kmp_threadprivate_find_task_common(
        __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);

    if (tn) {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d found data\n", global_tid));
#ifdef USE_CHECKS_COMMON
      if ((size_t)size > tn->cmn_size) {
        KC_TRACE(10, ("THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
                      " ,%" KMP_UINTPTR_SPEC ")\n",
                      data, size, tn->cmn_size));
        KMP_FATAL(TPCommonBlocksInconsist);
      }
#endif /* USE_CHECKS_COMMON */
    } else {
      /* The parallel address will NEVER overlap with the data_address */
      /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use
       * data_address = data */
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid));
      tn = kmp_threadprivate_insert(global_tid, data, data, size);
    }

    ret = tn->par_addr;
  }
  KC_TRACE(10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                global_tid, ret));

  return ret;
}

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param global_tid global thread number
 @param data pointer to data to privatize
 @param size size of data to privatize
 @param cache pointer to cache
 @return pointer to private storage

 Allocate private storage for threadprivate data.
*/
void *
__kmpc_threadprivate_cached(ident_t *loc,
                            kmp_int32 global_tid, // gtid.
                            void *data, // Pointer to original global variable.
                            size_t size, // Size of original global variable.
                            void ***cache) {
  KC_TRACE(10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, "
                "address: %p, size: %" KMP_SIZE_T_SPEC "\n",
                global_tid, *cache, data, size));

  if (TCR_PTR(*cache) == 0) {
    __kmp_acquire_lock(&__kmp_global_lock, global_tid);

    if (TCR_PTR(*cache) == 0) {
      __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
      __kmp_tp_cached = 1;
      __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
      void **my_cache;
      KMP_ITT_IGNORE(
          my_cache = (void **)__kmp_allocate(
              sizeof(void *) * __kmp_tp_capacity + sizeof(kmp_cached_addr_t)););
      // No need to zero the allocated memory; __kmp_allocate does that.
      KC_TRACE(
          50,
          ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
           global_tid, my_cache));

      /* TODO: free all this memory in __kmp_common_destroy using
       * __kmp_threadpriv_cache_list */
      /* Add address of my_cache to linked list for cleanup later */
      kmp_cached_addr_t *tp_cache_addr;

      tp_cache_addr = (kmp_cached_addr_t *)&my_cache[__kmp_tp_capacity];
      tp_cache_addr->addr = my_cache;
      tp_cache_addr->next = __kmp_threadpriv_cache_list;
      __kmp_threadpriv_cache_list = tp_cache_addr;

      KMP_MB();

      TCW_PTR(*cache, my_cache);

      KMP_MB();
    }

    __kmp_release_lock(&__kmp_global_lock, global_tid);
  }

  void *ret;
  if ((ret = TCR_PTR((*cache)[global_tid])) == 0) {
    ret = __kmpc_threadprivate(loc, global_tid, data, (size_t)size);

    TCW_PTR((*cache)[global_tid], ret);
  }
  KC_TRACE(10,
           ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
            global_tid, ret));

  return ret;
}
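/* Illustrative sketch, not part of the library: compiler-generated code
   normally reserves one cache pointer per threadprivate variable and routes
   every reference through __kmpc_threadprivate_cached().  The names my_var,
   my_var_cache and loc below are hypothetical:

       int my_var;                         // #pragma omp threadprivate(my_var)
       static void **my_var_cache = NULL;  // filled in on first use

       kmp_int32 gtid = __kmpc_global_thread_num(&loc);
       int *p = (int *)__kmpc_threadprivate_cached(&loc, gtid, &my_var,
                                                   sizeof(my_var),
                                                   &my_var_cache);
       *p = 7;  // updates this thread's private copy only

   After the first call, (*my_var_cache)[gtid] already holds this thread's
   private address, so later calls return it without taking any locks. */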
/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data
 @param vector_length length of the vector (bytes or elements?)
 Register vector constructors and destructors for thread private data.
*/
void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
                                       kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
                                       kmpc_dtor_vec dtor,
                                       size_t vector_length) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register_vec: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, -1,
      data); /* Only the global data table exists. */

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctorv = ctor;
    d_tn->cct.cctorv = cctor;
    d_tn->dt.dtorv = dtor;
    d_tn->is_vec = TRUE;
    d_tn->vec_len = (size_t)vector_length;
    /*
    d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate
                            zeroes the memory
    d_tn->pod_init = 0;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}
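/* Illustrative sketch, not part of the library: for a threadprivate array of
   C++ objects the compiler would register vector thunks instead.  The names
   MyType, my_arr, __arr_ctor, __arr_dtor and loc are hypothetical, and the
   element-count interpretation of vector_length is an assumption (the @param
   note above leaves "bytes or elements" open):

       static MyType my_arr[8];  // #pragma omp threadprivate(my_arr)

       static void *__arr_ctor(void *p, size_t n) {
         for (size_t i = 0; i < n; ++i)
           new (static_cast<MyType *>(p) + i) MyType();
         return p;
       }
       static void __arr_dtor(void *p, size_t n) {
         for (size_t i = n; i > 0; --i)
           (static_cast<MyType *>(p) + (i - 1))->~MyType();
       }

       __kmpc_threadprivate_register_vec(&loc, my_arr, __arr_ctor,
                                         NULL,  // cctor must be NULL here
                                         __arr_dtor, 8);

   Each non-master thread's copy is then constructed on first use in
   kmp_threadprivate_insert() and destroyed from __kmp_common_destroy_gtid()
   at thread exit. */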