/*
 * kmp_threadprivate.cpp -- OpenMP threadprivate support library
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1

void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);

struct shared_table __kmp_threadprivate_d_table;

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct private_common *
    __kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid,
                                         void *pc_addr)

{
  struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, called with "
                "address %p\n",
                gtid, pc_addr));
  dump_list();
#endif

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, found "
                    "node %p on list\n",
                    gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}
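
// How the lookup structures used below fit together: each worker thread keeps
// a small hash table, th.th_pri_common, of struct private_common nodes that
// map a threadprivate variable's global address (gbl_addr) to that thread's
// private copy (par_addr).  A single process-wide table,
// __kmp_threadprivate_d_table, holds one struct shared_common descriptor per
// threadprivate variable with the metadata shared by all threads: size,
// constructor/copy-constructor/destructor pointers, and the POD
// initialization template (pod_init).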

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct shared_common *
    __kmp_find_shared_task_common(struct shared_table *tbl, int gtid,
                                  void *pc_addr) {
  struct shared_common *tn;

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(
          10,
          ("__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
           gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

// Create a template for the data initialized storage. Either the template is
// NULL indicating zero fill, or the template is a copy of the original data.
static struct private_data *__kmp_init_common_data(void *pc_addr,
                                                   size_t pc_size) {
  struct private_data *d;
  size_t i;
  char *p;

  d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));
  /*
      d->data = 0;  // AC: commented out because __kmp_allocate zeroes the
     memory
      d->next = 0;
  */
  d->size = pc_size;
  d->more = 1;

  p = (char *)pc_addr;

  for (i = pc_size; i > 0; --i) {
    if (*p++ != '\0') {
      d->data = __kmp_allocate(pc_size);
      KMP_MEMCPY(d->data, pc_addr, pc_size);
      break;
    }
  }

  return d;
}

// Initialize the data area from the template.
static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
  char *addr = (char *)pc_addr;

  for (size_t offset = 0; d != 0; d = d->next) {
    for (int i = d->more; i > 0; --i) {
      if (d->data == 0)
        memset(&addr[offset], '\0', d->size);
      else
        KMP_MEMCPY(&addr[offset], d->data, d->size);
      offset += d->size;
    }
  }
}
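
// For illustration (not part of the runtime API): with the template scheme
// above, a threadprivate variable whose static initializer is all zero bytes,
// e.g. "int counter;", gets a private_data node with data == NULL, so each
// thread's copy is zero-filled by memset; a variable with a non-zero
// initializer, e.g. "int seed = 42;", gets a byte-for-byte snapshot of the
// original object that __kmp_copy_common_data later replays into every
// thread's private copy.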

/* we are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void __kmp_common_initialize(void) {
  if (!TCR_4(__kmp_init_common)) {
    int q;
#ifdef KMP_DEBUG
    int gtid;
#endif

    __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
    /* verify the uber masters were initialized */
    for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
      if (__kmp_root[gtid]) {
        KMP_DEBUG_ASSERT(__kmp_root[gtid]->r.r_uber_thread);
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
          KMP_DEBUG_ASSERT(
              !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q]);
        /* __kmp_root[ gtid ]-> r.r_uber_thread ->
         * th.th_pri_common -> data[ q ] = 0; */
      }
#endif /* KMP_DEBUG */

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
      __kmp_threadprivate_d_table.data[q] = 0;

    TCW_4(__kmp_init_common, TRUE);
  }
}

/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void __kmp_common_destroy(void) {
  if (TCR_4(__kmp_init_common)) {
    int q;

    TCW_4(__kmp_init_common, FALSE);

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      int gtid;
      struct private_common *tn;
      struct shared_common *d_tn;

      /* C++ destructors need to be called once per thread before exiting.
         Don't call destructors for primary thread though unless we used copy
         constructor */

      for (d_tn = __kmp_threadprivate_d_table.data[q]; d_tn;
           d_tn = d_tn->next) {
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
            }
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtor)(tn->par_addr);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtor)(d_tn->obj_init);
            }
          }
        }
      }
      __kmp_threadprivate_d_table.data[q] = 0;
    }
  }
}

/* Call all destructors for threadprivate data belonging to this thread */
void __kmp_common_destroy_gtid(int gtid) {
  struct private_common *tn;
  struct shared_common *d_tn;

  if (!TCR_4(__kmp_init_gtid)) {
    // This is possible when one of multiple roots initiates early library
    // termination in a sequential region while other teams are active, and its
    // child threads are about to end.
    return;
  }

  KC_TRACE(10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid));
  if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid)) : (!KMP_UBER_GTID(gtid))) {

    if (TCR_4(__kmp_init_common)) {

      /* Cannot do this here since not all threads have destroyed their data */
      /* TCW_4(__kmp_init_common, FALSE); */

      for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {

        d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                             tn->gbl_addr);
        if (d_tn == NULL)
          continue;
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            (void)(*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
            if (d_tn->obj_init != 0) {
              (void)(*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
            }
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            (void)(*d_tn->dt.dtor)(tn->par_addr);
            if (d_tn->obj_init != 0) {
              (void)(*d_tn->dt.dtor)(d_tn->obj_init);
            }
          }
        }
      }
      KC_TRACE(30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors "
                    "complete\n",
                    gtid));
    }
  }
}

#ifdef KMP_TASK_COMMON_DEBUG
static void dump_list(void) {
  int p, q;

  for (p = 0; p < __kmp_all_nth; ++p) {
    if (!__kmp_threads[p])
      continue;
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      if (__kmp_threads[p]->th.th_pri_common->data[q]) {
        struct private_common *tn;

        KC_TRACE(10, ("\tdump_list: gtid:%d addresses\n", p));

        for (tn = __kmp_threads[p]->th.th_pri_common->data[q]; tn;
             tn = tn->next) {
          KC_TRACE(10,
                   ("\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                    tn->gbl_addr, tn->par_addr));
        }
      }
    }
  }
}
#endif /* KMP_TASK_COMMON_DEBUG */

// NOTE: this routine is to be called only from the serial part of the program.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size) {
  struct shared_common **lnk_tn, *d_tn;
  KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
                   __kmp_threads[gtid]->th.th_root->r.r_active == 0);

  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                       pc_addr);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));

    d_tn->gbl_addr = pc_addr;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
            d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate
       zeroes the memory
            d_tn->ct.ctor = 0;
            d_tn->cct.cctor = 0;
            d_tn->dt.dtor = 0;
            d_tn->is_vec = FALSE;
            d_tn->vec_len = 0L;
    */
    d_tn->cmn_size = pc_size;

    __kmp_acquire_lock(&__kmp_global_lock, gtid);

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;

    __kmp_release_lock(&__kmp_global_lock, gtid);
  }
}

struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size) {
  struct private_common *tn, **tt;
  struct shared_common *d_tn;

  /* +++++++++ START OF CRITICAL SECTION +++++++++ */
  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));

  tn->gbl_addr = pc_addr;

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, gtid,
      pc_addr); /* Only the MASTER data table exists. */

  if (d_tn != 0) {
    /* This threadprivate variable has already been seen. */

    if (d_tn->pod_init == 0 && d_tn->obj_init == 0) {
      d_tn->cmn_size = pc_size;

      if (d_tn->is_vec) {
        if (d_tn->ct.ctorv != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctorv != 0) {
          /* Now data initialize the prototype since it was previously
           * registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctorv)(d_tn->obj_init, pc_addr, d_tn->vec_len);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      } else {
        if (d_tn->ct.ctor != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctor != 0) {
          /* Now data initialize the prototype since it was previously
             registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctor)(d_tn->obj_init, pc_addr);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      }
    }
  } else {
    struct shared_common **lnk_tn;

    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = pc_addr;
    d_tn->cmn_size = pc_size;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
            d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate
       zeroes the memory
            d_tn->ct.ctor = 0;
            d_tn->cct.cctor = 0;
            d_tn->dt.dtor = 0;
            d_tn->is_vec = FALSE;
            d_tn->vec_len = 0L;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }

  tn->cmn_size = d_tn->cmn_size;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid))) {
    tn->par_addr = (void *)pc_addr;
  } else {
    tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
  /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
  if (pc_size > d_tn->cmn_size) {
    KC_TRACE(
        10, ("__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
             " ,%" KMP_UINTPTR_SPEC ")\n",
             pc_addr, pc_size, d_tn->cmn_size));
    KMP_FATAL(TPCommonBlocksInconsist);
  }
#endif /* USE_CHECKS_COMMON */

  tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);

#ifdef KMP_TASK_COMMON_DEBUG
  if (*tt != 0) {
    KC_TRACE(
        10,
        ("__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
         gtid, pc_addr));
  }
#endif
  tn->next = *tt;
  *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10,
           ("__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
            gtid, pc_addr));
  dump_list();
#endif

  /* Link the node into a simple list */

  tn->link = __kmp_threads[gtid]->th.th_pri_head;
  __kmp_threads[gtid]->th.th_pri_head = tn;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid)))
    return tn;

  /* if C++ object with copy constructor, use it;
   * else if C++ object with constructor, use it for the non-primary thread
     copies only;
   * else use pod_init and memcpy
   *
   * C++ constructors need to be called once for each non-primary thread on
   * allocate
   * C++ copy constructors need to be called once for each thread on allocate */

  /* C++ object with constructors/destructors; don't call constructors for
     primary thread though */
  if (d_tn->is_vec) {
    if (d_tn->ct.ctorv != 0) {
      (void)(*d_tn->ct.ctorv)(tn->par_addr, d_tn->vec_len);
    } else if (d_tn->cct.cctorv != 0) {
      (void)(*d_tn->cct.cctorv)(tn->par_addr, d_tn->obj_init, d_tn->vec_len);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  } else {
    if (d_tn->ct.ctor != 0) {
      (void)(*d_tn->ct.ctor)(tn->par_addr);
    } else if (d_tn->cct.cctor != 0) {
      (void)(*d_tn->cct.cctor)(tn->par_addr, d_tn->obj_init);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  }
  /* !BUILD_OPENMP_C
      if (tn->par_addr != tn->gbl_addr)
          __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

  return tn;
}

/* ------------------------------------------------------------------------ */
/* We are currently parallel, and we know the thread id.                    */
/* ------------------------------------------------------------------------ */

/*!
 @ingroup THREADPRIVATE

 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data

 Register constructors and destructors for thread private data.
 This function is called when executing in parallel, when we know the thread id.
*/
void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
                                   kmpc_cctor cctor, kmpc_dtor dtor) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  /* Only the global data table exists. */
  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, -1, data);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;
    /*
            d_tn->is_vec = FALSE;  // AC: commented out because __kmp_allocate
       zeroes the memory
            d_tn->vec_len = 0L;
            d_tn->obj_init = 0;
            d_tn->pod_init = 0;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}
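
// Illustrative lowering sketch (not emitted by this file; names are made up):
// for a C++ threadprivate object with a non-trivial constructor/destructor,
//
//     static Widget w;
//     #pragma omp threadprivate(w)
//
// a compiler is expected to register the type's lifetime hooks once, roughly
// as
//
//     __kmpc_threadprivate_register(&loc, &w,
//                                   (kmpc_ctor)__widget_ctor_thunk,
//                                   NULL /* cctor, see assert above */,
//                                   (kmpc_dtor)__widget_dtor_thunk);
//
// where __widget_ctor_thunk/__widget_dtor_thunk are hypothetical
// compiler-generated wrappers that construct/destroy a Widget at a given
// address.  The descriptor created here is later consulted by
// kmp_threadprivate_insert() when each thread first touches &w.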

void *__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data,
                           size_t size) {
  void *ret;
  struct private_common *tn;

  KC_TRACE(10, ("__kmpc_threadprivate: T#%d called\n", global_tid));

#ifdef USE_CHECKS_COMMON
  if (!__kmp_init_serial)
    KMP_FATAL(RTLNotInitialized);
#endif /* USE_CHECKS_COMMON */

  if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
    /* The parallel address will NEVER overlap with the data_address */
    /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the
     * data_address; use data_address = data */

    KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting private data\n",
                  global_tid));
    kmp_threadprivate_insert_private_data(global_tid, data, data, size);

    ret = data;
  } else {
    KC_TRACE(
        50,
        ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
         global_tid, data));
    tn = __kmp_threadprivate_find_task_common(
        __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);

    if (tn) {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d found data\n", global_tid));
#ifdef USE_CHECKS_COMMON
      if ((size_t)size > tn->cmn_size) {
        KC_TRACE(10, ("THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
                      " ,%" KMP_UINTPTR_SPEC ")\n",
                      data, size, tn->cmn_size));
        KMP_FATAL(TPCommonBlocksInconsist);
      }
#endif /* USE_CHECKS_COMMON */
    } else {
      /* The parallel address will NEVER overlap with the data_address */
      /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use
       * data_address = data */
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid));
      tn = kmp_threadprivate_insert(global_tid, data, data, size);
    }

    ret = tn->par_addr;
  }
  KC_TRACE(10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                global_tid, ret));

  return ret;
}

static kmp_cached_addr_t *__kmp_find_cache(void *data) {
  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;
  while (ptr && ptr->data != data)
    ptr = ptr->next;
  return ptr;
}

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param global_tid global thread number
 @param data pointer to data to privatize
 @param size size of data to privatize
 @param cache pointer to cache
 @return pointer to private storage

 Allocate private storage for threadprivate data.
*/
void *
__kmpc_threadprivate_cached(ident_t *loc,
                            kmp_int32 global_tid, // gtid.
                            void *data, // Pointer to original global variable.
                            size_t size, // Size of original global variable.
                            void ***cache) {
  KC_TRACE(10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, "
                "address: %p, size: %" KMP_SIZE_T_SPEC "\n",
                global_tid, *cache, data, size));

  if (TCR_PTR(*cache) == 0) {
    __kmp_acquire_lock(&__kmp_global_lock, global_tid);

    if (TCR_PTR(*cache) == 0) {
      __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
      // Compiler often passes in NULL cache, even if it's already been created
      void **my_cache;
      kmp_cached_addr_t *tp_cache_addr;
      // Look for an existing cache
      tp_cache_addr = __kmp_find_cache(data);
      if (!tp_cache_addr) { // Cache was never created; do it now
        __kmp_tp_cached = 1;
        KMP_ITT_IGNORE(my_cache = (void **)__kmp_allocate(
                           sizeof(void *) * __kmp_tp_capacity +
                           sizeof(kmp_cached_addr_t)););
        // No need to zero the allocated memory; __kmp_allocate does that.
        KC_TRACE(50, ("__kmpc_threadprivate_cached: T#%d allocated cache at "
                      "address %p\n",
                      global_tid, my_cache));
        /* TODO: free all this memory in __kmp_common_destroy using
         * __kmp_threadpriv_cache_list */
        /* Add address of mycache to linked list for cleanup later */
        tp_cache_addr = (kmp_cached_addr_t *)&my_cache[__kmp_tp_capacity];
        tp_cache_addr->addr = my_cache;
        tp_cache_addr->data = data;
        tp_cache_addr->compiler_cache = cache;
        tp_cache_addr->next = __kmp_threadpriv_cache_list;
        __kmp_threadpriv_cache_list = tp_cache_addr;
      } else { // A cache was already created; use it
        my_cache = tp_cache_addr->addr;
        tp_cache_addr->compiler_cache = cache;
      }
      KMP_MB();

      TCW_PTR(*cache, my_cache);
      __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);

      KMP_MB();
    }
    __kmp_release_lock(&__kmp_global_lock, global_tid);
  }

  void *ret;
  if ((ret = TCR_PTR((*cache)[global_tid])) == 0) {
    ret = __kmpc_threadprivate(loc, global_tid, data, (size_t)size);

    TCW_PTR((*cache)[global_tid], ret);
  }
  KC_TRACE(10,
           ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
            global_tid, ret));
  return ret;
}
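
// Illustrative lowering sketch (not emitted by this file; names are made up):
// a reference to a threadprivate variable inside a parallel region is
// typically compiled into a call through a per-variable cache slot, roughly
//
//     static void **__tp_counter_cache; // hypothetical compiler-created slot
//     ...
//     int *my_counter = (int *)__kmpc_threadprivate_cached(
//         &loc, gtid, &counter, sizeof(counter), &__tp_counter_cache);
//
// After the first call fills the cache, the common case reduces to the
// indexed load of (*__tp_counter_cache)[gtid] plus the NULL check above, so
// no lock is taken on the hot path.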

// This function should only be called when both __kmp_tp_cached_lock and
// kmp_forkjoin_lock are held.
void __kmp_threadprivate_resize_cache(int newCapacity) {
  KC_TRACE(10, ("__kmp_threadprivate_resize_cache: called with size: %d\n",
                newCapacity));

  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;

  while (ptr) {
    if (ptr->data) { // this location has an active cache; resize it
      void **my_cache;
      KMP_ITT_IGNORE(my_cache =
                         (void **)__kmp_allocate(sizeof(void *) * newCapacity +
                                                 sizeof(kmp_cached_addr_t)););
      // No need to zero the allocated memory; __kmp_allocate does that.
      KC_TRACE(50, ("__kmp_threadprivate_resize_cache: allocated cache at %p\n",
                    my_cache));
      // Now copy old cache into new cache
      void **old_cache = ptr->addr;
      for (int i = 0; i < __kmp_tp_capacity; ++i) {
        my_cache[i] = old_cache[i];
      }

      // Add address of new my_cache to linked list for cleanup later
      kmp_cached_addr_t *tp_cache_addr;
      tp_cache_addr = (kmp_cached_addr_t *)&my_cache[newCapacity];
      tp_cache_addr->addr = my_cache;
      tp_cache_addr->data = ptr->data;
      tp_cache_addr->compiler_cache = ptr->compiler_cache;
      tp_cache_addr->next = __kmp_threadpriv_cache_list;
      __kmp_threadpriv_cache_list = tp_cache_addr;

      // Copy new cache to compiler's location: We can copy directly
      // to (*compiler_cache) if compiler guarantees it will keep
      // using the same location for the cache. This is not yet true
      // for some compilers, in which case we have to check if
      // compiler_cache is still pointing at old cache, and if so, we
      // can point it at the new cache with an atomic compare&swap
      // operation. (Old method will always work, but we should shift
      // to new method (commented line below) when Intel and Clang
      // compilers use new method.)
      (void)KMP_COMPARE_AND_STORE_PTR(tp_cache_addr->compiler_cache, old_cache,
                                      my_cache);
      // TCW_PTR(*(tp_cache_addr->compiler_cache), my_cache);

      // If the store doesn't happen here, the compiler's old behavior will
      // inevitably call __kmpc_threadprivate_cache with a new location for the
      // cache, and that function will store the resized cache there at that
      // point.

      // Nullify old cache's data pointer so we skip it next time
      ptr->data = NULL;
    }
    ptr = ptr->next;
  }
  // After all caches are resized, update __kmp_tp_capacity to the new size
  *(volatile int *)&__kmp_tp_capacity = newCapacity;
}

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data
 @param vector_length length of the vector (bytes or elements?)
 Register vector constructors and destructors for thread private data.
*/
void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
                                       kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
                                       kmpc_dtor_vec dtor,
                                       size_t vector_length) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register_vec: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, -1,
      data); /* Only the global data table exists. */

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctorv = ctor;
    d_tn->cct.cctorv = cctor;
    d_tn->dt.dtorv = dtor;
    d_tn->is_vec = TRUE;
    d_tn->vec_len = (size_t)vector_length;
    // d_tn->obj_init = 0; // AC: __kmp_allocate zeroes the memory
    // d_tn->pod_init = 0;
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}

void __kmp_cleanup_threadprivate_caches() {
  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;

  while (ptr) {
    void **cache = ptr->addr;
    __kmp_threadpriv_cache_list = ptr->next;
    if (*ptr->compiler_cache)
      *ptr->compiler_cache = NULL;
    ptr->compiler_cache = NULL;
    ptr->data = NULL;
    ptr->addr = NULL;
    ptr->next = NULL;
    // Threadprivate data pointed at by cache entries are destroyed at end of
    // __kmp_launch_thread with __kmp_common_destroy_gtid.
    __kmp_free(cache); // implicitly frees ptr too
    ptr = __kmp_threadpriv_cache_list;
  }
}