/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stddef.h>

/*
 * 128 million keys should be enough for anyone.
 * This allocates half a gigabyte of memory for the keys themselves and
 * half a gigabyte of memory for each thread that uses the largest key.
 */
#define	MAX_KEYS	0x08000000U
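
/*
 * For the record, the arithmetic behind the figures above (assuming
 * ILP32, i.e. 4-byte pointers): 0x08000000 is 2^27, about 134 million
 * keys, and 2^27 * sizeof (void *) is 512MB of destructor pointers,
 * with the same amount again for any thread whose TSD grows to the
 * largest key.  With 8-byte pointers the corresponding figures double
 * to 1GB each.
 */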

#pragma weak thr_keycreate = _thr_keycreate
#pragma weak pthread_key_create = _thr_keycreate
#pragma weak _pthread_key_create = _thr_keycreate
int
_thr_keycreate(thread_key_t *pkey, void (*destructor)(void *))
{
	tsd_metadata_t *tsdm = &curthread->ul_uberdata->tsd_metadata;
	void (**old_data)(void *) = NULL;
	void (**new_data)(void *);
	uint_t old_nkeys;
	uint_t new_nkeys;

	lmutex_lock(&tsdm->tsdm_lock);

	/*
	 * Unfortunately, POSIX specifies that a pthread_getspecific()
	 * on an allocated key upon which the calling thread has not
	 * performed a pthread_setspecific() must return NULL.  Consider
	 * the following sequence:
	 *
	 *	pthread_key_create(&key);
	 *	pthread_setspecific(key, datum);
	 *	pthread_key_delete(key);
	 *	pthread_key_create(&key);
	 *	val = pthread_getspecific(key);
	 *
	 * According to POSIX, if the deleted key is reused for the new
	 * key returned by the second pthread_key_create(), then the
	 * pthread_getspecific() in the above example must return NULL
	 * (and not the stale datum).  The implementation is thus left
	 * with two alternatives:
	 *
	 *  (1)	Reuse deleted keys.  If this is to be implemented optimally,
	 *	it requires that pthread_key_create() somehow associate
	 *	the value NULL with the new (reused) key for each thread.
	 *	Keeping the hot path fast and lock-free imposes substantial
	 *	complexity on the implementation.
	 *
	 *  (2)	Never reuse deleted keys.  This allows the
	 *	pthread_getspecific() implementation to simply perform a
	 *	check against the number of keys set by the calling thread,
	 *	returning NULL if the specified key is larger than the
	 *	highest set key.  This has the disadvantage of wasting
	 *	memory (a program which simply loops calling
	 *	pthread_key_create()/pthread_key_delete() will ultimately
	 *	run out of memory), but permits an optimal
	 *	pthread_getspecific() while allowing for simple key
	 *	creation and deletion.
	 *
	 * All Solaris implementations have opted for (2).  Given the
	 * ~10 years that this has been in the field, it is safe to assume
	 * that applications don't loop creating and destroying keys; we
	 * stick with (2).
	 */
	if (tsdm->tsdm_nused == (old_nkeys = tsdm->tsdm_nkeys)) {
		/*
		 * We need to allocate or double the number of keys.
		 * tsdm->tsdm_nkeys must always be a power of two.
		 */
		if ((new_nkeys = (old_nkeys << 1)) == 0)
			new_nkeys = 8;

		if (new_nkeys > MAX_KEYS) {
			lmutex_unlock(&tsdm->tsdm_lock);
			return (EAGAIN);
		}
		if ((new_data = lmalloc(new_nkeys * sizeof (void *))) == NULL) {
			lmutex_unlock(&tsdm->tsdm_lock);
			return (ENOMEM);
		}
		if ((old_data = tsdm->tsdm_destro) == NULL) {
			/* key == 0 is always invalid */
			new_data[0] = TSD_UNALLOCATED;
			tsdm->tsdm_nused = 1;
		} else {
			(void) _private_memcpy(new_data, old_data,
			    old_nkeys * sizeof (void *));
		}
		tsdm->tsdm_destro = new_data;
		tsdm->tsdm_nkeys = new_nkeys;
	}

	*pkey = tsdm->tsdm_nused;
	tsdm->tsdm_destro[tsdm->tsdm_nused++] = destructor;
	lmutex_unlock(&tsdm->tsdm_lock);

	if (old_data != NULL)
		lfree(old_data, old_nkeys * sizeof (void *));

	return (0);
}
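
/*
 * For illustration, a typical application-level use of the interfaces
 * above (a hedged sketch; my_key, my_once, my_init and struct state are
 * hypothetical names, not part of this library):
 *
 *	static pthread_key_t my_key;
 *	static pthread_once_t my_once = PTHREAD_ONCE_INIT;
 *
 *	static void
 *	my_init(void)
 *	{
 *		(void) pthread_key_create(&my_key, free);
 *	}
 *
 *	(void) pthread_once(&my_once, my_init);
 *	(void) pthread_setspecific(my_key,
 *	    calloc(1, sizeof (struct state)));
 *	struct state *sp = pthread_getspecific(my_key);
 */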

#pragma weak pthread_key_delete = _thr_key_delete
#pragma weak _pthread_key_delete = _thr_key_delete
int
_thr_key_delete(thread_key_t key)
{
	tsd_metadata_t *tsdm = &curthread->ul_uberdata->tsd_metadata;

	lmutex_lock(&tsdm->tsdm_lock);

	if (key >= tsdm->tsdm_nused ||
	    tsdm->tsdm_destro[key] == TSD_UNALLOCATED) {
		lmutex_unlock(&tsdm->tsdm_lock);
		return (EINVAL);
	}

	tsdm->tsdm_destro[key] = TSD_UNALLOCATED;
	lmutex_unlock(&tsdm->tsdm_lock);

	return (0);
}
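
/*
 * To illustrate the consequence of never reusing keys (a hedged sketch,
 * not library code): in the POSIX sequence discussed above,
 *
 *	pthread_key_create(&key);	returns some key K
 *	pthread_setspecific(key, datum);
 *	pthread_key_delete(key);	tsdm_destro[K] = TSD_UNALLOCATED
 *	pthread_key_create(&key);	returns a strictly larger key
 *	val = pthread_getspecific(key);	val == NULL
 *
 * the final call returns NULL simply because the calling thread has
 * never performed a setspecific on the new, larger key; no per-thread
 * cleanup of the stale datum stored under K is needed to satisfy POSIX.
 */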

/*
 * Blessedly, the pthread_getspecific() interface is much better than the
 * thr_getspecific() interface in that it cannot return an error status.
 * Thus, if the key specified is bogus, pthread_getspecific()'s behavior
 * is undefined.  As an added bonus (and as an artifact of not returning
 * an error code), the requested datum is returned rather than stored
 * through a parameter -- thereby avoiding the unnecessary store/load pair
 * incurred by thr_getspecific().  Every once in a while, the Standards
 * get it right -- but usually by accident.
 */
#pragma weak pthread_getspecific = _pthread_getspecific
void *
_pthread_getspecific(pthread_key_t key)
{
	tsd_t *stsd;

	/*
	 * We are cycle-shaving in this function because some
	 * applications make heavy use of it and one machine cycle
	 * can make a measurable difference in performance.  This
	 * is why we waste a little memory and allocate a NULL value
	 * for the invalid key == 0 in curthread->ul_ftsd[0] rather
	 * than adjusting the key by subtracting one.
	 */
	if (key < TSD_NFAST)
		return (curthread->ul_ftsd[key]);

	if ((stsd = curthread->ul_stsd) != NULL && key < stsd->tsd_nalloc)
		return (stsd->tsd_data[key]);

	return (NULL);
}

#pragma weak thr_getspecific = _thr_getspecific
int
_thr_getspecific(thread_key_t key, void **valuep)
{
	tsd_t *stsd;

	/*
	 * Amazingly, some application code (and worse, some particularly
	 * fugly Solaris library code) _relies_ on the fact that 0 is always
	 * an invalid key.  To preserve this semantic, 0 is never returned
	 * as a key from thr_/pthread_key_create(); we explicitly check
	 * for it here and return EINVAL.
	 */
	if (key == 0)
		return (EINVAL);

	if (key < TSD_NFAST)
		*valuep = curthread->ul_ftsd[key];
	else if ((stsd = curthread->ul_stsd) != NULL && key < stsd->tsd_nalloc)
		*valuep = stsd->tsd_data[key];
	else
		*valuep = NULL;

	return (0);
}
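
/*
 * For illustration of the difference between the two interfaces
 * (a hedged sketch): with the always-invalid key 0,
 *
 *	void *val = pthread_getspecific(0);	val == NULL, read from the
 *						preallocated ul_ftsd[0] slot
 *	int err = thr_getspecific(0, &val);	err == EINVAL, val untouched
 */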

/*
 * We call _thr_setspecific_slow() when the key specified
 * is beyond the current thread's currently allocated range.
 * This case is in a separate function because we want
 * the compiler to optimize for the common case.
 */
static int
_thr_setspecific_slow(thread_key_t key, void *value)
{
	ulwp_t *self = curthread;
	tsd_metadata_t *tsdm = &self->ul_uberdata->tsd_metadata;
	tsd_t *stsd;
	tsd_t *ntsd;
	uint_t nkeys;

	/*
	 * It isn't necessary to grab locks in this path;
	 * tsdm->tsdm_nused can only increase.
	 */
	if (key >= tsdm->tsdm_nused)
		return (EINVAL);

	/*
	 * We would like to test (tsdm->tsdm_destro[key] == TSD_UNALLOCATED)
	 * here but that would require acquiring tsdm->tsdm_lock and we
	 * want to avoid locks in this path.
	 *
	 * We have a key which is (or at least _was_) valid.  If this key
	 * is later deleted (or indeed, is deleted before we set the value),
	 * we don't care; such a condition would indicate an application
	 * race for which POSIX thankfully leaves the behavior unspecified.
	 *
	 * First, determine our new size.  To avoid allocating more than we
	 * have to, continue doubling our size only until the new key fits.
	 * stsd->tsd_nalloc must always be a power of two.
	 */
	nkeys = ((stsd = self->ul_stsd) != NULL)? stsd->tsd_nalloc : 8;
	for (; key >= nkeys; nkeys <<= 1)
		continue;

	/*
	 * Allocate the new TSD.
	 */
	if ((ntsd = lmalloc(nkeys * sizeof (void *))) == NULL)
		return (ENOMEM);

	if (stsd != NULL) {
		/*
		 * Copy the old TSD across to the new.
		 */
		(void) _private_memcpy(ntsd, stsd,
		    stsd->tsd_nalloc * sizeof (void *));
		lfree(stsd, stsd->tsd_nalloc * sizeof (void *));
	}

	ntsd->tsd_nalloc = nkeys;
	ntsd->tsd_data[key] = value;
	self->ul_stsd = ntsd;

	return (0);
}
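
/*
 * A worked example of the doubling above, under an assumed starting
 * state: a thread with no slow TSD yet (ul_stsd == NULL) that sets a
 * value for key 21 starts at nkeys = 8 and doubles 8 -> 16 -> 32, so a
 * 32-slot tsd_t (32 * sizeof (void *) bytes) is allocated.  Subsequent
 * stores to any key below 32 then take the fast "key < tsd_nalloc"
 * path in _thr_setspecific() rather than returning here.
 */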

#pragma weak thr_setspecific = _thr_setspecific
#pragma weak pthread_setspecific = _thr_setspecific
#pragma weak _pthread_setspecific = _thr_setspecific
int
_thr_setspecific(thread_key_t key, void *value)
{
	tsd_t *stsd;
	int ret;
	ulwp_t *self = curthread;

	/*
	 * See the comment in _thr_getspecific(), above.
	 */
	if (key == 0)
		return (EINVAL);

	if (key < TSD_NFAST) {
		curthread->ul_ftsd[key] = value;
		return (0);
	}

	if ((stsd = curthread->ul_stsd) != NULL && key < stsd->tsd_nalloc) {
		stsd->tsd_data[key] = value;
		return (0);
	}

	/*
	 * This is a critical region since we are dealing with memory
	 * allocation and free.  Similar protection is required in tsd_free().
	 */
	enter_critical(self);
	ret = _thr_setspecific_slow(key, value);
	exit_critical(self);
	return (ret);
}

/*
 * Contract-private interface for Java.  See PSARC/2003/159.
 *
 * If the key falls within the TSD_NFAST range, return a non-negative
 * offset that can be used by the caller to fetch the TSD data value
 * directly out of the thread structure using %g7 (sparc) or %gs (x86).
 * With the advent of TLS, %g7 and %gs are part of the ABI, even though
 * the definition of the thread structure itself (ulwp_t) is private.
 *
 * We guarantee that the offset returned on sparc will fit within
 * a SIMM13 field (that is, it is less than 2048).
 *
 * On failure (key is not in the TSD_NFAST range), return -1.
 */
ptrdiff_t
_thr_slot_offset(thread_key_t key)
{
	if (key != 0 && key < TSD_NFAST)
		return ((ptrdiff_t)offsetof(ulwp_t, ul_ftsd[key]));
	return (-1);
}
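
/*
 * For illustration (a hedged sketch; the consumer shown is hypothetical):
 * a caller of this contract, such as a JIT, caches the offset once per
 * fast key and thereafter fetches the value with a single load relative
 * to the thread register rather than calling pthread_getspecific():
 *
 *	ptrdiff_t off = _thr_slot_offset(key);
 *	if (off >= 0)
 *		emit a load of (thread pointer + off), i.e. [%g7 + off]
 *		on sparc or %gs:off on x86
 *	else
 *		fall back to calling pthread_getspecific(key)
 */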

/*
 * This is called by _thrp_exit() to apply destructors to the thread's tsd.
 */
void
tsd_exit()
{
	ulwp_t *self = curthread;
	tsd_metadata_t *tsdm = &self->ul_uberdata->tsd_metadata;
	thread_key_t key;
	int recheck;
	void *val;
	void (*func)(void *);

	lmutex_lock(&tsdm->tsdm_lock);

	do {
		recheck = 0;

		for (key = 1; key < TSD_NFAST &&
		    key < tsdm->tsdm_nused; key++) {
			if ((func = tsdm->tsdm_destro[key]) != NULL &&
			    func != TSD_UNALLOCATED &&
			    (val = self->ul_ftsd[key]) != NULL) {
				self->ul_ftsd[key] = NULL;
				lmutex_unlock(&tsdm->tsdm_lock);
				(*func)(val);
				lmutex_lock(&tsdm->tsdm_lock);
				recheck = 1;
			}
		}

		if (self->ul_stsd == NULL)
			continue;

		/*
		 * Any of these destructors could cause us to grow the number
		 * of TSD keys in the slow TSD; we cannot cache the slow TSD
		 * pointer through this loop.
		 */
		for (; key < self->ul_stsd->tsd_nalloc &&
		    key < tsdm->tsdm_nused; key++) {
			if ((func = tsdm->tsdm_destro[key]) != NULL &&
			    func != TSD_UNALLOCATED &&
			    (val = self->ul_stsd->tsd_data[key]) != NULL) {
				self->ul_stsd->tsd_data[key] = NULL;
				lmutex_unlock(&tsdm->tsdm_lock);
				(*func)(val);
				lmutex_lock(&tsdm->tsdm_lock);
				recheck = 1;
			}
		}
	} while (recheck);

	lmutex_unlock(&tsdm->tsdm_lock);

	/*
	 * We're done; if we have slow TSD, we need to free it.
	 */
	tsd_free(self);
}

void
tsd_free(ulwp_t *ulwp)
{
	tsd_t *stsd;
	ulwp_t *self = curthread;

	enter_critical(self);
	if ((stsd = ulwp->ul_stsd) != NULL)
		lfree(stsd, stsd->tsd_nalloc * sizeof (void *));
	ulwp->ul_stsd = NULL;
	exit_critical(self);
}
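
/*
 * For illustration of why tsd_exit() loops until no work remains (a
 * hedged sketch; my_destructor and other_key are hypothetical):
 *
 *	static void
 *	my_destructor(void *val)
 *	{
 *		free(val);
 *		(void) pthread_setspecific(other_key, strdup("more state"));
 *	}
 *
 * When tsd_exit() applies my_destructor(), the destructor stores a new
 * value under other_key.  Because each applied destructor sets recheck,
 * tsd_exit() makes another pass and applies other_key's destructor (if
 * one is registered) to the freshly stored value as well.
 */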