10Sstevel@tonic-gate /*
20Sstevel@tonic-gate * CDDL HEADER START
30Sstevel@tonic-gate *
40Sstevel@tonic-gate * The contents of this file are subject to the terms of the
53864Sraf * Common Development and Distribution License (the "License").
63864Sraf * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate *
80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate * See the License for the specific language governing permissions
110Sstevel@tonic-gate * and limitations under the License.
120Sstevel@tonic-gate *
130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate *
190Sstevel@tonic-gate * CDDL HEADER END
200Sstevel@tonic-gate */
213864Sraf
220Sstevel@tonic-gate /*
236515Sraf * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
240Sstevel@tonic-gate * Use is subject to license terms.
250Sstevel@tonic-gate */
260Sstevel@tonic-gate
270Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI"
280Sstevel@tonic-gate
290Sstevel@tonic-gate #include "lint.h"
300Sstevel@tonic-gate #include "thr_uberdata.h"
310Sstevel@tonic-gate #include <stddef.h>
320Sstevel@tonic-gate
330Sstevel@tonic-gate /*
34*6812Sraf * These symbols should not be exported from libc, but
35*6812Sraf * /lib/libm.so.2 references them. libm needs to be fixed.
36*6812Sraf * Also, some older versions of the Studio compiler/debugger
37*6812Sraf * components reference them. These need to be fixed, too.
38*6812Sraf */
39*6812Sraf #pragma weak _thr_getspecific = thr_getspecific
40*6812Sraf #pragma weak _thr_keycreate = thr_keycreate
41*6812Sraf #pragma weak _thr_setspecific = thr_setspecific
42*6812Sraf
43*6812Sraf /*
440Sstevel@tonic-gate * 128 million keys should be enough for anyone.
450Sstevel@tonic-gate * This allocates half a gigabyte of memory for the keys themselves and
460Sstevel@tonic-gate * half a gigabyte of memory for each thread that uses the largest key.
470Sstevel@tonic-gate */
480Sstevel@tonic-gate #define MAX_KEYS 0x08000000U
490Sstevel@tonic-gate
/*
 * Allocate a new TSD key and record its destructor (which may be NULL).
 * Returns 0 on success and stores the new key through *pkey;
 * returns EAGAIN if MAX_KEYS would be exceeded, or ENOMEM if the
 * destructor array cannot be grown.
 */
int
thr_keycreate(thread_key_t *pkey, void (*destructor)(void *))
{
	tsd_metadata_t *tsdm = &curthread->ul_uberdata->tsd_metadata;
	void (**old_data)(void *) = NULL;	/* previous destructor array */
	void (**new_data)(void *);		/* grown destructor array */
	uint_t old_nkeys;
	uint_t new_nkeys;

	lmutex_lock(&tsdm->tsdm_lock);

	/*
	 * Unfortunately, pthread_getspecific() specifies that a
	 * pthread_getspecific() on an allocated key upon which the
	 * calling thread has not performed a pthread_setspecific()
	 * must return NULL. Consider the following sequence:
	 *
	 *	pthread_key_create(&key);
	 *	pthread_setspecific(key, datum);
	 *	pthread_key_delete(&key);
	 *	pthread_key_create(&key);
	 *	val = pthread_getspecific(key);
	 *
	 * According to POSIX, if the deleted key is reused for the new
	 * key returned by the second pthread_key_create(), then the
	 * pthread_getspecific() in the above example must return NULL
	 * (and not the stale datum).  The implementation is thus left
	 * with two alternatives:
	 *
	 * (1)	Reuse deleted keys.  If this is to be implemented optimally,
	 *	it requires that pthread_key_create() somehow associate
	 *	the value NULL with the new (reused) key for each thread.
	 *	Keeping the hot path fast and lock-free induces substantial
	 *	complexity on the implementation.
	 *
	 * (2)	Never reuse deleted keys. This allows the pthread_getspecific()
	 *	implementation to simply perform a check against the number
	 *	of keys set by the calling thread, returning NULL if the
	 *	specified key is larger than the highest set key.  This has
	 *	the disadvantage of wasting memory (a program which simply
	 *	loops calling pthread_key_create()/pthread_key_delete()
	 *	will ultimately run out of memory), but permits an optimal
	 *	pthread_getspecific() while allowing for simple key creation
	 *	and deletion.
	 *
	 * All Solaris implementations have opted for (2).  Given the
	 * ~10 years that this has been in the field, it is safe to assume
	 * that applications don't loop creating and destroying keys; we
	 * stick with (2).
	 */
	if (tsdm->tsdm_nused == (old_nkeys = tsdm->tsdm_nkeys)) {
		/*
		 * We need to allocate or double the number of keys.
		 * tsdm->tsdm_nkeys must always be a power of two.
		 */
		if ((new_nkeys = (old_nkeys << 1)) == 0)
			new_nkeys = 8;	/* initial allocation */

		if (new_nkeys > MAX_KEYS) {
			lmutex_unlock(&tsdm->tsdm_lock);
			return (EAGAIN);
		}
		if ((new_data = lmalloc(new_nkeys * sizeof (void *))) == NULL) {
			lmutex_unlock(&tsdm->tsdm_lock);
			return (ENOMEM);
		}
		if ((old_data = tsdm->tsdm_destro) == NULL) {
			/* key == 0 is always invalid */
			new_data[0] = TSD_UNALLOCATED;
			tsdm->tsdm_nused = 1;
		} else {
			(void) memcpy(new_data, old_data,
			    old_nkeys * sizeof (void *));
		}
		tsdm->tsdm_destro = new_data;
		tsdm->tsdm_nkeys = new_nkeys;
	}

	/* hand out the next never-before-used key */
	*pkey = tsdm->tsdm_nused;
	tsdm->tsdm_destro[tsdm->tsdm_nused++] = destructor;
	lmutex_unlock(&tsdm->tsdm_lock);

	/* free the superseded array after dropping the lock */
	if (old_data != NULL)
		lfree(old_data, old_nkeys * sizeof (void *));

	return (0);
}
1370Sstevel@tonic-gate
138*6812Sraf #pragma weak _pthread_key_create = pthread_key_create
139*6812Sraf int
pthread_key_create(pthread_key_t * pkey,void (* destructor)(void *))140*6812Sraf pthread_key_create(pthread_key_t *pkey, void (*destructor)(void *))
141*6812Sraf {
142*6812Sraf return (thr_keycreate(pkey, destructor));
143*6812Sraf }
144*6812Sraf
/*
 * Same as thr_keycreate(), above, except that the key creation
 * is performed only once.  This relies upon the fact that a key
 * value of THR_ONCE_KEY is invalid, and requires that the key be
 * allocated with a value of THR_ONCE_KEY before calling here.
 * THR_ONCE_KEY and PTHREAD_ONCE_KEY_NP, defined in <thread.h>
 * and <pthread.h> respectively, must have the same value.
 * Example:
 *
 *	static pthread_key_t key = PTHREAD_ONCE_KEY_NP;
 *	...
 *	pthread_key_create_once_np(&key, destructor);
 *
 * This is classic double-checked locking: the first *keyp test is
 * performed without the lock so that, once the key has been created,
 * subsequent callers pay only a load and a membar_consumer().
 */
#pragma weak pthread_key_create_once_np = thr_keycreate_once
int
thr_keycreate_once(thread_key_t *keyp, void (*destructor)(void *))
{
	static mutex_t key_lock = DEFAULTMUTEX;
	thread_key_t key;
	int error;

	if (*keyp == THR_ONCE_KEY) {
		lmutex_lock(&key_lock);
		/* recheck under the lock; another thread may have won */
		if (*keyp == THR_ONCE_KEY) {
			error = thr_keycreate(&key, destructor);
			if (error) {
				lmutex_unlock(&key_lock);
				return (error);
			}
			/*
			 * Ensure the key is fully created before its
			 * value becomes visible via the store to *keyp.
			 */
			membar_producer();
			*keyp = key;
		}
		lmutex_unlock(&key_lock);
	}
	/* pairs with membar_producer() above for lock-free readers */
	membar_consumer();

	return (0);
}
1833864Sraf
1840Sstevel@tonic-gate int
pthread_key_delete(pthread_key_t key)185*6812Sraf pthread_key_delete(pthread_key_t key)
1860Sstevel@tonic-gate {
1870Sstevel@tonic-gate tsd_metadata_t *tsdm = &curthread->ul_uberdata->tsd_metadata;
1880Sstevel@tonic-gate
1890Sstevel@tonic-gate lmutex_lock(&tsdm->tsdm_lock);
1900Sstevel@tonic-gate
1910Sstevel@tonic-gate if (key >= tsdm->tsdm_nused ||
1920Sstevel@tonic-gate tsdm->tsdm_destro[key] == TSD_UNALLOCATED) {
1930Sstevel@tonic-gate lmutex_unlock(&tsdm->tsdm_lock);
1940Sstevel@tonic-gate return (EINVAL);
1950Sstevel@tonic-gate }
1960Sstevel@tonic-gate
1970Sstevel@tonic-gate tsdm->tsdm_destro[key] = TSD_UNALLOCATED;
1980Sstevel@tonic-gate lmutex_unlock(&tsdm->tsdm_lock);
1990Sstevel@tonic-gate
2000Sstevel@tonic-gate return (0);
2010Sstevel@tonic-gate }
2020Sstevel@tonic-gate
2030Sstevel@tonic-gate /*
2040Sstevel@tonic-gate * Blessedly, the pthread_getspecific() interface is much better than the
2050Sstevel@tonic-gate * thr_getspecific() interface in that it cannot return an error status.
2060Sstevel@tonic-gate * Thus, if the key specified is bogus, pthread_getspecific()'s behavior
2070Sstevel@tonic-gate * is undefined. As an added bonus (and as an artificat of not returning
2080Sstevel@tonic-gate * an error code), the requested datum is returned rather than stored
2090Sstevel@tonic-gate * through a parameter -- thereby avoiding the unnecessary store/load pair
2100Sstevel@tonic-gate * incurred by thr_getspecific(). Every once in a while, the Standards
2110Sstevel@tonic-gate * get it right -- but usually by accident.
2120Sstevel@tonic-gate */
2130Sstevel@tonic-gate void *
pthread_getspecific(pthread_key_t key)214*6812Sraf pthread_getspecific(pthread_key_t key)
2150Sstevel@tonic-gate {
2160Sstevel@tonic-gate tsd_t *stsd;
2170Sstevel@tonic-gate
2180Sstevel@tonic-gate /*
2190Sstevel@tonic-gate * We are cycle-shaving in this function because some
2200Sstevel@tonic-gate * applications make heavy use of it and one machine cycle
2210Sstevel@tonic-gate * can make a measurable difference in performance. This
2220Sstevel@tonic-gate * is why we waste a little memory and allocate a NULL value
2230Sstevel@tonic-gate * for the invalid key == 0 in curthread->ul_ftsd[0] rather
2240Sstevel@tonic-gate * than adjusting the key by subtracting one.
2250Sstevel@tonic-gate */
2260Sstevel@tonic-gate if (key < TSD_NFAST)
2270Sstevel@tonic-gate return (curthread->ul_ftsd[key]);
2280Sstevel@tonic-gate
2290Sstevel@tonic-gate if ((stsd = curthread->ul_stsd) != NULL && key < stsd->tsd_nalloc)
2300Sstevel@tonic-gate return (stsd->tsd_data[key]);
2310Sstevel@tonic-gate
2320Sstevel@tonic-gate return (NULL);
2330Sstevel@tonic-gate }
2340Sstevel@tonic-gate
2350Sstevel@tonic-gate int
thr_getspecific(thread_key_t key,void ** valuep)236*6812Sraf thr_getspecific(thread_key_t key, void **valuep)
2370Sstevel@tonic-gate {
2380Sstevel@tonic-gate tsd_t *stsd;
2390Sstevel@tonic-gate
2400Sstevel@tonic-gate /*
2410Sstevel@tonic-gate * Amazingly, some application code (and worse, some particularly
2420Sstevel@tonic-gate * fugly Solaris library code) _relies_ on the fact that 0 is always
2430Sstevel@tonic-gate * an invalid key. To preserve this semantic, 0 is never returned
2440Sstevel@tonic-gate * as a key from thr_/pthread_key_create(); we explicitly check
2450Sstevel@tonic-gate * for it here and return EINVAL.
2460Sstevel@tonic-gate */
2470Sstevel@tonic-gate if (key == 0)
2480Sstevel@tonic-gate return (EINVAL);
2490Sstevel@tonic-gate
2500Sstevel@tonic-gate if (key < TSD_NFAST)
2510Sstevel@tonic-gate *valuep = curthread->ul_ftsd[key];
2520Sstevel@tonic-gate else if ((stsd = curthread->ul_stsd) != NULL && key < stsd->tsd_nalloc)
2530Sstevel@tonic-gate *valuep = stsd->tsd_data[key];
2540Sstevel@tonic-gate else
2550Sstevel@tonic-gate *valuep = NULL;
2560Sstevel@tonic-gate
2570Sstevel@tonic-gate return (0);
2580Sstevel@tonic-gate }
2590Sstevel@tonic-gate
/*
 * We call thr_setspecific_slow() when the key specified
 * is beyond the current thread's currently allocated range.
 * This case is in a separate function because we want
 * the compiler to optimize for the common case.
 *
 * Grows (or initially allocates) the calling thread's slow TSD array
 * so that 'key' fits, then stores 'value' in the new slot.
 * Returns EINVAL for an unallocated key, ENOMEM on allocation failure,
 * 0 on success.  Caller must be in a critical region (see
 * thr_setspecific()).
 */
static int
thr_setspecific_slow(thread_key_t key, void *value)
{
	ulwp_t *self = curthread;
	tsd_metadata_t *tsdm = &self->ul_uberdata->tsd_metadata;
	tsd_t *stsd;	/* current (old) slow TSD, possibly NULL */
	tsd_t *ntsd;	/* newly allocated, larger slow TSD */
	uint_t nkeys;

	/*
	 * It isn't necessary to grab locks in this path;
	 * tsdm->tsdm_nused can only increase.
	 */
	if (key >= tsdm->tsdm_nused)
		return (EINVAL);

	/*
	 * We would like to test (tsdm->tsdm_destro[key] == TSD_UNALLOCATED)
	 * here but that would require acquiring tsdm->tsdm_lock and we
	 * want to avoid locks in this path.
	 *
	 * We have a key which is (or at least _was_) valid.  If this key
	 * is later deleted (or indeed, is deleted before we set the value),
	 * we don't care; such a condition would indicate an application
	 * race for which POSIX thankfully leaves the behavior unspecified.
	 *
	 * First, determine our new size.  To avoid allocating more than we
	 * have to, continue doubling our size only until the new key fits.
	 * stsd->tsd_nalloc must always be a power of two.
	 */
	nkeys = ((stsd = self->ul_stsd) != NULL)? stsd->tsd_nalloc : 8;
	for (; key >= nkeys; nkeys <<= 1)
		continue;

	/*
	 * Allocate the new TSD.
	 * NOTE(review): slots beyond the copied region are presumed
	 * zero-filled by lmalloc() so that unset keys read as NULL --
	 * confirm against lmalloc()'s contract.
	 */
	if ((ntsd = lmalloc(nkeys * sizeof (void *))) == NULL)
		return (ENOMEM);

	if (stsd != NULL) {
		/*
		 * Copy the old TSD across to the new.
		 * (This copies tsd_nalloc too; it is overwritten below.)
		 */
		(void) memcpy(ntsd, stsd, stsd->tsd_nalloc * sizeof (void *));
		lfree(stsd, stsd->tsd_nalloc * sizeof (void *));
	}

	ntsd->tsd_nalloc = nkeys;
	ntsd->tsd_data[key] = value;
	self->ul_stsd = ntsd;

	return (0);
}
3200Sstevel@tonic-gate
3210Sstevel@tonic-gate int
thr_setspecific(thread_key_t key,void * value)322*6812Sraf thr_setspecific(thread_key_t key, void *value)
3230Sstevel@tonic-gate {
3240Sstevel@tonic-gate tsd_t *stsd;
3250Sstevel@tonic-gate int ret;
3260Sstevel@tonic-gate ulwp_t *self = curthread;
3270Sstevel@tonic-gate
3280Sstevel@tonic-gate /*
329*6812Sraf * See the comment in thr_getspecific(), above.
3300Sstevel@tonic-gate */
3310Sstevel@tonic-gate if (key == 0)
3320Sstevel@tonic-gate return (EINVAL);
3330Sstevel@tonic-gate
3340Sstevel@tonic-gate if (key < TSD_NFAST) {
3350Sstevel@tonic-gate curthread->ul_ftsd[key] = value;
3360Sstevel@tonic-gate return (0);
3370Sstevel@tonic-gate }
3380Sstevel@tonic-gate
3390Sstevel@tonic-gate if ((stsd = curthread->ul_stsd) != NULL && key < stsd->tsd_nalloc) {
3400Sstevel@tonic-gate stsd->tsd_data[key] = value;
3410Sstevel@tonic-gate return (0);
3420Sstevel@tonic-gate }
3430Sstevel@tonic-gate
3440Sstevel@tonic-gate /*
3450Sstevel@tonic-gate * This is a critical region since we are dealing with memory
3460Sstevel@tonic-gate * allocation and free. Similar protection required in tsd_free().
3470Sstevel@tonic-gate */
3480Sstevel@tonic-gate enter_critical(self);
349*6812Sraf ret = thr_setspecific_slow(key, value);
3500Sstevel@tonic-gate exit_critical(self);
3510Sstevel@tonic-gate return (ret);
3520Sstevel@tonic-gate }
3530Sstevel@tonic-gate
354*6812Sraf int
pthread_setspecific(pthread_key_t key,const void * value)355*6812Sraf pthread_setspecific(pthread_key_t key, const void *value)
356*6812Sraf {
357*6812Sraf return (thr_setspecific(key, (void *)value));
358*6812Sraf }
359*6812Sraf
3600Sstevel@tonic-gate /*
3610Sstevel@tonic-gate * Contract-private interface for java. See PSARC/2003/159
3620Sstevel@tonic-gate *
3630Sstevel@tonic-gate * If the key falls within the TSD_NFAST range, return a non-negative
3640Sstevel@tonic-gate * offset that can be used by the caller to fetch the TSD data value
3650Sstevel@tonic-gate * directly out of the thread structure using %g7 (sparc) or %gs (x86).
3660Sstevel@tonic-gate * With the advent of TLS, %g7 and %gs are part of the ABI, even though
3670Sstevel@tonic-gate * the definition of the thread structure itself (ulwp_t) is private.
3680Sstevel@tonic-gate *
3690Sstevel@tonic-gate * We guarantee that the offset returned on sparc will fit within
3700Sstevel@tonic-gate * a SIMM13 field (that is, it is less than 2048).
3710Sstevel@tonic-gate *
3720Sstevel@tonic-gate * On failure (key is not in the TSD_NFAST range), return -1.
3730Sstevel@tonic-gate */
3740Sstevel@tonic-gate ptrdiff_t
_thr_slot_offset(thread_key_t key)3750Sstevel@tonic-gate _thr_slot_offset(thread_key_t key)
3760Sstevel@tonic-gate {
3770Sstevel@tonic-gate if (key != 0 && key < TSD_NFAST)
3780Sstevel@tonic-gate return ((ptrdiff_t)offsetof(ulwp_t, ul_ftsd[key]));
3790Sstevel@tonic-gate return (-1);
3800Sstevel@tonic-gate }
3810Sstevel@tonic-gate
/*
 * This is called by _thrp_exit() to apply destructors to the thread's tsd.
 *
 * Each destructor is invoked with the lock dropped (a destructor may
 * itself call into the TSD machinery), so the scan is restarted as
 * long as any destructor ran (POSIX allows repeated destructor passes
 * when destructors store fresh values).
 */
void
tsd_exit()
{
	ulwp_t *self = curthread;
	tsd_metadata_t *tsdm = &self->ul_uberdata->tsd_metadata;
	thread_key_t key;
	int recheck;		/* nonzero if any destructor was called */
	void *val;
	void (*func)(void *);

	lmutex_lock(&tsdm->tsdm_lock);

	do {
		recheck = 0;

		/* pass 1: fast TSD slots (key 0 is never valid) */
		for (key = 1; key < TSD_NFAST &&
		    key < tsdm->tsdm_nused; key++) {
			if ((func = tsdm->tsdm_destro[key]) != NULL &&
			    func != TSD_UNALLOCATED &&
			    (val = self->ul_ftsd[key]) != NULL) {
				/* clear before calling, per POSIX */
				self->ul_ftsd[key] = NULL;
				lmutex_unlock(&tsdm->tsdm_lock);
				(*func)(val);
				lmutex_lock(&tsdm->tsdm_lock);
				recheck = 1;
			}
		}

		if (self->ul_stsd == NULL)
			continue;

		/*
		 * Any of these destructors could cause us to grow the number
		 * TSD keys in the slow TSD; we cannot cache the slow TSD
		 * pointer through this loop.
		 */
		for (; key < self->ul_stsd->tsd_nalloc &&
		    key < tsdm->tsdm_nused; key++) {
			if ((func = tsdm->tsdm_destro[key]) != NULL &&
			    func != TSD_UNALLOCATED &&
			    (val = self->ul_stsd->tsd_data[key]) != NULL) {
				self->ul_stsd->tsd_data[key] = NULL;
				lmutex_unlock(&tsdm->tsdm_lock);
				(*func)(val);
				lmutex_lock(&tsdm->tsdm_lock);
				recheck = 1;
			}
		}
	} while (recheck);

	lmutex_unlock(&tsdm->tsdm_lock);

	/*
	 * We're done; if we have slow TSD, we need to free it.
	 */
	tsd_free(self);
}
4420Sstevel@tonic-gate
4430Sstevel@tonic-gate void
tsd_free(ulwp_t * ulwp)4440Sstevel@tonic-gate tsd_free(ulwp_t *ulwp)
4450Sstevel@tonic-gate {
4460Sstevel@tonic-gate tsd_t *stsd;
4470Sstevel@tonic-gate ulwp_t *self = curthread;
4480Sstevel@tonic-gate
4490Sstevel@tonic-gate enter_critical(self);
4500Sstevel@tonic-gate if ((stsd = ulwp->ul_stsd) != NULL)
4510Sstevel@tonic-gate lfree(stsd, stsd->tsd_nalloc * sizeof (void *));
4520Sstevel@tonic-gate ulwp->ul_stsd = NULL;
4530Sstevel@tonic-gate exit_critical(self);
4540Sstevel@tonic-gate }
455