/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stddef.h>

/*
 * 128 million keys should be enough for anyone.
 * This allocates half a gigabyte of memory for the keys themselves and
 * half a gigabyte of memory for each thread that uses the largest key.
 */
#define	MAX_KEYS	0x08000000U
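
/*
 * (Arithmetic behind the comment above, assuming 32-bit pointers:
 * 0x08000000 keys * 4 bytes per destructor slot = 512MB, and likewise
 * 512MB of per-thread tsd_data for a thread that sets the largest key.)
 */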

#pragma weak thr_keycreate = _thr_keycreate
#pragma weak pthread_key_create = _thr_keycreate
#pragma weak _pthread_key_create = _thr_keycreate
int
_thr_keycreate(thread_key_t *pkey, void (*destructor)(void *))
{
	tsd_metadata_t *tsdm = &curthread->ul_uberdata->tsd_metadata;
	void (**old_data)(void *) = NULL;
	void (**new_data)(void *);
	uint_t old_nkeys;
	uint_t new_nkeys;

	lmutex_lock(&tsdm->tsdm_lock);

	/*
	 * Unfortunately, POSIX specifies that a pthread_getspecific()
	 * on an allocated key upon which the calling thread has not
	 * performed a pthread_setspecific() must return NULL.
	 * Consider the following sequence:
	 *
	 *	pthread_key_create(&key);
	 *	pthread_setspecific(key, datum);
	 *	pthread_key_delete(key);
	 *	pthread_key_create(&key);
	 *	val = pthread_getspecific(key);
	 *
	 * According to POSIX, if the deleted key is reused for the new
	 * key returned by the second pthread_key_create(), then the
	 * pthread_getspecific() in the above example must return NULL
	 * (and not the stale datum).  The implementation is thus left
	 * with two alternatives:
	 *
	 *  (1)	Reuse deleted keys.  If this is to be implemented optimally,
	 *	it requires that pthread_key_create() somehow associate
	 *	the value NULL with the new (reused) key for each thread.
	 *	Keeping the hot path fast and lock-free imposes substantial
	 *	complexity on the implementation.
	 *
	 *  (2)	Never reuse deleted keys.  This allows the
	 *	pthread_getspecific() implementation to simply perform a
	 *	check against the number of keys set by the calling thread,
	 *	returning NULL if the specified key is larger than the
	 *	highest set key.  This has the disadvantage of wasting
	 *	memory (a program which simply loops calling
	 *	pthread_key_create()/pthread_key_delete() will ultimately
	 *	run out of memory), but permits an optimal
	 *	pthread_getspecific() while allowing for simple key creation
	 *	and deletion.
	 *
	 * All Solaris implementations have opted for (2).  Given the
	 * ~10 years that this has been in the field, it is safe to assume
	 * that applications don't loop creating and destroying keys; we
	 * stick with (2).
	 */
	if (tsdm->tsdm_nused == (old_nkeys = tsdm->tsdm_nkeys)) {
		/*
		 * We need to allocate or double the number of keys.
		 * tsdm->tsdm_nkeys must always be a power of two.
		 */
		if ((new_nkeys = (old_nkeys << 1)) == 0)
			new_nkeys = 8;

		if (new_nkeys > MAX_KEYS) {
			lmutex_unlock(&tsdm->tsdm_lock);
			return (EAGAIN);
		}
		if ((new_data = lmalloc(new_nkeys * sizeof (void *))) == NULL) {
			lmutex_unlock(&tsdm->tsdm_lock);
			return (ENOMEM);
		}
		if ((old_data = tsdm->tsdm_destro) == NULL) {
			/* key == 0 is always invalid */
			new_data[0] = TSD_UNALLOCATED;
			tsdm->tsdm_nused = 1;
		} else {
			(void) _private_memcpy(new_data, old_data,
			    old_nkeys * sizeof (void *));
		}
		tsdm->tsdm_destro = new_data;
		tsdm->tsdm_nkeys = new_nkeys;
	}

	*pkey = tsdm->tsdm_nused;
	tsdm->tsdm_destro[tsdm->tsdm_nused++] = destructor;
	lmutex_unlock(&tsdm->tsdm_lock);

	if (old_data != NULL)
		lfree(old_data, old_nkeys * sizeof (void *));

	return (0);
}
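
/*
 * Illustrative usage (a sketch, not part of the library; names are
 * hypothetical): a module typically creates one key up front and
 * registers a destructor so that its per-thread data is released at
 * thread exit.
 *
 *	static pthread_key_t mod_key;
 *
 *	void
 *	mod_init(void)
 *	{
 *		(void) pthread_key_create(&mod_key, free);
 *	}
 */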

/*
 * Same as _thr_keycreate(), above, except that the key creation
 * is performed only once.  This relies upon the fact that a key
 * value of THR_ONCE_KEY is invalid, and requires that the key be
 * allocated with a value of THR_ONCE_KEY before calling here.
 * THR_ONCE_KEY and PTHREAD_ONCE_KEY_NP, defined in <thread.h>
 * and <pthread.h> respectively, must have the same value.
 * Example:
 *
 *	static pthread_key_t key = PTHREAD_ONCE_KEY_NP;
 *	...
 *	pthread_key_create_once_np(&key, destructor);
 */
#pragma weak pthread_key_create_once_np = _thr_keycreate_once
#pragma weak _pthread_key_create_once_np = _thr_keycreate_once
#pragma weak thr_keycreate_once = _thr_keycreate_once
int
_thr_keycreate_once(thread_key_t *keyp, void (*destructor)(void *))
{
	static mutex_t key_lock = DEFAULTMUTEX;
	thread_key_t key;
	int error;

	if (*keyp == THR_ONCE_KEY) {
		lmutex_lock(&key_lock);
		if (*keyp == THR_ONCE_KEY) {
			error = _thr_keycreate(&key, destructor);
			if (error) {
				lmutex_unlock(&key_lock);
				return (error);
			}
			_membar_producer();
			*keyp = key;
		}
		lmutex_unlock(&key_lock);
	}
	_membar_consumer();

	return (0);
}
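
/*
 * Illustrative caller-side pattern (a sketch, not part of the library;
 * names are hypothetical): lazy, race-free key creation combined with
 * per-thread allocation.  Error handling for a failed malloc() is
 * omitted for brevity.
 *
 *	static thread_key_t buf_key = THR_ONCE_KEY;
 *
 *	char *
 *	get_thread_buf(void)
 *	{
 *		char *buf;
 *
 *		(void) thr_keycreate_once(&buf_key, free);
 *		(void) thr_getspecific(buf_key, (void **)&buf);
 *		if (buf == NULL) {
 *			buf = malloc(BUFSIZ);
 *			(void) thr_setspecific(buf_key, buf);
 *		}
 *		return (buf);
 *	}
 */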

#pragma weak pthread_key_delete = _thr_key_delete
#pragma weak _pthread_key_delete = _thr_key_delete
int
_thr_key_delete(thread_key_t key)
{
	tsd_metadata_t *tsdm = &curthread->ul_uberdata->tsd_metadata;

	lmutex_lock(&tsdm->tsdm_lock);

	if (key >= tsdm->tsdm_nused ||
	    tsdm->tsdm_destro[key] == TSD_UNALLOCATED) {
		lmutex_unlock(&tsdm->tsdm_lock);
		return (EINVAL);
	}

	tsdm->tsdm_destro[key] = TSD_UNALLOCATED;
	lmutex_unlock(&tsdm->tsdm_lock);

	return (0);
}

/*
 * Blessedly, the pthread_getspecific() interface is much better than the
 * thr_getspecific() interface in that it cannot return an error status.
 * Thus, if the key specified is bogus, pthread_getspecific()'s behavior
 * is undefined.  As an added bonus (and as an artifact of not returning
 * an error code), the requested datum is returned rather than stored
 * through a parameter -- thereby avoiding the unnecessary store/load pair
 * incurred by thr_getspecific().  Every once in a while, the Standards
 * get it right -- but usually by accident.
 */
#pragma weak pthread_getspecific = _pthread_getspecific
void *
_pthread_getspecific(pthread_key_t key)
{
	tsd_t *stsd;

	/*
	 * We are cycle-shaving in this function because some
	 * applications make heavy use of it and one machine cycle
	 * can make a measurable difference in performance.  This
	 * is why we waste a little memory and allocate a NULL value
	 * for the invalid key == 0 in curthread->ul_ftsd[0] rather
	 * than adjusting the key by subtracting one.
	 */
	if (key < TSD_NFAST)
		return (curthread->ul_ftsd[key]);

	if ((stsd = curthread->ul_stsd) != NULL && key < stsd->tsd_nalloc)
		return (stsd->tsd_data[key]);

	return (NULL);
}

#pragma weak thr_getspecific = _thr_getspecific
int
_thr_getspecific(thread_key_t key, void **valuep)
{
	tsd_t *stsd;

	/*
	 * Amazingly, some application code (and worse, some particularly
	 * fugly Solaris library code) _relies_ on the fact that 0 is always
	 * an invalid key.  To preserve this semantic, 0 is never returned
	 * as a key from thr_/pthread_key_create(); we explicitly check
	 * for it here and return EINVAL.
	 */
	if (key == 0)
		return (EINVAL);

	if (key < TSD_NFAST)
		*valuep = curthread->ul_ftsd[key];
	else if ((stsd = curthread->ul_stsd) != NULL && key < stsd->tsd_nalloc)
		*valuep = stsd->tsd_data[key];
	else
		*valuep = NULL;

	return (0);
}
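
/*
 * Illustrative comparison (a sketch, not part of the library): both
 * interfaces retrieve the same per-thread datum, but only
 * thr_getspecific() can report a bogus key (it returns EINVAL for
 * key 0, while pthread_getspecific()'s behavior is then undefined).
 *
 *	void *v1, *v2;
 *
 *	v1 = pthread_getspecific(key);
 *	if (thr_getspecific(key, &v2) == 0)
 *		assert(v1 == v2);
 */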

/*
 * We call _thr_setspecific_slow() when the key specified
 * is beyond the current thread's allocated range.
 * This case is in a separate function because we want
 * the compiler to optimize for the common case.
 */
static int
_thr_setspecific_slow(thread_key_t key, void *value)
{
	ulwp_t *self = curthread;
	tsd_metadata_t *tsdm = &self->ul_uberdata->tsd_metadata;
	tsd_t *stsd;
	tsd_t *ntsd;
	uint_t nkeys;

	/*
	 * It isn't necessary to grab locks in this path;
	 * tsdm->tsdm_nused can only increase.
	 */
	if (key >= tsdm->tsdm_nused)
		return (EINVAL);

	/*
	 * We would like to test (tsdm->tsdm_destro[key] == TSD_UNALLOCATED)
	 * here but that would require acquiring tsdm->tsdm_lock and we
	 * want to avoid locks in this path.
	 *
	 * We have a key which is (or at least _was_) valid.  If this key
	 * is later deleted (or indeed, is deleted before we set the value),
	 * we don't care; such a condition would indicate an application
	 * race for which POSIX thankfully leaves the behavior unspecified.
	 *
	 * First, determine our new size.  To avoid allocating more than we
	 * have to, continue doubling our size only until the new key fits.
	 * stsd->tsd_nalloc must always be a power of two.
	 */
	nkeys = ((stsd = self->ul_stsd) != NULL)? stsd->tsd_nalloc : 8;
	for (; key >= nkeys; nkeys <<= 1)
		continue;

	/*
	 * Allocate the new TSD.
	 */
	if ((ntsd = lmalloc(nkeys * sizeof (void *))) == NULL)
		return (ENOMEM);

	if (stsd != NULL) {
		/*
		 * Copy the old TSD across to the new.
		 */
		(void) _private_memcpy(ntsd, stsd,
		    stsd->tsd_nalloc * sizeof (void *));
		lfree(stsd, stsd->tsd_nalloc * sizeof (void *));
	}

	ntsd->tsd_nalloc = nkeys;
	ntsd->tsd_data[key] = value;
	self->ul_stsd = ntsd;

	return (0);
}
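
/*
 * Worked example of the doubling above (illustrative): a thread whose
 * slow TSD currently has tsd_nalloc == 8 and which sets key 37 grows
 * nkeys 8 -> 16 -> 32 -> 64, so the new array holds 64 slots
 * (64 * sizeof (void *) bytes) and tsd_nalloc becomes 64.
 */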

#pragma weak thr_setspecific = _thr_setspecific
#pragma weak pthread_setspecific = _thr_setspecific
#pragma weak _pthread_setspecific = _thr_setspecific
int
_thr_setspecific(thread_key_t key, void *value)
{
	tsd_t *stsd;
	int ret;
	ulwp_t *self = curthread;

	/*
	 * See the comment in _thr_getspecific(), above.
	 */
	if (key == 0)
		return (EINVAL);

	if (key < TSD_NFAST) {
		curthread->ul_ftsd[key] = value;
		return (0);
	}

	if ((stsd = curthread->ul_stsd) != NULL && key < stsd->tsd_nalloc) {
		stsd->tsd_data[key] = value;
		return (0);
	}

	/*
	 * This is a critical region since we are dealing with memory
	 * allocation and free.  Similar protection is required in tsd_free().
	 */
	enter_critical(self);
	ret = _thr_setspecific_slow(key, value);
	exit_critical(self);
	return (ret);
}

/*
 * Contract-private interface for java.  See PSARC/2003/159
 *
 * If the key falls within the TSD_NFAST range, return a non-negative
 * offset that can be used by the caller to fetch the TSD data value
 * directly out of the thread structure using %g7 (sparc) or %gs (x86).
 * With the advent of TLS, %g7 and %gs are part of the ABI, even though
 * the definition of the thread structure itself (ulwp_t) is private.
 *
 * We guarantee that the offset returned on sparc will fit within
 * a SIMM13 field (that is, it is less than 2048).
 *
 * On failure (key is not in the TSD_NFAST range), return -1.
 */
ptrdiff_t
_thr_slot_offset(thread_key_t key)
{
	if (key != 0 && key < TSD_NFAST)
		return ((ptrdiff_t)offsetof(ulwp_t, ul_ftsd[key]));
	return (-1);
}
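
/*
 * Illustrative arithmetic (a sketch, not part of the contract): for a
 * fast key the value returned above is simply
 *
 *	offsetof(ulwp_t, ul_ftsd) + key * sizeof (void *)
 *
 * which a consumer such as the JVM can add to the thread pointer
 * (%g7 on sparc, the %gs base on x86) to load the slot directly.
 */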

/*
 * This is called by _thrp_exit() to apply destructors to the thread's tsd.
 */
void
tsd_exit()
{
	ulwp_t *self = curthread;
	tsd_metadata_t *tsdm = &self->ul_uberdata->tsd_metadata;
	thread_key_t key;
	int recheck;
	void *val;
	void (*func)(void *);

	lmutex_lock(&tsdm->tsdm_lock);

	do {
		recheck = 0;

		for (key = 1; key < TSD_NFAST &&
		    key < tsdm->tsdm_nused; key++) {
			if ((func = tsdm->tsdm_destro[key]) != NULL &&
			    func != TSD_UNALLOCATED &&
			    (val = self->ul_ftsd[key]) != NULL) {
				self->ul_ftsd[key] = NULL;
				lmutex_unlock(&tsdm->tsdm_lock);
				(*func)(val);
				lmutex_lock(&tsdm->tsdm_lock);
				recheck = 1;
			}
		}

		if (self->ul_stsd == NULL)
			continue;

		/*
		 * Any of these destructors could cause us to grow the number
		 * of TSD keys in the slow TSD; we cannot cache the slow TSD
		 * pointer through this loop.
		 */
		for (; key < self->ul_stsd->tsd_nalloc &&
		    key < tsdm->tsdm_nused; key++) {
			if ((func = tsdm->tsdm_destro[key]) != NULL &&
			    func != TSD_UNALLOCATED &&
			    (val = self->ul_stsd->tsd_data[key]) != NULL) {
				self->ul_stsd->tsd_data[key] = NULL;
				lmutex_unlock(&tsdm->tsdm_lock);
				(*func)(val);
				lmutex_lock(&tsdm->tsdm_lock);
				recheck = 1;
			}
		}
	} while (recheck);

	lmutex_unlock(&tsdm->tsdm_lock);

	/*
	 * We're done; if we have slow TSD, we need to free it.
	 */
	tsd_free(self);
}

void
tsd_free(ulwp_t *ulwp)
{
	tsd_t *stsd;
	ulwp_t *self = curthread;

	enter_critical(self);
	if ((stsd = ulwp->ul_stsd) != NULL)
		lfree(stsd, stsd->tsd_nalloc * sizeof (void *));
	ulwp->ul_stsd = NULL;
	exit_critical(self);
}