xref: /dflybsd-src/lib/libthread_xu/thread/thr_stack.c (revision 940be950819fa932cd401a01f1182bf686a2e61e)
171b3fa15SDavid Xu /*
271b3fa15SDavid Xu  * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
371b3fa15SDavid Xu  * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
471b3fa15SDavid Xu  * All rights reserved.
571b3fa15SDavid Xu  *
671b3fa15SDavid Xu  * Redistribution and use in source and binary forms, with or without
771b3fa15SDavid Xu  * modification, are permitted provided that the following conditions
871b3fa15SDavid Xu  * are met:
971b3fa15SDavid Xu  * 1. Redistributions of source code must retain the above copyright
1071b3fa15SDavid Xu  *    notice, this list of conditions and the following disclaimer.
1171b3fa15SDavid Xu  * 2. Redistributions in binary form must reproduce the above copyright
1271b3fa15SDavid Xu  *    notice, this list of conditions and the following disclaimer in the
1371b3fa15SDavid Xu  *    documentation and/or other materials provided with the distribution.
1471b3fa15SDavid Xu  *
1571b3fa15SDavid Xu  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
1671b3fa15SDavid Xu  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1771b3fa15SDavid Xu  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1871b3fa15SDavid Xu  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
1971b3fa15SDavid Xu  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2071b3fa15SDavid Xu  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2171b3fa15SDavid Xu  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2271b3fa15SDavid Xu  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2371b3fa15SDavid Xu  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2471b3fa15SDavid Xu  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2571b3fa15SDavid Xu  * SUCH DAMAGE.
2671b3fa15SDavid Xu  *
2771b3fa15SDavid Xu  * $FreeBSD: src/lib/libpthread/thread/thr_stack.c,v 1.9 2004/10/06 08:11:07 davidxu Exp $
2871b3fa15SDavid Xu  */
2971b3fa15SDavid Xu #include <sys/types.h>
3071b3fa15SDavid Xu #include <sys/mman.h>
3171b3fa15SDavid Xu #include <sys/queue.h>
324837705eSMatthew Dillon #include <sys/time.h>
334837705eSMatthew Dillon #include <sys/resource.h>
344837705eSMatthew Dillon #include <sys/sysctl.h>
359e2ee207SJoerg Sonnenberger #include <machine/tls.h>
364837705eSMatthew Dillon #include <machine/vmparam.h>
3771b3fa15SDavid Xu #include <stdlib.h>
3871b3fa15SDavid Xu #include <pthread.h>
3971b3fa15SDavid Xu #include "thr_private.h"
4071b3fa15SDavid Xu 
/*
 * Spare thread stack bookkeeping record.  When a stack is cached by
 * _thr_stack_free(), this record is written into the top of the (now
 * unused) stack region itself, so caching a stack requires no extra
 * allocation.
 */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};
4871b3fa15SDavid Xu 
4971b3fa15SDavid Xu /*
5071b3fa15SDavid Xu  * Default sized (stack and guard) spare stack queue.  Stacks are cached
5171b3fa15SDavid Xu  * to avoid additional complexity managing mmap()ed stack regions.  Spare
5271b3fa15SDavid Xu  * stacks are used in LIFO order to increase cache locality.
5371b3fa15SDavid Xu  */
5471b3fa15SDavid Xu static LIST_HEAD(, stack)	dstackq = LIST_HEAD_INITIALIZER(dstackq);
5571b3fa15SDavid Xu 
5671b3fa15SDavid Xu /*
5771b3fa15SDavid Xu  * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
5871b3fa15SDavid Xu  * Stacks are cached to avoid additional complexity managing mmap()ed
5971b3fa15SDavid Xu  * stack regions.  This list is unordered, since ordering on both stack
6071b3fa15SDavid Xu  * size and guard size would be more trouble than it's worth.  Stacks are
6171b3fa15SDavid Xu  * allocated from this cache on a first size match basis.
6271b3fa15SDavid Xu  */
6371b3fa15SDavid Xu static LIST_HEAD(, stack)	mstackq = LIST_HEAD_INITIALIZER(mstackq);
6471b3fa15SDavid Xu 
654837705eSMatthew Dillon /*
664837705eSMatthew Dillon  * Thread stack base for mmap() hint, starts
674837705eSMatthew Dillon  * at _usrstack - kern.maxssiz - kern.maxthrssiz
6871b3fa15SDavid Xu  */
694837705eSMatthew Dillon static char *base_stack = NULL;
7071b3fa15SDavid Xu 
7171b3fa15SDavid Xu /*
7271b3fa15SDavid Xu  * Round size up to the nearest multiple of
7371b3fa15SDavid Xu  * _thr_page_size.
7471b3fa15SDavid Xu  */
7571b3fa15SDavid Xu static inline size_t
round_up(size_t size)7671b3fa15SDavid Xu round_up(size_t size)
7771b3fa15SDavid Xu {
7871b3fa15SDavid Xu 	if (size % _thr_page_size != 0)
7971b3fa15SDavid Xu 		size = ((size / _thr_page_size) + 1) *
8071b3fa15SDavid Xu 		    _thr_page_size;
8171b3fa15SDavid Xu 	return size;
8271b3fa15SDavid Xu }
8371b3fa15SDavid Xu 
/*
 * Allocate a stack (and guard area) for a new thread, preferring a
 * cached spare stack when one of matching size is available.
 *
 * On success attr->stackaddr_attr points at the usable stack base
 * (just above the guard) and 0 is returned; on failure
 * attr->stackaddr_attr is NULL and -1 is returned.  THR_STACK_USER is
 * cleared since this stack is library-managed, not user-supplied.
 */
int
_thr_stack_alloc(pthread_attr_t attr)
{
	pthread_t curthread = tls_get_curthread();
	struct stack *spare_stack;
	size_t stacksize;
	size_t guardsize;
	char *stackaddr;

	/*
	 * Round up stack size to nearest multiple of _thr_page_size so
	 * that mmap() * will work.  If the stack size is not an even
	 * multiple, we end up initializing things such that there is
	 * unused space above the beginning of the stack, so the stack
	 * sits snugly against its guard.
	 */
	stacksize = round_up(attr->stacksize_attr);
	guardsize = round_up(attr->guardsize_attr);

	attr->stackaddr_attr = NULL;
	attr->flags &= ~THR_STACK_USER;

	/*
	 * Use the garbage collector lock for synchronization of the
	 * spare stack lists and allocations from usrstack.
	 */
	THREAD_LIST_LOCK(curthread);
	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if ((stacksize == THR_STACK_DEFAULT) &&
	    (guardsize == _thr_guard_default)) {
		if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			attr->stackaddr_attr = spare_stack->stackaddr;
		}
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded up stack size (stack_size) in the search:
	 */
	else {
		LIST_FOREACH(spare_stack, &mstackq, qe) {
			if (spare_stack->stacksize == stacksize &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				attr->stackaddr_attr = spare_stack->stackaddr;
				break;
			}
		}
	}
	if (attr->stackaddr_attr != NULL) {
		/* A cached stack was found.  Release the lock. */
		THREAD_LIST_UNLOCK(curthread);
	} else {
		/*
		 * Calculate base_stack on first use.  A concurrent
		 * first-use race is harmless: both writers compute the
		 * same value, and we hold the thread-list lock here
		 * anyway.  The mmap hint lands below the main stack's
		 * maximum growth area (kern.maxssiz) by a further
		 * kern.maxthrssiz, falling back to the compiled-in
		 * MAXSSIZ/MAXTHRSSIZ when the sysctls are unavailable.
		 */
		if (base_stack == NULL) {
			int64_t maxssiz;
			int64_t maxthrssiz;
			struct rlimit rl;
			size_t len;

			if (getrlimit(RLIMIT_STACK, &rl) == 0)
				maxssiz = rl.rlim_max;
			else
				maxssiz = MAXSSIZ;
			len = sizeof(maxssiz);
			sysctlbyname("kern.maxssiz", &maxssiz, &len, NULL, 0);
			len = sizeof(maxthrssiz);
			if (sysctlbyname("kern.maxthrssiz",
					 &maxthrssiz, &len, NULL, 0) < 0) {
				maxthrssiz = MAXTHRSSIZ;
			}
			base_stack = _usrstack - maxssiz - maxthrssiz;
		}

		/* Release the lock before mmap'ing it. */
		THREAD_LIST_UNLOCK(curthread);

		/*
		 * Map the stack and guard page together then split the
		 * guard page from allocated space.
		 *
		 * We no longer use MAP_STACK and we define an area far
		 * away from the default user stack (even though this will
		 * cost us another few 4K page-table pages).  DFly no longer
		 * allows new MAP_STACK mappings to be made inside ungrown
		 * portions of existing mappings.
		 */
		stackaddr = mmap(base_stack, stacksize + guardsize,
				 PROT_READ | PROT_WRITE,
				 MAP_ANON | MAP_PRIVATE, -1, 0);
		if (stackaddr != MAP_FAILED && guardsize) {
			/*
			 * Remap the guard with no access permissions
			 * (prot 0) over the low end; on failure undo
			 * the whole mapping.
			 */
			if (mmap(stackaddr, guardsize, 0,
				 MAP_ANON | MAP_FIXED, -1, 0) == MAP_FAILED) {
				munmap(stackaddr, stacksize + guardsize);
				stackaddr = MAP_FAILED;
			} else {
				/* Usable stack begins above the guard. */
				stackaddr += guardsize;
			}
		}
		if (stackaddr == MAP_FAILED)
			stackaddr = NULL;
		attr->stackaddr_attr = stackaddr;
	}
	if (attr->stackaddr_attr != NULL)
		return (0);
	else
		return (-1);
}
20071b3fa15SDavid Xu 
20171b3fa15SDavid Xu /* This function must be called with _thread_list_lock held. */
20271b3fa15SDavid Xu void
_thr_stack_free(pthread_attr_t attr)203*940be950Szrj _thr_stack_free(pthread_attr_t attr)
20471b3fa15SDavid Xu {
20571b3fa15SDavid Xu 	struct stack *spare_stack;
20671b3fa15SDavid Xu 
20771b3fa15SDavid Xu 	if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
20871b3fa15SDavid Xu 	    && (attr->stackaddr_attr != NULL)) {
209fc71f871SDavid Xu 		spare_stack = (struct stack *)((char *)attr->stackaddr_attr +
210fc71f871SDavid Xu 			attr->stacksize_attr - sizeof(struct stack));
21171b3fa15SDavid Xu 		spare_stack->stacksize = round_up(attr->stacksize_attr);
21271b3fa15SDavid Xu 		spare_stack->guardsize = round_up(attr->guardsize_attr);
21371b3fa15SDavid Xu 		spare_stack->stackaddr = attr->stackaddr_attr;
21471b3fa15SDavid Xu 
21571b3fa15SDavid Xu 		if (spare_stack->stacksize == THR_STACK_DEFAULT &&
21671b3fa15SDavid Xu 		    spare_stack->guardsize == _thr_guard_default) {
21771b3fa15SDavid Xu 			/* Default stack/guard size. */
21871b3fa15SDavid Xu 			LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
21971b3fa15SDavid Xu 		} else {
22071b3fa15SDavid Xu 			/* Non-default stack/guard size. */
22171b3fa15SDavid Xu 			LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
22271b3fa15SDavid Xu 		}
22371b3fa15SDavid Xu 		attr->stackaddr_attr = NULL;
22471b3fa15SDavid Xu 	}
22571b3fa15SDavid Xu }
2260dc5e56dSMatthew Dillon 
2270dc5e56dSMatthew Dillon void
_thr_stack_cleanup(void)2280dc5e56dSMatthew Dillon _thr_stack_cleanup(void)
2290dc5e56dSMatthew Dillon {
2300dc5e56dSMatthew Dillon 	struct stack *spare;
2310dc5e56dSMatthew Dillon 
2320dc5e56dSMatthew Dillon 	while ((spare = LIST_FIRST(&dstackq)) != NULL) {
2330dc5e56dSMatthew Dillon 		LIST_REMOVE(spare, qe);
2340dc5e56dSMatthew Dillon 		munmap(spare->stackaddr,
2350dc5e56dSMatthew Dillon 		       spare->stacksize + spare->guardsize);
2360dc5e56dSMatthew Dillon 	}
2370dc5e56dSMatthew Dillon }
238