/* $OpenBSD: rthread_stack.c,v 1.11 2013/12/18 16:42:08 deraadt Exp $ */
/* $snafu: rthread_stack.c,v 1.12 2005/01/11 02:45:28 marc Exp $ */

/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */

#include <sys/param.h>
#include <sys/mman.h>

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#include "rthread.h"

/*
 * Follow uthread's example and keep around stacks that have default
 * attributes for possible reuse.
 */
static SLIST_HEAD(, stack) def_stacks = SLIST_HEAD_INITIALIZER(head);
static struct _spinlock def_stacks_lock = _SPINLOCK_UNLOCKED;

struct stack *
_rthread_alloc_stack(pthread_t thread)
{
	struct stack *stack;
	u_int32_t rnd;
	caddr_t base;
	caddr_t guard;
	size_t size;
	size_t guardsize;

	/* if the request uses the defaults, try to reuse one */
	if (thread->attr.stack_addr == NULL &&
	    thread->attr.stack_size == RTHREAD_STACK_SIZE_DEF &&
	    thread->attr.guard_size == _rthread_attr_default.guard_size) {
		_spinlock(&def_stacks_lock);
		stack = SLIST_FIRST(&def_stacks);
		if (stack != NULL) {
			SLIST_REMOVE_HEAD(&def_stacks, link);
			_spinunlock(&def_stacks_lock);
			return (stack);
		}
		_spinunlock(&def_stacks_lock);
	}

	/* allocate the stack struct that we'll return */
	stack = malloc(sizeof(*stack));
	if (stack == NULL)
		return (NULL);

	/* The smaller the stack, the smaller the random bias */
	if (thread->attr.stack_size > _thread_pagesize)
		rnd = arc4random() & (_thread_pagesize - 1);
	else if (thread->attr.stack_size == _thread_pagesize)
		rnd = arc4random() & (_thread_pagesize / 16 - 1);
	else
		rnd = 0;
	/* keep the random offset aligned for the machine's stack pointer */
	rnd &= ~_STACKALIGNBYTES;

	/* If a stack address was provided, just fill in the details */
	if (thread->attr.stack_addr != NULL) {
		stack->base = base = thread->attr.stack_addr;
		stack->len  = thread->attr.stack_size;
#ifdef MACHINE_STACK_GROWS_UP
		stack->sp = base + rnd;
#else
		stack->sp = base + thread->attr.stack_size - rnd;
#endif
		/*
		 * This impossible guardsize marks this stack as
		 * application allocated so it won't be freed or
		 * cached by _rthread_free_stack().
		 */
		stack->guardsize = 1;
		return (stack);
	}

	/* round the requested sizes up to full pages */
	size = ROUND_TO_PAGE(thread->attr.stack_size);
	guardsize = ROUND_TO_PAGE(thread->attr.guard_size);

	/* check that neither the rounding above nor the sum below overflowed */
	if (size < thread->attr.stack_size ||
	    guardsize < thread->attr.guard_size ||
	    SIZE_MAX - size < guardsize) {
		free(stack);
		errno = EINVAL;
		return (NULL);
	}
	/* the guard area is carved out of the mapping, not added to it */
	size += guardsize;

	/* actually allocate the real stack */
	base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
	if (base == MAP_FAILED) {
		free(stack);
		return (NULL);
	}

#ifdef MACHINE_STACK_GROWS_UP
	guard = base + size - guardsize;
	stack->sp = base + rnd;
#else
	guard = base;
	stack->sp = base + size - rnd;
#endif

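	/*
	 * Editorial note: layout of the mapping for a downward-growing
	 * stack (the MACHINE_STACK_GROWS_UP case is the mirror image):
	 *
	 *	base                                sp      base + size
	 *	 | guard (PROT_NONE) | usable stack  |<- rnd ->|
	 */
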
	/* memory protect the guard region */
	if (guardsize != 0 && mprotect(guard, guardsize, PROT_NONE) == -1) {
		munmap(base, size);
		free(stack);
		return (NULL);
	}

	stack->base = base;
	stack->guardsize = guardsize;
	stack->len = size;
	return (stack);
}

void
_rthread_free_stack(struct stack *stack)
{
	/* cache stacks that have the default attributes for reuse */
	if (stack->len == RTHREAD_STACK_SIZE_DEF + stack->guardsize &&
	    stack->guardsize == _rthread_attr_default.guard_size) {
		_spinlock(&def_stacks_lock);
		SLIST_INSERT_HEAD(&def_stacks, stack, link);
		_spinunlock(&def_stacks_lock);
	} else {
		/* unmap the storage unless it was application allocated */
		if (stack->guardsize != 1)
			munmap(stack->base, stack->len);
		free(stack);
	}
}
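
/*
 * Usage sketch (illustrative, not part of the original file): the
 * callers live elsewhere in librthread.  Roughly, pthread_create()
 * obtains the new thread's stack with _rthread_alloc_stack() and the
 * stack is handed back through _rthread_free_stack() after the thread
 * exits; the exact call sites and error handling may differ.
 *
 *	struct stack *stack;
 *
 *	stack = _rthread_alloc_stack(thread);
 *	if (stack == NULL)
 *		return (errno);
 *	thread->stack = stack;
 *	...
 *	_rthread_free_stack(thread->stack);
 */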