/* $OpenBSD: rthread_stack.c,v 1.14 2015/01/24 10:35:33 kettenis Exp $ */
/* $snafu: rthread_stack.c,v 1.12 2005/01/11 02:45:28 marc Exp $ */

/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */

#include <sys/param.h>
#include <sys/mman.h>

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#include "rthread.h"

/*
 * Follow uthread's example and keep around stacks that have default
 * attributes for possible reuse.
 */
static SLIST_HEAD(, stack) def_stacks = SLIST_HEAD_INITIALIZER(head);
static struct _spinlock def_stacks_lock = _SPINLOCK_UNLOCKED;

struct stack *
_rthread_alloc_stack(pthread_t thread)
{
	struct stack *stack;
	u_int32_t rnd;
	caddr_t base;
	caddr_t guard;
	size_t size;
	size_t guardsize;

	/* if the request uses the defaults, try to reuse one */
	if (thread->attr.stack_addr == NULL &&
	    thread->attr.stack_size == RTHREAD_STACK_SIZE_DEF &&
	    thread->attr.guard_size == _rthread_attr_default.guard_size) {
		_spinlock(&def_stacks_lock);
		stack = SLIST_FIRST(&def_stacks);
		if (stack != NULL) {
			SLIST_REMOVE_HEAD(&def_stacks, link);
			_spinunlock(&def_stacks_lock);
			return (stack);
		}
		_spinunlock(&def_stacks_lock);
	}

	/* allocate the stack struct that we'll return */
	stack = malloc(sizeof(*stack));
	if (stack == NULL)
		return (NULL);

	/* The smaller the stack, the smaller the random bias */
	if (thread->attr.stack_size > _thread_pagesize)
		rnd = arc4random() & (_thread_pagesize - 1);
	else if (thread->attr.stack_size == _thread_pagesize)
		rnd = arc4random() & (_thread_pagesize / 16 - 1);
	else
		rnd = 0;
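	/* keep the random offset a multiple of the required stack alignment */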
	rnd &= ~_STACKALIGNBYTES;

	/* If a stack address was provided, just fill in the details */
	if (thread->attr.stack_addr != NULL) {
		stack->base = base = thread->attr.stack_addr;
		stack->len  = thread->attr.stack_size;
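		/*
		 * Start the stack pointer at the end the stack grows
		 * away from, nudged inward by the random offset.
		 */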
#ifdef MACHINE_STACK_GROWS_UP
		stack->sp = base + rnd;
#else
		stack->sp = base + thread->attr.stack_size - rnd;
#endif
		/*
		 * This impossible guardsize marks this stack as
		 * application allocated so it won't be freed or
		 * cached by _rthread_free_stack()
		 */
		stack->guardsize = 1;
		return (stack);
	}

	/* round the requested sizes up to full pages */
	size = ROUND_TO_PAGE(thread->attr.stack_size);
	guardsize = ROUND_TO_PAGE(thread->attr.guard_size);

	/* check for overflow */
	if (size < thread->attr.stack_size ||
	    guardsize < thread->attr.guard_size ||
	    SIZE_MAX - size < guardsize) {
		free(stack);
		errno = EINVAL;
		return (NULL);
	}
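	/* the guard area is carved out of the same mapping as the stack */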
	size += guardsize;

	/* actually allocate the real stack */
	base = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (base == MAP_FAILED) {
		free(stack);
		return (NULL);
	}

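	/*
	 * Place the guard at the end the stack grows toward; the
	 * initial stack pointer sits at the opposite end, offset
	 * inward by the random bias.
	 */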
#ifdef MACHINE_STACK_GROWS_UP
	guard = base + size - guardsize;
	stack->sp = base + rnd;
#else
	guard = base;
	stack->sp = base + size - rnd;
#endif

	/* memory protect the guard region */
	if (guardsize != 0 && mprotect(guard, guardsize, PROT_NONE) == -1) {
		munmap(base, size);
		free(stack);
		return (NULL);
	}

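	/* len includes the guard, so a later munmap() releases it too */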
	stack->base = base;
	stack->guardsize = guardsize;
	stack->len = size;
	return (stack);
}

void
_rthread_free_stack(struct stack *stack)
{
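	/*
	 * A stack with the default size and guard can be cached on
	 * the reuse list instead of being unmapped.
	 */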
	if (stack->len == RTHREAD_STACK_SIZE_DEF + stack->guardsize &&
	    stack->guardsize == _rthread_attr_default.guard_size) {
		_spinlock(&def_stacks_lock);
		SLIST_INSERT_HEAD(&def_stacks, stack, link);
		_spinunlock(&def_stacks_lock);
	} else {
		/* unmap the storage unless it was application allocated */
		if (stack->guardsize != 1)
			munmap(stack->base, stack->len);
		free(stack);
	}
}