/* $OpenBSD: rthread_stack.c,v 1.7 2012/02/19 04:54:40 guenther Exp $ */
/* $snafu: rthread_stack.c,v 1.12 2005/01/11 02:45:28 marc Exp $ */

/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */

#include <sys/types.h>
#include <sys/mman.h>

#include <machine/param.h>

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#include "rthread.h"

/*
 * Follow uthread's example and keep around stacks that have default
 * attributes for possible reuse.
 */
static SLIST_HEAD(, stack) def_stacks = SLIST_HEAD_INITIALIZER(head);
static _spinlock_lock_t def_stacks_lock = _SPINLOCK_UNLOCKED;
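/*
 * def_stacks is the free list: _rthread_free_stack() pushes
 * default-attribute stacks onto it and _rthread_alloc_stack() pops
 * them for reuse; def_stacks_lock serializes both paths.
 */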

struct stack *
_rthread_alloc_stack(pthread_t thread)
{
	struct stack *stack;
	caddr_t base;
	caddr_t guard;
	size_t size;
	size_t guardsize;

	/* if the request uses the defaults, try to reuse one */
	if (thread->attr.stack_addr == NULL &&
	    thread->attr.stack_size == RTHREAD_STACK_SIZE_DEF &&
	    thread->attr.guard_size == _rthread_attr_default.guard_size) {
		_spinlock(&def_stacks_lock);
		stack = SLIST_FIRST(&def_stacks);
		if (stack != NULL)
			SLIST_REMOVE_HEAD(&def_stacks, link);
		_spinunlock(&def_stacks_lock);
		if (stack != NULL)
			return (stack);
	}

	/* allocate the stack struct that we'll return */
	stack = malloc(sizeof(*stack));
	if (stack == NULL)
		return (NULL);

	/* If a stack address was provided, just fill in the details */
	if (thread->attr.stack_addr != NULL) {
		stack->base = base = thread->attr.stack_addr;
		stack->len  = thread->attr.stack_size;
#ifdef MACHINE_STACK_GROWS_UP
		stack->sp = base;
#else
		stack->sp = base + thread->attr.stack_size;
#endif
		/*
		 * This impossible guardsize marks this stack as
		 * application allocated so it won't be freed or
		 * cached by _rthread_free_stack().  Real guard
		 * sizes are page-rounded, so 1 can never occur.
		 */
		stack->guardsize = 1;
		return (stack);
	}

	/* round the requested sizes up to full pages */
	size = ROUND_TO_PAGE(thread->attr.stack_size);
	guardsize = ROUND_TO_PAGE(thread->attr.guard_size);

	/* check for overflow */
	if (size < thread->attr.stack_size ||
	    guardsize < thread->attr.guard_size ||
	    SIZE_MAX - size < guardsize) {
		free(stack);
		errno = EINVAL;
		return (NULL);
	}
	size += guardsize;
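	/*
	 * The guard region is carved out of this single mapping, so
	 * size now covers both the usable stack and the guard pages.
	 */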

	/* actually allocate the real stack */
	base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
	if (base == MAP_FAILED) {
		free(stack);
		return (NULL);
	}

#ifdef MACHINE_STACK_GROWS_UP
	guard = base + size - guardsize;
	stack->sp = base;
#else
	guard = base;
	stack->sp = base + size;
#endif
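	/*
	 * Resulting layout, low to high addresses:
	 *	grows up:   [ sp -> stack ....... | guard ]
	 *	grows down: [ guard | stack ....... <- sp ]
	 */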

	/* memory protect the guard region */
	if (guardsize != 0 && mprotect(guard, guardsize, PROT_NONE) == -1) {
		munmap(base, size);
		free(stack);
		return (NULL);
	}

	stack->base = base;
	stack->guardsize = guardsize;
	stack->len = size;
	return (stack);
}
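
/*
 * Illustrative sketch, not part of this file: how a caller such as
 * pthread_create() might pair these helpers.  The "thread" setup and
 * error handling shown here are hypothetical.
 *
 *	struct stack *stack;
 *
 *	stack = _rthread_alloc_stack(thread);
 *	if (stack == NULL)
 *		return (errno);
 *	thread->stack = stack;
 *	... start the new context with its stack pointer at stack->sp ...
 *	... once the thread has exited, return the stack with ...
 *	_rthread_free_stack(thread->stack);
 */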

void
_rthread_free_stack(struct stack *stack)
{
	if (stack->len == RTHREAD_STACK_SIZE_DEF + stack->guardsize &&
	    stack->guardsize == _rthread_attr_default.guard_size) {
		_spinlock(&def_stacks_lock);
		SLIST_INSERT_HEAD(&def_stacks, stack, link);
		_spinunlock(&def_stacks_lock);
	} else {
		/* unmap the storage unless it was application allocated */
		if (stack->guardsize != 1)
			munmap(stack->base, stack->len);
		free(stack);
	}
}
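
/*
 * Note: the cache above is unbounded; stacks pushed onto def_stacks
 * stay mapped until a later thread with default attributes reuses them.
 */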