/*	$OpenBSD: rthread_stack.c,v 1.8 2012/08/04 21:56:51 guenther Exp $ */
/*	$snafu: rthread_stack.c,v 1.12 2005/01/11 02:45:28 marc Exp $ */

/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */

#include <sys/types.h>
#include <sys/mman.h>

#include <machine/param.h>

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#include "rthread.h"

/*
 * Follow uthread's example and keep around stacks that have default
 * attributes for possible reuse.
 */
static SLIST_HEAD(, stack) def_stacks = SLIST_HEAD_INITIALIZER(head);
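/* def_stacks_lock protects the def_stacks list above */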
static _spinlock_lock_t def_stacks_lock = _SPINLOCK_UNLOCKED;

struct stack *
_rthread_alloc_stack(pthread_t thread)
{
	struct stack *stack;
	caddr_t base;
	caddr_t guard;
	size_t size;
	size_t guardsize;

	/* if the request uses the defaults, try to reuse one */
	if (thread->attr.stack_addr == NULL &&
	    thread->attr.stack_size == RTHREAD_STACK_SIZE_DEF &&
	    thread->attr.guard_size == _rthread_attr_default.guard_size) {
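		/*
		 * Any cached stack will do: everything on this list was
		 * created with the same default size and guard, so the
		 * first entry is as good as any other.
		 */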
		_spinlock(&def_stacks_lock);
		stack = SLIST_FIRST(&def_stacks);
		if (stack != NULL) {
			SLIST_REMOVE_HEAD(&def_stacks, link);
			_spinunlock(&def_stacks_lock);
			return (stack);
		}
		_spinunlock(&def_stacks_lock);
	}

	/* allocate the stack struct that we'll return */
	stack = malloc(sizeof(*stack));
	if (stack == NULL)
		return (NULL);

	/* If a stack address was provided, just fill in the details */
	if (thread->attr.stack_addr != NULL) {
		stack->base = base = thread->attr.stack_addr;
		stack->len  = thread->attr.stack_size;
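		/*
		 * The initial stack pointer is at the low end of the
		 * region if the stack grows up, else at the high end.
		 */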
#ifdef MACHINE_STACK_GROWS_UP
		stack->sp = base;
#else
		stack->sp = base + thread->attr.stack_size;
#endif
		/*
		 * This impossible guardsize marks this stack as
		 * application allocated so it won't be freed or
		 * cached by _rthread_free_stack()
		 */
		stack->guardsize = 1;
		return (stack);
	}
	/* round the requested sizes up to full pages */
	size = ROUND_TO_PAGE(thread->attr.stack_size);
	guardsize = ROUND_TO_PAGE(thread->attr.guard_size);

	/*
	 * check for overflow: neither ROUND_TO_PAGE() result may have
	 * wrapped, and the size + guardsize sum below must not either
	 */
	if (size < thread->attr.stack_size ||
	    guardsize < thread->attr.guard_size ||
	    SIZE_MAX - size < guardsize) {
		free(stack);
		errno = EINVAL;
		return (NULL);
	}
	size += guardsize;

	/* actually allocate the real stack */
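	/*
	 * Note: bare MAP_ANON (without MAP_PRIVATE) is accepted by
	 * OpenBSD; strictly portable code would pass
	 * MAP_PRIVATE | MAP_ANON for private anonymous memory.
	 */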
	base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
	if (base == MAP_FAILED) {
		free(stack);
		return (NULL);
	}

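	/*
	 * Place the guard at the far end of the mapping when stacks
	 * grow upward, and at the base when they grow downward, so it
	 * always lies just beyond the usable stack.
	 */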
#ifdef MACHINE_STACK_GROWS_UP
	guard = base + size - guardsize;
	stack->sp = base;
#else
	guard = base;
	stack->sp = base + size;
#endif

	/* memory protect the guard region */
	if (guardsize != 0 && mprotect(guard, guardsize, PROT_NONE) == -1) {
		munmap(base, size);
		free(stack);
		return (NULL);
	}

	stack->base = base;
	stack->guardsize = guardsize;
	stack->len = size;
	return (stack);
}

void
_rthread_free_stack(struct stack *stack)
{
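	/*
	 * Cache the stack for reuse only if it exactly matches the
	 * default attributes: len was recorded as stack size plus guard
	 * size at allocation time.  Application-provided stacks carry
	 * the impossible guardsize of 1, which never matches the
	 * page-rounded default.
	 */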
	if (stack->len == RTHREAD_STACK_SIZE_DEF + stack->guardsize &&
	    stack->guardsize == _rthread_attr_default.guard_size) {
		_spinlock(&def_stacks_lock);
		SLIST_INSERT_HEAD(&def_stacks, stack, link);
		_spinunlock(&def_stacks_lock);
	} else {
		/* unmap the storage unless it was application allocated */
		if (stack->guardsize != 1)
			munmap(stack->base, stack->len);
		free(stack);
	}
}
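
/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * such as pthread_create() might pair the two functions roughly like
 * this, assuming a pthread_t "thread" whose attr fields are already
 * set up:
 *
 *	struct stack *stack = _rthread_alloc_stack(thread);
 *	if (stack == NULL)
 *		return (EAGAIN);
 *	thread->stack = stack;
 *	...
 *	_rthread_free_stack(thread->stack);
 */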