/*
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libpthread/thread/thr_stack.c,v 1.9 2004/10/06 08:11:07 davidxu Exp $
 * $DragonFly: src/lib/libthread_xu/thread/thr_stack.c,v 1.2 2005/02/21 13:47:21 davidxu Exp $
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"

/* Spare thread stack. */
struct stack {
        LIST_ENTRY(stack)       qe;             /* Stack queue linkage. */
        size_t                  stacksize;      /* Stack size (rounded up). */
        size_t                  guardsize;      /* Guard size. */
        void                    *stackaddr;     /* Stack address. */
};

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)       dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)       mstackq = LIST_HEAD_INITIALIZER(mstackq);

/**
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is mapped with PROT_NONE) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trouncing all over
 * the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *   |                                   |
 *   |             stack 3               | start of 3rd thread stack
 *   +-----------------------------------+
 *   |                                   |
 *   |       Red Zone (guard page)       | red zone for 2nd thread
 *   |                                   |
 *   +-----------------------------------+
 *   |  stack 2 - _thr_stack_default     | top of 2nd thread stack
 *   |                                   |
 *   |                                   |
 *   |                                   |
 *   |                                   |
 *   |             stack 2               |
 *   +-----------------------------------+ <-- start of 2nd thread stack
 *   |                                   |
 *   |       Red Zone                    | red zone for 1st thread
 *   |                                   |
 *   +-----------------------------------+
 *   |  stack 1 - _thr_stack_default     | top of 1st thread stack
 *   |                                   |
 *   |                                   |
 *   |                                   |
 *   |                                   |
 *   |             stack 1               |
 *   +-----------------------------------+ <-- start of 1st thread stack
 *   |                                   |     (initial value of last_stack)
 *   |       Red Zone                    |
 *   |                                   | red zone for main thread
 *   +-----------------------------------+
 *   | USRSTACK - _thr_stack_initial     | top of main thread stack
 *   |                                   | ^
 *   |                                   | |
 *   |                                   | |
 *   |                                   | | stack growth
 *   |                                   |
 *   +-----------------------------------+ <-- start of main thread stack
 *                                             (USRSTACK)
 * high memory
 *
 */
static void *last_stack = NULL;

/*
 * Round size up to the nearest multiple of
 * _thr_page_size.
 */
static inline size_t
round_up(size_t size)
{
        if (size % _thr_page_size != 0)
                size = ((size / _thr_page_size) + 1) *
                    _thr_page_size;
        return size;
}

int
_thr_stack_alloc(struct pthread_attr *attr)
{
        struct pthread *curthread = _get_curthread();
        struct stack *spare_stack;
        size_t stacksize;
        size_t guardsize;
        char *stackaddr;

        /*
         * Round up the stack size to the nearest multiple of _thr_page_size
         * so that mmap() will work.  If the stack size is not an even
         * multiple, we end up initializing things such that there is
         * unused space above the beginning of the stack, so the stack
         * sits snugly against its guard.
         */
        stacksize = round_up(attr->stacksize_attr);
        guardsize = round_up(attr->guardsize_attr);

        attr->stackaddr_attr = NULL;
        attr->flags &= ~THR_STACK_USER;

        /*
         * Use the thread list lock for synchronization of the
         * spare stack lists and allocations from usrstack.
         */
        THREAD_LIST_LOCK(curthread);
        /*
         * If the stack and guard sizes are default, try to allocate a stack
         * from the default-size stack cache:
         */
        if ((stacksize == THR_STACK_DEFAULT) &&
            (guardsize == _thr_guard_default)) {
                if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
                        /* Use the spare stack. */
                        LIST_REMOVE(spare_stack, qe);
                        attr->stackaddr_attr = spare_stack->stackaddr;
                }
        }
        /*
         * The user specified a non-default stack and/or guard size, so try
         * to allocate a stack from the non-default size stack cache, using
         * the rounded-up stack size (stacksize) in the search:
         */
        else {
                LIST_FOREACH(spare_stack, &mstackq, qe) {
                        if (spare_stack->stacksize == stacksize &&
                            spare_stack->guardsize == guardsize) {
                                LIST_REMOVE(spare_stack, qe);
                                attr->stackaddr_attr = spare_stack->stackaddr;
                                break;
                        }
                }
        }
        if (attr->stackaddr_attr != NULL) {
                /* A cached stack was found.  Release the lock. */
                THREAD_LIST_UNLOCK(curthread);
        }
        else {
                /* Allocate a stack from usrstack. */
                if (last_stack == NULL)
                        last_stack = _usrstack - _thr_stack_initial -
                            _thr_guard_default;

                /* Allocate a new stack. */
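                /*
                 * The new region spans [stackaddr, stackaddr + guardsize +
                 * stacksize) and sits just below the previously allocated
                 * stack.  The guard occupies the low guardsize bytes and
                 * the usable stack lies above it, since stacks grow down
                 * toward lower addresses.
                 */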
                stackaddr = last_stack - stacksize - guardsize;

                /*
                 * Even if stack allocation fails, we don't want to try to
                 * use this location again, so unconditionally decrement
                 * last_stack.  Under normal operating conditions, the most
                 * likely reason for an mmap() error is a stack overflow of
                 * the adjacent thread stack.
                 */
                last_stack -= (stacksize + guardsize);

                /* Release the lock before mmap'ing it. */
                THREAD_LIST_UNLOCK(curthread);

                /*
                 * Map the stack and guard page together, and split the
                 * guard page from the allocated space:
                 */
                if ((stackaddr = mmap(stackaddr, stacksize + guardsize,
                     PROT_READ | PROT_WRITE, MAP_STACK,
                     -1, 0)) != MAP_FAILED &&
                    (guardsize == 0 ||
                     mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
                        stackaddr += guardsize;
                } else {
                        if (stackaddr != MAP_FAILED)
                                munmap(stackaddr, stacksize + guardsize);
                        stackaddr = NULL;
                }
                attr->stackaddr_attr = stackaddr;
        }
        if (attr->stackaddr_attr != NULL)
                return (0);
        else
                return (-1);
}

/* This function must be called with _thread_list_lock held. */
void
_thr_stack_free(struct pthread_attr *attr)
{
        struct stack *spare_stack;

        if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
            && (attr->stackaddr_attr != NULL)) {
                spare_stack = (attr->stackaddr_attr + attr->stacksize_attr
                    - sizeof(struct stack));
                spare_stack->stacksize = round_up(attr->stacksize_attr);
                spare_stack->guardsize = round_up(attr->guardsize_attr);
                spare_stack->stackaddr = attr->stackaddr_attr;

                if (spare_stack->stacksize == THR_STACK_DEFAULT &&
                    spare_stack->guardsize == _thr_guard_default) {
                        /* Default stack/guard size. */
                        LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
                } else {
                        /* Non-default stack/guard size. */
                        LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
                }
                attr->stackaddr_attr = NULL;
        }
}
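#if 0
/*
 * Illustrative sketch only (never compiled): one way a caller such as the
 * thread-creation path might drive the allocator above.  It uses only
 * helpers and attribute fields that appear in this file; the function
 * itself and its default-size fallback are hypothetical.
 */
static int
example_stack_cycle(struct pthread_attr *attr)
{
        struct pthread *curthread = _get_curthread();

        /* Assume default sizes if the caller supplied none (hypothetical). */
        if (attr->stacksize_attr == 0)
                attr->stacksize_attr = THR_STACK_DEFAULT;
        if (attr->guardsize_attr == 0)
                attr->guardsize_attr = _thr_guard_default;

        /* Reuse a cached stack, or carve a new one below the last stack. */
        if (_thr_stack_alloc(attr) != 0)
                return (-1);

        /* ... a new thread would run on attr->stackaddr_attr here ... */

        /* Return the stack to the spare-stack cache under the list lock. */
        THREAD_LIST_LOCK(curthread);
        _thr_stack_free(attr);
        THREAD_LIST_UNLOCK(curthread);
        return (0);
}
#endif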