/*	$OpenBSD: rthread.c,v 1.33 2006/01/06 07:29:36 marc Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */

#include <sys/param.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/wait.h>

#include <machine/spinlock.h>

#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <err.h>
#include <errno.h>

#include <pthread.h>

#include "thread_private.h"	/* in libc/include */
#include "rthread.h"

static int concurrency_level;	/* not used */

int _threads_ready;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
_spinlock_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
struct pthread _initial_thread;

int rfork_thread(int, void *, void (*)(void *), void *);

/*
 * internal support functions
 */
void
_spinlock(_spinlock_lock_t *lock)
{

	while (_atomic_lock(lock))
		pthread_yield();
}

void
_spinunlock(_spinlock_lock_t *lock)
{

	*lock = _SPINLOCK_UNLOCKED;
}

static pthread_t
_rthread_findself(void)
{
	pthread_t me;
	pid_t tid = getthrid();

	LIST_FOREACH(me, &_thread_list, threads)
		if (me->tid == tid)
			break;

	return (me);
}

static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	/* ensure parent returns from rfork, sets up tid */
	_spinlock(&_thread_lock);
	_spinunlock(&_thread_lock);
	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}

static int
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	extern int __isthreaded;

	thread->tid = getthrid();
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_rthread_kq = kqueue();
	if (_rthread_kq == -1)
		return (errno);
	_rthread_debug_init();
	_rthread_debug(1, "rthread init\n");
	_threads_ready = 1;
	__isthreaded = 1;

	return (0);
}

static void
_rthread_free(pthread_t thread)
{
	/* catch wrongdoers for the moment */
	memset(thread, 0xd0, sizeof(*thread));
	if (thread != &_initial_thread)
		free(thread);
}

static void
_rthread_setflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags |= flag;
	_spinunlock(&thread->flags_lock);
}

static void
_rthread_clearflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags &= ~flag;
	_spinunlock(&thread->flags_lock);
}

/*
 * real pthread functions
 */
pthread_t
pthread_self(void)
{
	pthread_t thread;

	if (!_threads_ready)
		if (_rthread_init())
			return (NULL);

	_spinlock(&_thread_lock);
	thread = _rthread_findself();
	_spinunlock(&_thread_lock);

	return (thread);
}

void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	pid_t tid;
	struct stack *stack;
	pthread_t thread = pthread_self();

	thread->retval = retval;

	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

	_sem_post(&thread->donesem);

	stack = thread->stack;
	tid = thread->tid;
	if (thread->flags & THREAD_DETACHED)
		_rthread_free(thread);
	else
		_rthread_setflag(thread, THREAD_DONE);

	if (tid != _initial_thread.tid)
		_rthread_add_to_reaper(tid, stack);

	_rthread_reaper();
	threxit(0);
	for (;;);
}

int
pthread_join(pthread_t thread, void **retval)
{
	int e;

	if (thread->tid == getthrid())
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else {
		_sem_wait(&thread->donesem, 0, 0);
		if (retval)
			*retval = thread->retval;
		e = 0;
		/*
		 * We should be the last one holding a reference to this
		 * thread, but someone stupid or evil might have detached it;
		 * in that case the thread will clean up after itself.
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	_rthread_reaper();
	return (e);
}

int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}

int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	pthread_t thread;
	pid_t tid;
	int rc = 0;

	if (!_threads_ready)
		if ((rc = _rthread_init()))
			return (rc);

	thread = malloc(sizeof(*thread));
	if (!thread)
		return (errno);
	memset(thread, 0, sizeof(*thread));
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	thread->fn = start_routine;
	thread->arg = arg;
	if (attr)
		thread->attr = *(*attr);
	else {
		thread->attr.stack_size = RTHREAD_STACK_SIZE_DEF;
		thread->attr.guard_size = sysconf(_SC_PAGESIZE);
		thread->attr.stack_size -= thread->attr.guard_size;
	}
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;

	_spinlock(&_thread_lock);

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}
	LIST_INSERT_HEAD(&_thread_list, thread, threads);

	tid = rfork_thread(RFPROC | RFTHREAD | RFMEM | RFNOWAIT,
	    thread->stack->sp, _rthread_start, thread);
	if (tid == -1) {
		rc = errno;
		goto fail2;
	}
	/* new thread will appear in _rthread_start */
	thread->tid = tid;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
	*threadp = thread;

	/*
	 * Since _rthread_start() acquires the thread lock and due to the way
	 * signal delivery is implemented, this is not a race.
	 */
	if (thread->attr.create_suspended)
		kill(thread->tid, SIGSTOP);

	_spinunlock(&_thread_lock);

	return (0);

fail2:
	_rthread_free_stack(thread->stack);
	LIST_REMOVE(thread, threads);
fail1:
	_spinunlock(&_thread_lock);
	_rthread_free(thread);

	return (rc);
}

int
pthread_kill(pthread_t thread, int sig)
{
	return (kill(thread->tid, sig));
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}

int
pthread_cancel(pthread_t thread)
{

	_rthread_setflag(thread, THREAD_CANCELLED);
	return (0);
}

void
pthread_testcancel(void)
{
	if ((pthread_self()->flags & (THREAD_CANCELLED|THREAD_CANCEL_ENABLE)) ==
	    (THREAD_CANCELLED|THREAD_CANCEL_ENABLE))
		pthread_exit(PTHREAD_CANCELED);
}

int
pthread_setcancelstate(int state, int *oldstatep)
{
	pthread_t self = pthread_self();
	int oldstate;

	oldstate = self->flags & THREAD_CANCEL_ENABLE ?
	    PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE;
	if (state == PTHREAD_CANCEL_ENABLE) {
		_rthread_setflag(self, THREAD_CANCEL_ENABLE);
		pthread_testcancel();
	} else if (state == PTHREAD_CANCEL_DISABLE) {
		_rthread_clearflag(self, THREAD_CANCEL_ENABLE);
	} else {
		return (EINVAL);
	}
	if (oldstatep)
		*oldstatep = oldstate;

	return (0);
}

int
pthread_setcanceltype(int type, int *oldtypep)
{
	pthread_t self = pthread_self();
	int oldtype;

	oldtype = self->flags & THREAD_CANCEL_DEFERRED ?
	    PTHREAD_CANCEL_DEFERRED : PTHREAD_CANCEL_ASYNCHRONOUS;
	if (type == PTHREAD_CANCEL_DEFERRED) {
		_rthread_setflag(self, THREAD_CANCEL_DEFERRED);
		pthread_testcancel();
	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		_rthread_clearflag(self, THREAD_CANCEL_DEFERRED);
	} else {
		return (EINVAL);
	}
	if (oldtypep)
		*oldtypep = oldtype;

	return (0);
}

void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = malloc(sizeof(*clfn));
	if (!clfn)
		return;
	memset(clfn, 0, sizeof(*clfn));
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}

void
pthread_cleanup_pop(int execute)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = self->cleanup_fns;
	if (clfn) {
		self->cleanup_fns = clfn->next;
		if (execute)
			clfn->fn(clfn->arg);
		free(clfn);
	}
}

int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}

/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags %d name %s\n",
		    thread->tid, thread->flags, thread->name);
	_spinunlock(&_thread_lock);
}