/*	$OpenBSD: rthread.c,v 1.71 2013/06/01 23:06:26 tedu Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */

#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/msg.h>
#if defined(__ELF__)
#include <sys/exec_elf.h>
#pragma weak _DYNAMIC
#endif

#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <poll.h>

#include <pthread.h>

#include "thread_private.h"	/* in libc/include */
#include "rthread.h"
#include "tcb.h"

static int concurrency_level;	/* not used */

struct _spinlock _SPINLOCK_UNLOCKED_ASSIGN = _SPINLOCK_UNLOCKED;

int _threads_ready;
size_t _thread_pagesize;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
struct _spinlock _thread_lock = _SPINLOCK_UNLOCKED;
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static struct _spinlock _thread_gc_lock = _SPINLOCK_UNLOCKED;
struct pthread _initial_thread;
struct thread_control_block _initial_thread_tcb;

struct pthread_attr _rthread_attr_default = {
#ifndef lint
	.stack_addr			= NULL,
	.stack_size			= RTHREAD_STACK_SIZE_DEF,
/*	.guard_size		set in _rthread_init */
	.detach_state			= PTHREAD_CREATE_JOINABLE,
	.contention_scope		= PTHREAD_SCOPE_SYSTEM,
	.sched_policy			= SCHED_OTHER,
	.sched_param = { .sched_priority = 0 },
	.sched_inherit			= PTHREAD_INHERIT_SCHED,
#else
	0
#endif
};

/*
 * internal support functions
 */
void
_spinlock(volatile struct _spinlock *lock)
{
	while (_atomic_lock(&lock->ticket))
		sched_yield();
}

int
_spinlocktry(volatile struct _spinlock *lock)
{
	return 0 == _atomic_lock(&lock->ticket);
}

void
_spinunlock(volatile struct _spinlock *lock)
{
	lock->ticket = _ATOMIC_LOCK_UNLOCKED;
}
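
/*
 * Editor's usage sketch (not part of the original source): these
 * primitives guard the short critical sections in this file, such as
 * the thread list and the GC list.  _spinlock() spins, yielding the
 * CPU, until the ticket is acquired; _spinunlock() releases it with a
 * plain store.  A minimal consumer, with a hypothetical shared counter:
 *
 *	static struct _spinlock counter_lock = _SPINLOCK_UNLOCKED;
 *	static int counter;
 *
 *	static void
 *	counter_bump(void)
 *	{
 *		_spinlock(&counter_lock);
 *		counter++;
 *		_spinunlock(&counter_lock);
 *	}
 */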

/*
 * This sets up the thread base for the initial thread so that it
 * references the errno location provided by libc.  For other threads
 * this is handled by __tfork_thread().
 */
void _rthread_initlib(void) __attribute__((constructor));
void _rthread_initlib(void)
{
	struct thread_control_block *tcb = &_initial_thread_tcb;

	/* use libc's errno for the main thread */
	TCB_INIT(tcb, &_initial_thread, ___errno());
	TCB_SET(tcb);
}

int *
__errno(void)
{
	return (TCB_ERRNOPTR());
}
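
/*
 * Editor's note: this works because the errno macro in <errno.h>
 * expands to a dereference of this function (on OpenBSD, essentially
 * "#define errno (*__errno())"), so each thread reads and writes the
 * errno slot held in its own TCB:
 *
 *	errno = EINTR;		is		*__errno() = EINTR;
 */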

static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}

/* ARGSUSED0 */
static void
sigthr_handler(__unused int sig)
{
	pthread_t self = pthread_self();

	/*
	 * Do nothing unless
	 * 1) pthread_cancel() has been called on this thread,
	 * 2) cancelation is enabled for it, and
	 * 3) we're not already in cancelation processing
	 */
	if ((self->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE|THREAD_DYING))
	    != (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
		return;

	/*
	 * If delaying cancels inside complex ops (pthread_cond_wait,
	 * pthread_join, etc), just mark that this has happened to
	 * prevent a race with going to sleep
	 */
	if (self->flags & THREAD_CANCEL_DELAY) {
		self->delayed_cancel = 1;
		return;
	}

	/*
	 * otherwise, if in a cancel point or async cancels are
	 * enabled, then exit
	 */
	if (self->cancel_point || (self->flags & THREAD_CANCEL_DEFERRED) == 0)
		pthread_exit(PTHREAD_CANCELED);
}

int
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	struct sigaction sa;

	thread->tid = getthrid();
	thread->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
	thread->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_rthread_debug_init();

	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	_rthread_attr_default.guard_size = _thread_pagesize;

	_threads_ready = 1;

	_rthread_debug(1, "rthread init\n");

#if defined(__ELF__)
	if (_DYNAMIC) {
		/*
		 * To avoid recursion problems in ld.so, we need to trigger the
		 * functions once to fully bind them before registering them
		 * for use.
		 */
		_rthread_dl_lock(0);
		_rthread_dl_lock(1);
		_rthread_bind_lock(0);
		_rthread_bind_lock(1);
		sched_yield();
		dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
		dlctl(NULL, DL_SETBINDLCK, _rthread_bind_lock);
	}
#endif

	/*
	 * Set the handler on the signal used for cancelation and
	 * suspension, and make sure it's unblocked
	 */
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	sa.sa_handler = sigthr_handler;
	_thread_sys_sigaction(SIGTHR, &sa, NULL);
	sigaddset(&sa.sa_mask, SIGTHR);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);

	return (0);
}

static void
_rthread_free(pthread_t thread)
{
	/* initial_thread.tid must remain valid */
	if (thread != &_initial_thread) {
		struct stack *stack = thread->stack;
		pid_t tid = thread->tid;
		void *arg = thread->arg;

		/* catch wrongdoers for the moment */
		memset(thread, 0xd0, sizeof(*thread));
		thread->stack = stack;
		thread->tid = tid;
		thread->arg = arg;
		_spinlock(&_thread_gc_lock);
		TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
	}
}

void
_rthread_setflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags |= flag;
	_spinunlock(&thread->flags_lock);
}

void
_rthread_clearflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags &= ~flag;
	_spinunlock(&thread->flags_lock);
}

/*
 * real pthread functions
 */
pthread_t
pthread_self(void)
{
	if (!_threads_ready)
		if (_rthread_init())
			return (NULL);

	return (TCB_THREAD());
}

static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:
	_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		if (thread->tid != 0)
			continue;
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		_rthread_debug(3, "rthread reaping %p stack %p\n",
		    (void *)thread, (void *)thread->stack);
		_rthread_free_stack(thread->stack);
		_rtld_free_tls(thread->arg,
		    sizeof(struct thread_control_block), sizeof(void *));
		free(thread);
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}

void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t thread = pthread_self();

	if (thread->flags & THREAD_DYING) {
		/*
		 * Called pthread_exit() from destructor or cancelation
		 * handler: blow up.  XXX write something to stderr?
		 */
		_exit(42);
	}

	_rthread_setflag(thread, THREAD_DYING);

	thread->retval = retval;

	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

#ifdef TCB_GET
	thread->arg = TCB_GET();
#else
	thread->arg = __get_tcb();
#endif
	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DONE;
		_spinunlock(&thread->flags_lock);
		_sem_post(&thread->donesem);
	}

	__threxit(&thread->tid);
	for (;;)
		;
}
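
/*
 * Editor's usage sketch: returning a value from the start routine and
 * calling pthread_exit() are equivalent; _rthread_start() above funnels
 * the start routine's return value into pthread_exit().
 *
 *	void *
 *	worker(void *arg)
 *	{
 *		if (arg == NULL)
 *			pthread_exit(NULL);
 *		return (arg);
 *	}
 */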

int
pthread_join(pthread_t thread, void **retval)
{
	int e;
	pthread_t self = pthread_self();

	e = 0;
	_enter_delayed_cancel(self);
	if (thread == NULL)
		e = EINVAL;
	else if (thread == self)
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
	    &self->delayed_cancel)) == 0) {
		if (retval)
			*retval = thread->retval;

		/*
		 * We should be the last having a ref to this thread,
		 * but someone stupid or evil might have detached it;
		 * in that case the thread will clean up after itself
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	_leave_delayed_cancel(self, e);
	_rthread_reaper();
	return (e);
}
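
/*
 * Editor's note: the error cases above are directly observable; for
 * example, a thread that joins itself gets EDEADLK and joining a
 * detached thread gets EINVAL:
 *
 *	assert(pthread_join(pthread_self(), NULL) == EDEADLK);
 */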

int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}
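
/*
 * Editor's usage sketch: a thread that will never be joined can be
 * detached right after creation, so that on exit it is queued for
 * _rthread_reaper() instead of waiting for a pthread_join() that never
 * comes ("worker" is a hypothetical start routine):
 *
 *	pthread_t t;
 *
 *	if (pthread_create(&t, NULL, worker, NULL) == 0)
 *		pthread_detach(t);
 */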

int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	extern int __isthreaded;
	struct thread_control_block *tcb;
	pthread_t thread;
	struct __tfork param;
	int rc = 0;

	if (!_threads_ready)
		if ((rc = _rthread_init()))
			return (rc);

	_rthread_reaper();

	thread = calloc(1, sizeof(*thread));
	if (!thread)
		return (errno);
	thread->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->fn = start_routine;
	thread->arg = arg;
	thread->tid = -1;

	thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}

	tcb = _rtld_allocate_tls(NULL, sizeof(*tcb), sizeof(void *));
	if (tcb == NULL) {
		rc = errno;
		goto fail2;
	}
	TCB_INIT(tcb, thread, &thread->myerrno);

	param.tf_tcb = tcb;
	param.tf_tid = &thread->tid;
	param.tf_stack = thread->stack->sp;

	_spinlock(&_thread_lock);
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_spinunlock(&_thread_lock);

	/* we're going to be multi-threaded real soon now */
	__isthreaded = 1;
	rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
	if (rc != -1) {
		/* success */
		*threadp = thread;
		return (0);
	}

	rc = errno;

	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);
	_rtld_free_tls(tcb, sizeof(*tcb), sizeof(void *));
fail2:
	_rthread_free_stack(thread->stack);
fail1:
	_rthread_free(thread);

	return (rc);
}
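
/*
 * Editor's usage sketch: a minimal program driving the create/join path
 * implemented above.  Note that, unlike most libc functions, the
 * pthread_* entry points return the error number instead of setting
 * errno.
 *
 *	#include <pthread.h>
 *	#include <stdio.h>
 *
 *	static void *
 *	worker(void *arg)
 *	{
 *		return (arg);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		pthread_t t;
 *		void *val;
 *
 *		if (pthread_create(&t, NULL, worker, "hello") != 0)
 *			return (1);
 *		if (pthread_join(t, &val) != 0)
 *			return (1);
 *		printf("%s\n", (char *)val);
 *		return (0);
 *	}
 */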

int
pthread_kill(pthread_t thread, int sig)
{
	return (kill(thread->tid, sig) == 0 ? 0 : errno);
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}
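
/*
 * Editor's note: pthread_t is a pointer in this implementation, so
 * pthread_equal() reduces to pointer comparison; portable code should
 * still use the function rather than comparing handles with ==
 * (handle_self_case() is a hypothetical helper):
 *
 *	if (pthread_equal(pthread_self(), t))
 *		handle_self_case();
 */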

int
pthread_cancel(pthread_t thread)
{
	_rthread_setflag(thread, THREAD_CANCELED);
	if (thread->flags & THREAD_CANCEL_ENABLE)
		kill(thread->tid, SIGTHR);
	return (0);
}

void
pthread_testcancel(void)
{
	if ((pthread_self()->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE)) ==
	    (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
		pthread_exit(PTHREAD_CANCELED);
}
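
/*
 * Editor's usage sketch: with the default deferred cancelation (set up
 * in _rthread_init() and pthread_create()), a long-running loop that
 * makes no blocking calls must poll for a pending cancel itself
 * (do_some_work() is a hypothetical helper):
 *
 *	void *
 *	crunch(void *arg)
 *	{
 *		for (;;) {
 *			pthread_testcancel();
 *			do_some_work();
 *		}
 *	}
 */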

int
pthread_setcancelstate(int state, int *oldstatep)
{
	pthread_t self = pthread_self();
	int oldstate;

	oldstate = self->flags & THREAD_CANCEL_ENABLE ?
	    PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE;
	if (state == PTHREAD_CANCEL_ENABLE) {
		_rthread_setflag(self, THREAD_CANCEL_ENABLE);
	} else if (state == PTHREAD_CANCEL_DISABLE) {
		_rthread_clearflag(self, THREAD_CANCEL_ENABLE);
	} else {
		return (EINVAL);
	}
	if (oldstatep)
		*oldstatep = oldstate;

	return (0);
}

int
pthread_setcanceltype(int type, int *oldtypep)
{
	pthread_t self = pthread_self();
	int oldtype;

	oldtype = self->flags & THREAD_CANCEL_DEFERRED ?
	    PTHREAD_CANCEL_DEFERRED : PTHREAD_CANCEL_ASYNCHRONOUS;
	if (type == PTHREAD_CANCEL_DEFERRED) {
		_rthread_setflag(self, THREAD_CANCEL_DEFERRED);
	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		_rthread_clearflag(self, THREAD_CANCEL_DEFERRED);
	} else {
		return (EINVAL);
	}
	if (oldtypep)
		*oldtypep = oldtype;

	return (0);
}
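
/*
 * Editor's usage sketch: the common pattern is to disable cancelation
 * around an update that must not be torn, then restore the previous
 * state rather than assuming it was PTHREAD_CANCEL_ENABLE
 * (update_shared_state() is a hypothetical helper):
 *
 *	int oldstate;
 *
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
 *	update_shared_state();
 *	pthread_setcancelstate(oldstate, NULL);
 */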

void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = calloc(1, sizeof(*clfn));
	if (!clfn)
		return;		/* XXX the cleanup handler is silently lost */
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}

void
pthread_cleanup_pop(int execute)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = self->cleanup_fns;
	if (clfn) {
		self->cleanup_fns = clfn->next;
		if (execute)
			clfn->fn(clfn->arg);
		free(clfn);
	}
}
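
/*
 * Editor's usage sketch: pushes and pops must pair up within a thread,
 * and pthread_exit() above runs any handlers still on the list.  A
 * typical use keeps a mutex from being leaked by cancelation (the
 * mutex m, unlock_mutex(), and the work function are hypothetical):
 *
 *	pthread_mutex_lock(&m);
 *	pthread_cleanup_push(unlock_mutex, &m);
 *	work_that_may_be_canceled();
 *	pthread_cleanup_pop(1);
 */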

int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}

/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags %d name %s\n",
		    thread->tid, thread->flags, thread->name);
	_spinunlock(&_thread_lock);
}

#if defined(__ELF__)
/*
 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 * the function called via atexit() to invoke all destructors.  The latter
 * two call shared-object destructors, which may need to call dlclose(),
 * so this lock needs to permit recursive locking.
 * The specific code here was extracted from _rthread_mutex_lock() and
 * pthread_mutex_unlock() and simplified to use the static variables.
 */
void
_rthread_dl_lock(int what)
{
	static struct _spinlock lock = _SPINLOCK_UNLOCKED;
	static pthread_t owner = NULL;
	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
	static int count = 0;

	if (what == 0) {
		pthread_t self = pthread_self();

		/* lock, possibly recursive */
		_spinlock(&lock);
		if (owner == NULL) {
			owner = self;
		} else if (owner != self) {
			TAILQ_INSERT_TAIL(&lockers, self, waiting);
			while (owner != self) {
				__thrsleep(self, 0 | _USING_TICKETS, NULL,
				    &lock.ticket, NULL);
				_spinlock(&lock);
			}
		}
		count++;
		_spinunlock(&lock);
	} else {
		/* unlock, possibly recursive */
		if (--count == 0) {
			pthread_t next;

			_spinlock(&lock);
			owner = next = TAILQ_FIRST(&lockers);
			if (next != NULL)
				TAILQ_REMOVE(&lockers, next, waiting);
			_spinunlock(&lock);
			if (next != NULL)
				__thrwakeup(next, 1);
		}
	}
}
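
/*
 * Editor's sketch of why recursion is needed here: the atexit() pass
 * that runs shared-object destructors takes this lock, and a destructor
 * may itself call dlclose(), which takes it again from the same thread
 * (other_handle is a hypothetical dlopen() result):
 *
 *	__attribute__((destructor)) static void
 *	cleanup(void)
 *	{
 *		dlclose(other_handle);
 *	}
 */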

void
_rthread_bind_lock(int what)
{
	static struct _spinlock lock = _SPINLOCK_UNLOCKED;

	if (what == 0)
		_spinlock(&lock);
	else
		_spinunlock(&lock);
}
#endif

#ifdef __ELF__
#define CERROR_SYMBOL __cerror
#else
#define CERROR_SYMBOL _cerror
#endif

/*
 * XXX: Bogus type signature, but we only need to be able to emit a
 * reference to it below.
 */
extern void CERROR_SYMBOL(void);

/*
 * All weak references used within libc that are redefined in libpthread
 * MUST be in this table.  This is necessary to force the proper version
 * to be used when linking -static.
 */
static void *__libc_overrides[] __used = {
	&CERROR_SYMBOL,
	&__errno,
	&_thread_arc4_lock,
	&_thread_arc4_unlock,
	&_thread_atexit_lock,
	&_thread_atexit_unlock,
	&_thread_malloc_lock,
	&_thread_malloc_unlock,
	&_thread_mutex_destroy,
	&_thread_mutex_lock,
	&_thread_mutex_unlock,
	&_thread_tag_lock,
	&_thread_tag_storage,
	&_thread_tag_unlock,
	&accept,
	&close,
	&closefrom,
	&connect,
	&fcntl,
	&flockfile,
	&fork,
	&fsync,
	&ftrylockfile,
	&funlockfile,
	&msgrcv,
	&msgsnd,
	&msync,
	&nanosleep,
	&open,
	&openat,
	&poll,
	&pread,
	&preadv,
	&pwrite,
	&pwritev,
	&read,
	&readv,
	&recvfrom,
	&recvmsg,
	&select,
	&sendmsg,
	&sendto,
	&sigaction,
	&sigprocmask,
	&sigsuspend,
	&vfork,
	&wait4,
	&write,
	&writev,
};
733