/*	$OpenBSD: rthread.c,v 1.62 2012/06/21 00:56:59 guenther Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */

#include <sys/types.h>

#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <dlfcn.h>

#include <pthread.h>

#include "thread_private.h"	/* in libc/include */
#include "rthread.h"
#include "tcb.h"

static int concurrency_level;	/* not used */

int _threads_ready;
size_t _thread_pagesize;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
_spinlock_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static _spinlock_lock_t _thread_gc_lock = _SPINLOCK_UNLOCKED;
struct pthread _initial_thread;
struct thread_control_block _initial_thread_tcb;

struct pthread_attr _rthread_attr_default = {
#ifndef lint
	.stack_addr			= NULL,
	.stack_size			= RTHREAD_STACK_SIZE_DEF,
/*	.guard_size		set in _rthread_init */
	.detach_state			= PTHREAD_CREATE_JOINABLE,
	.contention_scope		= PTHREAD_SCOPE_SYSTEM,
	.sched_policy			= SCHED_OTHER,
	.sched_param = { .sched_priority = 0 },
	.sched_inherit			= PTHREAD_INHERIT_SCHED,
#else
	0
#endif
};

/*
 * internal support functions
 */
void
_spinlock(_spinlock_lock_t *lock)
{

	while (_atomic_lock(lock))
		sched_yield();
}

void
_spinunlock(_spinlock_lock_t *lock)
{

	*lock = _SPINLOCK_UNLOCKED;
}
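
/*
 * Usage sketch (illustrative only, not part of the library): the pair
 * above guards short critical sections, e.g. a counter shared between
 * threads.  The names below are hypothetical.
 */
#if 0
static _spinlock_lock_t counter_lock = _SPINLOCK_UNLOCKED;
static int counter;

static void
counter_bump(void)
{
	_spinlock(&counter_lock);	/* spins, yielding, until acquired */
	counter++;			/* keep the critical section short */
	_spinunlock(&counter_lock);	/* plain store releases the lock */
}
#endif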

/*
 * This sets up the thread base for the initial thread so that it
 * references the errno location provided by libc.  For other threads
 * this is handled by __tfork_thread()
 */
void _rthread_initlib(void) __attribute__((constructor));
void _rthread_initlib(void)
{
	struct thread_control_block *tcb = &_initial_thread_tcb;

	/* use libc's errno for the main thread */
	TCB_INIT(tcb, &_initial_thread, ___errno());
	TCB_SET(tcb);
}

int *
__errno(void)
{
	return (TCB_ERRNOPTR());
}

static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}

/* ARGSUSED0 */
static void
sigthr_handler(__unused int sig)
{
	pthread_t self = pthread_self();

	/*
	 * Do nothing unless
	 * 1) pthread_cancel() has been called on this thread,
	 * 2) cancelation is enabled for it, and
	 * 3) we're not already in cancelation processing
	 */
	if ((self->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE|THREAD_DYING))
	    != (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
		return;

	/*
	 * If delaying cancels inside complex ops (pthread_cond_wait,
	 * pthread_join, etc), just mark that this has happened to
	 * prevent a race with going to sleep
	 */
	if (self->flags & THREAD_CANCEL_DELAY) {
		self->delayed_cancel = 1;
		return;
	}

	/*
	 * otherwise, if in a cancel point or async cancels are
	 * enabled, then exit
	 */
	if (self->cancel_point || (self->flags & THREAD_CANCEL_DEFERRED) == 0)
		pthread_exit(PTHREAD_CANCELED);
}

int
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	struct sigaction sa;

	thread->tid = getthrid();
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_rthread_debug_init();

	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	_rthread_attr_default.guard_size = _thread_pagesize;

	_threads_ready = 1;

	_rthread_debug(1, "rthread init\n");

#if defined(__ELF__) && defined(PIC)
	/*
	 * To avoid recursion problems in ld.so, we need to trigger the
	 * functions once to fully bind them before registering them
	 * for use.
	 */
	_rthread_dl_lock(0);
	_rthread_dl_lock(1);
	_rthread_bind_lock(0);
	_rthread_bind_lock(1);
	sched_yield();
	dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
	dlctl(NULL, DL_SETBINDLCK, _rthread_bind_lock);
#endif

	/*
	 * Set the handler on the signal used for cancelation and
	 * suspension, and make sure it's unblocked
	 */
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	sa.sa_handler = sigthr_handler;
	_thread_sys_sigaction(SIGTHR, &sa, NULL);
	sigaddset(&sa.sa_mask, SIGTHR);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);

	return (0);
}

static void
_rthread_free(pthread_t thread)
{
	/* catch wrongdoers for the moment */
	/* initial_thread.tid must remain valid */
	if (thread != &_initial_thread) {
		struct stack *stack = thread->stack;
		pid_t tid = thread->tid;
		void *arg = thread->arg;

		/* catch wrongdoers for the moment */
		memset(thread, 0xd0, sizeof(*thread));
		thread->stack = stack;
		thread->tid = tid;
		thread->arg = arg;
		_spinlock(&_thread_gc_lock);
		TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
	}
}

void
_rthread_setflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags |= flag;
	_spinunlock(&thread->flags_lock);
}

void
_rthread_clearflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags &= ~flag;
	_spinunlock(&thread->flags_lock);
}

/*
 * real pthread functions
 */
pthread_t
pthread_self(void)
{
	if (!_threads_ready)
		if (_rthread_init())
			return (NULL);

	return (TCB_THREAD());
}

static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:
	_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		if (thread->tid != 0)
			continue;
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		_rthread_debug(3, "rthread reaping %p stack %p\n",
		    (void *)thread, (void *)thread->stack);
		_rthread_free_stack(thread->stack);
		_rtld_free_tls(thread->arg,
		    sizeof(struct thread_control_block), sizeof(void *));
		free(thread);
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}

void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t thread = pthread_self();

	if (thread->flags & THREAD_DYING) {
		/*
		 * Called pthread_exit() from destructor or cancelation
		 * handler: blow up.  XXX write something to stderr?
		 */
		_exit(42);
	}

	_rthread_setflag(thread, THREAD_DYING);

	thread->retval = retval;

	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

#ifdef TCB_GET
	thread->arg = TCB_GET();
#else
	thread->arg = __get_tcb();
#endif
	if (thread->flags & THREAD_DETACHED)
		_rthread_free(thread);
	else {
		_rthread_setflag(thread, THREAD_DONE);
		_sem_post(&thread->donesem);
	}

	__threxit(&thread->tid);
	for (;;)
		;
}
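
/*
 * Illustrative sketch (hypothetical functions, not part of the
 * library): returning a pointer from the start routine and calling
 * pthread_exit() with it are equivalent, because _rthread_start()
 * above passes the start routine's return value to pthread_exit().
 */
#if 0
static void *
start_implicit(void *arg)
{
	return (arg);		/* reaches pthread_exit() via _rthread_start() */
}

static void *
start_explicit(void *arg)
{
	pthread_exit(arg);	/* never returns */
}
#endif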

int
pthread_join(pthread_t thread, void **retval)
{
	int e;
	pthread_t self = pthread_self();

	e = 0;
	_enter_delayed_cancel(self);
	if (thread == NULL)
		e = EINVAL;
	else if (thread == self)
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
	    &self->delayed_cancel)) == 0) {
		if (retval)
			*retval = thread->retval;

		/*
		 * We should be the last one with a ref to this thread,
		 * but someone stupid or evil might have detached it;
		 * in that case the thread will clean up after itself
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	_leave_delayed_cancel(self, e);
	_rthread_reaper();
	return (e);
}
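
/*
 * Usage sketch (hypothetical wrapper, not part of the library): join
 * a thread and pick up the value it passed to pthread_exit().  Note
 * that pthread functions return an error number rather than setting
 * errno.
 */
#if 0
static int
join_example(pthread_t t)
{
	void *retval;
	int error;

	error = pthread_join(t, &retval);	/* blocks until t exits */
	if (error != 0)				/* EINVAL, EDEADLK, ... */
		fprintf(stderr, "pthread_join: %s\n", strerror(error));
	return (error);
}
#endif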

int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}
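
/*
 * Sketch (illustrative only): a thread that will never be joined can
 * detach itself, so that once it exits its resources are reclaimed by
 * _rthread_reaper() instead of being held for a join that never comes.
 */
#if 0
static void *
fire_and_forget(void *arg)
{
	pthread_detach(pthread_self());
	/* ... do the work ... */
	return (NULL);
}
#endif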

int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	extern int __isthreaded;
	struct thread_control_block *tcb;
	pthread_t thread;
	struct __tfork param;
	int rc = 0;

	if (!_threads_ready)
		if ((rc = _rthread_init()))
			return (rc);

	_rthread_reaper();

	thread = calloc(1, sizeof(*thread));
	if (!thread)
		return (errno);
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	thread->fn = start_routine;
	thread->arg = arg;
	thread->tid = -1;

	thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}

	tcb = _rtld_allocate_tls(NULL, sizeof(*tcb), sizeof(void *));
	if (tcb == NULL) {
		rc = errno;
		goto fail2;
	}
	TCB_INIT(tcb, thread, &thread->myerrno);

	param.tf_tcb = tcb;
	param.tf_tid = &thread->tid;
	param.tf_stack = thread->stack->sp;

	_spinlock(&_thread_lock);
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_spinunlock(&_thread_lock);

	/* we're going to be multi-threaded real soon now */
	__isthreaded = 1;
	rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
	if (rc != -1) {
		/* success */
		*threadp = thread;
		return (0);
	}

	rc = errno;

	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);
	_rtld_free_tls(tcb, sizeof(*tcb), sizeof(void *));
fail2:
	_rthread_free_stack(thread->stack);
fail1:
	_rthread_free(thread);

	return (rc);
}
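
/*
 * Usage sketch (hypothetical names, not part of the library): create
 * a joinable thread with default attributes, checking the error-number
 * return convention used throughout this file.
 */
#if 0
static void *
worker(void *arg)
{
	return (arg);
}

static int
spawn_example(void)
{
	pthread_t t;
	void *res;
	int error;

	error = pthread_create(&t, NULL, worker, "hello");
	if (error != 0)			/* e.g. EAGAIN or ENOMEM */
		return (error);
	return (pthread_join(t, &res));	/* res == "hello" on success */
}
#endif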

int
pthread_kill(pthread_t thread, int sig)
{
	return (kill(thread->tid, sig) == 0 ? 0 : errno);
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}

int
pthread_cancel(pthread_t thread)
{

	_rthread_setflag(thread, THREAD_CANCELED);
	if (thread->flags & THREAD_CANCEL_ENABLE)
		kill(thread->tid, SIGTHR);
	return (0);
}

void
pthread_testcancel(void)
{
	if ((pthread_self()->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE)) ==
	    (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
		pthread_exit(PTHREAD_CANCELED);
}
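
/*
 * Sketch (illustrative only): under deferred cancelation (the
 * default), a compute-bound loop with no blocking calls can poll
 * pthread_testcancel() so that pthread_cancel() takes effect promptly.
 */
#if 0
static void *
cruncher(void *arg)
{
	for (;;) {
		/* ... pure computation, no cancelation points ... */
		pthread_testcancel();	/* exits here if canceled */
	}
	/* NOTREACHED */
}
#endif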

int
pthread_setcancelstate(int state, int *oldstatep)
{
	pthread_t self = pthread_self();
	int oldstate;

	oldstate = self->flags & THREAD_CANCEL_ENABLE ?
	    PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE;
	if (state == PTHREAD_CANCEL_ENABLE) {
		_rthread_setflag(self, THREAD_CANCEL_ENABLE);
	} else if (state == PTHREAD_CANCEL_DISABLE) {
		_rthread_clearflag(self, THREAD_CANCEL_ENABLE);
	} else {
		return (EINVAL);
	}
	if (oldstatep)
		*oldstatep = oldstate;

	return (0);
}
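
/*
 * Sketch (illustrative only): temporarily disable cancelation around
 * a section that must run to completion, then restore the previous
 * state instead of assuming it was PTHREAD_CANCEL_ENABLE.
 */
#if 0
static void
no_cancel_section(void)
{
	int oldstate;

	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	/* ... work that must not be interrupted ... */
	pthread_setcancelstate(oldstate, NULL);
}
#endif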

int
pthread_setcanceltype(int type, int *oldtypep)
{
	pthread_t self = pthread_self();
	int oldtype;

	oldtype = self->flags & THREAD_CANCEL_DEFERRED ?
	    PTHREAD_CANCEL_DEFERRED : PTHREAD_CANCEL_ASYNCHRONOUS;
	if (type == PTHREAD_CANCEL_DEFERRED) {
		_rthread_setflag(self, THREAD_CANCEL_DEFERRED);
	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		_rthread_clearflag(self, THREAD_CANCEL_DEFERRED);
	} else {
		return (EINVAL);
	}
	if (oldtypep)
		*oldtypep = oldtype;

	return (0);
}
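
/*
 * Sketch (illustrative only): asynchronous cancelation lets the
 * SIGTHR handler above exit the thread between any two instructions,
 * so it is only safe around code that holds no locks or resources.
 */
#if 0
static void *
spin(void *arg)
{
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	for (;;)
		continue;	/* may be canceled at any point */
}
#endif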

void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = calloc(1, sizeof(*clfn));
	if (!clfn)
		return;
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}

void
pthread_cleanup_pop(int execute)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = self->cleanup_fns;
	if (clfn) {
		self->cleanup_fns = clfn->next;
		if (execute)
			clfn->fn(clfn->arg);
		free(clfn);
	}
}
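
/*
 * Usage sketch (hypothetical function): pair push and pop so a buffer
 * is freed whether the thread is canceled inside the work or finishes
 * it; pthread_exit() above runs any handlers still on cleanup_fns.
 * Note that in this implementation pthread_cleanup_push() fails
 * silently if its calloc() fails.
 */
#if 0
static void *
copy_worker(void *arg)
{
	char *buf = malloc(BUFSIZ);

	if (buf == NULL)
		return (NULL);
	pthread_cleanup_push(free, buf);
	/* ... work that may hit a cancelation point ... */
	pthread_cleanup_pop(1);		/* pop and run free(buf) now */
	return (NULL);
}
#endif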

int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}

/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags %d name %s\n",
		    thread->tid, thread->flags, thread->name);
	_spinunlock(&_thread_lock);
}

#if defined(__ELF__) && defined(PIC)
/*
 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 * the function called via atexit() to invoke all destructors.  The latter
 * two call shared-object destructors, which may need to call dlclose(),
 * so this lock needs to permit recursive locking.
 * The specific code here was extracted from _rthread_mutex_lock() and
 * pthread_mutex_unlock() and simplified to use the static variables.
 */
void
_rthread_dl_lock(int what)
{
	static _spinlock_lock_t lock = _SPINLOCK_UNLOCKED;
	static pthread_t owner = NULL;
	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
	static int count = 0;

	if (what == 0) {
		pthread_t self = pthread_self();

		/* lock, possibly recursive */
		_spinlock(&lock);
		if (owner == NULL) {
			owner = self;
		} else if (owner != self) {
			TAILQ_INSERT_TAIL(&lockers, self, waiting);
			while (owner != self) {
				__thrsleep(self, 0, NULL, &lock, NULL);
				_spinlock(&lock);
			}
		}
		count++;
		_spinunlock(&lock);
	} else {
		/* unlock, possibly recursive */
		if (--count == 0) {
			pthread_t next;

			_spinlock(&lock);
			owner = next = TAILQ_FIRST(&lockers);
			if (next != NULL)
				TAILQ_REMOVE(&lockers, next, waiting);
			_spinunlock(&lock);
			if (next != NULL)
				__thrwakeup(next, 1);
		}
	}
}

void
_rthread_bind_lock(int what)
{
	static _spinlock_lock_t lock = _SPINLOCK_UNLOCKED;

	if (what == 0)
		_spinlock(&lock);
	else
		_spinunlock(&lock);
}
#endif