xref: /openbsd-src/lib/librthread/rthread.c (revision d874cce4b1d9fe6b41c9e4f2117a77d8a4a37b92)
1 /*	$OpenBSD: rthread.c,v 1.35 2008/06/05 21:06:11 kurt Exp $ */
2 /*
3  * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
4  * All Rights Reserved.
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 /*
19  * The heart of rthreads.  Basic functions like creating and joining
20  * threads.
21  */
22 
23 #include <sys/param.h>
24 #include <sys/event.h>
25 #include <sys/mman.h>
26 #include <sys/wait.h>
27 
28 #include <machine/spinlock.h>
29 
30 #include <fcntl.h>
31 #include <stdlib.h>
32 #include <unistd.h>
33 #include <signal.h>
34 #include <stdio.h>
35 #include <string.h>
36 #include <err.h>
37 #include <errno.h>
38 #include <dlfcn.h>
39 
40 #include <pthread.h>
41 
42 #include "thread_private.h"	/* in libc/include */
43 #include "rthread.h"
44 
static int concurrency_level;	/* not used */

int _threads_ready;		/* nonzero once _rthread_init() has completed */
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
_spinlock_lock_t _thread_lock = _SPINLOCK_UNLOCKED;	/* guards _thread_list */
struct pthread _initial_thread;	/* descriptor for the original (main) thread */

/* assembly stub that creates a kernel thread on a caller-supplied stack */
int rfork_thread(int, void *, void (*)(void *), void *);
53 
54 /*
55  * internal support functions
56  */
57 void
58 _spinlock(_spinlock_lock_t *lock)
59 {
60 
61 	while (_atomic_lock(lock))
62 		pthread_yield();
63 }
64 
/*
 * Release a spinlock by storing the unlocked value.
 * NOTE(review): no explicit memory barrier here -- presumably the plain
 * store is sufficient on all supported archs; confirm per-platform.
 */
void
_spinunlock(_spinlock_lock_t *lock)
{

	*lock = _SPINLOCK_UNLOCKED;
}
71 
72 static pthread_t
73 _rthread_findself(void)
74 {
75 	pthread_t me;
76 	pid_t tid = getthrid();
77 
78 	LIST_FOREACH(me, &_thread_list, threads)
79 		if (me->tid == tid)
80 			break;
81 
82 	return (me);
83 }
84 
85 
/*
 * Entry point for every new thread (handed to rfork_thread() by
 * pthread_create()).  Spins on _thread_lock until the creating thread
 * releases it, which guarantees thread->tid and the rest of the
 * descriptor are fully set up before the start routine runs.
 */
static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	/* ensure parent returns from rfork, sets up tid */
	_spinlock(&_thread_lock);
	_spinunlock(&_thread_lock);
	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}
98 
99 int
100 _rthread_open_kqueue(void)
101 {
102 	_rthread_kq = kqueue();
103 	if (_rthread_kq == -1)
104 		return 1;
105 	fcntl(_rthread_kq, F_SETFD, FD_CLOEXEC);
106 	return 0;
107 }
108 
/*
 * One-time library bootstrap, run lazily from pthread_self() /
 * pthread_create().  Turns the original process into the initial
 * thread, opens the internal kqueue, and flips libc into threaded
 * mode.  Returns 0 on success or an errno value.
 */
static int
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	extern int __isthreaded;

	thread->tid = getthrid();
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	if (_rthread_open_kqueue())
		return (errno);
	_rthread_debug_init();
	_rthread_debug(1, "rthread init\n");
	/* Order matters: only advertise threading once setup is complete. */
	_threads_ready = 1;
	__isthreaded = 1;

#if defined(__ELF__) && defined(PIC)
	/*
	 * To avoid recursion problems in ld.so, we need to trigger the
	 * functions once to fully bind them before registering them
	 * for use.
	 */
	_rthread_dl_lock(0);
	_rthread_dl_lock(1);
	_rthread_bind_lock(0);
	_rthread_bind_lock(1);
	dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
	dlctl(NULL, DL_SETBINDLCK, _rthread_bind_lock);
#endif

	return (0);
}
144 
145 static void
146 _rthread_free(pthread_t thread)
147 {
148 	/* catch wrongdoers for the moment */
149 	memset(thread, 0xd0, sizeof(*thread));
150 	if (thread != &_initial_thread)
151 		free(thread);
152 }
153 
/* Atomically set flag bits in thread->flags under its flags_lock. */
static void
_rthread_setflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags |= flag;
	_spinunlock(&thread->flags_lock);
}
161 
/* Atomically clear flag bits in thread->flags under its flags_lock. */
static void
_rthread_clearflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags &= ~flag;
	_spinunlock(&thread->flags_lock);
}
169 
170 /*
171  * real pthread functions
172  */
/*
 * Return the calling thread's descriptor, lazily bootstrapping the
 * library on first use.  Returns NULL only if initialization fails.
 */
pthread_t
pthread_self(void)
{
	pthread_t thread;

	if (!_threads_ready)
		if (_rthread_init())
			return (NULL);

	/* list walk must be done with the thread list locked */
	_spinlock(&_thread_lock);
	thread = _rthread_findself();
	_spinunlock(&_thread_lock);

	return (thread);
}
188 
/*
 * Terminate the calling thread: run cleanup handlers (LIFO), run TLS
 * destructors, unlink from the global list, wake any joiner, then hand
 * the tid and stack to the reaper and exit the kernel thread.
 * Never returns.
 */
void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	pid_t tid;
	struct stack *stack;
	pthread_t thread = pthread_self();

	thread->retval = retval;

	/* pop and invoke cleanup handlers, freeing each as we go */
	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

	/* wake a thread blocked in pthread_join() */
	_sem_post(&thread->donesem);

	/*
	 * NOTE(review): thread->stack/tid are read after the joiner has
	 * been woken; a joiner that runs _rthread_free() immediately
	 * could race these reads -- confirm the sem/flag ordering makes
	 * this safe.
	 */
	stack = thread->stack;
	tid = thread->tid;
	if (thread->flags & THREAD_DETACHED)
		_rthread_free(thread);
	else
		_rthread_setflag(thread, THREAD_DONE);

	/* the initial thread's stack was not allocated by us */
	if (tid != _initial_thread.tid)
		_rthread_add_to_reaper(tid, stack);

	_rthread_reaper();
	threxit(0);
	for(;;);
}
226 
/*
 * Wait for a thread to terminate and optionally collect its return
 * value.  Returns 0, EDEADLK for a self-join, or EINVAL if the target
 * is detached.
 * NOTE(review): flags are tested without flags_lock here; presumably a
 * concurrent pthread_detach() race is considered caller error --
 * confirm.
 */
int
pthread_join(pthread_t thread, void **retval)
{
	int e;

	if (thread->tid == getthrid())
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else {
		/* block until pthread_exit() posts donesem */
		_sem_wait(&thread->donesem, 0, 0);
		if (retval)
			*retval = thread->retval;
		e = 0;
		/* We should be the last having a ref to this thread, but
		 * someone stupid or evil might haved detached it;
		 * in that case the thread will cleanup itself */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	/* opportunistically recycle stacks of exited threads */
	_rthread_reaper();
	return (e);
}
251 
/*
 * Detach a thread so its resources are reclaimed without a join.
 * Three cases, decided under flags_lock: already detached (EINVAL),
 * already exited (free it now), or still running (mark it so
 * pthread_exit() frees it).
 */
int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		/* must drop the lock before freeing the struct it lives in */
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}
271 
/*
 * Create a new thread running start_routine(arg).  Allocates and fills
 * the descriptor, allocates a stack, then rfork()s the new kernel
 * thread while holding _thread_lock so the child (in _rthread_start())
 * cannot run user code before its tid is recorded here.
 * Returns 0 or an errno value.
 */
int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	pthread_t thread;
	pid_t tid;
	int rc = 0;

	if (!_threads_ready)
		if ((rc = _rthread_init()))
		    return (rc);

	thread = malloc(sizeof(*thread));
	if (!thread)
		return (errno);
	memset(thread, 0, sizeof(*thread));
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	thread->fn = start_routine;
	thread->arg = arg;
	if (attr)
		thread->attr = *(*attr);
	else {
		/* default attributes: default-sized stack minus guard */
		thread->attr.stack_size = RTHREAD_STACK_SIZE_DEF;
		thread->attr.guard_size = sysconf(_SC_PAGESIZE);
		thread->attr.stack_size -= thread->attr.guard_size;
	}
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;

	/* held until setup is complete; gates the child in _rthread_start() */
	_spinlock(&_thread_lock);

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}
	LIST_INSERT_HEAD(&_thread_list, thread, threads);

	tid = rfork_thread(RFPROC | RFTHREAD | RFMEM | RFNOWAIT,
	    thread->stack->sp, _rthread_start, thread);
	if (tid == -1) {
		rc = errno;
		goto fail2;
	}
	/* new thread will appear in _rthread_start */
	thread->tid = tid;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
	*threadp = thread;

	/*
	 * Since _rthread_start() acquires the thread lock and due to the way
	 * signal delivery is implemented, this is not a race.
	 */
	if (thread->attr.create_suspended)
		kill(thread->tid, SIGSTOP);

	_spinunlock(&_thread_lock);

	return (0);

fail2:
	_rthread_free_stack(thread->stack);
	LIST_REMOVE(thread, threads);
fail1:
	_spinunlock(&_thread_lock);
	_rthread_free(thread);

	return (rc);
}
342 
343 int
344 pthread_kill(pthread_t thread, int sig)
345 {
346 	return (kill(thread->tid, sig));
347 }
348 
349 int
350 pthread_equal(pthread_t t1, pthread_t t2)
351 {
352 	return (t1 == t2);
353 }
354 
/*
 * Request cancellation of a thread.  Only sets THREAD_CANCELLED; the
 * target acts on it when it next calls pthread_testcancel() with
 * cancellation enabled.  Always returns 0.
 */
int
pthread_cancel(pthread_t thread)
{

	_rthread_setflag(thread, THREAD_CANCELLED);
	return (0);
}
362 
/*
 * Cancellation point: exit with PTHREAD_CANCELED if this thread has
 * been cancelled and currently has cancellation enabled.
 */
void
pthread_testcancel(void)
{
	if ((pthread_self()->flags & (THREAD_CANCELLED|THREAD_CANCEL_ENABLE)) ==
	    (THREAD_CANCELLED|THREAD_CANCEL_ENABLE))
		pthread_exit(PTHREAD_CANCELED);

}
371 
372 int
373 pthread_setcancelstate(int state, int *oldstatep)
374 {
375 	pthread_t self = pthread_self();
376 	int oldstate;
377 
378 	oldstate = self->flags & THREAD_CANCEL_ENABLE ?
379 	    PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE;
380 	if (state == PTHREAD_CANCEL_ENABLE) {
381 		_rthread_setflag(self, THREAD_CANCEL_ENABLE);
382 		pthread_testcancel();
383 	} else if (state == PTHREAD_CANCEL_DISABLE) {
384 		_rthread_clearflag(self, THREAD_CANCEL_ENABLE);
385 	} else {
386 		return (EINVAL);
387 	}
388 	if (oldstatep)
389 		*oldstatep = oldstate;
390 
391 	return (0);
392 }
393 
394 int
395 pthread_setcanceltype(int type, int *oldtypep)
396 {
397 	pthread_t self = pthread_self();
398 	int oldtype;
399 
400 	oldtype = self->flags & THREAD_CANCEL_DEFERRED ?
401 	    PTHREAD_CANCEL_DEFERRED : PTHREAD_CANCEL_ASYNCHRONOUS;
402 	if (type == PTHREAD_CANCEL_DEFERRED) {
403 		_rthread_setflag(self, THREAD_CANCEL_DEFERRED);
404 		pthread_testcancel();
405 	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
406 		_rthread_clearflag(self, THREAD_CANCEL_DEFERRED);
407 	} else {
408 		return (EINVAL);
409 	}
410 	if (oldtypep)
411 		*oldtypep = oldtype;
412 
413 	return (0);
414 }
415 
416 void
417 pthread_cleanup_push(void (*fn)(void *), void *arg)
418 {
419 	struct rthread_cleanup_fn *clfn;
420 	pthread_t self = pthread_self();
421 
422 	clfn = malloc(sizeof(*clfn));
423 	if (!clfn)
424 		return;
425 	memset(clfn, 0, sizeof(*clfn));
426 	clfn->fn = fn;
427 	clfn->arg = arg;
428 	clfn->next = self->cleanup_fns;
429 	self->cleanup_fns = clfn;
430 }
431 
432 void
433 pthread_cleanup_pop(int execute)
434 {
435 	struct rthread_cleanup_fn *clfn;
436 	pthread_t self = pthread_self();
437 
438 	clfn = self->cleanup_fns;
439 	if (clfn) {
440 		self->cleanup_fns = clfn->next;
441 		if (execute)
442 			clfn->fn(clfn->arg);
443 		free(clfn);
444 	}
445 }
446 
/* Return the hint last stored by pthread_setconcurrency() (unused here). */
int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}
452 
453 int
454 pthread_setconcurrency(int new_level)
455 {
456 	if (new_level < 0)
457 		return (EINVAL);
458 	concurrency_level = new_level;
459 	return (0);
460 }
461 
462 /*
463  * compat debug stuff
464  */
/*
 * Debug aid: print tid, flags, and name of every known thread to
 * stdout, holding _thread_lock for the duration of the walk.
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags %d name %s\n",
		    thread->tid, thread->flags, thread->name);
	_spinunlock(&_thread_lock);
}
476 
477 #if defined(__ELF__) && defined(PIC)
478 void
479 _rthread_dl_lock(int what)
480 {
481 	static _spinlock_lock_t lock = _SPINLOCK_UNLOCKED;
482 
483 	if (what == 0)
484 		_spinlock(&lock);
485 	else
486 		_spinunlock(&lock);
487 }
488 
489 void
490 _rthread_bind_lock(int what)
491 {
492 	static _spinlock_lock_t lock = _SPINLOCK_UNLOCKED;
493 
494 	if (what == 0)
495 		_spinlock(&lock);
496 	else
497 		_spinunlock(&lock);
498 }
499 #endif
500