/*	$OpenBSD: rthread.c,v 1.40 2009/02/20 01:24:05 tedu Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */
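/*
 * Typical use goes through the standard pthread API implemented below
 * (minimal sketch; error handling omitted):
 *
 *	void *worker(void *arg) { return (arg); }
 *
 *	pthread_t t;
 *	void *res;
 *	pthread_create(&t, NULL, worker, NULL);
 *	pthread_join(t, &res);
 */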

#include <sys/param.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/wait.h>

#include <machine/spinlock.h>

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <dlfcn.h>

#include <pthread.h>

#include "thread_private.h"	/* in libc/include */
#include "rthread.h"

static int concurrency_level;	/* not used */

int _threads_ready;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
_spinlock_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
struct pthread _initial_thread;

int rfork_thread(int, void *, void (*)(void *), void *);

/*
 * internal support functions
 */
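/*
 * Busy-wait spinlock built on the machine-dependent _atomic_lock()
 * primitive; the CPU is yielded between attempts so a contended lock does
 * not spin for a full timeslice.  Unlocking is a plain store of
 * _SPINLOCK_UNLOCKED.
 */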
void
_spinlock(_spinlock_lock_t *lock)
{

	while (_atomic_lock(lock))
		pthread_yield();
}

void
_spinunlock(_spinlock_lock_t *lock)
{

	*lock = _SPINLOCK_UNLOCKED;
}

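/*
 * Map the kernel thread id returned by getthrid() to its struct pthread.
 * The caller is expected to hold _thread_lock while the list is walked.
 */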
static pthread_t
_rthread_findself(void)
{
	pthread_t me;
	pid_t tid = getthrid();

	LIST_FOREACH(me, &_thread_list, threads)
		if (me->tid == tid)
			break;

	return (me);
}

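/*
 * Entry point of every new thread.  The parent holds _thread_lock across
 * rfork_thread() while it fills in thread->tid, so the lock/unlock pair
 * below simply waits for that setup to finish before running the start
 * routine.
 */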
static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	/* ensure parent returns from rfork, sets up tid */
	_spinlock(&_thread_lock);
	_spinunlock(&_thread_lock);
	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}

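/*
 * Open the kqueue shared by the rest of librthread and mark it
 * close-on-exec.  Returns non-zero on failure, with errno set by kqueue().
 */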
int
_rthread_open_kqueue(void)
{
	_rthread_kq = kqueue();
	if (_rthread_kq == -1)
		return 1;
	fcntl(_rthread_kq, F_SETFD, FD_CLOEXEC);
	return 0;
}

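/*
 * One-time library setup, run lazily from pthread_self()/pthread_create():
 * describe the initial (main) thread, put it on _thread_list, open the
 * kqueue, flip __isthreaded so libc switches to its thread-safe paths, and
 * on PIC builds hand the dynamic-linker locking callbacks to ld.so.
 */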
static int
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	extern int __isthreaded;

	thread->tid = getthrid();
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	if (_rthread_open_kqueue())
		return (errno);
	_rthread_debug_init();
	_rthread_debug(1, "rthread init\n");
	_threads_ready = 1;
	__isthreaded = 1;

#if defined(__ELF__) && defined(PIC)
	/*
	 * To avoid recursion problems in ld.so, we need to trigger the
	 * functions once to fully bind them before registering them
	 * for use.
	 */
	_rthread_dl_lock(0);
	_rthread_dl_lock(1);
	_rthread_bind_lock(0);
	_rthread_bind_lock(1);
	dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
	dlctl(NULL, DL_SETBINDLCK, _rthread_bind_lock);
#endif

	return (0);
}

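/*
 * Release a thread descriptor.  The initial thread is static and its tid
 * must stay valid for the lifetime of the process, so it is never freed;
 * other descriptors are filled with 0xd0 first so use-after-free bugs fail
 * loudly.
 */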
static void
_rthread_free(pthread_t thread)
{
	/* catch wrongdoers for the moment */
	if (thread != &_initial_thread) {
		/* initial_thread.tid must remain valid */
		memset(thread, 0xd0, sizeof(*thread));
		free(thread);
	}
}

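/*
 * Set or clear bits in thread->flags under the per-thread flags_lock so
 * concurrent updates from other threads cannot lose bits.
 */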
static void
_rthread_setflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags |= flag;
	_spinunlock(&thread->flags_lock);
}

static void
_rthread_clearflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags &= ~flag;
	_spinunlock(&thread->flags_lock);
}

/*
 * real pthread functions
 */
pthread_t
pthread_self(void)
{
	pthread_t thread;

	if (!_threads_ready)
		if (_rthread_init())
			return (NULL);

	_spinlock(&_thread_lock);
	thread = _rthread_findself();
	_spinunlock(&_thread_lock);

	return (thread);
}

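/*
 * Run the pushed cleanup handlers (most recently pushed first) and the TLS
 * destructors, unlink the thread, then either free the descriptor
 * (detached) or post donesem so a joiner can collect the return value.
 * The stack and tid are handed to the reaper rather than freed here, since
 * the exiting thread is still running on that stack.
 */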
void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	pid_t tid;
	struct stack *stack;
	pthread_t thread = pthread_self();

	thread->retval = retval;

	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

	stack = thread->stack;
	tid = thread->tid;
	if (thread->flags & THREAD_DETACHED)
		_rthread_free(thread);
	else {
		_rthread_setflag(thread, THREAD_DONE);
		_sem_post(&thread->donesem);
	}

	/* reap before adding self, we don't want to disappear too soon */
	_rthread_reaper();
	if (tid != _initial_thread.tid)
		_rthread_add_to_reaper(tid, stack);

	threxit(0);
	for(;;);
}

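/*
 * Wait on the target's donesem (posted by pthread_exit()), copy out the
 * return value, and free the descriptor unless the thread was detached in
 * the meantime.
 */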
int
pthread_join(pthread_t thread, void **retval)
{
	int e;

	if (thread == NULL)
		e = EINVAL;
	else if (thread->tid == getthrid())
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else {
		_sem_wait(&thread->donesem, 0, 0);
		if (retval)
			*retval = thread->retval;
		e = 0;
		/*
		 * We should be the last one holding a reference to this
		 * thread, but someone stupid or evil might have detached it;
		 * in that case the thread will clean up after itself.
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	_rthread_reaper();
	return (e);
}

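/*
 * Mark the thread detached; if it has already finished (THREAD_DONE), free
 * its descriptor right away since nobody will join it.
 */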
int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}

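/*
 * Allocate and initialize a descriptor and stack, link the thread into
 * _thread_list, then rfork_thread() with RFPROC|RFTHREAD|RFMEM|RFNOWAIT so
 * the child shares the address space.  _thread_lock is held across the
 * rfork so the child, which starts in _rthread_start(), does not run until
 * thread->tid has been filled in.
 */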
int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	pthread_t thread;
	pid_t tid;
	int rc = 0;

	if (!_threads_ready)
		if ((rc = _rthread_init()))
		    return (rc);

	thread = calloc(1, sizeof(*thread));
	if (!thread)
		return (errno);
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	thread->fn = start_routine;
	thread->arg = arg;
	if (attr)
		thread->attr = *(*attr);
	else {
		thread->attr.stack_size = RTHREAD_STACK_SIZE_DEF;
		thread->attr.guard_size = sysconf(_SC_PAGESIZE);
		thread->attr.stack_size -= thread->attr.guard_size;
	}
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;

	_spinlock(&_thread_lock);

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}
	LIST_INSERT_HEAD(&_thread_list, thread, threads);

	tid = rfork_thread(RFPROC | RFTHREAD | RFMEM | RFNOWAIT,
	    thread->stack->sp, _rthread_start, thread);
	if (tid == -1) {
		rc = errno;
		goto fail2;
	}
	/* the new thread will appear in _rthread_start() */
	thread->tid = tid;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
	*threadp = thread;

	/*
	 * Since _rthread_start() acquires the thread lock and due to the way
	 * signal delivery is implemented, this is not a race.
	 */
	if (thread->attr.create_suspended)
		kill(thread->tid, SIGSTOP);

	_spinunlock(&_thread_lock);

	return (0);

fail2:
	_rthread_free_stack(thread->stack);
	LIST_REMOVE(thread, threads);
fail1:
	_spinunlock(&_thread_lock);
	_rthread_free(thread);

	return (rc);
}

int
pthread_kill(pthread_t thread, int sig)
{
	return (kill(thread->tid, sig));
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}

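/*
 * Cancellation is cooperative: pthread_cancel() only sets THREAD_CANCELLED;
 * the target actually exits the next time it calls pthread_testcancel()
 * (directly or via a cancellation point) with cancellation enabled.
 */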
int
pthread_cancel(pthread_t thread)
{

	_rthread_setflag(thread, THREAD_CANCELLED);
	return (0);
}

void
pthread_testcancel(void)
{
	if ((pthread_self()->flags & (THREAD_CANCELLED|THREAD_CANCEL_ENABLE)) ==
	    (THREAD_CANCELLED|THREAD_CANCEL_ENABLE))
		pthread_exit(PTHREAD_CANCELED);

}

int
pthread_setcancelstate(int state, int *oldstatep)
{
	pthread_t self = pthread_self();
	int oldstate;

	oldstate = self->flags & THREAD_CANCEL_ENABLE ?
	    PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE;
	if (state == PTHREAD_CANCEL_ENABLE) {
		_rthread_setflag(self, THREAD_CANCEL_ENABLE);
		pthread_testcancel();
	} else if (state == PTHREAD_CANCEL_DISABLE) {
		_rthread_clearflag(self, THREAD_CANCEL_ENABLE);
	} else {
		return (EINVAL);
	}
	if (oldstatep)
		*oldstatep = oldstate;

	return (0);
}

int
pthread_setcanceltype(int type, int *oldtypep)
{
	pthread_t self = pthread_self();
	int oldtype;

	oldtype = self->flags & THREAD_CANCEL_DEFERRED ?
	    PTHREAD_CANCEL_DEFERRED : PTHREAD_CANCEL_ASYNCHRONOUS;
	if (type == PTHREAD_CANCEL_DEFERRED) {
		_rthread_setflag(self, THREAD_CANCEL_DEFERRED);
		pthread_testcancel();
	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		_rthread_clearflag(self, THREAD_CANCEL_DEFERRED);
	} else {
		return (EINVAL);
	}
	if (oldtypep)
		*oldtypep = oldtype;

	return (0);
}

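/*
 * Cleanup handlers are kept in a per-thread LIFO list and run by
 * pthread_exit() most-recently-pushed first.  These are real functions
 * rather than the macro pair POSIX permits, and a failed allocation in
 * push silently drops the handler.
 */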
void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = calloc(1, sizeof(*clfn));
	if (!clfn)
		return;
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}

void
pthread_cleanup_pop(int execute)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = self->cleanup_fns;
	if (clfn) {
		self->cleanup_fns = clfn->next;
		if (execute)
			clfn->fn(clfn->arg);
		free(clfn);
	}
}

int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}

/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags %d name %s\n",
		    thread->tid, thread->flags, thread->name);
	_spinunlock(&_thread_lock);
}

#if defined(__ELF__) && defined(PIC)
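/*
 * Locking callbacks registered with ld.so via dlctl() in _rthread_init():
 * a zero argument takes the lock, anything else releases it.
 */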
void
_rthread_dl_lock(int what)
{
	static _spinlock_lock_t lock = _SPINLOCK_UNLOCKED;

	if (what == 0)
		_spinlock(&lock);
	else
		_spinunlock(&lock);
}

void
_rthread_bind_lock(int what)
{
	static _spinlock_lock_t lock = _SPINLOCK_UNLOCKED;

	if (what == 0)
		_spinlock(&lock);
	else
		_spinunlock(&lock);
}
#endif
503