/*	$OpenBSD: rthread.c,v 1.83 2015/05/19 20:50:06 guenther Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */

#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/msg.h>
#ifndef NO_PIC
#include <sys/exec_elf.h>
#pragma weak _DYNAMIC
#endif

#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <poll.h>

#include <pthread.h>

#include "thread_private.h"	/* in libc/include */
#include "rthread.h"
#include "tcb.h"

static int concurrency_level;	/* not used */

struct _spinlock _SPINLOCK_UNLOCKED_ASSIGN = _SPINLOCK_UNLOCKED;

int _threads_ready;
size_t _thread_pagesize;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
struct _spinlock _thread_lock = _SPINLOCK_UNLOCKED;
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static struct _spinlock _thread_gc_lock = _SPINLOCK_UNLOCKED;
static struct pthread _initial_thread;
static struct thread_control_block _initial_thread_tcb;

struct pthread_attr _rthread_attr_default = {
	.stack_addr			= NULL,
	.stack_size			= RTHREAD_STACK_SIZE_DEF,
/*	.guard_size		set in _rthread_init */
	.detach_state			= PTHREAD_CREATE_JOINABLE,
	.contention_scope		= PTHREAD_SCOPE_SYSTEM,
	.sched_policy			= SCHED_OTHER,
	.sched_param = { .sched_priority = 0 },
	.sched_inherit			= PTHREAD_INHERIT_SCHED,
};

/*
 * internal support functions
 */
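/*
 * _spinlock()/_spinlocktry()/_spinunlock() are the low-level locks used
 * throughout librthread: _atomic_lock() tries to take the ticket, a
 * contended locker yields the CPU instead of busy-spinning, and unlock
 * is a plain store of _ATOMIC_LOCK_UNLOCKED.
 */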
void
_spinlock(volatile struct _spinlock *lock)
{
	while (_atomic_lock(&lock->ticket))
		sched_yield();
}

int
_spinlocktry(volatile struct _spinlock *lock)
{
	return 0 == _atomic_lock(&lock->ticket);
}

void
_spinunlock(volatile struct _spinlock *lock)
{
	lock->ticket = _ATOMIC_LOCK_UNLOCKED;
}

/*
 * This sets up the thread base for the initial thread so that it
 * references the errno location provided by libc.  For other threads
 * this is handled by __tfork_thread()
 */
void _rthread_initlib(void) __attribute__((constructor));
void
_rthread_initlib(void)
{
	static int tcb_set;
	struct thread_control_block *tcb;

	if (__predict_false(tcb_set == 0) && __get_tcb() == NULL) {
		tcb_set = 1;

		/* use libc's errno for the main thread */
		tcb = &_initial_thread_tcb;
		TCB_INIT(tcb, &_initial_thread, ___errno());
		TCB_SET(tcb);
	}
}

/*
 * This is invoked by ___start() in crt0.  Eventually, when ld.so handles
 * TCB setup for dynamic executables, this will only be called to handle
 * the TCB setup for static executables and may migrate to libc.  The
 * envp argument is so that it can (someday) use that to find the Auxinfo
 * array and thus the ELF phdr and the PT_TLS info.
 */
void __init_tcb(char **_envp);
void
__init_tcb(__unused char **envp)
{
	_rthread_initlib();
}

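/*
 * __errno() overrides libc's internal version (see the __libc_overrides
 * table at the end of this file) so that each thread resolves errno
 * through its own TCB.
 */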
int *
__errno(void)
{
	return (TCB_ERRNOPTR());
}

static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}

/* ARGSUSED0 */
static void
sigthr_handler(__unused int sig)
{
	pthread_t self = pthread_self();

	/*
	 * Do nothing unless
	 * 1) pthread_cancel() has been called on this thread,
	 * 2) cancelation is enabled for it, and
	 * 3) we're not already in cancelation processing
	 */
	if ((self->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE|THREAD_DYING))
	    != (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
		return;

	/*
	 * If delaying cancels inside complex ops (pthread_cond_wait,
	 * pthread_join, etc), just mark that this has happened to
	 * prevent a race with going to sleep
	 */
	if (self->flags & THREAD_CANCEL_DELAY) {
		self->delayed_cancel = 1;
		return;
	}

	/*
	 * otherwise, if in a cancel point or async cancels are
	 * enabled, then exit
	 */
	if (self->cancel_point || (self->flags & THREAD_CANCEL_DEFERRED) == 0)
		pthread_exit(PTHREAD_CANCELED);
}

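/*
 * One-time process setup, run lazily from pthread_self() or
 * pthread_create() the first time threading is used: register the main
 * thread, pick up the page size for the default stack guard, hook the
 * dlopen()/lazy-binding locks into ld.so, and install the SIGTHR
 * handler used for cancelation.
 */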
int
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	struct sigaction sa;

	thread->tid = getthrid();
	thread->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->flags |= THREAD_CANCEL_ENABLE | THREAD_CANCEL_DEFERRED |
	    THREAD_ORIGINAL | THREAD_INITIAL_STACK;
	thread->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_rthread_debug_init();

	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	_rthread_attr_default.guard_size = _thread_pagesize;
	thread->attr = _rthread_attr_default;

	_rthread_initlib();

	_threads_ready = 1;

	_rthread_debug(1, "rthread init\n");

#ifndef NO_PIC
	if (_DYNAMIC) {
		/*
		 * To avoid recursion problems in ld.so, we need to trigger the
		 * functions once to fully bind them before registering them
		 * for use.
		 */
		_rthread_dl_lock(0);
		_rthread_dl_lock(1);
		_rthread_bind_lock(0);
		_rthread_bind_lock(1);
		sched_yield();
		dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
		dlctl(NULL, DL_SETBINDLCK, _rthread_bind_lock);
	}
#endif

	/*
	 * Set the handler on the signal used for cancelation and
	 * suspension, and make sure it's unblocked
	 */
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = sigthr_handler;
	_thread_sys_sigaction(SIGTHR, &sa, NULL);
	sigaddset(&sa.sa_mask, SIGTHR);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);

	return (0);
}

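/*
 * Dead threads are not freed immediately: they are queued on
 * _thread_gc_list and released by _rthread_reaper() only after the
 * kernel has zeroed thread->tid in __threxit(), i.e. once the thread
 * can no longer be running on its stack.
 */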
static void
_rthread_free(pthread_t thread)
{
	/* _initial_thread is static, so don't free it */
	if (thread != &_initial_thread) {
		/*
		 * thread->tid is written to by __threxit in the thread
		 * itself, so it's not safe to touch it here
		 */
		_spinlock(&_thread_gc_lock);
		TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
	}
}

void
_rthread_setflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags |= flag;
	_spinunlock(&thread->flags_lock);
}

void
_rthread_clearflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags &= ~flag;
	_spinunlock(&thread->flags_lock);
}

/*
 * real pthread functions
 */
pthread_t
pthread_self(void)
{
	if (!_threads_ready)
		if (_rthread_init())
			return (NULL);

	return (TCB_THREAD());
}

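/*
 * Called from pthread_join(), pthread_detach(), and pthread_create() to
 * free any queued threads whose tid has been cleared by the kernel.
 * The gc lock is dropped while an entry is freed, so the scan restarts
 * after each reclaimed thread.
 */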
static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:
	_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		if (thread->tid != 0)
			continue;
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		_rthread_debug(3, "rthread reaping %p stack %p\n",
		    (void *)thread, (void *)thread->stack);
		_rthread_free_stack(thread->stack);
		_rtld_free_tls(thread->arg,
		    sizeof(struct thread_control_block), sizeof(void *));
		free(thread);
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}

void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t thread = pthread_self();

	if (thread->flags & THREAD_DYING) {
		/*
		 * Called pthread_exit() from destructor or cancelation
		 * handler: blow up.  XXX write something to stderr?
		 */
		_exit(42);
	}

	_rthread_setflag(thread, THREAD_DYING);

	thread->retval = retval;

	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

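	/*
	 * The start argument is no longer needed, so thread->arg is
	 * reused to stash the TCB pointer; _rthread_reaper() passes it
	 * back to _rtld_free_tls() once this thread has exited.
	 */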
#ifdef TCB_GET
	thread->arg = TCB_GET();
#else
	thread->arg = __get_tcb();
#endif
	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DONE;
		_spinunlock(&thread->flags_lock);
		_sem_post(&thread->donesem);
	}

	__threxit(&thread->tid);
	for(;;);
}

int
pthread_join(pthread_t thread, void **retval)
{
	int e;
	pthread_t self = pthread_self();

	e = 0;
	_enter_delayed_cancel(self);
	if (thread == NULL)
		e = EINVAL;
	else if (thread == self)
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
	    &self->delayed_cancel)) == 0) {
		if (retval)
			*retval = thread->retval;

		/*
		 * We should be the last one holding a ref to this thread,
		 * but someone stupid or evil might have detached it;
		 * in that case the thread will clean up after itself
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	_leave_delayed_cancel(self, e);
	_rthread_reaper();
	return (e);
}

int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}

int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	extern int __isthreaded;
	struct thread_control_block *tcb;
	pthread_t thread;
	struct __tfork param;
	int rc = 0;

	if (!_threads_ready)
		if ((rc = _rthread_init()))
		    return (rc);

	_rthread_reaper();

	thread = calloc(1, sizeof(*thread));
	if (!thread)
		return (errno);
	thread->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->fn = start_routine;
	thread->arg = arg;
	thread->tid = -1;

	thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
	if (thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		pthread_t self = pthread_self();

		thread->attr.sched_policy = self->attr.sched_policy;
		thread->attr.sched_param = self->attr.sched_param;
	}
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}

	tcb = _rtld_allocate_tls(NULL, sizeof(*tcb), sizeof(void *));
	if (tcb == NULL) {
		rc = errno;
		goto fail2;
	}
	TCB_INIT(tcb, thread, &thread->myerrno);

	param.tf_tcb = tcb;
	param.tf_tid = &thread->tid;
	param.tf_stack = thread->stack->sp;

	_spinlock(&_thread_lock);
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_spinunlock(&_thread_lock);

	/* we're going to be multi-threaded real soon now */
	__isthreaded = 1;
	rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
	if (rc != -1) {
		/* success */
		*threadp = thread;
		return (0);
	}

	rc = errno;

	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);
	_rtld_free_tls(tcb, sizeof(*tcb), sizeof(void *));
fail2:
	_rthread_free_stack(thread->stack);
fail1:
	_rthread_free(thread);

	return (rc);
}
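
/*
 * Illustrative caller-side sketch (not part of the library): create a
 * joinable thread running a hypothetical worker() function, then wait
 * for its result.
 *
 *	static void *worker(void *arg) { return (arg); }
 *
 *	pthread_t t;
 *	void *res;
 *	if (pthread_create(&t, NULL, worker, "hello") == 0 &&
 *	    pthread_join(t, &res) == 0)
 *		printf("%s\n", (char *)res);
 */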

int
pthread_kill(pthread_t thread, int sig)
{
	pid_t tid;
	int ret;

	/* killing myself?  do it without locking */
	if (thread == TCB_THREAD())
		return (kill(thread->tid, sig) == 0 ? 0 : errno);

	/* block the other thread from exiting */
	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DYING)
		ret = (thread->flags & THREAD_DETACHED) ? ESRCH : 0;
	else {
		tid = thread->tid;
		if (tid == 0) {
			/* should be impossible without DYING being set */
			ret = ESRCH;
		} else
			ret = kill(tid, sig) == 0 ? 0 : errno;
	}
	_spinunlock(&thread->flags_lock);
	return (ret);
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}

int
pthread_cancel(pthread_t thread)
{
	pid_t tid;

	_spinlock(&thread->flags_lock);
	tid = thread->tid;
	if ((thread->flags & (THREAD_DYING | THREAD_CANCELED)) == 0 &&
	    tid != 0) {
		thread->flags |= THREAD_CANCELED;

		if (thread->flags & THREAD_CANCEL_ENABLE) {

			/* canceling myself?  release the lock first */
			if (thread == TCB_THREAD()) {
				_spinunlock(&thread->flags_lock);
				kill(tid, SIGTHR);
				return (0);
			}

			kill(tid, SIGTHR);
		}
	}
	_spinunlock(&thread->flags_lock);
	return (0);
}
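
/*
 * Cancelation is delivered by sending SIGTHR to the target; the actual
 * decision to exit, record a delayed cancel, or do nothing is made in
 * sigthr_handler() above, in the context of the target thread.
 */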

void
pthread_testcancel(void)
{
	if ((pthread_self()->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE)) ==
	    (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
		pthread_exit(PTHREAD_CANCELED);

}

int
pthread_setcancelstate(int state, int *oldstatep)
{
	pthread_t self = pthread_self();
	int oldstate;

	oldstate = self->flags & THREAD_CANCEL_ENABLE ?
	    PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE;
	if (state == PTHREAD_CANCEL_ENABLE) {
		_rthread_setflag(self, THREAD_CANCEL_ENABLE);
	} else if (state == PTHREAD_CANCEL_DISABLE) {
		_rthread_clearflag(self, THREAD_CANCEL_ENABLE);
	} else {
		return (EINVAL);
	}
	if (oldstatep)
		*oldstatep = oldstate;

	return (0);
}

int
pthread_setcanceltype(int type, int *oldtypep)
{
	pthread_t self = pthread_self();
	int oldtype;

	oldtype = self->flags & THREAD_CANCEL_DEFERRED ?
	    PTHREAD_CANCEL_DEFERRED : PTHREAD_CANCEL_ASYNCHRONOUS;
	if (type == PTHREAD_CANCEL_DEFERRED) {
		_rthread_setflag(self, THREAD_CANCEL_DEFERRED);
	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		_rthread_clearflag(self, THREAD_CANCEL_DEFERRED);
	} else {
		return (EINVAL);
	}
	if (oldtypep)
		*oldtypep = oldtype;

	return (0);
}

void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = calloc(1, sizeof(*clfn));
	if (!clfn)
		return;
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}

void
pthread_cleanup_pop(int execute)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = self->cleanup_fns;
	if (clfn) {
		self->cleanup_fns = clfn->next;
		if (execute)
			clfn->fn(clfn->arg);
		free(clfn);
	}
}
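
/*
 * Illustrative use of the cleanup handlers (caller-side sketch; the
 * unlock_mutex() handler and mutex m are hypothetical).  The handler
 * runs if the thread is canceled or calls pthread_exit() inside the
 * protected region, or when popped with a nonzero argument:
 *
 *	pthread_mutex_lock(&m);
 *	pthread_cleanup_push(unlock_mutex, &m);
 *	... work that may hit a cancelation point ...
 *	pthread_cleanup_pop(1);		executes unlock_mutex(&m)
 */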

int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}

/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags %d name %s\n",
		    thread->tid, thread->flags, thread->name);
	_spinunlock(&_thread_lock);
}

#ifndef NO_PIC
/*
 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 * the function called via atexit() to invoke all destructors.  The latter
 * two call shared-object destructors, which may need to call dlclose(),
 * so this lock needs to permit recursive locking.
 * The specific code here was extracted from _rthread_mutex_lock() and
 * pthread_mutex_unlock() and simplified to use the static variables.
 */
void
_rthread_dl_lock(int what)
{
	static struct _spinlock lock = _SPINLOCK_UNLOCKED;
	static pthread_t owner = NULL;
	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
	static int count = 0;

	if (what == 0) {
		pthread_t self = pthread_self();

		/* lock, possibly recursive */
		_spinlock(&lock);
		if (owner == NULL) {
			owner = self;
		} else if (owner != self) {
			TAILQ_INSERT_TAIL(&lockers, self, waiting);
			while (owner != self) {
				__thrsleep(self, 0 | _USING_TICKETS, NULL,
				    &lock.ticket, NULL);
				_spinlock(&lock);
			}
		}
		count++;
		_spinunlock(&lock);
	} else if (what == 1) {
		/* unlock, possibly recursive */
		if (--count == 0) {
			pthread_t next;

			_spinlock(&lock);
			owner = next = TAILQ_FIRST(&lockers);
			if (next != NULL)
				TAILQ_REMOVE(&lockers, next, waiting);
			_spinunlock(&lock);
			if (next != NULL)
				__thrwakeup(next, 1);
		}
	} else {
		/* reinit: used in child after fork to clear the queue */
		lock = _SPINLOCK_UNLOCKED_ASSIGN;
		if (--count == 0)
			owner = NULL;
		TAILQ_INIT(&lockers);
	}
}

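/*
 * Simple non-recursive lock registered with ld.so via DL_SETBINDLCK in
 * _rthread_init(); it serializes lazy symbol binding once the process
 * goes multi-threaded.
 */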
void
_rthread_bind_lock(int what)
{
	static struct _spinlock lock = _SPINLOCK_UNLOCKED;

	if (what == 0)
		_spinlock(&lock);
	else
		_spinunlock(&lock);
}
#endif


/*
 * XXX: Bogus type signature, but we only need to be able to emit a
 * reference to it below.
 */
extern void __cerror(void);

/*
 * All weak references used within libc that are redefined in libpthread
 * MUST be in this table.   This is necessary to force the proper version to
 * be used when linking -static.
 */
static void *__libc_overrides[] __used = {
	&__cerror,
	&__errno,
	&_thread_arc4_lock,
	&_thread_arc4_unlock,
	&_thread_atexit_lock,
	&_thread_atexit_unlock,
	&_thread_atfork_lock,
	&_thread_atfork_unlock,
	&_thread_malloc_lock,
	&_thread_malloc_unlock,
	&_thread_mutex_destroy,
	&_thread_mutex_lock,
	&_thread_mutex_unlock,
	&_thread_tag_lock,
	&_thread_tag_storage,
	&_thread_tag_unlock,
	&accept,
	&close,
	&closefrom,
	&connect,
	&fcntl,
	&flockfile,
	&fork,
	&fsync,
	&ftrylockfile,
	&funlockfile,
	&msgrcv,
	&msgsnd,
	&msync,
	&nanosleep,
	&open,
	&openat,
	&poll,
	&pread,
	&preadv,
	&pwrite,
	&pwritev,
	&read,
	&readv,
	&recvfrom,
	&recvmsg,
	&select,
	&sendmsg,
	&sendto,
	&sigaction,
	&sigprocmask,
	&sigsuspend,
	&vfork,
	&wait4,
	&write,
	&writev,
};