/*	$OpenBSD: rthread.c,v 1.79 2014/11/16 05:26:20 guenther Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */

#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/msg.h>
#if defined(__ELF__)
#include <sys/exec_elf.h>
#pragma weak _DYNAMIC
#endif

#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <poll.h>

#include <pthread.h>

#include "thread_private.h"	/* in libc/include */
#include "rthread.h"
#include "tcb.h"

static int concurrency_level;	/* not used */

struct _spinlock _SPINLOCK_UNLOCKED_ASSIGN = _SPINLOCK_UNLOCKED;

int _threads_ready;
size_t _thread_pagesize;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
struct _spinlock _thread_lock = _SPINLOCK_UNLOCKED;
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static struct _spinlock _thread_gc_lock = _SPINLOCK_UNLOCKED;
static struct pthread _initial_thread;
static struct thread_control_block _initial_thread_tcb;

struct pthread_attr _rthread_attr_default = {
	.stack_addr			= NULL,
	.stack_size			= RTHREAD_STACK_SIZE_DEF,
/*	.guard_size		set in _rthread_init */
	.detach_state			= PTHREAD_CREATE_JOINABLE,
	.contention_scope		= PTHREAD_SCOPE_SYSTEM,
	.sched_policy			= SCHED_OTHER,
	.sched_param = { .sched_priority = 0 },
	.sched_inherit			= PTHREAD_INHERIT_SCHED,
};
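
/*
 * Illustrative sketch (not part of this file): these defaults are what a
 * thread created with a NULL attribute pointer picks up.  An application
 * that wants, say, a larger stack overrides them through the standard
 * attribute calls; "worker" here is a hypothetical start routine:
 *
 *	pthread_attr_t attr;
 *	pthread_t t;
 *
 *	pthread_attr_init(&attr);
 *	pthread_attr_setstacksize(&attr, 1024 * 1024);
 *	pthread_create(&t, &attr, worker, NULL);
 *	pthread_attr_destroy(&attr);
 */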

/*
 * internal support functions
 */
void
_spinlock(volatile struct _spinlock *lock)
{
	while (_atomic_lock(&lock->ticket))
		sched_yield();
}

int
_spinlocktry(volatile struct _spinlock *lock)
{
	return 0 == _atomic_lock(&lock->ticket);
}

void
_spinunlock(volatile struct _spinlock *lock)
{
	lock->ticket = _ATOMIC_LOCK_UNLOCKED;
}
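
/*
 * Usage sketch (illustrative only): throughout this file these primitives
 * bracket short critical sections, in the pattern used by
 * _rthread_setflag() below:
 *
 *	_spinlock(&thread->flags_lock);
 *	thread->flags |= flag;
 *	_spinunlock(&thread->flags_lock);
 *
 * _spinlocktry() is the non-blocking variant: it returns non-zero when
 * the lock was acquired and zero when it was already held.
 */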

/*
 * This sets up the thread base for the initial thread so that it
 * references the errno location provided by libc.  For other threads
 * this is handled by __tfork_thread()
 */
void _rthread_initlib(void) __attribute__((constructor));
void
_rthread_initlib(void)
{
	static int tcb_set;
	struct thread_control_block *tcb;

	if (__predict_false(tcb_set == 0) && __get_tcb() == NULL) {
		tcb_set = 1;

		/* use libc's errno for the main thread */
		tcb = &_initial_thread_tcb;
		TCB_INIT(tcb, &_initial_thread, ___errno());
		TCB_SET(tcb);
	}
}

/*
 * This is invoked by ___start() in crt0.  Eventually, when ld.so handles
 * TCB setup for dynamic executables, this will only be called to handle
 * the TCB setup for static executables and may migrate to libc.  The
 * envp argument is so that it can (someday) use that to find the Auxinfo
 * array and thus the ELF phdr and the PT_TLS info.
 */
void __init_tcb(char **_envp);
void
__init_tcb(__unused char **envp)
{
	_rthread_initlib();
}

int *
__errno(void)
{
	return (TCB_ERRNOPTR());
}

static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}

/* ARGSUSED0 */
static void
sigthr_handler(__unused int sig)
{
	pthread_t self = pthread_self();

	/*
	 * Do nothing unless
	 * 1) pthread_cancel() has been called on this thread,
	 * 2) cancelation is enabled for it, and
	 * 3) we're not already in cancelation processing
	 */
	if ((self->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE|THREAD_DYING))
	    != (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
		return;

	/*
	 * If delaying cancels inside complex ops (pthread_cond_wait,
	 * pthread_join, etc), just mark that this has happened to
	 * prevent a race with going to sleep
	 */
	if (self->flags & THREAD_CANCEL_DELAY) {
		self->delayed_cancel = 1;
		return;
	}

	/*
	 * otherwise, if in a cancel point or async cancels are
	 * enabled, then exit
	 */
	if (self->cancel_point || (self->flags & THREAD_CANCEL_DEFERRED) == 0)
		pthread_exit(PTHREAD_CANCELED);
}
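
/*
 * Illustrative sketch (not part of this file): the handler above is the
 * receiving end of pthread_cancel().  With the default deferred
 * cancelation, the flow it implements looks roughly like this; "worker"
 * and the sleep loop are hypothetical:
 *
 *	void *worker(void *arg)
 *	{
 *		for (;;)
 *			sleep(1);	// cancelation point: a SIGTHR delivered
 *					// here ends the thread via pthread_exit()
 *		return (NULL);
 *	}
 *
 *	pthread_create(&t, NULL, worker, NULL);
 *	pthread_cancel(t);		// sends SIGTHR, see pthread_cancel() below
 *	pthread_join(t, &res);		// res == PTHREAD_CANCELED
 */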

int
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	struct sigaction sa;

	thread->tid = getthrid();
	thread->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->flags |= THREAD_CANCEL_ENABLE | THREAD_CANCEL_DEFERRED |
	    THREAD_ORIGINAL | THREAD_INITIAL_STACK;
	thread->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_rthread_debug_init();

	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	_rthread_attr_default.guard_size = _thread_pagesize;

	_rthread_initlib();

	_threads_ready = 1;

	_rthread_debug(1, "rthread init\n");

#if defined(__ELF__) && !defined(__vax__)
	if (_DYNAMIC) {
		/*
		 * To avoid recursion problems in ld.so, we need to trigger the
		 * functions once to fully bind them before registering them
		 * for use.
		 */
		_rthread_dl_lock(0);
		_rthread_dl_lock(1);
		_rthread_bind_lock(0);
		_rthread_bind_lock(1);
		sched_yield();
		dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
		dlctl(NULL, DL_SETBINDLCK, _rthread_bind_lock);
	}
#endif

	/*
	 * Set the handler on the signal used for cancelation and
	 * suspension, and make sure it's unblocked
	 */
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = sigthr_handler;
	_thread_sys_sigaction(SIGTHR, &sa, NULL);
	sigaddset(&sa.sa_mask, SIGTHR);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);

	return (0);
}

static void
_rthread_free(pthread_t thread)
{
	/* _initial_thread is static, so don't free it */
	if (thread != &_initial_thread) {
		/*
		 * thread->tid is written to by __threxit in the thread
		 * itself, so it's not safe to touch it here
		 */
		_spinlock(&_thread_gc_lock);
		TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
	}
}

void
_rthread_setflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags |= flag;
	_spinunlock(&thread->flags_lock);
}

void
_rthread_clearflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags &= ~flag;
	_spinunlock(&thread->flags_lock);
}

/*
 * real pthread functions
 */
pthread_t
pthread_self(void)
{
	if (!_threads_ready)
		if (_rthread_init())
			return (NULL);

	return (TCB_THREAD());
}

static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:
	_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		if (thread->tid != 0)
			continue;
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		_rthread_debug(3, "rthread reaping %p stack %p\n",
		    (void *)thread, (void *)thread->stack);
		_rthread_free_stack(thread->stack);
		_rtld_free_tls(thread->arg,
		    sizeof(struct thread_control_block), sizeof(void *));
		free(thread);
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}

void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t thread = pthread_self();

	if (thread->flags & THREAD_DYING) {
		/*
		 * Called pthread_exit() from destructor or cancelation
		 * handler: blow up.  XXX write something to stderr?
		 */
		_exit(42);
	}

	_rthread_setflag(thread, THREAD_DYING);

	thread->retval = retval;

	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

#ifdef TCB_GET
	thread->arg = TCB_GET();
#else
	thread->arg = __get_tcb();
#endif
	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DONE;
		_spinunlock(&thread->flags_lock);
		_sem_post(&thread->donesem);
	}

	__threxit(&thread->tid);
	for(;;);
}

int
pthread_join(pthread_t thread, void **retval)
{
	int e;
	pthread_t self = pthread_self();

	e = 0;
	_enter_delayed_cancel(self);
	if (thread == NULL)
		e = EINVAL;
	else if (thread == self)
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
	    &self->delayed_cancel)) == 0) {
		if (retval)
			*retval = thread->retval;

		/*
		 * We should be the last one holding a ref to this thread,
		 * but someone stupid or evil might have detached it;
		 * in that case the thread will clean up after itself
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	_leave_delayed_cancel(self, e);
	_rthread_reaper();
	return (e);
}

int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}
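
/*
 * Usage sketch (illustrative only): a thread that will never be joined is
 * detached so that, once it exits, _rthread_reaper() can reclaim its
 * resources; "logger" is a hypothetical start routine:
 *
 *	pthread_t t;
 *
 *	if (pthread_create(&t, NULL, logger, NULL) == 0)
 *		pthread_detach(t);	// do not pthread_join() it afterwards
 */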

int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	extern int __isthreaded;
	struct thread_control_block *tcb;
	pthread_t thread;
	struct __tfork param;
	int rc = 0;

	if (!_threads_ready)
		if ((rc = _rthread_init()))
		    return (rc);

	_rthread_reaper();

	thread = calloc(1, sizeof(*thread));
	if (!thread)
		return (errno);
	thread->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->fn = start_routine;
	thread->arg = arg;
	thread->tid = -1;

	thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}

	tcb = _rtld_allocate_tls(NULL, sizeof(*tcb), sizeof(void *));
	if (tcb == NULL) {
		rc = errno;
		goto fail2;
	}
	TCB_INIT(tcb, thread, &thread->myerrno);

	param.tf_tcb = tcb;
	param.tf_tid = &thread->tid;
	param.tf_stack = thread->stack->sp;

	_spinlock(&_thread_lock);
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_spinunlock(&_thread_lock);

	/* we're going to be multi-threaded real soon now */
	__isthreaded = 1;
	rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
	if (rc != -1) {
		/* success */
		*threadp = thread;
		return (0);
	}

	rc = errno;

	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);
	_rtld_free_tls(tcb, sizeof(*tcb), sizeof(void *));
fail2:
	_rthread_free_stack(thread->stack);
fail1:
	_rthread_free(thread);

	return (rc);
}
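
/*
 * Illustrative sketch (not part of this file): the classic create/join
 * pair built on the functions above; "worker" is a hypothetical start
 * routine:
 *
 *	static void *
 *	worker(void *arg)
 *	{
 *		return (arg);		// becomes the value seen by pthread_join()
 *	}
 *
 *	pthread_t t;
 *	void *res;
 *	int error;
 *
 *	error = pthread_create(&t, NULL, worker, NULL);
 *	if (error == 0)
 *		error = pthread_join(t, &res);
 *
 * Note that, as the code above shows, pthread_create() and pthread_join()
 * return an errno value directly instead of setting errno.
 */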

int
pthread_kill(pthread_t thread, int sig)
{
	pid_t tid;
	int ret;

	/* killing myself?  do it without locking */
	if (thread == TCB_THREAD())
		return (kill(thread->tid, sig) == 0 ? 0 : errno);

	/* block the other thread from exiting */
	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DYING)
		ret = (thread->flags & THREAD_DETACHED) ? ESRCH : 0;
	else {
		tid = thread->tid;
		if (tid == 0) {
			/* should be impossible without DYING being set */
			ret = ESRCH;
		} else
			ret = kill(tid, sig) == 0 ? 0 : errno;
	}
	_spinunlock(&thread->flags_lock);
	return (ret);
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}

int
pthread_cancel(pthread_t thread)
{
	pid_t tid;

	_spinlock(&thread->flags_lock);
	tid = thread->tid;
	if ((thread->flags & (THREAD_DYING | THREAD_CANCELED)) == 0 &&
	    tid != 0) {
		thread->flags |= THREAD_CANCELED;

		if (thread->flags & THREAD_CANCEL_ENABLE) {

			/* canceling myself?  release the lock first */
			if (thread == TCB_THREAD()) {
				_spinunlock(&thread->flags_lock);
				kill(tid, SIGTHR);
				return (0);
			}

			kill(tid, SIGTHR);
		}
	}
	_spinunlock(&thread->flags_lock);
	return (0);
}

void
pthread_testcancel(void)
{
	if ((pthread_self()->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE)) ==
	    (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
		pthread_exit(PTHREAD_CANCELED);

}

int
pthread_setcancelstate(int state, int *oldstatep)
{
	pthread_t self = pthread_self();
	int oldstate;

	oldstate = self->flags & THREAD_CANCEL_ENABLE ?
	    PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE;
	if (state == PTHREAD_CANCEL_ENABLE) {
		_rthread_setflag(self, THREAD_CANCEL_ENABLE);
	} else if (state == PTHREAD_CANCEL_DISABLE) {
		_rthread_clearflag(self, THREAD_CANCEL_ENABLE);
	} else {
		return (EINVAL);
	}
	if (oldstatep)
		*oldstatep = oldstate;

	return (0);
}

int
pthread_setcanceltype(int type, int *oldtypep)
{
	pthread_t self = pthread_self();
	int oldtype;

	oldtype = self->flags & THREAD_CANCEL_DEFERRED ?
	    PTHREAD_CANCEL_DEFERRED : PTHREAD_CANCEL_ASYNCHRONOUS;
	if (type == PTHREAD_CANCEL_DEFERRED) {
		_rthread_setflag(self, THREAD_CANCEL_DEFERRED);
	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		_rthread_clearflag(self, THREAD_CANCEL_DEFERRED);
	} else {
		return (EINVAL);
	}
	if (oldtypep)
		*oldtypep = oldtype;

	return (0);
}
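
/*
 * Usage sketch (illustrative only): a thread protects a region that must
 * not be interrupted by toggling its cancel state around it, then polls
 * for a cancel that may have arrived in the meantime:
 *
 *	int oldstate;
 *
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
 *	// ... update state that must not be left half-done ...
 *	pthread_setcancelstate(oldstate, NULL);
 *	pthread_testcancel();		// THREAD_CANCELED may still be pending
 */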

void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = calloc(1, sizeof(*clfn));
	if (!clfn)
		return;
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}

void
pthread_cleanup_pop(int execute)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = self->cleanup_fns;
	if (clfn) {
		self->cleanup_fns = clfn->next;
		if (execute)
			clfn->fn(clfn->arg);
		free(clfn);
	}
}
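
/*
 * Usage sketch (illustrative only): handlers pushed here run either at
 * pthread_cleanup_pop(1) or, as pthread_exit() above shows, when the
 * thread exits or is canceled with handlers still pushed:
 *
 *	char *buf = malloc(BUFSIZ);
 *
 *	pthread_cleanup_push(free, buf);	// runs even if canceled below
 *	// ... code containing cancelation points ...
 *	pthread_cleanup_pop(1);			// pop and run the handler now
 */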

int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}

/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags %d name %s\n",
		    thread->tid, thread->flags, thread->name);
	_spinunlock(&_thread_lock);
}

#if defined(__ELF__)
/*
 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 * the function called via atexit() to invoke all destructors.  The latter
 * two call shared-object destructors, which may need to call dlclose(),
 * so this lock needs to permit recursive locking.
 * The specific code here was extracted from _rthread_mutex_lock() and
 * pthread_mutex_unlock() and simplified to use the static variables.
 */
void
_rthread_dl_lock(int what)
{
	static struct _spinlock lock = _SPINLOCK_UNLOCKED;
	static pthread_t owner = NULL;
	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
	static int count = 0;

	if (what == 0)
	{
		pthread_t self = pthread_self();

		/* lock, possibly recursive */
		_spinlock(&lock);
		if (owner == NULL) {
			owner = self;
		} else if (owner != self) {
			TAILQ_INSERT_TAIL(&lockers, self, waiting);
			while (owner != self) {
				__thrsleep(self, 0 | _USING_TICKETS, NULL,
				    &lock.ticket, NULL);
				_spinlock(&lock);
			}
		}
		count++;
		_spinunlock(&lock);
	}
	else
	{
		/* unlock, possibly recursive */
		if (--count == 0) {
			pthread_t next;

			_spinlock(&lock);
			owner = next = TAILQ_FIRST(&lockers);
			if (next != NULL)
				TAILQ_REMOVE(&lockers, next, waiting);
			_spinunlock(&lock);
			if (next != NULL)
				__thrwakeup(next, 1);
		}
	}
}
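
/*
 * Usage note (illustrative only): ld.so calls this function through the
 * dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock) registration done in
 * _rthread_init() above, passing 0 to acquire the lock and non-zero to
 * release it.  Because the owner and a count are tracked, a shared-object
 * destructor that calls dlclose() while its thread already holds the lock
 * just bumps the count instead of deadlocking.
 */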

void
_rthread_bind_lock(int what)
{
	static struct _spinlock lock = _SPINLOCK_UNLOCKED;

	if (what == 0)
		_spinlock(&lock);
	else
		_spinunlock(&lock);
}
#endif

#ifdef __ELF__
#define CERROR_SYMBOL __cerror
#else
#define CERROR_SYMBOL _cerror
#endif

/*
 * XXX: Bogus type signature, but we only need to be able to emit a
 * reference to it below.
 */
extern void CERROR_SYMBOL(void);

/*
 * All weak references used within libc that are redefined in libpthread
 * MUST be in this table.   This is necessary to force the proper version to
 * be used when linking -static.
 */
static void *__libc_overrides[] __used = {
	&CERROR_SYMBOL,
	&__errno,
	&_thread_arc4_lock,
	&_thread_arc4_unlock,
	&_thread_atexit_lock,
	&_thread_atexit_unlock,
	&_thread_malloc_lock,
	&_thread_malloc_unlock,
	&_thread_mutex_destroy,
	&_thread_mutex_lock,
	&_thread_mutex_unlock,
	&_thread_tag_lock,
	&_thread_tag_storage,
	&_thread_tag_unlock,
	&accept,
	&close,
	&closefrom,
	&connect,
	&fcntl,
	&flockfile,
	&fork,
	&fsync,
	&ftrylockfile,
	&funlockfile,
	&msgrcv,
	&msgsnd,
	&msync,
	&nanosleep,
	&open,
	&openat,
	&poll,
	&pread,
	&preadv,
	&pwrite,
	&pwritev,
	&read,
	&readv,
	&recvfrom,
	&recvmsg,
	&select,
	&sendmsg,
	&sendto,
	&sigaction,
	&sigprocmask,
	&sigsuspend,
	&vfork,
	&wait4,
	&write,
	&writev,
};