/*	$OpenBSD: rthread.c,v 1.90 2016/04/02 19:56:53 guenther Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */

#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/msg.h>
#ifndef NO_PIC
#include <sys/exec_elf.h>
#pragma weak _DYNAMIC
#endif

#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <poll.h>

#include <pthread.h>

#include "thread_private.h"	/* in libc/include */
#include "rthread.h"
#include "tcb.h"

/*
 * Call nonstandard functions via names in the reserved namespace:
 *	NOT YET dlctl() -> _dlctl()
 *	getthrid -> _thread_sys_getthrid
 */
REDIRECT_SYSCALL(getthrid);

/*
 * libc's signal wrappers hide SIGTHR; we need to call the real syscall
 * stubs _thread_sys_* directly.
 */
REDIRECT_SYSCALL(sigaction);
REDIRECT_SYSCALL(sigprocmask);
REDIRECT_SYSCALL(thrkill);
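
/*
 * A sketch of the asm-alias form REDIRECT_SYSCALL presumably takes (its
 * actual definition lives in rthread.h, not here); shown only so the
 * redirections above are easy to follow:
 *
 *	#define REDIRECT_SYSCALL(x)	typeof(x) x asm("_thread_sys_"#x)
 *
 * i.e. a plain call to sigaction() in this file binds to the raw
 * _thread_sys_sigaction stub instead of libc's wrapper.
 */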

static int concurrency_level;	/* not used */

struct _spinlock _SPINLOCK_UNLOCKED_ASSIGN = _SPINLOCK_UNLOCKED;

int _threads_ready;
size_t _thread_pagesize;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
struct _spinlock _thread_lock = _SPINLOCK_UNLOCKED;
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static struct _spinlock _thread_gc_lock = _SPINLOCK_UNLOCKED;
static struct pthread _initial_thread;
static struct thread_control_block _initial_thread_tcb;

struct pthread_attr _rthread_attr_default = {
	.stack_addr			= NULL,
	.stack_size			= RTHREAD_STACK_SIZE_DEF,
/*	.guard_size		set in _rthread_init */
	.detach_state			= PTHREAD_CREATE_JOINABLE,
	.contention_scope		= PTHREAD_SCOPE_SYSTEM,
	.sched_policy			= SCHED_OTHER,
	.sched_param = { .sched_priority = 0 },
	.sched_inherit			= PTHREAD_INHERIT_SCHED,
};

/*
 * internal support functions
 */
void
_spinlock(volatile struct _spinlock *lock)
{
	while (_atomic_lock(&lock->ticket))
		sched_yield();
}

int
_spinlocktry(volatile struct _spinlock *lock)
{
	return (_atomic_lock(&lock->ticket) == 0);
}

void
_spinunlock(volatile struct _spinlock *lock)
{
	lock->ticket = _ATOMIC_LOCK_UNLOCKED;
}
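
/*
 * Illustrative use of the spinlock primitives above (orientation only,
 * not called from anywhere): hold the lock just long enough to touch
 * shared state, and never block while holding it.
 *
 *	static struct _spinlock mylock = _SPINLOCK_UNLOCKED;
 *
 *	_spinlock(&mylock);
 *	... short critical section, no blocking calls ...
 *	_spinunlock(&mylock);
 *
 * _spinlocktry() returns nonzero on success, letting callers back off
 * instead of spinning.
 */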

/*
 * This sets up the thread base for the initial thread so that it
 * references the errno location provided by libc.  For other threads
 * this is handled by __tfork_thread().
 */
void _rthread_initlib(void) __attribute__((constructor));
void
_rthread_initlib(void)
{
	static int tcb_set;
	struct thread_control_block *tcb;

	if (__predict_false(tcb_set == 0)) {
		tcb_set = 1;

		tcb = __get_tcb();
		if (tcb == NULL) {
			tcb = &_initial_thread_tcb;
			TCB_SET(tcb);
		}

		/* use libc's errno for the main thread */
		TCB_INIT(tcb, &_initial_thread, ___errno());
	}
}

int *
__errno(void)
{
	return (TCB_ERRNOPTR());
}
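
/*
 * Note: this __errno() displaces libc's internal version (see the
 * __libc_overrides table at the bottom of this file), so errno lookups
 * resolve to the per-thread slot stored in the TCB.  For the initial
 * thread that slot is pointed at libc's own errno by _rthread_initlib()
 * above.
 */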

static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}

static void
sigthr_handler(__unused int sig)
{
	pthread_t self = pthread_self();

	/*
	 * Do nothing unless
	 * 1) pthread_cancel() has been called on this thread,
	 * 2) cancelation is enabled for it, and
	 * 3) we're not already in cancelation processing
	 */
	if ((self->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE|THREAD_DYING))
	    != (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
		return;

	/*
	 * If delaying cancels inside complex ops (pthread_cond_wait,
	 * pthread_join, etc), just mark that this has happened to
	 * prevent a race with going to sleep
	 */
	if (self->flags & THREAD_CANCEL_DELAY) {
		self->delayed_cancel = 1;
		return;
	}

	/*
	 * otherwise, if in a cancel point or async cancels are
	 * enabled, then exit
	 */
	if (self->cancel_point || (self->flags & THREAD_CANCEL_DEFERRED) == 0)
		pthread_exit(PTHREAD_CANCELED);
}

int
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	struct sigaction sa;

	thread->tid = getthrid();
	thread->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->flags |= THREAD_CANCEL_ENABLE | THREAD_CANCEL_DEFERRED |
	    THREAD_ORIGINAL | THREAD_INITIAL_STACK;
	thread->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
	strlcpy(thread->name, "Main process", sizeof(thread->name));
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_rthread_debug_init();

	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	_rthread_attr_default.guard_size = _thread_pagesize;
	thread->attr = _rthread_attr_default;

	_rthread_initlib();

	_threads_ready = 1;

	_rthread_debug(1, "rthread init\n");

#ifndef NO_PIC
	if (_DYNAMIC) {
		dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
	}
#endif

	/*
	 * Set the handler on the signal used for cancelation and
	 * suspension, and make sure it's unblocked
	 */
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = sigthr_handler;
	sigaction(SIGTHR, &sa, NULL);
	sigaddset(&sa.sa_mask, SIGTHR);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);

	return (0);
}

static void
_rthread_free(pthread_t thread)
{
	/* _initial_thread is static, so don't free it */
	if (thread != &_initial_thread) {
		/*
		 * thread->tid is written to by __threxit in the thread
		 * itself, so it's not safe to touch it here
		 */
		_spinlock(&_thread_gc_lock);
		TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
	}
}

void
_rthread_setflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags |= flag;
	_spinunlock(&thread->flags_lock);
}

void
_rthread_clearflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags &= ~flag;
	_spinunlock(&thread->flags_lock);
}

/*
 * real pthread functions
 */
pthread_t
pthread_self(void)
{
	if (!_threads_ready)
		if (_rthread_init())
			return (NULL);

	return (TCB_THREAD());
}
DEF_STD(pthread_self);

static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:
	_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		if (thread->tid != 0)
			continue;
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		_rthread_debug(3, "rthread reaping %p stack %p\n",
		    (void *)thread, (void *)thread->stack);
		_rthread_free_stack(thread->stack);
		_rtld_free_tls(thread->tcb,
		    sizeof(struct thread_control_block), sizeof(void *));
		free(thread);
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}
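
/*
 * The "goto restart" above is deliberate: the gc lock is dropped before
 * the stack, TLS, and thread structure are freed (free() may take the
 * malloc lock), so the list may have changed and the scan must begin
 * again from the head.  Threads stay on the gc list until __threxit()
 * has zeroed their tid, i.e. until the kernel is done with them.
 */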

void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t thread = pthread_self();

	if (thread->flags & THREAD_DYING) {
		/*
		 * Called pthread_exit() from destructor or cancelation
		 * handler: blow up.  XXX write something to stderr?
		 */
		_exit(42);
	}

	_rthread_setflag(thread, THREAD_DYING);

	thread->retval = retval;

	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DONE;
		_spinunlock(&thread->flags_lock);
		_sem_post(&thread->donesem);
	}

	__threxit(&thread->tid);
	for (;;)
		;
}
DEF_STD(pthread_exit);

int
pthread_join(pthread_t thread, void **retval)
{
	int e;
	pthread_t self = pthread_self();

	e = 0;
	_enter_delayed_cancel(self);
	if (thread == NULL)
		e = EINVAL;
	else if (thread == self)
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
	    &self->delayed_cancel)) == 0) {
		if (retval)
			*retval = thread->retval;
		/*
		 * We should be the last one holding a ref to this thread,
		 * but someone stupid or evil might have detached it;
		 * in that case the thread will clean up after itself
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	_leave_delayed_cancel(self, e);
	_rthread_reaper();
	return (e);
}

int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}

int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	extern int __isthreaded;
	struct thread_control_block *tcb;
	pthread_t thread;
	struct __tfork param;
	int rc = 0;

	if (!_threads_ready)
		if ((rc = _rthread_init()))
			return (rc);

	_rthread_reaper();

	thread = calloc(1, sizeof(*thread));
	if (!thread)
		return (errno);
	thread->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
	thread->fn = start_routine;
	thread->arg = arg;
	thread->tid = -1;

	thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
	if (thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		pthread_t self = pthread_self();

		thread->attr.sched_policy = self->attr.sched_policy;
		thread->attr.sched_param = self->attr.sched_param;
	}
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;
	thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}

	tcb = _rtld_allocate_tls(NULL, sizeof(*tcb), sizeof(void *));
	if (tcb == NULL) {
		rc = errno;
		goto fail2;
	}
	TCB_INIT(tcb, thread, &thread->myerrno);
	thread->tcb = tcb;

	param.tf_tcb = tcb;
	param.tf_tid = &thread->tid;
	param.tf_stack = thread->stack->sp;

	_spinlock(&_thread_lock);
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_spinunlock(&_thread_lock);

	/* we're going to be multi-threaded real soon now */
	__isthreaded = 1;
	rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
	if (rc != -1) {
		/* success */
		*threadp = thread;
		return (0);
	}

	rc = errno;

	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);
	_rtld_free_tls(tcb, sizeof(*tcb), sizeof(void *));
fail2:
	_rthread_free_stack(thread->stack);
fail1:
	_rthread_free(thread);

	return (rc);
}
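
/*
 * Illustrative application-level use of pthread_create()/pthread_join()
 * (ordinary client code, not part of this library):
 *
 *	void *worker(void *arg) { return (arg); }
 *	...
 *	pthread_t t;
 *	void *res;
 *	if (pthread_create(&t, NULL, worker, "hello") == 0 &&
 *	    pthread_join(t, &res) == 0)
 *		assert(strcmp(res, "hello") == 0);
 */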

int
pthread_kill(pthread_t thread, int sig)
{
	if (sig == SIGTHR)
		return (EINVAL);
	if (thrkill(thread->tid, sig, thread->tcb))
		return (errno);
	return (0);
}

int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2);
}

int
pthread_cancel(pthread_t thread)
{
	pid_t tid;

	_spinlock(&thread->flags_lock);
	tid = thread->tid;
	if ((thread->flags & (THREAD_DYING | THREAD_CANCELED)) == 0 &&
	    tid != 0) {
		thread->flags |= THREAD_CANCELED;

		if (thread->flags & THREAD_CANCEL_ENABLE) {
			_spinunlock(&thread->flags_lock);
			thrkill(tid, SIGTHR, thread->tcb);
			return (0);
		}
	}
	_spinunlock(&thread->flags_lock);
	return (0);
}

void
pthread_testcancel(void)
{
	if ((pthread_self()->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE)) ==
	    (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
		pthread_exit(PTHREAD_CANCELED);
}

int
pthread_setcancelstate(int state, int *oldstatep)
{
	pthread_t self = pthread_self();
	int oldstate;

	oldstate = self->flags & THREAD_CANCEL_ENABLE ?
	    PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE;
	if (state == PTHREAD_CANCEL_ENABLE) {
		_rthread_setflag(self, THREAD_CANCEL_ENABLE);
	} else if (state == PTHREAD_CANCEL_DISABLE) {
		_rthread_clearflag(self, THREAD_CANCEL_ENABLE);
	} else {
		return (EINVAL);
	}
	if (oldstatep)
		*oldstatep = oldstate;

	return (0);
}
DEF_STD(pthread_setcancelstate);

int
pthread_setcanceltype(int type, int *oldtypep)
{
	pthread_t self = pthread_self();
	int oldtype;

	oldtype = self->flags & THREAD_CANCEL_DEFERRED ?
	    PTHREAD_CANCEL_DEFERRED : PTHREAD_CANCEL_ASYNCHRONOUS;
	if (type == PTHREAD_CANCEL_DEFERRED) {
		_rthread_setflag(self, THREAD_CANCEL_DEFERRED);
	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		_rthread_clearflag(self, THREAD_CANCEL_DEFERRED);
	} else {
		return (EINVAL);
	}
	if (oldtypep)
		*oldtypep = oldtype;

	return (0);
}

void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = calloc(1, sizeof(*clfn));
	if (!clfn)
		return;
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}

void
pthread_cleanup_pop(int execute)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = self->cleanup_fns;
	if (clfn) {
		self->cleanup_fns = clfn->next;
		if (execute)
			clfn->fn(clfn->arg);
		free(clfn);
	}
}
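
/*
 * Unlike the macro pair POSIX permits, pthread_cleanup_push() and
 * pthread_cleanup_pop() are real functions here, so they need not form
 * a lexical brace pair; they must still be called in matching pairs,
 * e.g. (illustrative):
 *
 *	pthread_cleanup_push(unlock_fn, &lock);
 *	... code that may reach a cancelation point ...
 *	pthread_cleanup_pop(1);		runs unlock_fn(&lock)
 */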

int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return (EINVAL);
	concurrency_level = new_level;
	return (0);
}

/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags %d name %s\n",
		    thread->tid, thread->flags, thread->name);
	_spinunlock(&_thread_lock);
}

#ifndef NO_PIC
/*
 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 * the function called via atexit() to invoke all destructors.  The latter
 * two call shared-object destructors, which may need to call dlclose(),
 * so this lock needs to permit recursive locking.
 * The specific code here was extracted from _rthread_mutex_lock() and
 * pthread_mutex_unlock() and simplified to use the static variables.
 */
void
_rthread_dl_lock(int what)
{
	static struct _spinlock lock = _SPINLOCK_UNLOCKED;
	static pthread_t owner = NULL;
	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
	static int count = 0;

	if (what == 0) {
		pthread_t self = pthread_self();

		/* lock, possibly recursive */
		_spinlock(&lock);
		if (owner == NULL) {
			owner = self;
		} else if (owner != self) {
			TAILQ_INSERT_TAIL(&lockers, self, waiting);
			while (owner != self) {
				__thrsleep(self, 0 | _USING_TICKETS, NULL,
				    &lock.ticket, NULL);
				_spinlock(&lock);
			}
		}
		count++;
		_spinunlock(&lock);
	} else if (what == 1) {
		/* unlock, possibly recursive */
		if (--count == 0) {
			pthread_t next;

			_spinlock(&lock);
			owner = next = TAILQ_FIRST(&lockers);
			if (next != NULL)
				TAILQ_REMOVE(&lockers, next, waiting);
			_spinunlock(&lock);
			if (next != NULL)
				__thrwakeup(next, 1);
		}
	} else {
		/* reinit: used in child after fork to clear the queue */
		lock = _SPINLOCK_UNLOCKED_ASSIGN;
		if (--count == 0)
			owner = NULL;
		TAILQ_INIT(&lockers);
	}
}
#endif

/*
 * XXX: Bogus type signature, but we only need to be able to emit a
 * reference to it below.
 */
extern void __cerror(void);

/*
 * All weak references used within libc that are redefined in libpthread
 * MUST be in this table.  This is necessary to force the proper version
 * to be used when linking -static.
 */
static void *__libc_overrides[] __used = {
	&__cerror,
	&__errno,
	&_thread_arc4_lock,
	&_thread_arc4_unlock,
	&_thread_atexit_lock,
	&_thread_atexit_unlock,
	&_thread_atfork_lock,
	&_thread_atfork_unlock,
	&_thread_malloc_lock,
	&_thread_malloc_unlock,
	&_thread_mutex_destroy,
	&_thread_mutex_lock,
	&_thread_mutex_unlock,
	&_thread_tag_lock,
	&_thread_tag_storage,
	&_thread_tag_unlock,
	&accept,
	&close,
	&closefrom,
	&connect,
	&fcntl,
	&flockfile,
	&fork,
	&fsync,
	&ftrylockfile,
	&funlockfile,
	&msgrcv,
	&msgsnd,
	&msync,
	&nanosleep,
	&open,
	&openat,
	&poll,
	&pread,
	&preadv,
	&pwrite,
	&pwritev,
	&read,
	&readv,
	&recvfrom,
	&recvmsg,
	&select,
	&sendmsg,
	&sendto,
	&sigsuspend,
	&vfork,
	&wait4,
	&write,
	&writev,
};