xref: /onnv-gate/usr/src/lib/libc/port/threads/scalls.c (revision 5891:0d5c6468bb04)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51885Sraf  * Common Development and Distribution License (the "License").
61885Sraf  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
211111Sraf 
220Sstevel@tonic-gate /*
23*5891Sraf  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
240Sstevel@tonic-gate  * Use is subject to license terms.
250Sstevel@tonic-gate  */
260Sstevel@tonic-gate 
270Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
280Sstevel@tonic-gate 
290Sstevel@tonic-gate #include "lint.h"
300Sstevel@tonic-gate #include "thr_uberdata.h"
310Sstevel@tonic-gate #include <stdarg.h>
320Sstevel@tonic-gate #include <poll.h>
330Sstevel@tonic-gate #include <stropts.h>
340Sstevel@tonic-gate #include <dlfcn.h>
35*5891Sraf #include <wait.h>
36*5891Sraf #include <sys/socket.h>
370Sstevel@tonic-gate #include <sys/uio.h>
38*5891Sraf #include <sys/file.h>
39*5891Sraf #include <sys/door.h>
400Sstevel@tonic-gate 
410Sstevel@tonic-gate /*
425002Sraf  * atfork_lock protects the pthread_atfork() data structures.
435002Sraf  *
444914Sraf  * fork_lock does double-duty.  Not only does it (and atfork_lock)
454914Sraf  * serialize calls to fork() and forkall(), but it also serializes calls
464914Sraf  * to thr_suspend() and thr_continue() (because fork() and forkall() also
474914Sraf  * suspend and continue other threads and they want no competition).
484914Sraf  *
495002Sraf  * Functions called in dlopen()ed L10N objects can do anything, including
505002Sraf  * call malloc() and free().  Such calls are not fork-safe when protected
515002Sraf  * by an ordinary mutex that is acquired in libc's prefork processing
525002Sraf  * because, with an interposed malloc library present, there would be a
535002Sraf  * lock ordering violation due to the pthread_atfork() prefork function
545002Sraf  * in the interposition library acquiring its malloc lock(s) before the
554914Sraf  * ordinary mutex in libc being acquired by libc's prefork functions.
564914Sraf  *
575002Sraf  * Within libc, calls to malloc() and free() are fork-safe if the calls
585002Sraf  * are made while holding no other libc locks.  This covers almost all
595002Sraf  * of libc's malloc() and free() calls.  For those libc code paths, such
605002Sraf  * as the above-mentioned L10N calls, that require serialization and that
615002Sraf  * may call malloc() or free(), libc uses callout_lock_enter() to perform
625002Sraf  * the serialization.  This works because callout_lock is not acquired as
635002Sraf  * part of running the pthread_atfork() prefork handlers (to avoid the
645002Sraf  * lock ordering violation described above).  Rather, it is simply
655002Sraf  * reinitialized in postfork1_child() to cover the case that some
665002Sraf  * now-defunct thread might have been suspended while holding it.
670Sstevel@tonic-gate  */
684914Sraf 
694843Sraf void
704843Sraf fork_lock_enter(void)
710Sstevel@tonic-gate {
724914Sraf 	ASSERT(curthread->ul_critical == 0);
734914Sraf 	(void) _private_mutex_lock(&curthread->ul_uberdata->fork_lock);
740Sstevel@tonic-gate }
750Sstevel@tonic-gate 
760Sstevel@tonic-gate void
770Sstevel@tonic-gate fork_lock_exit(void)
780Sstevel@tonic-gate {
794914Sraf 	ASSERT(curthread->ul_critical == 0);
804914Sraf 	(void) _private_mutex_unlock(&curthread->ul_uberdata->fork_lock);
814914Sraf }
820Sstevel@tonic-gate 
83*5891Sraf /*
84*5891Sraf  * Use cancel_safe_mutex_lock() to protect against being cancelled while
85*5891Sraf  * holding callout_lock and calling outside of libc (via L10N plugins).
86*5891Sraf  * We will honor a pending cancellation request when callout_lock_exit()
87*5891Sraf  * is called, by calling cancel_safe_mutex_unlock().
88*5891Sraf  */
894914Sraf void
905002Sraf callout_lock_enter(void)
914914Sraf {
924914Sraf 	ASSERT(curthread->ul_critical == 0);
93*5891Sraf 	cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);
944914Sraf }
954914Sraf 
964914Sraf void
975002Sraf callout_lock_exit(void)
984914Sraf {
994914Sraf 	ASSERT(curthread->ul_critical == 0);
100*5891Sraf 	cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
1010Sstevel@tonic-gate }
1020Sstevel@tonic-gate 
/*
 * forkx(2): fork the process, replicating only the calling thread
 * in the child.  Runs the pthread_atfork() handlers around the
 * underlying __forkx() trap, blocks all signals, and suspends all
 * other threads for the duration so that no lmutex_lock()-acquired
 * libc locks are held across the fork.
 * Returns the child's pid in the parent, 0 in the child, or -1
 * with errno set on failure.
 */
#pragma weak forkx = _private_forkx
#pragma weak _forkx = _private_forkx
pid_t
_private_forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	/* defer signals so we cannot be interrupted while ul_fork is set */
	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) _private_mutex_lock(&udp->atfork_lock);
	_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) _private_mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) _private_mutex_unlock(&udp->fork_lock);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) _private_mutex_unlock(&udp->fork_lock);
		_postfork_parent_handler();
	}

	(void) _private_mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
2110Sstevel@tonic-gate 
2120Sstevel@tonic-gate /*
2133235Sraf  * fork() is fork1() for both Posix threads and Solaris threads.
2143235Sraf  * The forkall() interface exists for applications that require
2153235Sraf  * the semantics of replicating all threads.
2160Sstevel@tonic-gate  */
2173235Sraf #pragma weak fork1 = _fork
2183235Sraf #pragma weak _fork1 = _fork
2193235Sraf #pragma weak fork = _fork
2200Sstevel@tonic-gate pid_t
2213235Sraf _fork(void)
2223235Sraf {
2233235Sraf 	return (_private_forkx(0));
2243235Sraf }
2253235Sraf 
/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 *
 * forkallx(2): fork the process, replicating ALL threads in the
 * child (continue_fork(1) in the child, vs. continue_fork(0) in
 * the parent).  Note that, unlike forkx(), the pthread_atfork()
 * handlers are NOT run here; only the locks they rely upon are
 * taken, to serialize against concurrent fork()/atfork activity.
 */
#pragma weak forkallx = _private_forkallx
#pragma weak _forkallx = _private_forkallx
pid_t
_private_forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/* vfork() child: go straight to the system call trap */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/* cannot fork from within a fork handler */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	(void) _private_mutex_lock(&udp->atfork_lock);
	(void) _private_mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		/* child: same per-thread cleanup as in forkx() */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		unregister_locks();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	(void) _private_mutex_unlock(&udp->fork_lock);
	(void) _private_mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}
2850Sstevel@tonic-gate 
2863235Sraf #pragma weak forkall = _forkall
2873235Sraf pid_t
2883235Sraf _forkall(void)
2893235Sraf {
2903235Sraf 	return (_private_forkallx(0));
2913235Sraf }
2923235Sraf 
/*
 * For the implementation of cancellation at cancellation points.
 *
 * PROLOGUE opens a scope (closed by the matching EPILOGUE) and
 * computes 'nocancel', non-zero when cancellation must not be acted
 * on here (vfork child, ul_nocancel, libc locks held, critical
 * section, or deferred signals).  When cancellation is permitted it
 * enables asynchronous cancellation for the duration of the blocking
 * call and honors any already-pending cancel immediately via
 * _pthread_exit(PTHREAD_CANCELED).  Otherwise, if a cancel is pending
 * and not disabled, it sets the cancel-EINTR flag and sets 'abort'
 * so the caller can fail with EINTR rather than block.
 * NOTE: the local 'abort' variable shadows abort(3C) within the scope.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	int abort = 0;							\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				_pthread_exit(PTHREAD_CANCELED);	\
		}							\
		self->ul_sp = stkptr();					\
	} else if (self->ul_cancel_pending &&				\
	    !self->ul_cancel_disabled) {				\
		set_cancel_eintr_flag(self);				\
		abort = 1;						\
	}
3160Sstevel@tonic-gate 
/*
 * EPILOGUE undoes PROLOGUE: clears the saved stack pointer, restores
 * the previous async-cancellation state, and closes the scope that
 * PROLOGUE opened.
 */
#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}
3230Sstevel@tonic-gate 
/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 *
 * 'abort' and 'nocancel' are defined by PROLOGUE.  If PROLOGUE
 * decided the call must not block because a cancel is pending but
 * cannot be acted upon, fail immediately with EINTR through the
 * thread-specific errno pointer.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (abort) {							\
		*self->ul_errnop = EINTR;				\
		return (-1);						\
	}								\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);
3420Sstevel@tonic-gate 
/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 *
 * The temporary mask is staged in ul_tmpmask (with libc's reserved
 * signals removed) and ul_sigsuspend is set so that EPILOGUE_MASK
 * knows to restore the original mask if the kernel did not.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					_pthread_exit(PTHREAD_CANCELED);\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}
3850Sstevel@tonic-gate 
/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}
4020Sstevel@tonic-gate 
/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 *
 * _cancel_prologue() performs the same state transitions as the
 * PROLOGUE macro, recording its nocancel decision in
 * ul_cancel_prologue so that _cancel_epilogue() can undo them.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	/* non-zero when cancellation must not be acted on here */
	self->ul_cancel_prologue =
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
	    self->ul_critical | self->ul_sigdefer) != 0;
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	} else if (self->ul_cancel_pending &&
	    !self->ul_cancel_disabled) {
		/* cannot cancel now; arrange for EINTR instead */
		set_cancel_eintr_flag(self);
	}
}
4281885Sraf 
4291885Sraf void
4301885Sraf _cancel_epilogue(void)
4311885Sraf {
4321885Sraf 	ulwp_t *self = curthread;
4331885Sraf 
4341885Sraf 	if (self->ul_cancel_prologue == 0) {
4351885Sraf 		self->ul_sp = 0;
4361885Sraf 		self->ul_cancel_async = self->ul_save_async;
4371885Sraf 	}
4381885Sraf }
4391885Sraf 
/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 *
 * Wait for the lwp 'tid' to exit; returns 0 or an error number.
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	/* 'abort' is set by PROLOGUE when a pending cancel must fail us */
	if (abort)
		return (EINTR);
	/* retry on EINTR unless the interruption is due to cancellation */
	while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
		continue;
	EPILOGUE
	return (error);
}
4560Sstevel@tonic-gate 
/*
 * Cancellation-point wrapper for read(2).
 */
#pragma weak read = _read
ssize_t
_read(int fd, void *buf, size_t size)
{
	extern ssize_t __read(int, void *, size_t);
	ssize_t rv;

	PERFORM(__read(fd, buf, size))
}
4660Sstevel@tonic-gate 
/*
 * Cancellation-point wrapper for write(2).
 */
#pragma weak write = _write
ssize_t
_write(int fd, const void *buf, size_t size)
{
	extern ssize_t __write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(__write(fd, buf, size))
}
4760Sstevel@tonic-gate 
/*
 * Cancellation-point wrapper for getmsg(2).
 */
#pragma weak getmsg = _getmsg
int
_getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))
}
4870Sstevel@tonic-gate 
/*
 * Cancellation-point wrapper for getpmsg(2).
 */
#pragma weak getpmsg = _getpmsg
int
_getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int __getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(__getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}
4990Sstevel@tonic-gate 
/*
 * Cancellation-point wrapper for putmsg(2).
 */
#pragma weak putmsg = _putmsg
int
_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags))
}
5110Sstevel@tonic-gate 
/*
 * XPG4 variant of putmsg(2): same as _putmsg() but with the
 * MSG_XPG4 flag or'd in to request XPG4 semantics from the kernel.
 */
int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}
5220Sstevel@tonic-gate 
/*
 * Cancellation-point wrapper for putpmsg(2).
 */
#pragma weak putpmsg = _putpmsg
int
_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags))
}
5340Sstevel@tonic-gate 
/*
 * XPG4 variant of putpmsg(2): same as _putpmsg() but with the
 * MSG_XPG4 flag or'd in to request XPG4 semantics from the kernel.
 */
int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}
5450Sstevel@tonic-gate 
/*
 * Cancellation-point wrapper for nanosleep(3RT).
 * __nanosleep() returns an error number; convert that to the
 * documented -1/errno convention.  If PROLOGUE set 'abort',
 * fail immediately with EINTR without sleeping.
 */
#pragma weak nanosleep = _nanosleep
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = abort? EINTR : __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}
5610Sstevel@tonic-gate 
/*
 * clock_nanosleep(3RT): sleep against the given clock, either for a
 * relative interval or, with TIMER_ABSTIME, until an absolute time.
 * Returns 0 or an error number (it does not set errno).  CPU-time
 * clocks are not supported (ENOTSUP); only CLOCK_REALTIME and
 * CLOCK_HIGHRES are accepted.  The restart loops below defend
 * against early returns caused by someone resetting the system
 * clock while we sleep.
 */
#pragma weak clock_nanosleep = _clock_nanosleep
int
_clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		/* absolute sleeps report no remaining time */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = abort? EINTR : __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 * (Note: tv_sec is truncated to 32 bits by the casts
		 * below before scaling to nanoseconds.)
		 */
		if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
		    rqtp->tv_nsec > gethrtime()) {
			if (flags & TIMER_ABSTIME) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
6380Sstevel@tonic-gate 
6390Sstevel@tonic-gate #pragma weak sleep = _sleep
6400Sstevel@tonic-gate unsigned int
6410Sstevel@tonic-gate _sleep(unsigned int sec)
6420Sstevel@tonic-gate {
6430Sstevel@tonic-gate 	unsigned int rem = 0;
6440Sstevel@tonic-gate 	timespec_t ts;
6450Sstevel@tonic-gate 	timespec_t tsr;
6460Sstevel@tonic-gate 
6470Sstevel@tonic-gate 	ts.tv_sec = (time_t)sec;
6480Sstevel@tonic-gate 	ts.tv_nsec = 0;
649*5891Sraf 	if (_nanosleep(&ts, &tsr) == -1 && errno == EINTR) {
6500Sstevel@tonic-gate 		rem = (unsigned int)tsr.tv_sec;
6510Sstevel@tonic-gate 		if (tsr.tv_nsec >= NANOSEC / 2)
6520Sstevel@tonic-gate 			rem++;
6530Sstevel@tonic-gate 	}
6540Sstevel@tonic-gate 	return (rem);
6550Sstevel@tonic-gate }
6560Sstevel@tonic-gate 
6570Sstevel@tonic-gate #pragma weak usleep = _usleep
6580Sstevel@tonic-gate int
6590Sstevel@tonic-gate _usleep(useconds_t usec)
6600Sstevel@tonic-gate {
6610Sstevel@tonic-gate 	timespec_t ts;
6620Sstevel@tonic-gate 
6630Sstevel@tonic-gate 	ts.tv_sec = usec / MICROSEC;
6640Sstevel@tonic-gate 	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
665*5891Sraf 	(void) _nanosleep(&ts, NULL);
6660Sstevel@tonic-gate 	return (0);
6670Sstevel@tonic-gate }
6680Sstevel@tonic-gate 
/*
 * Cancellation-point wrapper for close(2).
 * _aio_close() is called first to notify the asynchronous-I/O
 * subsystem of the impending close (presumably so it can deal with
 * outstanding requests on the descriptor — see _aio_close()).
 */
#pragma weak close = _close
int
_close(int fildes)
{
	extern void _aio_close(int);
	extern int __close(int);
	int rv;

	_aio_close(fildes);
	PERFORM(__close(fildes))
}
6800Sstevel@tonic-gate 
/*
 * Cancellation-point wrapper for creat(2).
 */
#pragma weak creat = _creat
int
_creat(const char *path, mode_t mode)
{
	extern int __creat(const char *, mode_t);
	int rv;

	PERFORM(__creat(path, mode))
}
6900Sstevel@tonic-gate 
#if !defined(_LP64)
/*
 * Cancellation-point wrapper for creat64(2)
 * (large-file interface, 32-bit environments only).
 */
#pragma weak creat64 = _creat64
int
_creat64(const char *path, mode_t mode)
{
	extern int __creat64(const char *, mode_t);
	int rv;

	PERFORM(__creat64(path, mode))
}
#endif	/* !_LP64 */
7020Sstevel@tonic-gate 
/*
 * Cancellation-point wrapper for door_call(3C).
 */
#pragma weak door_call = _door_call
int
_door_call(int d, door_arg_t *params)
{
	extern int __door_call(int, door_arg_t *);
	int rv;

	PERFORM(__door_call(d, params))
}
712*5891Sraf 
/*
 * fcntl(2) wrapper.  Only F_SETLKW (which can block indefinitely
 * waiting for a record lock) is treated as a cancellation point;
 * every other command goes straight to the system call.
 * The optional third argument is always fetched as an intptr_t.
 */
#pragma weak fcntl = _fcntl
int
_fcntl(int fildes, int cmd, ...)
{
	extern int __fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (__fcntl(fildes, cmd, arg));
	PERFORM(__fcntl(fildes, cmd, arg))
}
7290Sstevel@tonic-gate 
730*5891Sraf #pragma weak fdatasync = _fdatasync
7310Sstevel@tonic-gate int
732*5891Sraf _fdatasync(int fildes)
7334722Sraf {
734*5891Sraf 	extern int __fdsync(int, int);
7354722Sraf 	int rv;
7364722Sraf 
737*5891Sraf 	PERFORM(__fdsync(fildes, FDSYNC))
7384722Sraf }
7394722Sraf 
740*5891Sraf #pragma weak fsync = _fsync
7414722Sraf int
742*5891Sraf _fsync(int fildes)
7430Sstevel@tonic-gate {
744*5891Sraf 	extern int __fdsync(int, int);
7450Sstevel@tonic-gate 	int rv;
7460Sstevel@tonic-gate 
747*5891Sraf 	PERFORM(__fdsync(fildes, FSYNC))
7480Sstevel@tonic-gate }
7490Sstevel@tonic-gate 
750*5891Sraf #pragma weak lockf = _lockf
7510Sstevel@tonic-gate int
752*5891Sraf _lockf(int fildes, int function, off_t size)
7530Sstevel@tonic-gate {
754*5891Sraf 	extern int __lockf(int, int, off_t);
7550Sstevel@tonic-gate 	int rv;
7560Sstevel@tonic-gate 
757*5891Sraf 	PERFORM(__lockf(fildes, function, size))
7580Sstevel@tonic-gate }
7590Sstevel@tonic-gate 
7600Sstevel@tonic-gate #if !defined(_LP64)
761*5891Sraf #pragma weak lockf64 = _lockf64
7620Sstevel@tonic-gate int
763*5891Sraf _lockf64(int fildes, int function, off64_t size)
7640Sstevel@tonic-gate {
765*5891Sraf 	extern int __lockf64(int, int, off64_t);
7660Sstevel@tonic-gate 	int rv;
7670Sstevel@tonic-gate 
768*5891Sraf 	PERFORM(__lockf64(fildes, function, size))
7690Sstevel@tonic-gate }
7700Sstevel@tonic-gate #endif	/* !_LP64 */
7710Sstevel@tonic-gate 
772*5891Sraf #pragma weak msgrcv = _msgrcv
7730Sstevel@tonic-gate ssize_t
774*5891Sraf _msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
7750Sstevel@tonic-gate {
776*5891Sraf 	extern ssize_t __msgrcv(int, void *, size_t, long, int);
7770Sstevel@tonic-gate 	ssize_t rv;
7780Sstevel@tonic-gate 
779*5891Sraf 	PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
7800Sstevel@tonic-gate }
7810Sstevel@tonic-gate 
782*5891Sraf #pragma weak msgsnd = _msgsnd
7830Sstevel@tonic-gate int
784*5891Sraf _msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
7850Sstevel@tonic-gate {
786*5891Sraf 	extern int __msgsnd(int, const void *, size_t, int);
7870Sstevel@tonic-gate 	int rv;
7880Sstevel@tonic-gate 
789*5891Sraf 	PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg))
790*5891Sraf }
791*5891Sraf 
792*5891Sraf #pragma weak msync = _msync
793*5891Sraf int
794*5891Sraf _msync(caddr_t addr, size_t len, int flags)
795*5891Sraf {
796*5891Sraf 	extern int __msync(caddr_t, size_t, int);
797*5891Sraf 	int rv;
798*5891Sraf 
799*5891Sraf 	PERFORM(__msync(addr, len, flags))
8000Sstevel@tonic-gate }
8010Sstevel@tonic-gate 
802*5891Sraf #pragma weak open = _open
8030Sstevel@tonic-gate int
804*5891Sraf _open(const char *path, int oflag, ...)
8050Sstevel@tonic-gate {
806*5891Sraf 	extern int __open(const char *, int, ...);
807*5891Sraf 	mode_t mode;
8080Sstevel@tonic-gate 	int rv;
809*5891Sraf 	va_list ap;
8100Sstevel@tonic-gate 
811*5891Sraf 	va_start(ap, oflag);
812*5891Sraf 	mode = va_arg(ap, mode_t);
813*5891Sraf 	va_end(ap);
814*5891Sraf 	PERFORM(__open(path, oflag, mode))
8150Sstevel@tonic-gate }
8160Sstevel@tonic-gate 
817*5891Sraf #pragma weak openat = _openat
8180Sstevel@tonic-gate int
819*5891Sraf _openat(int fd, const char *path, int oflag, ...)
8200Sstevel@tonic-gate {
821*5891Sraf 	extern int __openat(int, const char *, int, ...);
8220Sstevel@tonic-gate 	mode_t mode;
8230Sstevel@tonic-gate 	int rv;
8240Sstevel@tonic-gate 	va_list ap;
8250Sstevel@tonic-gate 
8260Sstevel@tonic-gate 	va_start(ap, oflag);
8270Sstevel@tonic-gate 	mode = va_arg(ap, mode_t);
8280Sstevel@tonic-gate 	va_end(ap);
829*5891Sraf 	PERFORM(__openat(fd, path, oflag, mode))
8300Sstevel@tonic-gate }
8310Sstevel@tonic-gate 
8320Sstevel@tonic-gate #if !defined(_LP64)
833*5891Sraf #pragma weak open64 = _open64
8340Sstevel@tonic-gate int
835*5891Sraf _open64(const char *path, int oflag, ...)
8360Sstevel@tonic-gate {
837*5891Sraf 	extern int __open64(const char *, int, ...);
838*5891Sraf 	mode_t mode;
839*5891Sraf 	int rv;
840*5891Sraf 	va_list ap;
841*5891Sraf 
842*5891Sraf 	va_start(ap, oflag);
843*5891Sraf 	mode = va_arg(ap, mode_t);
844*5891Sraf 	va_end(ap);
845*5891Sraf 	PERFORM(__open64(path, oflag, mode))
846*5891Sraf }
847*5891Sraf 
848*5891Sraf #pragma weak openat64 = _openat64
849*5891Sraf int
850*5891Sraf _openat64(int fd, const char *path, int oflag, ...)
851*5891Sraf {
852*5891Sraf 	extern int __openat64(int, const char *, int, ...);
8530Sstevel@tonic-gate 	mode_t mode;
8540Sstevel@tonic-gate 	int rv;
8550Sstevel@tonic-gate 	va_list ap;
8560Sstevel@tonic-gate 
8570Sstevel@tonic-gate 	va_start(ap, oflag);
8580Sstevel@tonic-gate 	mode = va_arg(ap, mode_t);
8590Sstevel@tonic-gate 	va_end(ap);
860*5891Sraf 	PERFORM(__openat64(fd, path, oflag, mode))
8610Sstevel@tonic-gate }
8620Sstevel@tonic-gate #endif	/* !_LP64 */
8630Sstevel@tonic-gate 
864*5891Sraf #pragma weak pause = _pause
8650Sstevel@tonic-gate int
866*5891Sraf _pause(void)
8670Sstevel@tonic-gate {
868*5891Sraf 	extern int __pause(void);
8690Sstevel@tonic-gate 	int rv;
8700Sstevel@tonic-gate 
871*5891Sraf 	PERFORM(__pause())
8720Sstevel@tonic-gate }
8730Sstevel@tonic-gate 
874*5891Sraf #pragma weak pread = _pread
8750Sstevel@tonic-gate ssize_t
876*5891Sraf _pread(int fildes, void *buf, size_t nbyte, off_t offset)
8770Sstevel@tonic-gate {
878*5891Sraf 	extern ssize_t __pread(int, void *, size_t, off_t);
8790Sstevel@tonic-gate 	ssize_t rv;
8800Sstevel@tonic-gate 
881*5891Sraf 	PERFORM(__pread(fildes, buf, nbyte, offset))
8820Sstevel@tonic-gate }
8830Sstevel@tonic-gate 
8840Sstevel@tonic-gate #if !defined(_LP64)
885*5891Sraf #pragma weak pread64 = _pread64
8860Sstevel@tonic-gate ssize_t
887*5891Sraf _pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
8880Sstevel@tonic-gate {
889*5891Sraf 	extern ssize_t __pread64(int, void *, size_t, off64_t);
8900Sstevel@tonic-gate 	ssize_t rv;
8910Sstevel@tonic-gate 
892*5891Sraf 	PERFORM(__pread64(fildes, buf, nbyte, offset))
8930Sstevel@tonic-gate }
8940Sstevel@tonic-gate #endif	/* !_LP64 */
8950Sstevel@tonic-gate 
896*5891Sraf #pragma weak pwrite = _pwrite
8970Sstevel@tonic-gate ssize_t
898*5891Sraf _pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
8990Sstevel@tonic-gate {
900*5891Sraf 	extern ssize_t __pwrite(int, const void *, size_t, off_t);
9010Sstevel@tonic-gate 	ssize_t rv;
9020Sstevel@tonic-gate 
903*5891Sraf 	PERFORM(__pwrite(fildes, buf, nbyte, offset))
9040Sstevel@tonic-gate }
9050Sstevel@tonic-gate 
9060Sstevel@tonic-gate #if !defined(_LP64)
907*5891Sraf #pragma weak pwrite64 = _pwrite64
9080Sstevel@tonic-gate ssize_t
909*5891Sraf _pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
9100Sstevel@tonic-gate {
911*5891Sraf 	extern ssize_t __pwrite64(int, const void *, size_t, off64_t);
9120Sstevel@tonic-gate 	ssize_t rv;
9130Sstevel@tonic-gate 
914*5891Sraf 	PERFORM(__pwrite64(fildes, buf, nbyte, offset))
9150Sstevel@tonic-gate }
9160Sstevel@tonic-gate #endif	/* !_LP64 */
9170Sstevel@tonic-gate 
918*5891Sraf #pragma weak readv = _readv
9190Sstevel@tonic-gate ssize_t
920*5891Sraf _readv(int fildes, const struct iovec *iov, int iovcnt)
9210Sstevel@tonic-gate {
922*5891Sraf 	extern ssize_t __readv(int, const struct iovec *, int);
9230Sstevel@tonic-gate 	ssize_t rv;
9240Sstevel@tonic-gate 
925*5891Sraf 	PERFORM(__readv(fildes, iov, iovcnt))
9260Sstevel@tonic-gate }
9270Sstevel@tonic-gate 
928*5891Sraf #pragma weak sigpause = _sigpause
9290Sstevel@tonic-gate int
930*5891Sraf _sigpause(int sig)
9310Sstevel@tonic-gate {
932*5891Sraf 	extern int __sigpause(int);
9330Sstevel@tonic-gate 	int rv;
9340Sstevel@tonic-gate 
935*5891Sraf 	PERFORM(__sigpause(sig))
9360Sstevel@tonic-gate }
9370Sstevel@tonic-gate 
9380Sstevel@tonic-gate #pragma weak sigsuspend = _sigsuspend
9390Sstevel@tonic-gate int
9400Sstevel@tonic-gate _sigsuspend(const sigset_t *set)
9410Sstevel@tonic-gate {
9420Sstevel@tonic-gate 	extern int __sigsuspend(const sigset_t *);
9430Sstevel@tonic-gate 	int rv;
9440Sstevel@tonic-gate 
9450Sstevel@tonic-gate 	PROLOGUE_MASK(set)
9460Sstevel@tonic-gate 	rv = __sigsuspend(set);
9470Sstevel@tonic-gate 	EPILOGUE_MASK
9480Sstevel@tonic-gate 	return (rv);
9490Sstevel@tonic-gate }
9500Sstevel@tonic-gate 
9510Sstevel@tonic-gate int
9520Sstevel@tonic-gate _pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
9530Sstevel@tonic-gate 	const sigset_t *sigmask)
9540Sstevel@tonic-gate {
9550Sstevel@tonic-gate 	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
9564843Sraf 	    const sigset_t *);
9570Sstevel@tonic-gate 	int rv;
9580Sstevel@tonic-gate 
9590Sstevel@tonic-gate 	PROLOGUE_MASK(sigmask)
9600Sstevel@tonic-gate 	rv = __pollsys(fds, nfd, timeout, sigmask);
9610Sstevel@tonic-gate 	EPILOGUE_MASK
9620Sstevel@tonic-gate 	return (rv);
9630Sstevel@tonic-gate }
9640Sstevel@tonic-gate 
9652248Sraf #pragma weak sigtimedwait = _sigtimedwait
9660Sstevel@tonic-gate int
9672248Sraf _sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
9680Sstevel@tonic-gate {
9692248Sraf 	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
9704843Sraf 	    const timespec_t *);
9710Sstevel@tonic-gate 	siginfo_t info;
9720Sstevel@tonic-gate 	int sig;
9730Sstevel@tonic-gate 
9740Sstevel@tonic-gate 	PROLOGUE
975*5891Sraf 	if (abort) {
976*5891Sraf 		*self->ul_errnop = EINTR;
9770Sstevel@tonic-gate 		sig = -1;
978*5891Sraf 	} else {
979*5891Sraf 		sig = __sigtimedwait(set, &info, timeout);
980*5891Sraf 		if (sig == SIGCANCEL &&
981*5891Sraf 		    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
982*5891Sraf 			do_sigcancel();
983*5891Sraf 			*self->ul_errnop = EINTR;
984*5891Sraf 			sig = -1;
985*5891Sraf 		}
9860Sstevel@tonic-gate 	}
9870Sstevel@tonic-gate 	EPILOGUE
9880Sstevel@tonic-gate 	if (sig != -1 && infop)
9891111Sraf 		(void) _private_memcpy(infop, &info, sizeof (*infop));
9900Sstevel@tonic-gate 	return (sig);
9910Sstevel@tonic-gate }
9920Sstevel@tonic-gate 
9930Sstevel@tonic-gate #pragma weak sigwait = _sigwait
9940Sstevel@tonic-gate int
9950Sstevel@tonic-gate _sigwait(sigset_t *set)
9960Sstevel@tonic-gate {
9972248Sraf 	return (_sigtimedwait(set, NULL, NULL));
9982248Sraf }
9992248Sraf 
10002248Sraf #pragma weak sigwaitinfo = _sigwaitinfo
10012248Sraf int
10022248Sraf _sigwaitinfo(const sigset_t *set, siginfo_t *info)
10032248Sraf {
10042248Sraf 	return (_sigtimedwait(set, info, NULL));
10052248Sraf }
10062248Sraf 
10072248Sraf #pragma weak sigqueue = _sigqueue
10082248Sraf int
10092248Sraf _sigqueue(pid_t pid, int signo, const union sigval value)
10102248Sraf {
10112248Sraf 	extern int __sigqueue(pid_t pid, int signo,
10124843Sraf 	    /* const union sigval */ void *value, int si_code, int block);
10132248Sraf 	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
10140Sstevel@tonic-gate }
10150Sstevel@tonic-gate 
10160Sstevel@tonic-gate int
1017*5891Sraf _so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version)
10180Sstevel@tonic-gate {
1019*5891Sraf 	extern int __so_accept(int, struct sockaddr *, uint_t *, int);
10200Sstevel@tonic-gate 	int rv;
10210Sstevel@tonic-gate 
1022*5891Sraf 	PERFORM(__so_accept(sock, addr, addrlen, version))
1023*5891Sraf }
1024*5891Sraf 
1025*5891Sraf int
1026*5891Sraf _so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version)
1027*5891Sraf {
1028*5891Sraf 	extern int __so_connect(int, struct sockaddr *, uint_t, int);
1029*5891Sraf 	int rv;
1030*5891Sraf 
1031*5891Sraf 	PERFORM(__so_connect(sock, addr, addrlen, version))
10320Sstevel@tonic-gate }
10330Sstevel@tonic-gate 
1034*5891Sraf int
1035*5891Sraf _so_recv(int sock, void *buf, size_t len, int flags)
10360Sstevel@tonic-gate {
1037*5891Sraf 	extern int __so_recv(int, void *, size_t, int);
1038*5891Sraf 	int rv;
10390Sstevel@tonic-gate 
1040*5891Sraf 	PERFORM(__so_recv(sock, buf, len, flags))
10410Sstevel@tonic-gate }
10420Sstevel@tonic-gate 
1043*5891Sraf int
1044*5891Sraf _so_recvfrom(int sock, void *buf, size_t len, int flags,
1045*5891Sraf     struct sockaddr *addr, int *addrlen)
10460Sstevel@tonic-gate {
1047*5891Sraf 	extern int __so_recvfrom(int, void *, size_t, int,
1048*5891Sraf 	    struct sockaddr *, int *);
1049*5891Sraf 	int rv;
1050*5891Sraf 
1051*5891Sraf 	PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen))
1052*5891Sraf }
10530Sstevel@tonic-gate 
1054*5891Sraf int
1055*5891Sraf _so_recvmsg(int sock, struct msghdr *msg, int flags)
1056*5891Sraf {
1057*5891Sraf 	extern int __so_recvmsg(int, struct msghdr *, int);
1058*5891Sraf 	int rv;
1059*5891Sraf 
1060*5891Sraf 	PERFORM(__so_recvmsg(sock, msg, flags))
10610Sstevel@tonic-gate }
10620Sstevel@tonic-gate 
10630Sstevel@tonic-gate int
1064*5891Sraf _so_send(int sock, const void *buf, size_t len, int flags)
10650Sstevel@tonic-gate {
1066*5891Sraf 	extern int __so_send(int, const void *, size_t, int);
10670Sstevel@tonic-gate 	int rv;
10680Sstevel@tonic-gate 
1069*5891Sraf 	PERFORM(__so_send(sock, buf, len, flags))
1070*5891Sraf }
1071*5891Sraf 
1072*5891Sraf int
1073*5891Sraf _so_sendmsg(int sock, const struct msghdr *msg, int flags)
1074*5891Sraf {
1075*5891Sraf 	extern int __so_sendmsg(int, const struct msghdr *, int);
1076*5891Sraf 	int rv;
1077*5891Sraf 
1078*5891Sraf 	PERFORM(__so_sendmsg(sock, msg, flags))
1079*5891Sraf }
1080*5891Sraf 
1081*5891Sraf int
1082*5891Sraf _so_sendto(int sock, const void *buf, size_t len, int flags,
1083*5891Sraf     const struct sockaddr *addr, int *addrlen)
1084*5891Sraf {
1085*5891Sraf 	extern int __so_sendto(int, const void *, size_t, int,
1086*5891Sraf 	    const struct sockaddr *, int *);
1087*5891Sraf 	int rv;
1088*5891Sraf 
1089*5891Sraf 	PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen))
10900Sstevel@tonic-gate }
10910Sstevel@tonic-gate 
1092*5891Sraf #pragma weak tcdrain = _tcdrain
1093*5891Sraf int
1094*5891Sraf _tcdrain(int fildes)
10950Sstevel@tonic-gate {
1096*5891Sraf 	extern int __tcdrain(int);
1097*5891Sraf 	int rv;
10980Sstevel@tonic-gate 
1099*5891Sraf 	PERFORM(__tcdrain(fildes))
11000Sstevel@tonic-gate }
11010Sstevel@tonic-gate 
1102*5891Sraf #pragma weak waitid = _waitid
1103*5891Sraf int
1104*5891Sraf _waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
1105*5891Sraf {
1106*5891Sraf 	extern int __waitid(idtype_t, id_t, siginfo_t *, int);
1107*5891Sraf 	int rv;
1108*5891Sraf 
1109*5891Sraf 	if (options & WNOHANG)
1110*5891Sraf 		return (__waitid(idtype, id, infop, options));
1111*5891Sraf 	PERFORM(__waitid(idtype, id, infop, options))
1112*5891Sraf }
1113*5891Sraf 
1114*5891Sraf #pragma weak writev = _writev
11150Sstevel@tonic-gate ssize_t
1116*5891Sraf _writev(int fildes, const struct iovec *iov, int iovcnt)
11170Sstevel@tonic-gate {
1118*5891Sraf 	extern ssize_t __writev(int, const struct iovec *, int);
11190Sstevel@tonic-gate 	ssize_t rv;
11200Sstevel@tonic-gate 
1121*5891Sraf 	PERFORM(__writev(fildes, iov, iovcnt))
11220Sstevel@tonic-gate }
1123