xref: /onnv-gate/usr/src/lib/libc/port/threads/scalls.c (revision 0:68f95e015346)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <sys/uio.h>

/*
 * fork_lock is special -- We can't use lmutex_lock() (and thereby enter
 * a critical region) because the second thread to reach this point would
 * become unstoppable and the first thread would hang waiting for the
 * second thread to stop itself.  Therefore we don't use lmutex_lock() in
 * fork_lock_enter(), but we do defer signals (the other form of concurrency).
 *
 * fork_lock_enter() does triple-duty.  Not only does it serialize
 * calls to fork() and forkall(), but it also serializes calls to
 * thr_suspend() (fork() and forkall() also suspend other threads),
 * and furthermore it serializes I18N calls to functions in other
 * dlopen()ed L10N objects that might be calling malloc()/free().
 */

static void
fork_lock_error(const char *who)
{
	char msg[200];

	(void) strlcpy(msg, "deadlock condition: ", sizeof (msg));
	(void) strlcat(msg, who, sizeof (msg));
	(void) strlcat(msg, "() called from a fork handler", sizeof (msg));
	thread_error(msg);
}

int
fork_lock_enter(const char *who)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int error = 0;

	ASSERT(self->ul_critical == 0);
	sigoff(self);
	(void) _private_mutex_lock(&udp->fork_lock);
	while (udp->fork_count) {
		if (udp->fork_owner == self) {
			/*
			 * This is like a recursive lock except that we
			 * inform the caller if we have been called from
			 * a fork handler and let it deal with that fact.
			 */
			if (self->ul_fork) {
				/*
				 * We have been called from a fork handler.
				 */
				if (who != NULL &&
				    udp->uberflags.uf_thread_error_detection)
					fork_lock_error(who);
				error = EDEADLK;
			}
			break;
		}
		ASSERT(self->ul_fork == 0);
		(void) _cond_wait(&udp->fork_cond, &udp->fork_lock);
	}
	udp->fork_owner = self;
	udp->fork_count++;
	(void) _private_mutex_unlock(&udp->fork_lock);
	return (error);
}

void
fork_lock_exit(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(self->ul_critical == 0);
	(void) _private_mutex_lock(&udp->fork_lock);
	ASSERT(udp->fork_count != 0 && udp->fork_owner == self);
	if (--udp->fork_count == 0) {
		udp->fork_owner = NULL;
		(void) _cond_signal(&udp->fork_cond);
	}
	(void) _private_mutex_unlock(&udp->fork_lock);
	sigon(self);
}
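
/*
 * Illustrative sketch (not part of the original file): the typical calling
 * pattern for fork_lock_enter()/fork_lock_exit(), as it might appear in a
 * libc consumer such as thr_suspend() or the I18N code.  The function name
 * do_serialized_work() is hypothetical and exists only for the example; the
 * error handling mirrors the way _fork1() below uses the same interfaces.
 */
#if 0
static int
do_serialized_work(void)
{
	int error;

	if ((error = fork_lock_enter("do_serialized_work")) != 0) {
		/* called from a fork handler; bail out to avoid deadlock */
		fork_lock_exit();
		return (error);
	}
	/* ... work that must not run concurrently with fork() ... */
	fork_lock_exit();
	return (0);
}
#endif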

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork = _fork1
#pragma weak _fork = _fork1
#pragma weak fork1 = _fork1
pid_t
_fork1(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __fork1();
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("fork")) != 0) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal libc locks while invoking them.  The fork_lock_enter()
	 * function serializes fork(), thr_suspend(), pthread_atfork() and
	 * dlclose() (which destroys whatever pthread_atfork() functions
	 * the library may have set up).  If one of these pthread_atfork()
	 * functions attempts to fork or suspend another thread or call
	 * pthread_atfork() or dlclose a library, it will detect a deadlock
	 * in fork_lock_enter().  Otherwise, the pthread_atfork() functions
	 * are free to do anything they please (except they will not
	 * receive any signals).
	 */
	_prefork_handler();

	/*
	 * Block all signals.
	 * Just deferring them (as fork_lock_enter() did via sigoff())
	 * is not enough.  We have to avoid taking a deferred signal
	 * in the child that was actually sent to the parent before
	 * __fork1().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no library locks are held
	 * while we invoke fork1() from the current thread.
	 */
	(void) _private_mutex_lock(&udp->fork_lock);
	suspend_fork();
	(void) _private_mutex_unlock(&udp->fork_lock);

	pid = __fork1();

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __fork1(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		_postfork1_child();
		restore_signals(self);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork1() */
		continue_fork(0);
		restore_signals(self);
		_postfork_parent_handler();
	}

	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}
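
/*
 * Illustrative sketch (not from the original source): an application-level
 * pthread_atfork() prepare handler that calls fork() itself, showing the
 * deadlock detection described in the pthread_atfork() comment in _fork1()
 * above.  The nested fork() fails with EDEADLK rather than hanging, and if
 * thread error detection is enabled, fork_lock_error() reports the
 * "deadlock condition" message.  The function names below are hypothetical.
 */
#if 0
#include <pthread.h>
#include <unistd.h>
#include <errno.h>

static void
prepare_handler(void)
{
	/* runs while _fork1() holds the fork lock, with ul_fork set */
	if (fork() == -1 && errno == EDEADLK)
		(void) write(2, "nested fork detected\n", 21);
}

static void
register_example_handlers(void)
{
	(void) pthread_atfork(prepare_handler, NULL, NULL);
}
#endif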

/*
 * Much of the logic here is the same as in fork1().
 * See the comments in fork1(), above.
 */
#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkall();
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("forkall")) != 0) {
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;
	block_all_signals(self);
	suspend_fork();

	pid = __forkall();

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}

/*
 * Hacks for system calls to provide cancellation
 * and improve java garbage collection.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel = (self->ul_vfork | self->ul_nocancel);		\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				_pthread_exit(PTHREAD_CANCELED);	\
		}							\
		self->ul_sp = stkptr();					\
	}

#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return (function_call) part is to allow the
 * compiler to emit the call as a tail call, which saves a register
 * window on sparc and slightly (not much) improves the code for
 * x86/x64 compilations.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);
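
/*
 * Illustrative sketch (not from the original source): approximately what a
 * wrapper such as close() looks like after PROLOGUE, PERFORM() and EPILOGUE
 * are expanded by hand.  The name close_expanded() is hypothetical; it
 * exists only to show the shape of the generated code and why the final
 * return can be emitted as a tail call when cancellation is not in play.
 */
#if 0
int
close_expanded(int fildes)
{
	extern int _close(int);
	int rv;
	ulwp_t *self = curthread;
	int nocancel = (self->ul_vfork | self->ul_nocancel);

	if (nocancel == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	}
	if (nocancel)			/* no epilogue needed; tail call */
		return (_close(fildes));
	rv = _close(fildes);
	if (nocancel == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
	return (rv);
}
#endif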

/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel = (self->ul_vfork | self->ul_nocancel);		\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask = *sigmask;			\
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					_pthread_exit(PTHREAD_CANCELED);\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourselves.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}

/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	while ((error = __lwp_wait(tid, found)) == EINTR)
		;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;

	PERFORM(_read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t _write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(_write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int _getpmsg(int, struct strbuf *, struct strbuf *,
		int *, int *);
	int rv;

	PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
		const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
		const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
		const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
		const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

int
__nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = ___nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

int
__clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = ___nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
				rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
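
/*
 * Illustrative sketch (not from the original source): an application-level
 * use of the absolute-time behavior that __clock_nanosleep() implements
 * above.  Sleeping until an absolute CLOCK_REALTIME deadline and retrying
 * on EINTR cannot oversleep, because the same deadline (rather than a
 * remaining interval) is passed back in on each retry.  sleep_until() is
 * a hypothetical name used only for the example.
 */
#if 0
#include <time.h>
#include <errno.h>

static void
sleep_until(const struct timespec *deadline)
{
	int error;

	do {
		error = clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
		    deadline, NULL);
	} while (error == EINTR);
}
#endif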

#pragma weak sleep = _sleep
unsigned int
_sleep(unsigned int sec)
{
	unsigned int rem = 0;
	int error;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	PROLOGUE
	error = ___nanosleep(&ts, &tsr);
	EPILOGUE
	if (error == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

#pragma weak usleep = _usleep
int
_usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	PROLOGUE
	(void) ___nanosleep(&ts, NULL);
	EPILOGUE
	return (0);
}

int
close(int fildes)
{
	extern int _close(int);
	int rv;

	PERFORM(_close(fildes))
}

int
creat(const char *path, mode_t mode)
{
	extern int _creat(const char *, mode_t);
	int rv;

	PERFORM(_creat(path, mode))
}

#if !defined(_LP64)
int
creat64(const char *path, mode_t mode)
{
	extern int _creat64(const char *, mode_t);
	int rv;

	PERFORM(_creat64(path, mode))
}
#endif	/* !_LP64 */

int
fcntl(int fildes, int cmd, ...)
{
	extern int _fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (_fcntl(fildes, cmd, arg));
	PERFORM(_fcntl(fildes, cmd, arg))
}

int
fsync(int fildes)
{
	extern int _fsync(int);
	int rv;

	PERFORM(_fsync(fildes))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int _lockf(int, int, off_t);
	int rv;

	PERFORM(_lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int _lockf64(int, int, off64_t);
	int rv;

	PERFORM(_lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t _msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int _msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int _msync(caddr_t, size_t, int);
	int rv;

	PERFORM(_msync(addr, len, flags))
}

int
open(const char *path, int oflag, ...)
{
	extern int _open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open(path, oflag, mode))
}

#if !defined(_LP64)
int
open64(const char *path, int oflag, ...)
{
	extern int _open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open64(path, oflag, mode))
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int _pause(void);
	int rv;

	PERFORM(_pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int _sigpause(int);
	int rv;

	PERFORM(_sigpause(sig))
}

#pragma weak sigsuspend = _sigsuspend
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
		const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

int
__sigtimedwait(const sigset_t *set, siginfo_t *infop,
	const timespec_t *timeout)
{
	extern int ___sigtimedwait(const sigset_t *, siginfo_t *,
		const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	sig = ___sigtimedwait(set, &info, timeout);
	if (sig == SIGCANCEL &&
	    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
		do_sigcancel();
		errno = EINTR;
		sig = -1;
	}
	EPILOGUE
	if (sig != -1 && infop)
		*infop = info;
	return (sig);
}

#pragma weak sigwait = _sigwait
int
_sigwait(sigset_t *set)
{
	return (__sigtimedwait(set, NULL, NULL));
}

int
tcdrain(int fildes)
{
	extern int _tcdrain(int);
	int rv;

	PERFORM(_tcdrain(fildes))
}

pid_t
wait(int *stat_loc)
{
	extern pid_t _wait(int *);
	pid_t rv;

	PERFORM(_wait(stat_loc))
}

pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
	extern pid_t _wait3(int *, int, struct rusage *);
	pid_t rv;

	PERFORM(_wait3(statusp, options, rusage))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int _waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	PERFORM(_waitid(idtype, id, infop, options))
}

pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
	extern pid_t _waitpid(pid_t, int *, int);
	pid_t rv;

	PERFORM(_waitpid(pid, stat_loc, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_writev(fildes, iov, iovcnt))
}