/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <sys/uio.h>

/*
 * fork_lock is special -- We can't use lmutex_lock() (and thereby enter
 * a critical region) because the second thread to reach this point would
 * become unstoppable and the first thread would hang waiting for the
 * second thread to stop itself.  Therefore we don't use lmutex_lock() in
 * fork_lock_enter(), but we do defer signals (the other form of concurrency).
 *
 * fork_lock_enter() does triple-duty.  Not only does it serialize
 * calls to fork() and forkall(), but it also serializes calls to
 * thr_suspend() (fork() and forkall() also suspend other threads),
 * and furthermore it serializes I18N calls to functions in other
 * dlopen()ed L10N objects that might be calling malloc()/free().
 */
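
/*
 * Illustrative sketch (hypothetical, not compiled into libc): the calling
 * pattern that the fork_lock_enter()/fork_lock_exit() pair defined below
 * is meant for.  A serialized caller such as thr_suspend() brackets its
 * work with the pair and treats EDEADLK as "called from a fork handler".
 * The function name and the elided work are illustrative only.
 */
#if 0
static int
serialize_with_fork(void)
{
	int error;

	if ((error = fork_lock_enter("thr_suspend")) != 0) {
		/* we were called from a fork handler; report EDEADLK */
		fork_lock_exit();
		return (error);
	}
	/* ... work that must not run concurrently with fork()/forkall() ... */
	fork_lock_exit();
	return (0);
}
#endif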

static void
fork_lock_error(const char *who)
{
	char msg[200];

	(void) strlcpy(msg, "deadlock condition: ", sizeof (msg));
	(void) strlcat(msg, who, sizeof (msg));
	(void) strlcat(msg, "() called from a fork handler", sizeof (msg));
	thread_error(msg);
}

int
fork_lock_enter(const char *who)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int error = 0;

	ASSERT(self->ul_critical == 0);
	sigoff(self);
	(void) _private_mutex_lock(&udp->fork_lock);
	while (udp->fork_count) {
		if (udp->fork_owner == self) {
			/*
			 * This is like a recursive lock except that we
			 * inform the caller if we have been called from
			 * a fork handler and let it deal with that fact.
			 */
			if (self->ul_fork) {
				/*
				 * We have been called from a fork handler.
				 */
				if (who != NULL &&
				    udp->uberflags.uf_thread_error_detection)
					fork_lock_error(who);
				error = EDEADLK;
			}
			break;
		}
		ASSERT(self->ul_fork == 0);
		(void) _cond_wait(&udp->fork_cond, &udp->fork_lock);
	}
	udp->fork_owner = self;
	udp->fork_count++;
	(void) _private_mutex_unlock(&udp->fork_lock);
	return (error);
}

void
fork_lock_exit(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(self->ul_critical == 0);
	(void) _private_mutex_lock(&udp->fork_lock);
	ASSERT(udp->fork_count != 0 && udp->fork_owner == self);
	if (--udp->fork_count == 0) {
		udp->fork_owner = NULL;
		(void) _cond_signal(&udp->fork_cond);
	}
	(void) _private_mutex_unlock(&udp->fork_lock);
	sigon(self);
}

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork = _fork1
#pragma weak _fork = _fork1
#pragma weak fork1 = _fork1
pid_t
_fork1(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __fork1();
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("fork")) != 0) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal libc locks while invoking them.  The fork_lock_enter()
	 * function serializes fork(), thr_suspend(), pthread_atfork() and
	 * dlclose() (which destroys whatever pthread_atfork() functions
	 * the library may have set up).  If one of these pthread_atfork()
	 * functions attempts to fork or suspend another thread or call
	 * pthread_atfork() or dlclose a library, it will detect a deadlock
	 * in fork_lock_enter().  Otherwise, the pthread_atfork() functions
	 * are free to do anything they please (except they will not
	 * receive any signals).
	 */
	_prefork_handler();

	/*
	 * Block all signals.
	 * Just deferring them via sigon() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __fork1().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no library locks are held
	 * while we invoke fork1() from the current thread.
	 */
	(void) _private_mutex_lock(&udp->fork_lock);
	suspend_fork();
	(void) _private_mutex_unlock(&udp->fork_lock);

	pid = __fork1();

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __fork1(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		_postfork1_child();
		restore_signals(self);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork1() */
		continue_fork(0);
		restore_signals(self);
		_postfork_parent_handler();
	}

	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}
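
/*
 * Hypothetical application-level sketch (not part of libc) of the kind of
 * pthread_atfork() handlers that _prefork_handler(), _postfork_parent_handler()
 * and _postfork_child_handler() invoke around __fork1() above: a library
 * keeps its own lock consistent across fork() by acquiring it in the prepare
 * handler and releasing it in both the parent and the child handlers.
 * The names app_lock, app_prepare, app_release and app_init are made up.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t app_lock = PTHREAD_MUTEX_INITIALIZER;

static void
app_prepare(void)
{
	(void) pthread_mutex_lock(&app_lock);	/* runs just before fork() */
}

static void
app_release(void)
{
	(void) pthread_mutex_unlock(&app_lock);	/* runs in parent and child */
}

static void
app_init(void)		/* call once, e.g. from pthread_once() */
{
	(void) pthread_atfork(app_prepare, app_release, app_release);
}
#endif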

/*
 * Much of the logic here is the same as in fork1().
 * See the comments in fork1(), above.
 */
#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkall();
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("forkall")) != 0) {
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;
	block_all_signals(self);
	suspend_fork();

	pid = __forkall();

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}

/*
 * Hacks for system calls to provide cancellation
 * and improve java garbage collection.
 */
#define	PROLOGUE						\
{								\
	ulwp_t *self = curthread;				\
	int nocancel = (self->ul_vfork | self->ul_nocancel);	\
	if (nocancel == 0) {					\
		self->ul_save_async = self->ul_cancel_async;	\
		if (!self->ul_cancel_disabled) {		\
			self->ul_cancel_async = 1;		\
			if (self->ul_cancel_pending)		\
				_pthread_exit(PTHREAD_CANCELED);\
		}						\
		self->ul_sp = stkptr();				\
	}

#define	EPILOGUE						\
	if (nocancel == 0) {					\
		self->ul_sp = 0;				\
		self->ul_cancel_async = self->ul_save_async;	\
	}							\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define	PERFORM(function_call)					\
	PROLOGUE						\
	if (nocancel)						\
		return (function_call);				\
	rv = function_call;					\
	EPILOGUE						\
	return (rv);
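
/*
 * For illustration only (not compiled): roughly what PERFORM() expands to
 * in the read() wrapper further below, with the two macros above written
 * out by hand.  The cancellation bookkeeping brackets the call to the
 * underlying _read() system call trap.
 */
#if 0
{
	ulwp_t *self = curthread;
	int nocancel = (self->ul_vfork | self->ul_nocancel);
	if (nocancel == 0) {
		/* entering a cancellation point: enable async cancellation */
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	}
	if (nocancel)
		return (_read(fd, buf, size));
	rv = _read(fd, buf, size);
	if (nocancel == 0) {
		/* leaving the cancellation point: restore previous state */
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}
return (rv);
#endif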

/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)					\
{								\
	ulwp_t *self = curthread;				\
	int nocancel = (self->ul_vfork | self->ul_nocancel);	\
	if (!self->ul_vfork) {					\
		if (sigmask) {					\
			block_all_signals(self);		\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask); \
			self->ul_sigsuspend = 1;		\
		}						\
		if (nocancel == 0) {				\
			self->ul_save_async = self->ul_cancel_async; \
			if (!self->ul_cancel_disabled) {	\
				self->ul_cancel_async = 1;	\
				if (self->ul_cancel_pending) {	\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}			\
					_pthread_exit(PTHREAD_CANCELED);\
				}				\
			}					\
			self->ul_sp = stkptr();			\
		}						\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK						\
	if (nocancel == 0) {					\
		self->ul_sp = 0;				\
		self->ul_cancel_async = self->ul_save_async;	\
	}							\
	if (self->ul_sigsuspend) {				\
		self->ul_sigsuspend = 0;			\
		restore_signals(self);				\
	}							\
}
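
/*
 * Hypothetical application-level sketch (not part of libc) of what the
 * sigsuspend() wrapper below has to get right: the temporary mask must be
 * installed and the thread put to sleep atomically, or a signal posted
 * between the test of the flag and the call could be lost.  The signal
 * choice, flag and function names are illustrative only.
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t posted;

static void
handler(int sig)
{
	posted = 1;
}

static void
wait_for_sigusr1(void)
{
	struct sigaction sa;
	sigset_t block, oldmask, waitmask;

	sa.sa_handler = handler;
	sa.sa_flags = 0;
	(void) sigemptyset(&sa.sa_mask);
	(void) sigaction(SIGUSR1, &sa, NULL);

	(void) sigemptyset(&block);
	(void) sigaddset(&block, SIGUSR1);
	(void) sigprocmask(SIG_BLOCK, &block, &oldmask);

	waitmask = oldmask;
	(void) sigdelset(&waitmask, SIGUSR1);
	while (!posted)
		(void) sigsuspend(&waitmask);	/* atomic mask swap + wait */

	(void) sigprocmask(SIG_SETMASK, &oldmask, NULL);
}
#endif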

/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	while ((error = __lwp_wait(tid, found)) == EINTR)
		;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;

	PERFORM(_read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t _write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(_write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int _getpmsg(int, struct strbuf *, struct strbuf *,
		int *, int *);
	int rv;

	PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
		const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
		const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
		const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
		const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

int
__nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = ___nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

int
__clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = ___nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
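
/*
 * Hypothetical caller-side sketch (not part of libc) of the interface
 * implemented by __clock_nanosleep() above: sleeping until an absolute
 * CLOCK_REALTIME deadline with TIMER_ABSTIME and simply reissuing the
 * request after EINTR, since an absolute deadline does not need to be
 * recomputed.  The function name sleep_until() is made up.
 */
#if 0
#include <time.h>
#include <errno.h>

static int
sleep_until(const struct timespec *deadline)
{
	int error;

	do {
		error = clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
		    deadline, NULL);
	} while (error == EINTR);
	return (error);		/* 0 on success, otherwise an errno value */
}
#endif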

#pragma weak sleep = _sleep
unsigned int
_sleep(unsigned int sec)
{
	unsigned int rem = 0;
	int error;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	PROLOGUE
	error = ___nanosleep(&ts, &tsr);
	EPILOGUE
	if (error == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

#pragma weak usleep = _usleep
int
_usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	PROLOGUE
	(void) ___nanosleep(&ts, NULL);
	EPILOGUE
	return (0);
}

int
close(int fildes)
{
	extern int _close(int);
	int rv;

	PERFORM(_close(fildes))
}

int
creat(const char *path, mode_t mode)
{
	extern int _creat(const char *, mode_t);
	int rv;

	PERFORM(_creat(path, mode))
}

#if !defined(_LP64)
int
creat64(const char *path, mode_t mode)
{
	extern int _creat64(const char *, mode_t);
	int rv;

	PERFORM(_creat64(path, mode))
}
#endif	/* !_LP64 */

int
fcntl(int fildes, int cmd, ...)
{
	extern int _fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (_fcntl(fildes, cmd, arg));
	PERFORM(_fcntl(fildes, cmd, arg))
}

int
fsync(int fildes)
{
	extern int _fsync(int);
	int rv;

	PERFORM(_fsync(fildes))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int _lockf(int, int, off_t);
	int rv;

	PERFORM(_lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int _lockf64(int, int, off64_t);
	int rv;

	PERFORM(_lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t _msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int _msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int _msync(caddr_t, size_t, int);
	int rv;

	PERFORM(_msync(addr, len, flags))
}

int
open(const char *path, int oflag, ...)
{
	extern int _open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open(path, oflag, mode))
}

#if !defined(_LP64)
int
open64(const char *path, int oflag, ...)
{
	extern int _open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open64(path, oflag, mode))
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int _pause(void);
	int rv;

	PERFORM(_pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int _sigpause(int);
	int rv;

	PERFORM(_sigpause(sig))
}

#pragma weak sigsuspend = _sigsuspend
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
		const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

int
__sigtimedwait(const sigset_t *set, siginfo_t *infop,
	const timespec_t *timeout)
{
	extern int ___sigtimedwait(const sigset_t *, siginfo_t *,
		const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	sig = ___sigtimedwait(set, &info, timeout);
	if (sig == SIGCANCEL &&
	    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
		do_sigcancel();
		errno = EINTR;
		sig = -1;
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) _private_memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

#pragma weak sigwait = _sigwait
int
_sigwait(sigset_t *set)
{
	return (__sigtimedwait(set, NULL, NULL));
}

int
tcdrain(int fildes)
{
	extern int _tcdrain(int);
	int rv;

	PERFORM(_tcdrain(fildes))
}

pid_t
wait(int *stat_loc)
{
	extern pid_t _wait(int *);
	pid_t rv;

	PERFORM(_wait(stat_loc))
}

pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
	extern pid_t _wait3(int *, int, struct rusage *);
	pid_t rv;

	PERFORM(_wait3(statusp, options, rusage))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int _waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	PERFORM(_waitid(idtype, id, infop, options))
}

pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
	extern pid_t _waitpid(pid_t, int *, int);
	pid_t rv;

	PERFORM(_waitpid(pid, stat_loc, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_writev(fildes, iov, iovcnt))
}