/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <sys/uio.h>

/*
 * fork_lock is special -- We can't use lmutex_lock() (and thereby enter
 * a critical region) because the second thread to reach this point would
 * become unstoppable and the first thread would hang waiting for the
 * second thread to stop itself.  Therefore we don't use lmutex_lock() in
 * fork_lock_enter(), but we do defer signals (the other form of concurrency).
 *
 * fork_lock_enter() does triple-duty.  Not only does it serialize
 * calls to fork() and forkall(), but it also serializes calls to
 * thr_suspend() (fork() and forkall() also suspend other threads),
 * and furthermore it serializes I18N calls to functions in other
 * dlopen()ed L10N objects that might be calling malloc()/free().
 */

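/*
 * Illustrative sketch (not part of this file) of the calling pattern that
 * fork_lock_enter()/fork_lock_exit() impose on their internal users, such
 * as the fork() and forkall() wrappers below.  The EDEADLK return is only
 * advisory: the lock is held either way and must still be released.
 *
 *	int error;
 *
 *	if ((error = fork_lock_enter("caller")) != 0) {
 *		fork_lock_exit();	(recursive entry from a fork handler)
 *		errno = error;
 *		return (-1);
 *	}
 *	... serialized work: fork, suspend threads, call dlopen()ed code ...
 *	fork_lock_exit();
 *
 * The string "caller" is a placeholder for the name reported by
 * fork_lock_error() when thread error detection is enabled.
 */
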
static void
fork_lock_error(const char *who)
{
	char msg[200];

	(void) strlcpy(msg, "deadlock condition: ", sizeof (msg));
	(void) strlcat(msg, who, sizeof (msg));
	(void) strlcat(msg, "() called from a fork handler", sizeof (msg));
	thread_error(msg);
}

int
fork_lock_enter(const char *who)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int error = 0;

	ASSERT(self->ul_critical == 0);
	sigoff(self);
	(void) _private_mutex_lock(&udp->fork_lock);
	if (udp->fork_count) {
		ASSERT(udp->fork_owner == self);
		/*
		 * This is a simple recursive lock except that we
		 * inform the caller if we have been called from
		 * a fork handler and let it deal with that fact.
		 */
		if (self->ul_fork) {
			/*
			 * We have been called from a fork handler.
			 */
			if (who != NULL &&
			    udp->uberflags.uf_thread_error_detection)
				fork_lock_error(who);
			error = EDEADLK;
		}
	}
	udp->fork_owner = self;
	udp->fork_count++;
	return (error);
}

void
fork_lock_exit(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(self->ul_critical == 0);
	ASSERT(udp->fork_count != 0 && udp->fork_owner == self);
	if (--udp->fork_count == 0)
		udp->fork_owner = NULL;
	(void) _private_mutex_unlock(&udp->fork_lock);
	sigon(self);
}

/*
 * Note: Instead of making this function static, we reduce it to local
 * scope in the mapfile.  That allows the linker to prevent it from
 * appearing in the .SUNW_dynsymsort section.
 */
#pragma weak forkx = _private_forkx
#pragma weak _forkx = _private_forkx
pid_t
_private_forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("fork")) != 0) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal libc locks while invoking them.  The fork_lock_enter()
	 * function serializes fork(), thr_suspend(), pthread_atfork() and
	 * dlclose() (which destroys whatever pthread_atfork() functions
	 * the library may have set up).  If one of these pthread_atfork()
	 * functions attempts to fork or suspend another thread or call
	 * pthread_atfork() or dlclose a library, it will detect a deadlock
	 * in fork_lock_enter().  Otherwise, the pthread_atfork() functions
	 * are free to do anything they please (except they will not
	 * receive any signals).
	 */
	_prefork_handler();

	/*
	 * Block all signals.
	 * Just deferring them via sigon() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no library locks are held
	 * while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		postfork1_child();
		restore_signals(self);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		_postfork_parent_handler();
	}

	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = _fork
#pragma weak _fork1 = _fork
#pragma weak fork = _fork
pid_t
_fork(void)
{
	return (_private_forkx(0));
}

/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
#pragma weak forkallx = _private_forkallx
#pragma weak _forkallx = _private_forkallx
pid_t
_private_forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("forkall")) != 0) {
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}

#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
	return (_private_forkallx(0));
}

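/*
 * Illustrative example (application code, not part of libc): the
 * pthread_atfork() handlers described in the forkx() comments above run
 * with no internal libc locks held, but they are serialized by fork_lock
 * and must not call fork(), thr_suspend(), pthread_atfork() or dlclose(),
 * or they will trip the deadlock detection in fork_lock_enter().  A
 * typical, legitimate use is keeping an application lock consistent
 * across fork():
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t app_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void prepare(void) { (void) pthread_mutex_lock(&app_lock); }
 *	static void parent(void) { (void) pthread_mutex_unlock(&app_lock); }
 *	static void child(void) { (void) pthread_mutex_unlock(&app_lock); }
 *
 *	(void) pthread_atfork(prepare, parent, child);
 *
 * The handler and lock names are hypothetical; only the ordering
 * guarantees come from the code above.
 */
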
/*
 * Hacks for system calls to provide cancellation
 * and improve java garbage collection.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel = (self->ul_vfork | self->ul_nocancel);		\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				_pthread_exit(PTHREAD_CANCELED);	\
		}							\
		self->ul_sp = stkptr();					\
	}

#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);

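/*
 * A loose illustration of how PERFORM() is used and what it amounts to
 * for a typical wrapper (hand-expanded and simplified here; the
 * authoritative text is the macros above):
 *
 *	ssize_t
 *	read(int fd, void *buf, size_t size)
 *	{
 *		extern ssize_t _read(int, void *, size_t);
 *		ssize_t rv;
 *
 *		PERFORM(_read(fd, buf, size))
 *	}
 *
 * which behaves roughly as: run PROLOGUE (possibly exiting on a pending
 * cancellation), tail-call _read() when cancellation is not in play,
 * otherwise call _read(), run EPILOGUE, and return the saved value.
 */
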
/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel = (self->ul_vfork | self->ul_nocancel);		\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					_pthread_exit(PTHREAD_CANCELED);\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}

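/*
 * The race these macros preserve protection against is the classic one
 * that sigsuspend(2) exists to solve at the application level.  A
 * hypothetical sketch (application code, not libc) of the usual pattern;
 * sigusr1_seen is assumed to be set by a SIGUSR1 handler not shown here:
 *
 *	#include <signal.h>
 *
 *	volatile sig_atomic_t sigusr1_seen;
 *	sigset_t block, orig;
 *
 *	(void) sigemptyset(&block);
 *	(void) sigaddset(&block, SIGUSR1);
 *	(void) sigprocmask(SIG_BLOCK, &block, &orig);
 *	while (!sigusr1_seen)
 *		(void) sigsuspend(&orig);	(atomically unblock and wait)
 *	(void) sigprocmask(SIG_SETMASK, &orig, NULL);
 *
 * PROLOGUE_MASK()/EPILOGUE_MASK() preserve that atomicity inside libc:
 * all signals stay blocked until the kernel installs the temporary mask.
 */
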
/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue = (self->ul_vfork | self->ul_nocancel);
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}

/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	while ((error = __lwp_wait(tid, found)) == EINTR)
		;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;

	PERFORM(_read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t _write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(_write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int _getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

#pragma weak nanosleep = _nanosleep
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

#pragma weak clock_nanosleep = _clock_nanosleep
int
_clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}

#pragma weak sleep = _sleep
unsigned int
_sleep(unsigned int sec)
{
	unsigned int rem = 0;
	int error;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	PROLOGUE
	error = __nanosleep(&ts, &tsr);
	EPILOGUE
	if (error == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

#pragma weak usleep = _usleep
int
_usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	PROLOGUE
	(void) __nanosleep(&ts, NULL);
	EPILOGUE
	return (0);
}

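/*
 * Illustrative application-level use of the sleep family above (not part
 * of libc): restarting nanosleep() after EINTR using the remaining time
 * that the wrapper passes back through rmtp, and an absolute-time sleep
 * via clock_nanosleep(), whose restart-on-clock-reset behavior is handled
 * internally by _clock_nanosleep() above.
 *
 *	#include <time.h>
 *	#include <errno.h>
 *
 *	timespec_t req = { 5, 0 };
 *	timespec_t rem;
 *	timespec_t deadline;
 *
 *	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
 *		req = rem;	(continue with whatever time is left)
 *
 *	(void) clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;
 *	while (clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
 *	    &deadline, NULL) == EINTR)
 *		continue;	(clock_nanosleep returns the error number)
 */
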
int
close(int fildes)
{
	extern void _aio_close(int);
	extern int _close(int);
	int rv;

	_aio_close(fildes);
	PERFORM(_close(fildes))
}

int
creat(const char *path, mode_t mode)
{
	extern int _creat(const char *, mode_t);
	int rv;

	PERFORM(_creat(path, mode))
}

#if !defined(_LP64)
int
creat64(const char *path, mode_t mode)
{
	extern int _creat64(const char *, mode_t);
	int rv;

	PERFORM(_creat64(path, mode))
}
#endif	/* !_LP64 */

int
fcntl(int fildes, int cmd, ...)
{
	extern int _fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (_fcntl(fildes, cmd, arg));
	PERFORM(_fcntl(fildes, cmd, arg))
}

int
fsync(int fildes)
{
	extern int _fsync(int);
	int rv;

	PERFORM(_fsync(fildes))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int _lockf(int, int, off_t);
	int rv;

	PERFORM(_lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int _lockf64(int, int, off64_t);
	int rv;

	PERFORM(_lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t _msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int _msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int _msync(caddr_t, size_t, int);
	int rv;

	PERFORM(_msync(addr, len, flags))
}

int
open(const char *path, int oflag, ...)
{
	extern int _open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open(path, oflag, mode))
}

#if !defined(_LP64)
int
open64(const char *path, int oflag, ...)
{
	extern int _open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open64(path, oflag, mode))
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int _pause(void);
	int rv;

	PERFORM(_pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int _sigpause(int);
	int rv;

	PERFORM(_sigpause(sig))
}

#pragma weak sigsuspend = _sigsuspend
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

#pragma weak sigtimedwait = _sigtimedwait
int
_sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	sig = __sigtimedwait(set, &info, timeout);
	if (sig == SIGCANCEL &&
	    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
		do_sigcancel();
		errno = EINTR;
		sig = -1;
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) _private_memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

#pragma weak sigwait = _sigwait
int
_sigwait(sigset_t *set)
{
	return (_sigtimedwait(set, NULL, NULL));
}

#pragma weak sigwaitinfo = _sigwaitinfo
int
_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (_sigtimedwait(set, info, NULL));
}

#pragma weak sigqueue = _sigqueue
int
_sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}

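/*
 * Illustrative use (application code, not libc) of the sigwait family
 * wrapped above: a dedicated signal-handling thread, with the signals of
 * interest blocked in every thread so they are consumed only here.  The
 * thread function and dispatch() helper are hypothetical.
 *
 *	#include <signal.h>
 *	#include <pthread.h>
 *
 *	static void *
 *	sig_thread(void *arg)
 *	{
 *		sigset_t set = *(sigset_t *)arg;
 *		siginfo_t info;
 *		int sig;
 *
 *		for (;;) {
 *			if ((sig = sigwaitinfo(&set, &info)) != -1)
 *				dispatch(sig, &info);
 *		}
 *		return (NULL);
 *	}
 */
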
int
tcdrain(int fildes)
{
	extern int _tcdrain(int);
	int rv;

	PERFORM(_tcdrain(fildes))
}

pid_t
wait(int *stat_loc)
{
	extern pid_t _wait(int *);
	pid_t rv;

	PERFORM(_wait(stat_loc))
}

pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
	extern pid_t _wait3(int *, int, struct rusage *);
	pid_t rv;

	PERFORM(_wait3(statusp, options, rusage))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int _waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	PERFORM(_waitid(idtype, id, infop, options))
}

/*
 * waitpid_cancel() is a libc-private symbol for internal use
 * where cancellation semantics is desired (see system()).
 */
#pragma weak waitpid_cancel = waitpid
pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
	extern pid_t _waitpid(pid_t, int *, int);
	pid_t rv;

	PERFORM(_waitpid(pid, stat_loc, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_writev(fildes, iov, iovcnt))
}