/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <sys/uio.h>

/*
 * fork_lock is special -- We can't use lmutex_lock() (and thereby enter
 * a critical region) because the second thread to reach this point would
 * become unstoppable and the first thread would hang waiting for the
 * second thread to stop itself.  Therefore we don't use lmutex_lock() in
 * fork_lock_enter(), but we do defer signals (the other form of concurrency).
 *
 * fork_lock_enter() does triple-duty.  Not only does it serialize
 * calls to fork() and forkall(), but it also serializes calls to
 * thr_suspend() (fork() and forkall() also suspend other threads),
 * and furthermore it serializes I18N calls to functions in other
 * dlopen()ed L10N objects that might be calling malloc()/free().
 */

static void
fork_lock_error(const char *who)
{
	char msg[200];

	(void) strlcpy(msg, "deadlock condition: ", sizeof (msg));
	(void) strlcat(msg, who, sizeof (msg));
	(void) strlcat(msg, "() called from a fork handler", sizeof (msg));
	thread_error(msg);
}

int
fork_lock_enter(const char *who)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int error = 0;

	ASSERT(self->ul_critical == 0);
	sigoff(self);
	(void) _private_mutex_lock(&udp->fork_lock);
	while (udp->fork_count) {
		if (udp->fork_owner == self) {
			/*
			 * This is like a recursive lock except that we
			 * inform the caller if we have been called from
			 * a fork handler and let it deal with that fact.
			 */
			if (self->ul_fork) {
				/*
				 * We have been called from a fork handler.
				 */
				if (who != NULL &&
				    udp->uberflags.uf_thread_error_detection)
					fork_lock_error(who);
				error = EDEADLK;
			}
			break;
		}
		ASSERT(self->ul_fork == 0);
		(void) _cond_wait(&udp->fork_cond, &udp->fork_lock);
	}
	udp->fork_owner = self;
	udp->fork_count++;
	(void) _private_mutex_unlock(&udp->fork_lock);
	return (error);
}

void
fork_lock_exit(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(self->ul_critical == 0);
	(void) _private_mutex_lock(&udp->fork_lock);
	ASSERT(udp->fork_count != 0 && udp->fork_owner == self);
	if (--udp->fork_count == 0) {
		udp->fork_owner = NULL;
		(void) _cond_signal(&udp->fork_cond);
	}
	(void) _private_mutex_unlock(&udp->fork_lock);
	sigon(self);
}
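
/*
 * Illustrative sketch (not part of libc): how the EDEADLK detection in
 * fork_lock_enter() above is visible to an application.  The names below
 * are hypothetical.  A pthread_atfork() handler runs while the calling
 * thread still owns the fork lock with ul_fork set, so a nested fork()
 * fails with errno == EDEADLK instead of deadlocking.
 */
#if 0	/* example only; not compiled into libc */
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

static void
example_child_handler(void)
{
	/* fork() called from a fork handler is refused, not deadlocked */
	if (fork() == -1 && errno == EDEADLK)
		(void) fprintf(stderr, "nested fork() returned EDEADLK\n");
}

static void
example(void)
{
	(void) pthread_atfork(NULL, NULL, example_child_handler);
	if (fork() == 0)
		_exit(0);
}
#endif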

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork = _fork1
#pragma weak _fork = _fork1
#pragma weak fork1 = _fork1
pid_t
_fork1(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __fork1();
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("fork")) != 0) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal libc locks while invoking them.  The fork_lock_enter()
	 * function serializes fork(), thr_suspend(), pthread_atfork() and
	 * dlclose() (which destroys whatever pthread_atfork() functions
	 * the library may have set up).  If one of these pthread_atfork()
	 * functions attempts to fork or suspend another thread or call
	 * pthread_atfork() or dlclose a library, it will detect a deadlock
	 * in fork_lock_enter().  Otherwise, the pthread_atfork() functions
	 * are free to do anything they please (except they will not
	 * receive any signals).
	 */
	_prefork_handler();

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __fork1().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no library locks are held
	 * while we invoke fork1() from the current thread.
	 */
	(void) _private_mutex_lock(&udp->fork_lock);
	suspend_fork();
	(void) _private_mutex_unlock(&udp->fork_lock);

	pid = __fork1();

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __fork1(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		/* reset the library's data structures to reflect one thread */
		postfork1_child();
		restore_signals(self);
		_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork1() */
		continue_fork(0);
		restore_signals(self);
		_postfork_parent_handler();
	}

	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}
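
/*
 * Illustrative sketch (not part of libc): the kind of pthread_atfork()
 * handlers that _prefork_handler() and _postfork_*_handler() above invoke.
 * The names are hypothetical.  A library typically acquires its own mutex
 * in the prepare handler and releases it in both the parent and child
 * handlers, so the lock is never duplicated into the child in a held state.
 */
#if 0	/* example only; not compiled into libc */
#include <pthread.h>

static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;

static void
example_prepare(void)
{
	(void) pthread_mutex_lock(&example_lock);
}

static void
example_parent(void)
{
	(void) pthread_mutex_unlock(&example_lock);
}

static void
example_child(void)
{
	(void) pthread_mutex_unlock(&example_lock);
}

static void
example_register(void)
{
	(void) pthread_atfork(example_prepare, example_parent, example_child);
}
#endif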

/*
 * Much of the logic here is the same as in fork1().
 * See the comments in fork1(), above.
 */
#pragma weak forkall = _forkall
pid_t
_forkall(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;
	int error;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkall();
		if (pid == 0) {		/* child */
			udp->pid = _private_getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	if ((error = fork_lock_enter("forkall")) != 0) {
		fork_lock_exit();
		errno = error;
		return (-1);
	}
	self->ul_fork = 1;
	block_all_signals(self);
	suspend_fork();

	pid = __forkall();

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = _private_getpid();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	self->ul_fork = 0;
	fork_lock_exit();

	return (pid);
}

/*
 * Hacks for system calls to provide cancellation
 * and improve java garbage collection.
 */
#define	PROLOGUE			\
{					\
	ulwp_t *self = curthread;	\
	int nocancel = (self->ul_vfork | self->ul_nocancel);	\
	if (nocancel == 0) {		\
		self->ul_save_async = self->ul_cancel_async;	\
		if (!self->ul_cancel_disabled) {		\
			self->ul_cancel_async = 1;		\
			if (self->ul_cancel_pending)		\
				_pthread_exit(PTHREAD_CANCELED);\
		}						\
		self->ul_sp = stkptr();				\
	}

#define	EPILOGUE			\
	if (nocancel == 0) {		\
		self->ul_sp = 0;	\
		self->ul_cancel_async = self->ul_save_async;	\
	}				\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define	PERFORM(function_call)		\
	PROLOGUE			\
	if (nocancel)			\
		return (function_call);	\
	rv = function_call;		\
	EPILOGUE			\
	return (rv);
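
/*
 * Illustrative sketch (not part of libc): approximately what a wrapper such
 * as read(), below, looks like after PROLOGUE/PERFORM/EPILOGUE expansion.
 * The cancellation bookkeeping brackets the underlying _read() call, and the
 * nocancel fast path turns the wrapper into a plain tail call.
 */
#if 0	/* example only; not compiled into libc */
ssize_t
read_expanded_sketch(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;
	ulwp_t *self = curthread;
	int nocancel = (self->ul_vfork | self->ul_nocancel);

	if (nocancel == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	}
	if (nocancel)
		return (_read(fd, buf, size));
	rv = _read(fd, buf, size);
	if (nocancel == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
	return (rv);
}
#endif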

/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask)		\
{					\
	ulwp_t *self = curthread;	\
	int nocancel = (self->ul_vfork | self->ul_nocancel);	\
	if (!self->ul_vfork) {		\
		if (sigmask) {		\
			block_all_signals(self);	\
			self->ul_tmpmask.__sigbits[0] = sigmask->__sigbits[0]; \
			self->ul_tmpmask.__sigbits[1] = sigmask->__sigbits[1]; \
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;	\
		}			\
		if (nocancel == 0) {	\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					_pthread_exit(PTHREAD_CANCELED);\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}			\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK			\
	if (nocancel == 0) {		\
		self->ul_sp = 0;	\
		self->ul_cancel_async = self->ul_save_async;	\
	}				\
	if (self->ul_sigsuspend) {	\
		self->ul_sigsuspend = 0;	\
		restore_signals(self);	\
	}				\
}
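
/*
 * Illustrative sketch (not part of libc): the classic application pattern
 * that depends on the atomic mask replacement described above.  Names are
 * hypothetical.  The signal is blocked while the flag is tested, and
 * sigsuspend() atomically installs the unblocking mask and waits, so a
 * signal posted between the test and the wait cannot be lost.
 */
#if 0	/* example only; not compiled into libc */
#include <signal.h>

static volatile sig_atomic_t example_flag;

static void
example_wait_for_signal(void)
{
	sigset_t blockmask, waitmask;

	(void) sigemptyset(&blockmask);
	(void) sigaddset(&blockmask, SIGUSR1);
	(void) sigprocmask(SIG_BLOCK, &blockmask, &waitmask);
	(void) sigdelset(&waitmask, SIGUSR1);
	while (example_flag == 0)
		(void) sigsuspend(&waitmask);	/* handler sets example_flag */
	(void) sigprocmask(SIG_UNBLOCK, &blockmask, NULL);
}
#endif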

/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue = (self->ul_vfork | self->ul_nocancel);
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				_pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}
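
/*
 * Illustrative sketch (not part of libc): how a cancellation point that is
 * too involved for the PERFORM() macro might bracket its blocking call with
 * the two functions above.  The wrapper and system-call names here are
 * hypothetical.
 */
#if 0	/* example only; not compiled into libc */
int
example_complex_cancellation_point(int fd)
{
	int rv;

	_cancel_prologue();
	rv = __example_blocking_syscall(fd);	/* hypothetical trap */
	_cancel_epilogue();
	return (rv);
}
#endif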

/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	while ((error = __lwp_wait(tid, found)) == EINTR)
		;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t _read(int, void *, size_t);
	ssize_t rv;

	PERFORM(_read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t _write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(_write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
{
	extern int _getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(_getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
{
	extern int _getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(_getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags))
}

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
{
	extern int _putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(_putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
{
	extern int _putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(_putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

#pragma weak nanosleep = _nanosleep
int
_nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

#pragma weak clock_nanosleep = _clock_nanosleep
int
_clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
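
/*
 * Illustrative sketch (not part of libc): a drift-free periodic wakeup that
 * relies on the TIMER_ABSTIME handling above.  Names are hypothetical.
 * Because the target time is absolute, an early return caused by a clock
 * reset or an interrupted sleep can simply be retried toward the same
 * deadline.
 */
#if 0	/* example only; not compiled into libc */
#include <time.h>

static void
example_periodic(void)
{
	timespec_t next;

	(void) clock_gettime(CLOCK_REALTIME, &next);
	for (;;) {
		next.tv_sec += 1;	/* one-second period, no drift */
		while (clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
		    &next, NULL) != 0)
			continue;
		/* do the periodic work here */
	}
}
#endif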

#pragma weak sleep = _sleep
unsigned int
_sleep(unsigned int sec)
{
	unsigned int rem = 0;
	int error;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
	PROLOGUE
	error = __nanosleep(&ts, &tsr);
	EPILOGUE
	if (error == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

#pragma weak usleep = _usleep
int
_usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	PROLOGUE
	(void) __nanosleep(&ts, NULL);
	EPILOGUE
	return (0);
}

int
close(int fildes)
{
	extern void _aio_close(int);
	extern int _close(int);
	int rv;

	_aio_close(fildes);
	PERFORM(_close(fildes))
}

int
creat(const char *path, mode_t mode)
{
	extern int _creat(const char *, mode_t);
	int rv;

	PERFORM(_creat(path, mode))
}

#if !defined(_LP64)
int
creat64(const char *path, mode_t mode)
{
	extern int _creat64(const char *, mode_t);
	int rv;

	PERFORM(_creat64(path, mode))
}
#endif	/* !_LP64 */

int
fcntl(int fildes, int cmd, ...)
{
	extern int _fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
	if (cmd != F_SETLKW)
		return (_fcntl(fildes, cmd, arg));
	PERFORM(_fcntl(fildes, cmd, arg))
}
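
/*
 * Illustrative sketch (not part of libc): why only F_SETLKW goes through
 * PERFORM() above.  F_SETLKW is the one fcntl() command that can block
 * indefinitely waiting for a record lock, and it is the only fcntl()
 * command POSIX treats as a cancellation point.  Names are hypothetical.
 */
#if 0	/* example only; not compiled into libc */
#include <fcntl.h>

static void *
example_lock_thread(void *arg)
{
	int fd = *(int *)arg;
	struct flock fl;

	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;
	/* may block here; pthread_cancel() on this thread will be honored */
	(void) fcntl(fd, F_SETLKW, &fl);
	return (NULL);
}
#endif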

int
fsync(int fildes)
{
	extern int _fsync(int);
	int rv;

	PERFORM(_fsync(fildes))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int _lockf(int, int, off_t);
	int rv;

	PERFORM(_lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int _lockf64(int, int, off64_t);
	int rv;

	PERFORM(_lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t _msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int _msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(_msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(caddr_t addr, size_t len, int flags)
{
	extern int _msync(caddr_t, size_t, int);
	int rv;

	PERFORM(_msync(addr, len, flags))
}

int
open(const char *path, int oflag, ...)
{
	extern int _open(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open(path, oflag, mode))
}

#if !defined(_LP64)
int
open64(const char *path, int oflag, ...)
{
	extern int _open64(const char *, int, ...);
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(_open64(path, oflag, mode))
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int _pause(void);
	int rv;

	PERFORM(_pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pread64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t _pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(_pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t _pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(_pwrite64(fildes, buf, nbyte, offset))
}
#endif	/* !_LP64 */

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int _sigpause(int);
	int rv;

	PERFORM(_sigpause(sig))
}

#pragma weak sigsuspend = _sigsuspend
int
_sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

#pragma weak sigtimedwait = _sigtimedwait
int
_sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	sig = __sigtimedwait(set, &info, timeout);
	if (sig == SIGCANCEL &&
	    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
		do_sigcancel();
		errno = EINTR;
		sig = -1;
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) _private_memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

#pragma weak sigwait = _sigwait
int
_sigwait(sigset_t *set)
{
	return (_sigtimedwait(set, NULL, NULL));
}

#pragma weak sigwaitinfo = _sigwaitinfo
int
_sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (_sigtimedwait(set, info, NULL));
}

#pragma weak sigqueue = _sigqueue
int
_sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}
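
/*
 * Illustrative sketch (not part of libc): a dedicated signal-handling thread
 * built on the sigwait()/sigtimedwait() wrappers above.  Names are
 * hypothetical.  The signals of interest are blocked in every thread and one
 * thread retrieves them synchronously, so no asynchronous handler is needed.
 */
#if 0	/* example only; not compiled into libc */
#include <signal.h>

static void *
example_signal_thread(void *arg)
{
	sigset_t *setp = arg;
	int sig;

	for (;;) {
		sig = sigwait(setp);	/* returns the signal number */
		if (sig > 0) {
			/* handle sig synchronously here */
		}
	}
	/* NOTREACHED */
	return (NULL);
}
#endif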

int
tcdrain(int fildes)
{
	extern int _tcdrain(int);
	int rv;

	PERFORM(_tcdrain(fildes))
}

pid_t
wait(int *stat_loc)
{
	extern pid_t _wait(int *);
	pid_t rv;

	PERFORM(_wait(stat_loc))
}

pid_t
wait3(int *statusp, int options, struct rusage *rusage)
{
	extern pid_t _wait3(int *, int, struct rusage *);
	pid_t rv;

	PERFORM(_wait3(statusp, options, rusage))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int _waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;

	PERFORM(_waitid(idtype, id, infop, options))
}

/*
 * waitpid_cancel() is a libc-private symbol for internal use
 * where cancellation semantics is desired (see system()).
 */
#pragma weak waitpid_cancel = waitpid
pid_t
waitpid(pid_t pid, int *stat_loc, int options)
{
	extern pid_t _waitpid(pid_t, int *, int);
	pid_t rv;

	PERFORM(_waitpid(pid, stat_loc, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t _writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(_writev(fildes, iov, iovcnt))
}
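
/*
 * Illustrative sketch (not part of libc): the kind of cancellable wait loop,
 * as in system(), that the waitpid_cancel() alias above exists for.  Names
 * are hypothetical.  Because waitpid() is a cancellation point, a thread
 * blocked waiting for the child can still be cancelled.
 */
#if 0	/* example only; not compiled into libc */
#include <sys/wait.h>
#include <errno.h>

static int
example_wait_for_child(pid_t pid)
{
	int status;
	pid_t w;

	do {
		w = waitpid(pid, &status, 0);	/* cancellation point */
	} while (w == -1 && errno == EINTR);
	return (w == pid ? status : -1);
}
#endif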