/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/vmparam.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/stack.h>
#include <sys/cred.h>
#include <sys/cmn_err.h>
#include <sys/user.h>
#include <sys/privregs.h>
#include <sys/psw.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/modctl.h>
#include <sys/var.h>
#include <sys/inline.h>
#include <sys/syscall.h>
#include <sys/ucontext.h>
#include <sys/cpuvar.h>
#include <sys/siginfo.h>
#include <sys/trap.h>
#include <sys/vtrace.h>
#include <sys/sysinfo.h>
#include <sys/procfs.h>
#include <c2/audit.h>
#include <sys/modctl.h>
#include <sys/aio_impl.h>
#include <sys/tnf.h>
#include <sys/tnf_probe.h>
#include <sys/copyops.h>
#include <sys/priv.h>
#include <sys/msacct.h>

int syscalltrace = 0;
#ifdef SYSCALLTRACE
static kmutex_t systrace_lock;		/* syscall tracing lock */
#else
#define	syscalltrace 0
#endif /* SYSCALLTRACE */

typedef	int64_t (*llfcn_t)();	/* function returning long long */

int	pre_syscall(void);
void	post_syscall(long rval1, long rval2);
static krwlock_t *lock_syscall(struct sysent *, uint_t);
void	deferred_singlestep_trap(caddr_t);

#ifdef _SYSCALL32_IMPL
#define	LWP_GETSYSENT(lwp)	\
	(lwp_getdatamodel(lwp) == DATAMODEL_NATIVE ? sysent : sysent32)
#else
#define	LWP_GETSYSENT(lwp)	(sysent)
#endif

/*
 * If watchpoints are active, don't make copying in of
 * system call arguments take a read watchpoint trap.
 */
static int
copyin_args(struct regs *rp, long *ap, uint_t nargs)
{
	greg_t *sp = 1 + (greg_t *)rp->r_sp;		/* skip ret addr */

	ASSERT(nargs <= MAXSYSARGS);

	return (copyin_nowatch(sp, ap, nargs * sizeof (*sp)));
}

#if defined(_SYSCALL32_IMPL)
static int
copyin_args32(struct regs *rp, long *ap, uint_t nargs)
{
	greg32_t *sp = 1 + (greg32_t *)rp->r_sp;	/* skip ret addr */
	uint32_t a32[MAXSYSARGS];
	int rc;

	ASSERT(nargs <= MAXSYSARGS);

	if ((rc = copyin_nowatch(sp, a32, nargs * sizeof (*sp))) == 0) {
		uint32_t *a32p = &a32[0];

		while (nargs--)
			*ap++ = (ulong_t)*a32p++;
	}
	return (rc);
}
#define	COPYIN_ARGS32	copyin_args32
#else
#define	COPYIN_ARGS32	copyin_args
#endif

/*
 * Error handler for system calls where arg copy gets fault.
 */
static longlong_t
syscall_err()
{
	return (0);
}

/*
 * Corresponding sysent entry to allow syscall_entry caller
 * to invoke syscall_err.
 */
static struct sysent sysent_err = {
	0, SE_32RVAL1, NULL, NULL, (llfcn_t)syscall_err
};

/*
 * Called from syscall() when a non-trivial 32-bit system call occurs.
 *	Sets up the args and returns a pointer to the handler.
 */
struct sysent *
syscall_entry(kthread_t *t, long *argp)
{
	klwp_t	*lwp = ttolwp(t);
	struct regs *rp = lwptoregs(lwp);
	unsigned int code;
	struct sysent *callp;
	struct sysent *se = LWP_GETSYSENT(lwp);
	int error = 0;
	uint_t nargs;

	ASSERT(t == curthread && curthread->t_schedflag & TS_DONT_SWAP);

	lwp->lwp_ru.sysc++;
	lwp->lwp_eosys = NORMALRETURN;	/* assume this will be normal */

	/*
	 * Set lwp_ap to point to the args, even if none are needed for this
	 * system call.  This is for the loadable-syscall case where the
	 * number of args won't be known until the system call is loaded, and
	 * also maintains a non-NULL lwp_ap setup for get_syscall_args(). Note
	 * that lwp_ap MUST be set to a non-NULL value _BEFORE_ t_sysnum is
	 * set to non-zero; otherwise get_syscall_args(), seeing a non-zero
	 * t_sysnum for this thread, will charge ahead and dereference lwp_ap.
	 */
	lwp->lwp_ap = argp;		/* for get_syscall_args */

	code = rp->r_r0;
	t->t_sysnum = (short)code;
	callp = code >= NSYSCALL ? &nosys_ent : se + code;

	if ((t->t_pre_sys | syscalltrace) != 0) {
		error = pre_syscall();

		/*
		 * pre_syscall() has taken care so that lwp_ap is current;
		 * it either points to syscall-entry-saved amd64 regs,
		 * or it points to lwp_arg[], which has been re-copied from
		 * the ia32 ustack, but either way, it's a current copy after
		 * /proc has possibly mucked with the syscall args.
		 */

		if (error)
			return (&sysent_err);	/* use dummy handler */
	}

	/*
	 * Fetch the system call arguments to the kernel stack copy used
	 * for syscall handling.
	 * Note: for loadable system calls the number of arguments required
	 * may not be known at this point, and will be zero if the system call
	 * was never loaded.  Once the system call has been loaded, the number
	 * of args is not allowed to be changed.
	 */
	if ((nargs = (uint_t)callp->sy_narg) != 0 &&
	    COPYIN_ARGS32(rp, argp, nargs)) {
		(void) set_errno(EFAULT);
		return (&sysent_err);	/* use dummy handler */
	}

	return (callp);		/* return sysent entry for caller */
}

void
syscall_exit(kthread_t *t, long rval1, long rval2)
{
	/*
	 * Handle signals and other post-call events if necessary.
	 */
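	/*
	 * Note: t_post_sys_ast is a single word that overlays t_astflag,
	 * t_sig_check, t_post_sys and t_trapret, so this one test covers
	 * all of the post-syscall conditions at once.
	 */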
	if ((t->t_post_sys_ast | syscalltrace) == 0) {
		klwp_t	*lwp = ttolwp(t);
		struct regs *rp = lwptoregs(lwp);

		/*
		 * Normal return.
		 * Clear error indication and set return values.
		 */
		rp->r_ps &= ~PS_C;	/* reset carry bit */
		rp->r_r0 = rval1;
		rp->r_r1 = rval2;
		lwp->lwp_state = LWP_USER;
	} else
		post_syscall(rval1, rval2);
	t->t_sysnum = 0;		/* invalidate args */
}

/*
 * Perform pre-system-call processing, including stopping for tracing,
 * auditing, etc.
 *
 * This routine is called only if the t_pre_sys flag is set. Any condition
 * requiring pre-syscall handling must set the t_pre_sys flag. If the
 * condition is persistent, this routine will repost t_pre_sys.
 */
int
pre_syscall()
{
	kthread_t *t = curthread;
	unsigned code = t->t_sysnum;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	int	repost;

	t->t_pre_sys = repost = 0;	/* clear pre-syscall processing flag */

	ASSERT(t->t_schedflag & TS_DONT_SWAP);

#if defined(DEBUG)
	/*
	 * On the i386 kernel, lwp_ap points at the piece of the thread
	 * stack that we copy the users arguments into.
	 *
	 * On the amd64 kernel, the syscall arguments in the rdi..r9
	 * registers should be pointed at by lwp_ap.  If the args need to
	 * be copied so that those registers can be changed without losing
	 * the ability to get the args for /proc, they can be saved by
	 * save_syscall_args(), and lwp_ap will be restored by post_syscall().
	 */
	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		ASSERT(lwp->lwp_ap == (long *)&lwptoregs(lwp)->r_rdi);
	} else {
#endif
		ASSERT((caddr_t)lwp->lwp_ap > t->t_stkbase &&
		    (caddr_t)lwp->lwp_ap < t->t_stk);
	}
#endif	/* DEBUG */

	/*
	 * Make sure the thread is holding the latest credentials for the
	 * process.  The credentials in the process right now apply to this
	 * thread for the entire system call.
	 */
	if (t->t_cred != p->p_cred) {
		cred_t *oldcred = t->t_cred;
		/*
		 * DTrace accesses t_cred in probe context.  t_cred must
		 * always be either NULL, or point to a valid, allocated cred
		 * structure.
		 */
		t->t_cred = crgetcred();
		crfree(oldcred);
	}

	/*
	 * From the proc(4) manual page:
	 * When entry to a system call is being traced, the traced process
	 * stops after having begun the call to the system but before the
	 * system call arguments have been fetched from the process.
	 */
	if (PTOU(p)->u_systrap) {
		if (prismember(&PTOU(p)->u_entrymask, code)) {
			mutex_enter(&p->p_lock);
			/*
			 * Recheck stop condition, now that lock is held.
			 */
			if (PTOU(p)->u_systrap &&
			    prismember(&PTOU(p)->u_entrymask, code)) {
				stop(PR_SYSENTRY, code);

				/*
				 * /proc may have modified syscall args,
				 * either in regs for amd64 or on ustack
				 * for ia32.  Either way, arrange to
				 * copy them again, both for the syscall
				 * handler and for other consumers in
				 * post_syscall (like audit).  Here, we
				 * only do amd64, and just set lwp_ap
				 * back to the kernel-entry stack copy;
				 * the syscall ml code redoes
				 * move-from-regs to set up for the
				 * syscall handler after we return.  For
				 * ia32, save_syscall_args() below makes
				 * an lwp_ap-accessible copy.
				 */
#if defined(_LP64)
				if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
					lwp->lwp_argsaved = 0;
					lwp->lwp_ap =
					    (long *)&lwptoregs(lwp)->r_rdi;
				}
#endif
			}
			mutex_exit(&p->p_lock);
		}
		repost = 1;
	}

	/*
	 * ia32 kernel, or ia32 proc on amd64 kernel: keep args in
	 * lwp_arg for post-syscall processing, regardless of whether
	 * they might have been changed in /proc above.
	 */
#if defined(_LP64)
	if (lwp_getdatamodel(lwp) != DATAMODEL_NATIVE)
#endif
		(void) save_syscall_args();

	if (lwp->lwp_sysabort) {
		/*
		 * lwp_sysabort may have been set via /proc while the process
		 * was stopped on PR_SYSENTRY.  If so, abort the system call.
		 * Override any error from the copyin() of the arguments.
		 */
		lwp->lwp_sysabort = 0;
		(void) set_errno(EINTR);	/* forces post_sys */
		t->t_pre_sys = 1;	/* repost anyway */
		return (1);		/* don't do system call, return EINTR */
	}

	if (audit_active) {	/* begin auditing for this syscall */
		int error;
		if (error = audit_start(T_SYSCALL, code, 0, lwp)) {
			t->t_pre_sys = 1;	/* repost anyway */
			(void) set_errno(error);
			return (1);
		}
		repost = 1;
	}

#ifndef NPROBE
	/* Kernel probe */
	if (tnf_tracing_active) {
		TNF_PROBE_1(syscall_start, "syscall thread", /* CSTYLED */,
		    tnf_sysnum, sysnum, t->t_sysnum);
		t->t_post_sys = 1;	/* make sure post_syscall runs */
		repost = 1;
	}
#endif /* NPROBE */

#ifdef SYSCALLTRACE
	if (syscalltrace) {
		int i;
		long *ap;
		char *cp;
		char *sysname;
		struct sysent *callp;

		if (code >= NSYSCALL)
			callp = &nosys_ent;	/* nosys has no args */
		else
			callp = LWP_GETSYSENT(lwp) + code;
		(void) save_syscall_args();
		mutex_enter(&systrace_lock);
		printf("%d: ", p->p_pid);
		if (code >= NSYSCALL)
			printf("0x%x", code);
		else {
			sysname = mod_getsysname(code);
			printf("%s[0x%x/0x%p]", sysname == NULL ? "NULL" :
			    sysname, code, callp->sy_callc);
		}
		cp = "(";
		for (i = 0, ap = lwp->lwp_ap; i < callp->sy_narg; i++, ap++) {
			printf("%s%lx", cp, *ap);
			cp = ", ";
		}
		if (i)
			printf(")");
		printf(" %s id=0x%p\n", PTOU(p)->u_comm, curthread);
		mutex_exit(&systrace_lock);
	}
#endif /* SYSCALLTRACE */

	/*
	 * If there was a continuing reason for pre-syscall processing,
	 * set the t_pre_sys flag for the next system call.
	 */
	if (repost)
		t->t_pre_sys = 1;
	lwp->lwp_error = 0;	/* for old drivers */
	lwp->lwp_badpriv = PRIV_NONE;
	return (0);
}


/*
 * Post-syscall processing.  Perform abnormal system call completion
 * actions such as /proc tracing, profiling, signals, preemption, etc.
 *
 * This routine is called only if t_post_sys, t_sig_check, or t_astflag is set.
 * Any condition requiring pre-syscall handling must set one of these.
 * If the condition is persistent, this routine will repost t_post_sys.
 */
void
post_syscall(long rval1, long rval2)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	struct regs *rp = lwptoregs(lwp);
	uint_t	error;
	uint_t	code = t->t_sysnum;
	int	repost = 0;
	int	proc_stop = 0;		/* non-zero if stopping */
	int	sigprof = 0;		/* non-zero if sending SIGPROF */

	t->t_post_sys = 0;

	error = lwp->lwp_errno;

	/*
	 * Code can be zero if this is a new LWP returning after a forkall(),
	 * other than the one which matches the one in the parent which called
	 * forkall().  In these LWPs, skip most of post-syscall activity.
	 */
	if (code == 0)
		goto sig_check;
	/*
	 * If the trace flag is set, mark the lwp to take a single-step trap
	 * on return to user level (below).  The x86 lcall interface and
	 * sysenter have already done this, and turned off the flag, but
	 * the amd64 syscall interface has not.
	 */
	if (rp->r_ps & PS_T) {
		lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
		rp->r_ps &= ~PS_T;
		aston(curthread);
	}
	if (audit_active) {	/* put out audit record for this syscall */
		rval_t	rval;

		/* XX64 -- truncation of 64-bit return values? */
		rval.r_val1 = (int)rval1;
		rval.r_val2 = (int)rval2;
		audit_finish(T_SYSCALL, code, error, &rval);
		repost = 1;
	}

	if (curthread->t_pdmsg != NULL) {
		char *m = curthread->t_pdmsg;

		uprintf("%s", m);
		kmem_free(m, strlen(m) + 1);
		curthread->t_pdmsg = NULL;
	}

	/*
	 * If we're going to stop for /proc tracing, set the flag and
	 * save the arguments so that the return values don't smash them.
	 */
	if (PTOU(p)->u_systrap) {
		if (prismember(&PTOU(p)->u_exitmask, code)) {
			if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
				(void) save_syscall_args();
			proc_stop = 1;
		}
		repost = 1;
	}

	/*
	 * Similarly check to see if SIGPROF might be sent.
	 */
	if (curthread->t_rprof != NULL &&
	    curthread->t_rprof->rp_anystate != 0) {
		if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
			(void) save_syscall_args();
		sigprof = 1;
	}

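	/*
	 * Return-value convention: on success, clear the carry flag (PS_C)
	 * and hand back rval1/rval2 in r0/r1; on failure, set the carry
	 * flag and return the errno value in r0.  Userland (libc) examines
	 * the carry flag to tell the two cases apart.
	 */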
	if (lwp->lwp_eosys == NORMALRETURN) {
		if (error == 0) {
#ifdef SYSCALLTRACE
			if (syscalltrace) {
				mutex_enter(&systrace_lock);
				printf(
				    "%d: r_val1=0x%lx, r_val2=0x%lx, id 0x%p\n",
				    p->p_pid, rval1, rval2, curthread);
				mutex_exit(&systrace_lock);
			}
#endif /* SYSCALLTRACE */
			rp->r_ps &= ~PS_C;
			rp->r_r0 = rval1;
			rp->r_r1 = rval2;
		} else {
			int sig;
#ifdef SYSCALLTRACE
			if (syscalltrace) {
				mutex_enter(&systrace_lock);
				printf("%d: error=%d, id 0x%p\n",
				    p->p_pid, error, curthread);
				mutex_exit(&systrace_lock);
			}
#endif /* SYSCALLTRACE */
			if (error == EINTR && t->t_activefd.a_stale)
				error = EBADF;
			if (error == EINTR &&
			    (sig = lwp->lwp_cursig) != 0 &&
			    sigismember(&PTOU(p)->u_sigrestart, sig) &&
			    PTOU(p)->u_signal[sig - 1] != SIG_DFL &&
			    PTOU(p)->u_signal[sig - 1] != SIG_IGN)
				error = ERESTART;
			rp->r_r0 = error;
			rp->r_ps |= PS_C;
		}
	}

	/*
	 * From the proc(4) manual page:
	 * When exit from a system call is being traced, the traced process
	 * stops on completion of the system call just prior to checking for
	 * signals and returning to user level.  At this point all return
	 * values have been stored into the traced process's saved registers.
	 */
	if (proc_stop) {
		mutex_enter(&p->p_lock);
		if (PTOU(p)->u_systrap &&
		    prismember(&PTOU(p)->u_exitmask, code))
			stop(PR_SYSEXIT, code);
		mutex_exit(&p->p_lock);
	}

	/*
	 * If we are the parent returning from a successful
	 * vfork, wait for the child to exec or exit.
	 * This code must be here and not in the bowels of the system
	 * so that /proc can intercept exit from vfork in a timely way.
	 */
	if (t->t_flag & T_VFPARENT) {
		ASSERT(code == SYS_vfork || code == SYS_forksys);
		ASSERT(rp->r_r1 == 0 && error == 0);
		vfwait((pid_t)rval1);
		t->t_flag &= ~T_VFPARENT;
	}

	/*
	 * If profiling is active, bill the current PC in user-land
	 * and keep reposting until profiling is disabled.
	 */
	if (p->p_prof.pr_scale) {
		if (lwp->lwp_oweupc)
			profil_tick(rp->r_pc);
		repost = 1;
	}

sig_check:
	/*
	 * Reset flag for next time.
	 * We must do this after stopping on PR_SYSEXIT
	 * because /proc uses the information in lwp_eosys.
	 */
	lwp->lwp_eosys = NORMALRETURN;
	clear_stale_fd();
	t->t_flag &= ~T_FORKALL;

	if (t->t_astflag | t->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions that
		 * may have caused an AST.  This flag is on whenever a signal or
		 * unusual condition should be handled after the next trap or
		 * syscall.
		 */
		astoff(t);
		/*
		 * If a single-step trap occurred on a syscall (see trap())
		 * recognize it now.  Do this before checking for signals
		 * because deferred_singlestep_trap() may generate a SIGTRAP to
		 * the LWP or may otherwise mark the LWP to call issig(FORREAL).
		 */
		if (lwp->lwp_pcb.pcb_flags & DEBUG_PENDING)
			deferred_singlestep_trap((caddr_t)rp->r_pc);

		t->t_sig_check = 0;

		/*
		 * The following check is legal for the following reasons:
		 *	1) The thread we are checking, is ourselves, so there is
		 *	no way the proc can go away.
		 *	2) The only time we need to be protected by the
		 *	lock is if the binding is changed.
		 *
		 *	Note we will still take the lock and check the binding
		 *	if the condition was true without the lock held.  This
		 *	prevents lock contention among threads owned by the
		 *	same proc.
		 */

		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * for kaio requests on the special kaio poll queue,
		 * copyout their results to user memory.
		 */
		if (p->p_aio)
			aio_cleanup(0);
		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p) || (t->t_proc_flag & TP_EXITLWP))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG_PENDING
		 * evaluate true must set t_sig_check afterwards.
		 */
		if (ISSIG_PENDING(t, lwp, p)) {
			if (issig(FORREAL))
				psig();
			t->t_sig_check = 1;	/* recheck next time */
		}

		if (sigprof) {
			int nargs = (code > 0 && code < NSYSCALL)?
			    LWP_GETSYSENT(lwp)[code].sy_narg : 0;
			realsigprof(code, nargs, error);
			t->t_sig_check = 1;	/* recheck next time */
		}

		/*
		 * If a performance counter overflow interrupt was
		 * delivered *during* the syscall, then re-enable the
		 * AST so that we take a trip through trap() to cause
		 * the SIGEMT to be delivered.
		 */
		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW)
			aston(t);

		/*
		 * /proc can't enable/disable the trace bit itself
		 * because that could race with the call gate used by
		 * system calls via "lcall".  If that happened, an
		 * invalid EFLAGS would result.  prstep()/prnostep()
		 * therefore schedule an AST for the purpose.
		 */
		if (lwp->lwp_pcb.pcb_flags & REQUEST_STEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_STEP;
			rp->r_ps |= PS_T;
		}
		if (lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_NOSTEP;
			rp->r_ps &= ~PS_T;
		}
	}

	lwp->lwp_errno = 0;		/* clear error for next time */

#ifndef NPROBE
	/* Kernel probe */
	if (tnf_tracing_active) {
		TNF_PROBE_3(syscall_end, "syscall thread", /* CSTYLED */,
		    tnf_long, rval1, rval1,
		    tnf_long, rval2, rval2,
		    tnf_long, errno, (long)error);
		repost = 1;
	}
#endif /* NPROBE */

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point.  Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 *
	 * XXX	Sampled times past this point are charged to the user.
	 */
	lwp->lwp_state = LWP_USER;

	if (t->t_trapret) {
		t->t_trapret = 0;
		thread_lock(t);
		CL_TRAPRET(t);
		thread_unlock(t);
	}
	if (CPU->cpu_runrun || t->t_schedflag & TS_ANYWAITQ)
		preempt();

	lwp->lwp_errno = 0;		/* clear error for next time */

	/*
	 * The thread lock must be held in order to clear sysnum and reset
	 * lwp_ap atomically with respect to other threads in the system that
	 * may be looking at the args via lwp_ap from get_syscall_args().
	 */

	thread_lock(t);
	t->t_sysnum = 0;		/* no longer in a system call */

	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		/*
		 * In case the args were copied to the lwp, reset the
		 * pointer so the next syscall will have the right
		 * lwp_ap pointer.
		 */
		lwp->lwp_ap = (long *)&rp->r_rdi;
	} else {
#endif
		lwp->lwp_ap = NULL;	/* reset on every syscall entry */
	}
	thread_unlock(t);

	lwp->lwp_argsaved = 0;

	/*
	 * If there was a continuing reason for post-syscall processing,
	 * set the t_post_sys flag for the next system call.
	 */
	if (repost)
		t->t_post_sys = 1;

	/*
	 * If there is a ustack registered for this lwp, and the stack rlimit
	 * has been altered, read in the ustack. If the saved stack rlimit
	 * matches the bounds of the ustack, update the ustack to reflect
	 * the new rlimit. If the new stack rlimit is RLIM_INFINITY, disable
	 * stack checking by setting the size to 0.
	 */
	if (lwp->lwp_ustack != 0 && lwp->lwp_old_stk_ctl != 0) {
		rlim64_t new_size;
		caddr_t top;
		stack_t stk;
		struct rlimit64 rl;

		mutex_enter(&p->p_lock);
		new_size = p->p_stk_ctl;
		top = p->p_usrstack;
		(void) rctl_rlimit_get(rctlproc_legacy[RLIMIT_STACK], p, &rl);
		mutex_exit(&p->p_lock);

		if (rl.rlim_cur == RLIM64_INFINITY)
			new_size = 0;

		if (copyin((stack_t *)lwp->lwp_ustack, &stk,
		    sizeof (stack_t)) == 0 &&
		    (stk.ss_size == lwp->lwp_old_stk_ctl ||
		    stk.ss_size == 0) &&
		    stk.ss_sp == top - stk.ss_size) {
			stk.ss_sp = (void *)((uintptr_t)stk.ss_sp +
			    stk.ss_size - (uintptr_t)new_size);
			stk.ss_size = new_size;

			(void) copyout(&stk, (stack_t *)lwp->lwp_ustack,
			    sizeof (stack_t));
		}

		lwp->lwp_old_stk_ctl = 0;
	}
}

/*
 * Called from post_syscall() when a deferred singlestep is to be taken.
 */
void
deferred_singlestep_trap(caddr_t pc)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	pcb_t *pcb = &lwp->lwp_pcb;
	uint_t fault = 0;
	k_siginfo_t siginfo;

	bzero(&siginfo, sizeof (siginfo));

	/*
	 * If both NORMAL_STEP and WATCH_STEP are in
	 * effect, give precedence to WATCH_STEP.
	 * If neither is set, user must have set the
	 * PS_T bit in %efl; treat this as NORMAL_STEP.
	 */
	if ((fault = undo_watch_step(&siginfo)) == 0 &&
	    ((pcb->pcb_flags & NORMAL_STEP) ||
	    !(pcb->pcb_flags & WATCH_STEP))) {
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_TRACE;
		siginfo.si_addr = pc;
		fault = FLTTRACE;
	}
	pcb->pcb_flags &= ~(DEBUG_PENDING|NORMAL_STEP|WATCH_STEP);

	if (fault) {
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = siginfo.si_addr;
		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp.  Otherwise just
		 * deliver the associated signal.
		 */
		if (prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, &siginfo) == 0)
			siginfo.si_signo = 0;
	}

	if (siginfo.si_signo)
		trapsig(&siginfo, 1);
}

/*
 * nonexistent system call-- signal lwp (may want to handle it)
 * flag error if lwp won't see signal immediately
 */
int64_t
nosys()
{
	tsignal(curthread, SIGSYS);
	return (set_errno(ENOSYS));
}

/*
 * Execute a 32-bit system call on behalf of the current thread.
 */
void
dosyscall(void)
{
	/*
	 * Need space on the stack to store syscall arguments.
	 */
	long		syscall_args[MAXSYSARGS];
	struct sysent	*se;
	int64_t		ret;

	syscall_mstate(LMS_TRAP, LMS_SYSTEM);

	ASSERT(curproc->p_model == DATAMODEL_ILP32);

	CPU_STATS_ENTER_K();
	CPU_STATS_ADDQ(CPU, sys, syscall, 1);
	CPU_STATS_EXIT_K();

	se = syscall_entry(curthread, syscall_args);

	/*
	 * syscall_entry() copied all 8 arguments into syscall_args.
	 */
	ret = se->sy_callc(syscall_args[0], syscall_args[1], syscall_args[2],
	    syscall_args[3], syscall_args[4], syscall_args[5], syscall_args[6],
	    syscall_args[7]);

	syscall_exit(curthread, (int)ret & 0xffffffffu, (int)(ret >> 32));
	syscall_mstate(LMS_SYSTEM, LMS_TRAP);
}

/*
 * Get the arguments to the current system call.  See comment atop
 * save_syscall_args() regarding lwp_ap usage.
 */

uint_t
get_syscall_args(klwp_t *lwp, long *argp, int *nargsp)
{
	kthread_t	*t = lwptot(lwp);
	ulong_t	mask = 0xfffffffful;
	uint_t	code;
	long	*ap;
	int	nargs;

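	/*
	 * For a 32-bit lwp only the low 32 bits of each saved argument
	 * are meaningful, so leave the default 32-bit mask in place; for
	 * a 64-bit lwp widen it so the full argument is passed through.
	 */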
#if defined(_LP64)
	if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
		mask = 0xfffffffffffffffful;
#endif

	/*
	 * The thread lock must be held while looking at the arguments to ensure
	 * they don't go away via post_syscall().
	 * get_syscall_args() is the only routine to read them which is callable
	 * outside the LWP in question and hence the only one that must be
	 * synchronized in this manner.
	 */
	thread_lock(t);

	code = t->t_sysnum;
	ap = lwp->lwp_ap;

	thread_unlock(t);

	if (code != 0 && code < NSYSCALL) {
		nargs = LWP_GETSYSENT(lwp)[code].sy_narg;

		ASSERT(nargs <= MAXSYSARGS);

		*nargsp = nargs;
		while (nargs-- > 0)
			*argp++ = *ap++ & mask;
	} else {
		*nargsp = 0;
	}

	return (code);
}

#ifdef _SYSCALL32_IMPL
/*
 * Get the arguments to the current 32-bit system call.
 */
uint_t
get_syscall32_args(klwp_t *lwp, int *argp, int *nargsp)
{
	long args[MAXSYSARGS];
	uint_t i, code;

	code = get_syscall_args(lwp, args, nargsp);

	for (i = 0; i != *nargsp; i++)
		*argp++ = (int)args[i];
	return (code);
}
#endif

/*
 * Save the system call arguments in a safe place.
 *
 * On the i386 kernel:
 *
 *	Copy the users args prior to changing the stack or stack pointer.
 *	This is so /proc will be able to get a valid copy of the
 *	args from the user stack even after the user stack has been changed.
 *	Note that the kernel stack copy of the args may also have been
 *	changed by a system call handler which takes C-style arguments.
 *
 *	Note that this may be called by stop() from trap().  In that case
 *	t_sysnum will be zero (syscall_exit clears it), so no args will be
 *	copied.
 *
 * On the amd64 kernel:
 *
 *	For 64-bit applications, lwp->lwp_ap normally points to %rdi..%r9
 *	in the reg structure.  If the user is going to change the argument
 *	registers, rax, or the stack and might want to get the args (for
 *	/proc tracing), it must copy the args elsewhere via save_syscall_args().
 *
 *	For 32-bit applications, lwp->lwp_ap normally points to a copy of
 *	the system call arguments on the kernel stack made from the user
 *	stack.  Copy the args prior to changing the stack or stack pointer.
 *	This is so /proc will be able to get a valid copy of the args
 *	from the user stack even after that stack has been changed.
 *
 *	This may be called from stop() even when we're not in a system call.
 *	Since there's no easy way to tell, this must be safe (not panic).
 *	If the copyins get data faults, return non-zero.
 */
int
save_syscall_args()
{
	kthread_t	*t = curthread;
	klwp_t		*lwp = ttolwp(t);
	uint_t		code = t->t_sysnum;
	uint_t		nargs;

	if (lwp->lwp_argsaved || code == 0)
		return (0);		/* args already saved or not needed */

	if (code >= NSYSCALL) {
		nargs = 0;		/* illegal syscall */
	} else {
		struct sysent *se = LWP_GETSYSENT(lwp);
		struct sysent *callp = se + code;

		nargs = callp->sy_narg;
		if (LOADABLE_SYSCALL(callp) && nargs == 0) {
			krwlock_t	*module_lock;

			/*
			 * Find out how many arguments the system
			 * call uses.
			 *
			 * We have the property that loaded syscalls
			 * never change the number of arguments they
			 * use after they've been loaded once.  This
			 * allows us to stop for /proc tracing without
			 * holding the module lock.
			 * /proc is assured that sy_narg is valid.
			 */
			module_lock = lock_syscall(se, code);
			nargs = callp->sy_narg;
			rw_exit(module_lock);
		}
	}

	/*
	 * Fetch the system call arguments.
	 */
	if (nargs == 0)
		goto out;

	ASSERT(nargs <= MAXSYSARGS);

	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		struct regs *rp = lwptoregs(lwp);

		lwp->lwp_arg[0] = rp->r_rdi;
		lwp->lwp_arg[1] = rp->r_rsi;
		lwp->lwp_arg[2] = rp->r_rdx;
		lwp->lwp_arg[3] = rp->r_rcx;
		lwp->lwp_arg[4] = rp->r_r8;
		lwp->lwp_arg[5] = rp->r_r9;
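		/*
		 * Arguments beyond the sixth are passed on the user
		 * stack; copyin_args() fetches those, skipping the
		 * return address at the top of the stack.
		 */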
		if (nargs > 6 && copyin_args(rp, &lwp->lwp_arg[6], nargs - 6))
			return (-1);
	} else {
#endif
		if (COPYIN_ARGS32(lwptoregs(lwp), lwp->lwp_arg, nargs))
			return (-1);
	}
out:
	lwp->lwp_ap = lwp->lwp_arg;
	lwp->lwp_argsaved = 1;
	t->t_post_sys = 1;	/* so lwp_ap will be reset */
	return (0);
}

void
reset_syscall_args(void)
{
	ttolwp(curthread)->lwp_argsaved = 0;
}

/*
 * Call a system call which takes a pointer to the user args struct and
 * a pointer to the return values.  This is a bit slower than the standard
 * C arg-passing method in some cases.
 */
int64_t
syscall_ap(void)
{
	uint_t	error;
	struct sysent	*callp;
	rval_t	rval;
	kthread_t	*t = curthread;
	klwp_t	*lwp = ttolwp(t);
	struct regs *rp = lwptoregs(lwp);

	callp = LWP_GETSYSENT(lwp) + t->t_sysnum;

#if defined(__amd64)
	/*
	 * If the arguments don't fit in registers %rdi-%r9, make sure they
	 * have been copied to the lwp_arg array.
	 */
	if (callp->sy_narg > 6 && save_syscall_args())
		return ((int64_t)set_errno(EFAULT));
#endif

	rval.r_val1 = 0;
	rval.r_val2 = rp->r_r1;
	lwp->lwp_error = 0;	/* for old drivers */
	error = (*(callp->sy_call))(lwp->lwp_ap, &rval);
	if (error)
		return ((longlong_t)set_errno(error));
	return (rval.r_vals);
}

/*
 * Load system call module.
 *	Returns with pointer to held read lock for module.
 */
static krwlock_t *
lock_syscall(struct sysent *table, uint_t code)
{
	krwlock_t	*module_lock;
	struct modctl	*modp;
	int		id;
	struct sysent	*callp;

	callp = table + code;
	module_lock = callp->sy_lock;

	/*
	 * Optimization to only call modload if we don't have a loaded
	 * syscall.
	 */
	rw_enter(module_lock, RW_READER);
	if (LOADED_SYSCALL(callp))
		return (module_lock);
	rw_exit(module_lock);

	for (;;) {
		if ((id = modload("sys", syscallnames[code])) == -1)
			break;

		/*
		 * If we loaded successfully at least once, the modctl
		 * will still be valid, so we try to grab it by filename.
		 * If this call fails, it's because the mod_filename
		 * was changed after the call to modload() (mod_hold_by_name()
		 * is the likely culprit).  We can safely just take
		 * another lap if this is the case;  the modload() will
		 * change the mod_filename back to one by which we can
		 * find the modctl.
		 */
		modp = mod_find_by_filename("sys", syscallnames[code]);

		if (modp == NULL)
			continue;

		mutex_enter(&mod_lock);

		if (!modp->mod_installed) {
			mutex_exit(&mod_lock);
			continue;
		}
		break;
	}
	rw_enter(module_lock, RW_READER);

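	/*
	 * mod_lock is still held here only if modload() succeeded above:
	 * we left the loop holding it so the module could not go away
	 * before the read lock was reacquired.  Drop it now.
	 */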
	if (id != -1)
		mutex_exit(&mod_lock);

	return (module_lock);
}

/*
 * Loadable syscall support.
 *	If needed, load the module, then reserve it by holding a read
 *	lock for the duration of the call.
 *	Later, if the syscall is not unloadable, it could patch the vector.
 */
/*ARGSUSED*/
int64_t
loadable_syscall(
    long a0, long a1, long a2, long a3,
    long a4, long a5, long a6, long a7)
{
	klwp_t *lwp = ttolwp(curthread);
	int64_t	rval;
	struct sysent *callp;
	struct sysent *se = LWP_GETSYSENT(lwp);
	krwlock_t *module_lock;
	int code, error = 0;
	int64_t (*sy_call)();

	code = curthread->t_sysnum;
	callp = se + code;

	/*
	 * Try to autoload the system call if necessary
	 */
	module_lock = lock_syscall(se, code);
	THREAD_KPRI_RELEASE();	/* drop priority given by rw_enter */

	/*
	 * we've locked either the loaded syscall or nosys
	 */

	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		if (callp->sy_flags & SE_ARGC) {
			sy_call = (int64_t (*)())callp->sy_call;
			rval = (*sy_call)(a0, a1, a2, a3, a4, a5);
		} else
			rval = syscall_ap();
	} else {
#endif
		/*
		 * Now that it's loaded, make sure enough args were copied.
		 */
		if (COPYIN_ARGS32(lwptoregs(lwp), lwp->lwp_ap, callp->sy_narg))
			error = EFAULT;
		if (error) {
			rval = set_errno(error);
		} else if (callp->sy_flags & SE_ARGC) {
			sy_call = (int64_t (*)())callp->sy_call;
			rval = (*sy_call)(lwp->lwp_ap[0], lwp->lwp_ap[1],
			    lwp->lwp_ap[2], lwp->lwp_ap[3], lwp->lwp_ap[4],
			    lwp->lwp_ap[5]);
		} else
			rval = syscall_ap();
	}

	THREAD_KPRI_REQUEST();	/* regain priority from read lock */
	rw_exit(module_lock);
	return (rval);
}

/*
 * Indirect syscall handled in libc on x86 architectures
 */
int64_t
indir()
{
	return (nosys());
}

/*
 * set_errno - set an error return from the current system call.
 *	This could be a macro.
 *	This returns the value it is passed, so that the caller can
 *	use tail-recursion-elimination and do return (set_errno(ERRNO));
 */
uint_t
set_errno(uint_t error)
{
	ASSERT(error != 0);		/* must not be used to clear errno */

	curthread->t_post_sys = 1;	/* have post_syscall do error return */
	return (ttolwp(curthread)->lwp_errno = error);
}

/*
 * set_proc_pre_sys - Set pre-syscall processing for entire process.
 */
void
set_proc_pre_sys(proc_t *p)
{
	kthread_t	*t;
	kthread_t	*first;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = first = p->p_tlist;
	do {
		t->t_pre_sys = 1;
	} while ((t = t->t_forw) != first);
}

/*
 * set_proc_post_sys - Set post-syscall processing for entire process.
 */
void
set_proc_post_sys(proc_t *p)
{
	kthread_t	*t;
	kthread_t	*first;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = first = p->p_tlist;
	do {
		t->t_post_sys = 1;
	} while ((t = t->t_forw) != first);
}

/*
 * set_proc_sys - Set pre- and post-syscall processing for entire process.
 */
void
set_proc_sys(proc_t *p)
{
	kthread_t	*t;
	kthread_t	*first;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = first = p->p_tlist;
	do {
		t->t_pre_sys = 1;
		t->t_post_sys = 1;
	} while ((t = t->t_forw) != first);
}

/*
 * set_all_proc_sys - set pre- and post-syscall processing flags for all
 * user processes.
 *
 * This is needed when auditing, tracing, or other facilities which affect
 * all processes are turned on.
 */
void
set_all_proc_sys()
{
	kthread_t	*t;
	kthread_t	*first;

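	/*
	 * Every thread in the system is linked on the circular t_next
	 * list, so starting at curthread and walking t_next until we
	 * return to it visits them all; pidlock keeps the list stable
	 * while we walk it.
	 */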
	mutex_enter(&pidlock);
	t = first = curthread;
	do {
		t->t_pre_sys = 1;
		t->t_post_sys = 1;
	} while ((t = t->t_next) != first);
	mutex_exit(&pidlock);
}

/*
 * set_proc_ast - Set asynchronous service trap (AST) flag for all
 * threads in process.
 */
void
set_proc_ast(proc_t *p)
{
	kthread_t	*t;
	kthread_t	*first;

	ASSERT(MUTEX_HELD(&p->p_lock));

	t = first = p->p_tlist;
	do {
		aston(t);
	} while ((t = t->t_forw) != first);
}