/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dtrace_impl.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>

/*
 * This is gross knowledge to have to encode here...
380Sstevel@tonic-gate */ 390Sstevel@tonic-gate extern void _interrupt(); 400Sstevel@tonic-gate extern void _cmntrap(); 410Sstevel@tonic-gate extern void _allsyscalls(); 420Sstevel@tonic-gate 430Sstevel@tonic-gate extern size_t _interrupt_size; 440Sstevel@tonic-gate extern size_t _cmntrap_size; 450Sstevel@tonic-gate extern size_t _allsyscalls_size; 460Sstevel@tonic-gate 470Sstevel@tonic-gate extern uintptr_t kernelbase; 480Sstevel@tonic-gate 490Sstevel@tonic-gate void 500Sstevel@tonic-gate dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes, 510Sstevel@tonic-gate uint32_t *intrpc) 520Sstevel@tonic-gate { 530Sstevel@tonic-gate struct frame *fp = (struct frame *)dtrace_getfp(); 540Sstevel@tonic-gate struct frame *nextfp, *minfp, *stacktop; 550Sstevel@tonic-gate int depth = 0; 560Sstevel@tonic-gate int on_intr, last = 0; 570Sstevel@tonic-gate uintptr_t pc; 580Sstevel@tonic-gate uintptr_t caller = CPU->cpu_dtrace_caller; 590Sstevel@tonic-gate 600Sstevel@tonic-gate if ((on_intr = CPU_ON_INTR(CPU)) != 0) 610Sstevel@tonic-gate stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME)); 620Sstevel@tonic-gate else 630Sstevel@tonic-gate stacktop = (struct frame *)curthread->t_stk; 640Sstevel@tonic-gate minfp = fp; 650Sstevel@tonic-gate 660Sstevel@tonic-gate aframes++; 670Sstevel@tonic-gate 680Sstevel@tonic-gate if (intrpc != NULL && depth < pcstack_limit) 690Sstevel@tonic-gate pcstack[depth++] = (pc_t)intrpc; 700Sstevel@tonic-gate 710Sstevel@tonic-gate while (depth < pcstack_limit) { 720Sstevel@tonic-gate nextfp = (struct frame *)fp->fr_savfp; 730Sstevel@tonic-gate pc = fp->fr_savpc; 740Sstevel@tonic-gate 750Sstevel@tonic-gate if (nextfp <= minfp || nextfp >= stacktop) { 760Sstevel@tonic-gate if (on_intr) { 770Sstevel@tonic-gate /* 780Sstevel@tonic-gate * Hop from interrupt stack to thread stack. 
790Sstevel@tonic-gate */ 800Sstevel@tonic-gate stacktop = (struct frame *)curthread->t_stk; 810Sstevel@tonic-gate minfp = (struct frame *)curthread->t_stkbase; 820Sstevel@tonic-gate on_intr = 0; 830Sstevel@tonic-gate continue; 840Sstevel@tonic-gate } 850Sstevel@tonic-gate 860Sstevel@tonic-gate /* 870Sstevel@tonic-gate * This is the last frame we can process; indicate 880Sstevel@tonic-gate * that we should return after processing this frame. 890Sstevel@tonic-gate */ 900Sstevel@tonic-gate last = 1; 910Sstevel@tonic-gate } 920Sstevel@tonic-gate 930Sstevel@tonic-gate if (aframes > 0) { 940Sstevel@tonic-gate if (--aframes == 0 && caller != NULL) { 950Sstevel@tonic-gate /* 960Sstevel@tonic-gate * We've just run out of artificial frames, 970Sstevel@tonic-gate * and we have a valid caller -- fill it in 980Sstevel@tonic-gate * now. 990Sstevel@tonic-gate */ 1000Sstevel@tonic-gate ASSERT(depth < pcstack_limit); 1010Sstevel@tonic-gate pcstack[depth++] = (pc_t)caller; 1020Sstevel@tonic-gate caller = NULL; 1030Sstevel@tonic-gate } 1040Sstevel@tonic-gate } else { 1050Sstevel@tonic-gate if (depth < pcstack_limit) 1060Sstevel@tonic-gate pcstack[depth++] = (pc_t)pc; 1070Sstevel@tonic-gate } 1080Sstevel@tonic-gate 1090Sstevel@tonic-gate if (last) { 1100Sstevel@tonic-gate while (depth < pcstack_limit) 1110Sstevel@tonic-gate pcstack[depth++] = NULL; 1120Sstevel@tonic-gate return; 1130Sstevel@tonic-gate } 1140Sstevel@tonic-gate 1150Sstevel@tonic-gate fp = nextfp; 1160Sstevel@tonic-gate minfp = fp; 1170Sstevel@tonic-gate } 1180Sstevel@tonic-gate } 1190Sstevel@tonic-gate 120191Sahl static int 121191Sahl dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc, 122191Sahl uintptr_t sp) 1230Sstevel@tonic-gate { 1240Sstevel@tonic-gate klwp_t *lwp = ttolwp(curthread); 125191Sahl proc_t *p = curproc; 126191Sahl uintptr_t oldcontext = lwp->lwp_oldcontext; 127191Sahl volatile uint16_t *flags = 128191Sahl (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 
1290Sstevel@tonic-gate size_t s1, s2; 130191Sahl int ret = 0; 1310Sstevel@tonic-gate 132191Sahl ASSERT(pcstack == NULL || pcstack_limit > 0); 1330Sstevel@tonic-gate 1340Sstevel@tonic-gate if (p->p_model == DATAMODEL_NATIVE) { 1350Sstevel@tonic-gate s1 = sizeof (struct frame) + 2 * sizeof (long); 1360Sstevel@tonic-gate s2 = s1 + sizeof (siginfo_t); 1370Sstevel@tonic-gate } else { 1380Sstevel@tonic-gate s1 = sizeof (struct frame32) + 3 * sizeof (int); 1390Sstevel@tonic-gate s2 = s1 + sizeof (siginfo32_t); 1400Sstevel@tonic-gate } 1410Sstevel@tonic-gate 142*1880Sahl while (pc != 0) { 143191Sahl ret++; 144191Sahl if (pcstack != NULL) { 145191Sahl *pcstack++ = (uint64_t)pc; 146191Sahl pcstack_limit--; 147191Sahl if (pcstack_limit <= 0) 148191Sahl break; 149191Sahl } 1500Sstevel@tonic-gate 151*1880Sahl if (sp == 0) 152*1880Sahl break; 153*1880Sahl 1540Sstevel@tonic-gate if (oldcontext == sp + s1 || oldcontext == sp + s2) { 1550Sstevel@tonic-gate if (p->p_model == DATAMODEL_NATIVE) { 1560Sstevel@tonic-gate ucontext_t *ucp = (ucontext_t *)oldcontext; 1570Sstevel@tonic-gate greg_t *gregs = ucp->uc_mcontext.gregs; 1580Sstevel@tonic-gate 1590Sstevel@tonic-gate sp = dtrace_fulword(&gregs[REG_FP]); 1600Sstevel@tonic-gate pc = dtrace_fulword(&gregs[REG_PC]); 1610Sstevel@tonic-gate 1620Sstevel@tonic-gate oldcontext = dtrace_fulword(&ucp->uc_link); 1630Sstevel@tonic-gate } else { 1640Sstevel@tonic-gate ucontext32_t *ucp = (ucontext32_t *)oldcontext; 1650Sstevel@tonic-gate greg32_t *gregs = ucp->uc_mcontext.gregs; 1660Sstevel@tonic-gate 1670Sstevel@tonic-gate sp = dtrace_fuword32(&gregs[EBP]); 1680Sstevel@tonic-gate pc = dtrace_fuword32(&gregs[EIP]); 1690Sstevel@tonic-gate 1700Sstevel@tonic-gate oldcontext = dtrace_fuword32(&ucp->uc_link); 1710Sstevel@tonic-gate } 1720Sstevel@tonic-gate } else { 1730Sstevel@tonic-gate if (p->p_model == DATAMODEL_NATIVE) { 1740Sstevel@tonic-gate struct frame *fr = (struct frame *)sp; 1750Sstevel@tonic-gate 1760Sstevel@tonic-gate pc = 
dtrace_fulword(&fr->fr_savpc); 1770Sstevel@tonic-gate sp = dtrace_fulword(&fr->fr_savfp); 1780Sstevel@tonic-gate } else { 1790Sstevel@tonic-gate struct frame32 *fr = (struct frame32 *)sp; 1800Sstevel@tonic-gate 1810Sstevel@tonic-gate pc = dtrace_fuword32(&fr->fr_savpc); 1820Sstevel@tonic-gate sp = dtrace_fuword32(&fr->fr_savfp); 1830Sstevel@tonic-gate } 1840Sstevel@tonic-gate } 1850Sstevel@tonic-gate 1860Sstevel@tonic-gate /* 1870Sstevel@tonic-gate * This is totally bogus: if we faulted, we're going to clear 1880Sstevel@tonic-gate * the fault and break. This is to deal with the apparently 1890Sstevel@tonic-gate * broken Java stacks on x86. 1900Sstevel@tonic-gate */ 1910Sstevel@tonic-gate if (*flags & CPU_DTRACE_FAULT) { 1920Sstevel@tonic-gate *flags &= ~CPU_DTRACE_FAULT; 1930Sstevel@tonic-gate break; 1940Sstevel@tonic-gate } 1950Sstevel@tonic-gate } 1960Sstevel@tonic-gate 197191Sahl return (ret); 198191Sahl } 199191Sahl 200191Sahl void 201191Sahl dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit) 202191Sahl { 203191Sahl klwp_t *lwp = ttolwp(curthread); 204191Sahl proc_t *p = curproc; 205191Sahl struct regs *rp; 206191Sahl uintptr_t pc, sp; 207191Sahl volatile uint16_t *flags = 208191Sahl (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 209191Sahl int n; 210191Sahl 211191Sahl if (*flags & CPU_DTRACE_FAULT) 212191Sahl return; 213191Sahl 214191Sahl if (pcstack_limit <= 0) 215191Sahl return; 216191Sahl 217630Sahl /* 218630Sahl * If there's no user context we still need to zero the stack. 
219630Sahl */ 220630Sahl if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL) 221630Sahl goto zero; 222630Sahl 223191Sahl *pcstack++ = (uint64_t)p->p_pid; 224191Sahl pcstack_limit--; 225191Sahl 226191Sahl if (pcstack_limit <= 0) 227191Sahl return; 228191Sahl 229191Sahl pc = rp->r_pc; 230191Sahl sp = rp->r_fp; 231191Sahl 232191Sahl if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) { 233191Sahl *pcstack++ = (uint64_t)pc; 234191Sahl pcstack_limit--; 235191Sahl if (pcstack_limit <= 0) 236191Sahl return; 237191Sahl 238191Sahl if (p->p_model == DATAMODEL_NATIVE) 239191Sahl pc = dtrace_fulword((void *)rp->r_sp); 240191Sahl else 241191Sahl pc = dtrace_fuword32((void *)rp->r_sp); 242191Sahl } 243191Sahl 244191Sahl n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp); 245191Sahl ASSERT(n >= 0); 246191Sahl ASSERT(n <= pcstack_limit); 247191Sahl 248191Sahl pcstack += n; 249191Sahl pcstack_limit -= n; 250191Sahl 251630Sahl zero: 2520Sstevel@tonic-gate while (pcstack_limit-- > 0) 2530Sstevel@tonic-gate *pcstack++ = NULL; 2540Sstevel@tonic-gate } 2550Sstevel@tonic-gate 256191Sahl int 257191Sahl dtrace_getustackdepth(void) 258191Sahl { 259191Sahl klwp_t *lwp = ttolwp(curthread); 260191Sahl proc_t *p = curproc; 261191Sahl struct regs *rp; 262191Sahl uintptr_t pc, sp; 263191Sahl int n = 0; 264191Sahl 265191Sahl if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL) 266191Sahl return (0); 267191Sahl 268191Sahl if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 269191Sahl return (-1); 270191Sahl 271191Sahl pc = rp->r_pc; 272191Sahl sp = rp->r_fp; 273191Sahl 274191Sahl if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) { 275191Sahl n++; 276191Sahl 277191Sahl if (p->p_model == DATAMODEL_NATIVE) 278191Sahl pc = dtrace_fulword((void *)rp->r_sp); 279191Sahl else 280191Sahl pc = dtrace_fuword32((void *)rp->r_sp); 281191Sahl } 282191Sahl 283191Sahl n += dtrace_getustack_common(NULL, 0, pc, sp); 284191Sahl 285191Sahl return (n); 286191Sahl } 287191Sahl 2880Sstevel@tonic-gate void 
2890Sstevel@tonic-gate dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit) 2900Sstevel@tonic-gate { 2910Sstevel@tonic-gate klwp_t *lwp = ttolwp(curthread); 292191Sahl proc_t *p = curproc; 2930Sstevel@tonic-gate struct regs *rp; 2940Sstevel@tonic-gate uintptr_t pc, sp, oldcontext; 295191Sahl volatile uint16_t *flags = 296191Sahl (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; 2970Sstevel@tonic-gate size_t s1, s2; 2980Sstevel@tonic-gate 2990Sstevel@tonic-gate if (*flags & CPU_DTRACE_FAULT) 3000Sstevel@tonic-gate return; 3010Sstevel@tonic-gate 3020Sstevel@tonic-gate if (pcstack_limit <= 0) 3030Sstevel@tonic-gate return; 3040Sstevel@tonic-gate 305630Sahl /* 306630Sahl * If there's no user context we still need to zero the stack. 307630Sahl */ 308630Sahl if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL) 309630Sahl goto zero; 310630Sahl 3110Sstevel@tonic-gate *pcstack++ = (uint64_t)p->p_pid; 3120Sstevel@tonic-gate pcstack_limit--; 3130Sstevel@tonic-gate 3140Sstevel@tonic-gate if (pcstack_limit <= 0) 3150Sstevel@tonic-gate return; 3160Sstevel@tonic-gate 3170Sstevel@tonic-gate pc = rp->r_pc; 3180Sstevel@tonic-gate sp = rp->r_fp; 3190Sstevel@tonic-gate oldcontext = lwp->lwp_oldcontext; 3200Sstevel@tonic-gate 3210Sstevel@tonic-gate if (p->p_model == DATAMODEL_NATIVE) { 3220Sstevel@tonic-gate s1 = sizeof (struct frame) + 2 * sizeof (long); 3230Sstevel@tonic-gate s2 = s1 + sizeof (siginfo_t); 3240Sstevel@tonic-gate } else { 3250Sstevel@tonic-gate s1 = sizeof (struct frame32) + 3 * sizeof (int); 3260Sstevel@tonic-gate s2 = s1 + sizeof (siginfo32_t); 3270Sstevel@tonic-gate } 3280Sstevel@tonic-gate 3290Sstevel@tonic-gate if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) { 3300Sstevel@tonic-gate *pcstack++ = (uint64_t)pc; 3310Sstevel@tonic-gate *fpstack++ = 0; 3320Sstevel@tonic-gate pcstack_limit--; 3330Sstevel@tonic-gate if (pcstack_limit <= 0) 3340Sstevel@tonic-gate return; 3350Sstevel@tonic-gate 3360Sstevel@tonic-gate if (p->p_model 
== DATAMODEL_NATIVE) 3370Sstevel@tonic-gate pc = dtrace_fulword((void *)rp->r_sp); 3380Sstevel@tonic-gate else 3390Sstevel@tonic-gate pc = dtrace_fuword32((void *)rp->r_sp); 3400Sstevel@tonic-gate } 3410Sstevel@tonic-gate 342*1880Sahl while (pc != 0) { 3430Sstevel@tonic-gate *pcstack++ = (uint64_t)pc; 3440Sstevel@tonic-gate *fpstack++ = sp; 3450Sstevel@tonic-gate pcstack_limit--; 3460Sstevel@tonic-gate if (pcstack_limit <= 0) 3470Sstevel@tonic-gate break; 3480Sstevel@tonic-gate 349*1880Sahl if (sp == 0) 350*1880Sahl break; 351*1880Sahl 3520Sstevel@tonic-gate if (oldcontext == sp + s1 || oldcontext == sp + s2) { 3530Sstevel@tonic-gate if (p->p_model == DATAMODEL_NATIVE) { 3540Sstevel@tonic-gate ucontext_t *ucp = (ucontext_t *)oldcontext; 3550Sstevel@tonic-gate greg_t *gregs = ucp->uc_mcontext.gregs; 3560Sstevel@tonic-gate 3570Sstevel@tonic-gate sp = dtrace_fulword(&gregs[REG_FP]); 3580Sstevel@tonic-gate pc = dtrace_fulword(&gregs[REG_PC]); 3590Sstevel@tonic-gate 3600Sstevel@tonic-gate oldcontext = dtrace_fulword(&ucp->uc_link); 3610Sstevel@tonic-gate } else { 3620Sstevel@tonic-gate ucontext_t *ucp = (ucontext_t *)oldcontext; 3630Sstevel@tonic-gate greg_t *gregs = ucp->uc_mcontext.gregs; 3640Sstevel@tonic-gate 3650Sstevel@tonic-gate sp = dtrace_fuword32(&gregs[EBP]); 3660Sstevel@tonic-gate pc = dtrace_fuword32(&gregs[EIP]); 3670Sstevel@tonic-gate 3680Sstevel@tonic-gate oldcontext = dtrace_fuword32(&ucp->uc_link); 3690Sstevel@tonic-gate } 3700Sstevel@tonic-gate } else { 3710Sstevel@tonic-gate if (p->p_model == DATAMODEL_NATIVE) { 3720Sstevel@tonic-gate struct frame *fr = (struct frame *)sp; 3730Sstevel@tonic-gate 3740Sstevel@tonic-gate pc = dtrace_fulword(&fr->fr_savpc); 3750Sstevel@tonic-gate sp = dtrace_fulword(&fr->fr_savfp); 3760Sstevel@tonic-gate } else { 3770Sstevel@tonic-gate struct frame32 *fr = (struct frame32 *)sp; 3780Sstevel@tonic-gate 3790Sstevel@tonic-gate pc = dtrace_fuword32(&fr->fr_savpc); 3800Sstevel@tonic-gate sp = dtrace_fuword32(&fr->fr_savfp); 
3810Sstevel@tonic-gate } 3820Sstevel@tonic-gate } 3830Sstevel@tonic-gate 3840Sstevel@tonic-gate /* 3850Sstevel@tonic-gate * This is totally bogus: if we faulted, we're going to clear 3860Sstevel@tonic-gate * the fault and break. This is to deal with the apparently 3870Sstevel@tonic-gate * broken Java stacks on x86. 3880Sstevel@tonic-gate */ 3890Sstevel@tonic-gate if (*flags & CPU_DTRACE_FAULT) { 3900Sstevel@tonic-gate *flags &= ~CPU_DTRACE_FAULT; 3910Sstevel@tonic-gate break; 3920Sstevel@tonic-gate } 3930Sstevel@tonic-gate } 3940Sstevel@tonic-gate 395630Sahl zero: 3960Sstevel@tonic-gate while (pcstack_limit-- > 0) 3970Sstevel@tonic-gate *pcstack++ = NULL; 3980Sstevel@tonic-gate } 3990Sstevel@tonic-gate 4000Sstevel@tonic-gate /*ARGSUSED*/ 4010Sstevel@tonic-gate uint64_t 4020Sstevel@tonic-gate dtrace_getarg(int arg, int aframes) 4030Sstevel@tonic-gate { 4040Sstevel@tonic-gate uintptr_t val; 4050Sstevel@tonic-gate struct frame *fp = (struct frame *)dtrace_getfp(); 4060Sstevel@tonic-gate uintptr_t *stack; 4070Sstevel@tonic-gate int i; 4080Sstevel@tonic-gate #if defined(__amd64) 4090Sstevel@tonic-gate /* 4100Sstevel@tonic-gate * A total of 6 arguments are passed via registers; any argument with 4110Sstevel@tonic-gate * index of 5 or lower is therefore in a register. 4120Sstevel@tonic-gate */ 4130Sstevel@tonic-gate int inreg = 5; 4140Sstevel@tonic-gate #endif 4150Sstevel@tonic-gate 4160Sstevel@tonic-gate for (i = 1; i <= aframes; i++) { 4170Sstevel@tonic-gate fp = (struct frame *)(fp->fr_savfp); 4180Sstevel@tonic-gate 4190Sstevel@tonic-gate if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) { 4200Sstevel@tonic-gate #if !defined(__amd64) 4210Sstevel@tonic-gate /* 4220Sstevel@tonic-gate * If we pass through the invalid op handler, we will 4230Sstevel@tonic-gate * use the pointer that it passed to the stack as the 4240Sstevel@tonic-gate * second argument to dtrace_invop() as the pointer to 4250Sstevel@tonic-gate * the stack. 
When using this stack, we must step 4260Sstevel@tonic-gate * beyond the EIP/RIP that was pushed when the trap was 4270Sstevel@tonic-gate * taken -- hence the "+ 1" below. 4280Sstevel@tonic-gate */ 4290Sstevel@tonic-gate stack = ((uintptr_t **)&fp[1])[1] + 1; 4300Sstevel@tonic-gate #else 4310Sstevel@tonic-gate /* 4320Sstevel@tonic-gate * In the case of amd64, we will use the pointer to the 4330Sstevel@tonic-gate * regs structure that was pushed when we took the 4340Sstevel@tonic-gate * trap. To get this structure, we must increment 4350Sstevel@tonic-gate * beyond the frame structure, and then again beyond 4360Sstevel@tonic-gate * the calling RIP stored in dtrace_invop(). If the 4370Sstevel@tonic-gate * argument that we're seeking is passed on the stack, 4380Sstevel@tonic-gate * we'll pull the true stack pointer out of the saved 4390Sstevel@tonic-gate * registers and decrement our argument by the number 4400Sstevel@tonic-gate * of arguments passed in registers; if the argument 4410Sstevel@tonic-gate * we're seeking is passed in regsiters, we can just 4420Sstevel@tonic-gate * load it directly. 4430Sstevel@tonic-gate */ 4440Sstevel@tonic-gate struct regs *rp = (struct regs *)((uintptr_t)&fp[1] + 4450Sstevel@tonic-gate sizeof (uintptr_t)); 4460Sstevel@tonic-gate 4470Sstevel@tonic-gate if (arg <= inreg) { 4480Sstevel@tonic-gate stack = (uintptr_t *)&rp->r_rdi; 4490Sstevel@tonic-gate } else { 4500Sstevel@tonic-gate stack = (uintptr_t *)(rp->r_rsp); 4510Sstevel@tonic-gate arg -= inreg; 4520Sstevel@tonic-gate } 4530Sstevel@tonic-gate #endif 4540Sstevel@tonic-gate goto load; 4550Sstevel@tonic-gate } 4560Sstevel@tonic-gate 4570Sstevel@tonic-gate } 4580Sstevel@tonic-gate 4590Sstevel@tonic-gate /* 4600Sstevel@tonic-gate * We know that we did not come through a trap to get into 4610Sstevel@tonic-gate * dtrace_probe() -- the provider simply called dtrace_probe() 4620Sstevel@tonic-gate * directly. 
As this is the case, we need to shift the argument 4630Sstevel@tonic-gate * that we're looking for: the probe ID is the first argument to 4640Sstevel@tonic-gate * dtrace_probe(), so the argument n will actually be found where 4650Sstevel@tonic-gate * one would expect to find argument (n + 1). 4660Sstevel@tonic-gate */ 4670Sstevel@tonic-gate arg++; 4680Sstevel@tonic-gate 4690Sstevel@tonic-gate #if defined(__amd64) 4700Sstevel@tonic-gate if (arg <= inreg) { 4710Sstevel@tonic-gate /* 4720Sstevel@tonic-gate * This shouldn't happen. If the argument is passed in a 4730Sstevel@tonic-gate * register then it should have been, well, passed in a 4740Sstevel@tonic-gate * register... 4750Sstevel@tonic-gate */ 4760Sstevel@tonic-gate DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4770Sstevel@tonic-gate return (0); 4780Sstevel@tonic-gate } 4790Sstevel@tonic-gate 4800Sstevel@tonic-gate arg -= (inreg + 1); 4810Sstevel@tonic-gate #endif 4820Sstevel@tonic-gate stack = (uintptr_t *)&fp[1]; 4830Sstevel@tonic-gate 4840Sstevel@tonic-gate load: 4850Sstevel@tonic-gate DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 4860Sstevel@tonic-gate val = stack[arg]; 4870Sstevel@tonic-gate DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 4880Sstevel@tonic-gate 4890Sstevel@tonic-gate return (val); 4900Sstevel@tonic-gate } 4910Sstevel@tonic-gate 4920Sstevel@tonic-gate /*ARGSUSED*/ 4930Sstevel@tonic-gate int 4940Sstevel@tonic-gate dtrace_getstackdepth(int aframes) 4950Sstevel@tonic-gate { 4960Sstevel@tonic-gate struct frame *fp = (struct frame *)dtrace_getfp(); 4970Sstevel@tonic-gate struct frame *nextfp, *minfp, *stacktop; 4980Sstevel@tonic-gate int depth = 0; 4990Sstevel@tonic-gate int on_intr; 5000Sstevel@tonic-gate 5010Sstevel@tonic-gate if ((on_intr = CPU_ON_INTR(CPU)) != 0) 5020Sstevel@tonic-gate stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME)); 5030Sstevel@tonic-gate else 5040Sstevel@tonic-gate stacktop = (struct frame *)curthread->t_stk; 5050Sstevel@tonic-gate minfp = fp; 5060Sstevel@tonic-gate 
5070Sstevel@tonic-gate aframes++; 5080Sstevel@tonic-gate 5090Sstevel@tonic-gate for (;;) { 5100Sstevel@tonic-gate depth++; 5110Sstevel@tonic-gate 5120Sstevel@tonic-gate nextfp = (struct frame *)fp->fr_savfp; 5130Sstevel@tonic-gate 5140Sstevel@tonic-gate if (nextfp <= minfp || nextfp >= stacktop) { 5150Sstevel@tonic-gate if (on_intr) { 5160Sstevel@tonic-gate /* 5170Sstevel@tonic-gate * Hop from interrupt stack to thread stack. 5180Sstevel@tonic-gate */ 5190Sstevel@tonic-gate stacktop = (struct frame *)curthread->t_stk; 5200Sstevel@tonic-gate minfp = (struct frame *)curthread->t_stkbase; 5210Sstevel@tonic-gate on_intr = 0; 5220Sstevel@tonic-gate continue; 5230Sstevel@tonic-gate } 5240Sstevel@tonic-gate break; 5250Sstevel@tonic-gate } 5260Sstevel@tonic-gate 5270Sstevel@tonic-gate fp = nextfp; 5280Sstevel@tonic-gate minfp = fp; 5290Sstevel@tonic-gate } 5300Sstevel@tonic-gate 5310Sstevel@tonic-gate if (depth <= aframes) 5320Sstevel@tonic-gate return (0); 5330Sstevel@tonic-gate 5340Sstevel@tonic-gate return (depth - aframes); 5350Sstevel@tonic-gate } 5360Sstevel@tonic-gate 5370Sstevel@tonic-gate ulong_t 5380Sstevel@tonic-gate dtrace_getreg(struct regs *rp, uint_t reg) 5390Sstevel@tonic-gate { 5400Sstevel@tonic-gate #if defined(__amd64) 5410Sstevel@tonic-gate int regmap[] = { 5420Sstevel@tonic-gate REG_GS, /* GS */ 5430Sstevel@tonic-gate REG_FS, /* FS */ 5440Sstevel@tonic-gate REG_ES, /* ES */ 5450Sstevel@tonic-gate REG_DS, /* DS */ 5460Sstevel@tonic-gate REG_RDI, /* EDI */ 5470Sstevel@tonic-gate REG_RSI, /* ESI */ 5480Sstevel@tonic-gate REG_RBP, /* EBP */ 5490Sstevel@tonic-gate REG_RSP, /* ESP */ 5500Sstevel@tonic-gate REG_RBX, /* EBX */ 5510Sstevel@tonic-gate REG_RDX, /* EDX */ 5520Sstevel@tonic-gate REG_RCX, /* ECX */ 5530Sstevel@tonic-gate REG_RAX, /* EAX */ 5540Sstevel@tonic-gate REG_TRAPNO, /* TRAPNO */ 5550Sstevel@tonic-gate REG_ERR, /* ERR */ 5560Sstevel@tonic-gate REG_RIP, /* EIP */ 5570Sstevel@tonic-gate REG_CS, /* CS */ 5580Sstevel@tonic-gate REG_RFL, /* EFL */ 
5590Sstevel@tonic-gate REG_RSP, /* UESP */ 5600Sstevel@tonic-gate REG_SS /* SS */ 5610Sstevel@tonic-gate }; 5620Sstevel@tonic-gate 5630Sstevel@tonic-gate if (reg <= SS) { 5640Sstevel@tonic-gate if (reg >= sizeof (regmap) / sizeof (int)) { 5650Sstevel@tonic-gate DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5660Sstevel@tonic-gate return (0); 5670Sstevel@tonic-gate } 5680Sstevel@tonic-gate 5690Sstevel@tonic-gate reg = regmap[reg]; 5700Sstevel@tonic-gate } else { 5710Sstevel@tonic-gate reg -= SS + 1; 5720Sstevel@tonic-gate } 5730Sstevel@tonic-gate 5740Sstevel@tonic-gate switch (reg) { 5750Sstevel@tonic-gate case REG_RDI: 5760Sstevel@tonic-gate return (rp->r_rdi); 5770Sstevel@tonic-gate case REG_RSI: 5780Sstevel@tonic-gate return (rp->r_rsi); 5790Sstevel@tonic-gate case REG_RDX: 5800Sstevel@tonic-gate return (rp->r_rdx); 5810Sstevel@tonic-gate case REG_RCX: 5820Sstevel@tonic-gate return (rp->r_rcx); 5830Sstevel@tonic-gate case REG_R8: 5840Sstevel@tonic-gate return (rp->r_r8); 5850Sstevel@tonic-gate case REG_R9: 5860Sstevel@tonic-gate return (rp->r_r9); 5870Sstevel@tonic-gate case REG_RAX: 5880Sstevel@tonic-gate return (rp->r_rax); 5890Sstevel@tonic-gate case REG_RBX: 5900Sstevel@tonic-gate return (rp->r_rbx); 5910Sstevel@tonic-gate case REG_RBP: 5920Sstevel@tonic-gate return (rp->r_rbp); 5930Sstevel@tonic-gate case REG_R10: 5940Sstevel@tonic-gate return (rp->r_r10); 5950Sstevel@tonic-gate case REG_R11: 5960Sstevel@tonic-gate return (rp->r_r11); 5970Sstevel@tonic-gate case REG_R12: 5980Sstevel@tonic-gate return (rp->r_r12); 5990Sstevel@tonic-gate case REG_R13: 6000Sstevel@tonic-gate return (rp->r_r13); 6010Sstevel@tonic-gate case REG_R14: 6020Sstevel@tonic-gate return (rp->r_r14); 6030Sstevel@tonic-gate case REG_R15: 6040Sstevel@tonic-gate return (rp->r_r15); 6050Sstevel@tonic-gate case REG_DS: 6060Sstevel@tonic-gate return (rp->r_ds); 6070Sstevel@tonic-gate case REG_ES: 6080Sstevel@tonic-gate return (rp->r_es); 6090Sstevel@tonic-gate case REG_FS: 6100Sstevel@tonic-gate return 
(rp->r_fs); 6110Sstevel@tonic-gate case REG_GS: 6120Sstevel@tonic-gate return (rp->r_gs); 6130Sstevel@tonic-gate case REG_TRAPNO: 6140Sstevel@tonic-gate return (rp->r_trapno); 6150Sstevel@tonic-gate case REG_ERR: 6160Sstevel@tonic-gate return (rp->r_err); 6170Sstevel@tonic-gate case REG_RIP: 6180Sstevel@tonic-gate return (rp->r_rip); 6190Sstevel@tonic-gate case REG_CS: 6200Sstevel@tonic-gate return (rp->r_cs); 6210Sstevel@tonic-gate case REG_SS: 6220Sstevel@tonic-gate return (rp->r_ss); 6230Sstevel@tonic-gate case REG_RFL: 6240Sstevel@tonic-gate return (rp->r_rfl); 6250Sstevel@tonic-gate case REG_RSP: 6260Sstevel@tonic-gate return (rp->r_rsp); 6270Sstevel@tonic-gate default: 6280Sstevel@tonic-gate DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 6290Sstevel@tonic-gate return (0); 6300Sstevel@tonic-gate } 6310Sstevel@tonic-gate 6320Sstevel@tonic-gate #else 6330Sstevel@tonic-gate if (reg > SS) { 6340Sstevel@tonic-gate DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 6350Sstevel@tonic-gate return (0); 6360Sstevel@tonic-gate } 6370Sstevel@tonic-gate 6380Sstevel@tonic-gate return ((&rp->r_gs)[reg]); 6390Sstevel@tonic-gate #endif 6400Sstevel@tonic-gate } 6410Sstevel@tonic-gate 6420Sstevel@tonic-gate static int 6430Sstevel@tonic-gate dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size) 6440Sstevel@tonic-gate { 6450Sstevel@tonic-gate ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr); 6460Sstevel@tonic-gate 6470Sstevel@tonic-gate if (uaddr + size >= kernelbase || uaddr + size < uaddr) { 6480Sstevel@tonic-gate DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 6490Sstevel@tonic-gate cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; 6500Sstevel@tonic-gate return (0); 6510Sstevel@tonic-gate } 6520Sstevel@tonic-gate 6530Sstevel@tonic-gate return (1); 6540Sstevel@tonic-gate } 6550Sstevel@tonic-gate 6560Sstevel@tonic-gate void 6570Sstevel@tonic-gate dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size) 6580Sstevel@tonic-gate { 6590Sstevel@tonic-gate if (dtrace_copycheck(uaddr, kaddr, size)) 
6600Sstevel@tonic-gate dtrace_copy(uaddr, kaddr, size); 6610Sstevel@tonic-gate } 6620Sstevel@tonic-gate 6630Sstevel@tonic-gate void 6640Sstevel@tonic-gate dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size) 6650Sstevel@tonic-gate { 6660Sstevel@tonic-gate if (dtrace_copycheck(uaddr, kaddr, size)) 6670Sstevel@tonic-gate dtrace_copy(kaddr, uaddr, size); 6680Sstevel@tonic-gate } 6690Sstevel@tonic-gate 6700Sstevel@tonic-gate void 6710Sstevel@tonic-gate dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size) 6720Sstevel@tonic-gate { 6730Sstevel@tonic-gate if (dtrace_copycheck(uaddr, kaddr, size)) 6740Sstevel@tonic-gate dtrace_copystr(uaddr, kaddr, size); 6750Sstevel@tonic-gate } 6760Sstevel@tonic-gate 6770Sstevel@tonic-gate void 6780Sstevel@tonic-gate dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size) 6790Sstevel@tonic-gate { 6800Sstevel@tonic-gate if (dtrace_copycheck(uaddr, kaddr, size)) 6810Sstevel@tonic-gate dtrace_copystr(kaddr, uaddr, size); 6820Sstevel@tonic-gate } 6830Sstevel@tonic-gate 6840Sstevel@tonic-gate uint8_t 6850Sstevel@tonic-gate dtrace_fuword8(void *uaddr) 6860Sstevel@tonic-gate { 6870Sstevel@tonic-gate extern uint8_t dtrace_fuword8_nocheck(void *); 6880Sstevel@tonic-gate if ((uintptr_t)uaddr >= _userlimit) { 6890Sstevel@tonic-gate DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 6900Sstevel@tonic-gate cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr; 6910Sstevel@tonic-gate return (0); 6920Sstevel@tonic-gate } 6930Sstevel@tonic-gate return (dtrace_fuword8_nocheck(uaddr)); 6940Sstevel@tonic-gate } 6950Sstevel@tonic-gate 6960Sstevel@tonic-gate uint16_t 6970Sstevel@tonic-gate dtrace_fuword16(void *uaddr) 6980Sstevel@tonic-gate { 6990Sstevel@tonic-gate extern uint16_t dtrace_fuword16_nocheck(void *); 7000Sstevel@tonic-gate if ((uintptr_t)uaddr >= _userlimit) { 7010Sstevel@tonic-gate DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 7020Sstevel@tonic-gate cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr; 
7030Sstevel@tonic-gate return (0); 7040Sstevel@tonic-gate } 7050Sstevel@tonic-gate return (dtrace_fuword16_nocheck(uaddr)); 7060Sstevel@tonic-gate } 7070Sstevel@tonic-gate 7080Sstevel@tonic-gate uint32_t 7090Sstevel@tonic-gate dtrace_fuword32(void *uaddr) 7100Sstevel@tonic-gate { 7110Sstevel@tonic-gate extern uint32_t dtrace_fuword32_nocheck(void *); 7120Sstevel@tonic-gate if ((uintptr_t)uaddr >= _userlimit) { 7130Sstevel@tonic-gate DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 7140Sstevel@tonic-gate cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr; 7150Sstevel@tonic-gate return (0); 7160Sstevel@tonic-gate } 7170Sstevel@tonic-gate return (dtrace_fuword32_nocheck(uaddr)); 7180Sstevel@tonic-gate } 7190Sstevel@tonic-gate 7200Sstevel@tonic-gate uint64_t 7210Sstevel@tonic-gate dtrace_fuword64(void *uaddr) 7220Sstevel@tonic-gate { 7230Sstevel@tonic-gate extern uint64_t dtrace_fuword64_nocheck(void *); 7240Sstevel@tonic-gate if ((uintptr_t)uaddr >= _userlimit) { 7250Sstevel@tonic-gate DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 7260Sstevel@tonic-gate cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr; 7270Sstevel@tonic-gate return (0); 7280Sstevel@tonic-gate } 7290Sstevel@tonic-gate return (dtrace_fuword64_nocheck(uaddr)); 7300Sstevel@tonic-gate } 731