xref: /onnv-gate/usr/src/uts/intel/dtrace/dtrace_isa.c (revision 0:68f95e015346)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dtrace_impl.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>

/*
 * This is gross knowledge to have to encode here...
 */
extern void _interrupt();
extern void _cmntrap();
extern void _allsyscalls();

extern size_t _interrupt_size;
extern size_t _cmntrap_size;
extern size_t _allsyscalls_size;

extern uintptr_t kernelbase;

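/*
 * Gather a kernel stack trace:  walk the chain of saved frame pointers from
 * the current frame toward the top of the stack, recording each saved PC in
 * pcstack[].  If we are running on the interrupt stack, we hop over to the
 * interrupted thread's stack when we run off the end.  The first "aframes"
 * (artificial) frames are skipped; if cpu_dtrace_caller is set, it is
 * recorded once the artificial frames have been consumed.  Any unused slots
 * are zeroed.
 */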
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr, last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit)
		pcstack[depth++] = (pc_t)intrpc;

	while (depth < pcstack_limit) {
		nextfp = (struct frame *)fp->fr_savfp;
		pc = fp->fr_savpc;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}

			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != NULL) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = NULL;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = NULL;
			return;
		}

		fp = nextfp;
		minfp = fp;
	}
}

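/*
 * Gather a user-level stack trace for the current thread.  The first slot
 * holds the pid; the remaining slots are filled with return PCs obtained by
 * chasing saved frame pointers through the process's address space via the
 * fault-safe dtrace_fulword()/dtrace_fuword32() routines, following signal
 * handler ucontexts (lwp_oldcontext) where one is detected on the stack.
 * Both the native and ILP32 data models are handled; unused slots are
 * zeroed.
 */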
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = ttoproc(curthread);
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint8_t *flags =
	    (volatile uint8_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0 && sp != 0) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

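/*
 * As dtrace_getupcstack(), but also records the frame pointer of each user
 * frame in fpstack[] alongside the corresponding PC in pcstack[].
 */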
/*ARGSUSED*/
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = ttoproc(curthread);
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint8_t *flags =
	    (volatile uint8_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0 && sp != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

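/*
 * Fetch argument "arg" of the current probe.  If the frame chain passes
 * through dtrace_invop_callsite (i.e. the probe fired via the invalid
 * opcode trap), the arguments are recovered from the register and stack
 * state handed to dtrace_invop(); otherwise the provider called
 * dtrace_probe() directly and the arguments sit on the stack just above
 * dtrace_probe()'s frame, offset by one to skip the probe ID.
 */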
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;
#if defined(__amd64)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;
#endif

	for (i = 1; i <= aframes; i++) {
		fp = (struct frame *)(fp->fr_savfp);

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
			/*
			 * If we pass through the invalid op handler, the
			 * pointer that the handler pushed as the second
			 * argument to dtrace_invop() is our pointer to the
			 * stack.  When using this stack, we must step beyond
			 * the EIP/RIP that was pushed when the trap was
			 * taken -- hence the "+ 1" below.
			 */
			stack = ((uintptr_t **)&fp[1])[1] + 1;
#else
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure, and then again beyond
			 * the calling RIP stored in dtrace_invop().  If the
			 * argument that we're seeking is passed on the stack,
			 * we'll pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t));

			if (arg <= inreg) {
				stack = (uintptr_t *)&rp->r_rdi;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				arg -= inreg;
			}
#endif
			goto load;
		}

	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

#if defined(__amd64)
	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

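/*
 * Return the depth of the current kernel stack, not counting the "aframes"
 * artificial frames (or this function's own frame), hopping from the
 * interrupt stack to the thread stack in the same way as
 * dtrace_getpcstack().
 */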
/*ARGSUSED*/
int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int on_intr;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		nextfp = (struct frame *)fp->fr_savfp;

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break;
		}

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes)
		return (0);

	return (depth - aframes);
}

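/*
 * Return the value of the register denoted by "reg" from the given regs
 * structure.  On amd64, indices up to SS use the historical i386 register
 * numbering and are translated through regmap[]; larger indices are biased
 * by SS + 1 and interpreted as native amd64 register numbers.
 */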
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
#if defined(__amd64)
	int regmap[] = {
		REG_GS,		/* GS */
		REG_FS,		/* FS */
		REG_ES,		/* ES */
		REG_DS,		/* DS */
		REG_RDI,	/* EDI */
		REG_RSI,	/* ESI */
		REG_RBP,	/* EBP */
		REG_RSP,	/* ESP */
		REG_RBX,	/* EBX */
		REG_RDX,	/* EDX */
		REG_RCX,	/* ECX */
		REG_RAX,	/* EAX */
		REG_TRAPNO,	/* TRAPNO */
		REG_ERR,	/* ERR */
		REG_RIP,	/* EIP */
		REG_CS,		/* CS */
		REG_RFL,	/* EFL */
		REG_RSP,	/* UESP */
		REG_SS		/* SS */
	};

	if (reg <= SS) {
		if (reg >= sizeof (regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->r_rdi);
	case REG_RSI:
		return (rp->r_rsi);
	case REG_RDX:
		return (rp->r_rdx);
	case REG_RCX:
		return (rp->r_rcx);
	case REG_R8:
		return (rp->r_r8);
	case REG_R9:
		return (rp->r_r9);
	case REG_RAX:
		return (rp->r_rax);
	case REG_RBX:
		return (rp->r_rbx);
	case REG_RBP:
		return (rp->r_rbp);
	case REG_R10:
		return (rp->r_r10);
	case REG_R11:
		return (rp->r_r11);
	case REG_R12:
		return (rp->r_r12);
	case REG_R13:
		return (rp->r_r13);
	case REG_R14:
		return (rp->r_r14);
	case REG_R15:
		return (rp->r_r15);
	case REG_DS:
		return (rp->r_ds);
	case REG_ES:
		return (rp->r_es);
	case REG_FS:
		return (rp->r_fs);
	case REG_GS:
		return (rp->r_gs);
	case REG_TRAPNO:
		return (rp->r_trapno);
	case REG_ERR:
		return (rp->r_err);
	case REG_RIP:
		return (rp->r_rip);
	case REG_CS:
		return (rp->r_cs);
	case REG_SS:
		return (rp->r_ss);
	case REG_RFL:
		return (rp->r_rfl);
	case REG_RSP:
		return (rp->r_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

#else
	if (reg > SS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	return ((&rp->r_gs)[reg]);
#endif
}

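/*
 * Verify that the user range [uaddr, uaddr + size) lies entirely below
 * kernelbase and does not wrap; on failure, flag a bad-address fault and
 * record the offending address.  Returns non-zero if the copy may proceed.
 */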
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

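/*
 * Fault-tolerant copies between user and kernel addresses, backing the
 * copyin()/copyout()/copyinstr()/copyoutstr() subroutines in probe context.
 * Each routine validates the user range with dtrace_copycheck() before
 * deferring to the lower-level dtrace_copy()/dtrace_copystr() primitives.
 */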
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size);
}

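/*
 * Safe loads of 8-, 16-, 32- and 64-bit quantities from user addresses.
 * Each routine rejects addresses at or above _userlimit with a bad-address
 * fault before deferring to the corresponding *_nocheck routine for the
 * actual load.
 */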
uint8_t
dtrace_fuword8(void *uaddr)
{
	extern uint8_t dtrace_fuword8_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	extern uint16_t dtrace_fuword16_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	extern uint32_t dtrace_fuword32_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	extern uint64_t dtrace_fuword64_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}