xref: /onnv-gate/usr/src/uts/i86pc/os/dtrace_subr.c (revision 0:68f95e015346)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dtrace.h>
#include <sys/fasttrap.h>
#include <sys/x_call.h>
#include <sys/cmn_err.h>
#include <sys/trap.h>
#include <sys/psw.h>
#include <sys/privregs.h>
#include <sys/machsystm.h>
#include <vm/seg_kmem.h>

typedef struct dtrace_invop_hdlr {
	int (*dtih_func)(uintptr_t, uintptr_t *, uintptr_t);
	struct dtrace_invop_hdlr *dtih_next;
} dtrace_invop_hdlr_t;

dtrace_invop_hdlr_t *dtrace_invop_hdlr;
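
/*
 * Called from the invalid-opcode trap path: walk the list of registered
 * handlers, giving each a chance to claim the trap at the given address,
 * and return the first non-zero result (or zero if no handler claims it).
 */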
int
dtrace_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
{
	dtrace_invop_hdlr_t *hdlr;
	int rval;

	for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next) {
		if ((rval = hdlr->dtih_func(addr, stack, eax)) != 0)
			return (rval);
	}

	return (0);
}

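/*
 * Register an invalid-opcode handler by prepending it to the global list.
 */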
void
dtrace_invop_add(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr;

	hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
	hdlr->dtih_func = func;
	hdlr->dtih_next = dtrace_invop_hdlr;
	dtrace_invop_hdlr = hdlr;
}

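/*
 * Unregister an invalid-opcode handler; it is a fatal error to remove a
 * handler that was never added.
 */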
void
dtrace_invop_remove(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;

	for (;;) {
		if (hdlr == NULL)
			panic("attempt to remove non-existent invop handler");

		if (hdlr->dtih_func == func)
			break;

		prev = hdlr;
		hdlr = hdlr->dtih_next;
	}

	if (prev == NULL) {
		ASSERT(dtrace_invop_hdlr == hdlr);
		dtrace_invop_hdlr = hdlr->dtih_next;
	} else {
		ASSERT(dtrace_invop_hdlr != hdlr);
		prev->dtih_next = hdlr->dtih_next;
	}

	kmem_free(hdlr, sizeof (dtrace_invop_hdlr_t));
}

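/*
 * Return the interrupt priority level of the current CPU.
 */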
int
dtrace_getipl(void)
{
	return (CPU->cpu_pri);
}

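/*
 * Invoke func() once for each virtual address range that DTrace must treat
 * as toxic (unsafe to load from in probe context): user addresses, the
 * 64-bit VA hole, and device arena mappings.
 */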
/*ARGSUSED*/
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
#ifdef __amd64
	extern uintptr_t toxic_addr;
	extern size_t toxic_size;

	(*func)(0, _userlimit);

	if (hole_end > hole_start)
		(*func)(hole_start, hole_end);
	(*func)(toxic_addr, toxic_addr + toxic_size);
#else
	extern void *device_arena_contains(void *, size_t, size_t *);
	caddr_t	vaddr;
	size_t	len;

	for (vaddr = (caddr_t)kernelbase; vaddr < (caddr_t)KERNEL_TEXT;
	    vaddr += len) {
		len = (caddr_t)KERNEL_TEXT - vaddr;
		vaddr = device_arena_contains(vaddr, len, &len);
		if (vaddr == NULL)
			break;
		(*func)((uintptr_t)vaddr, (uintptr_t)vaddr + len);
	}
#endif
	(*func)(0, _userlimit);
}

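/*
 * Cross-call trampoline: invoke the DTrace cross-call function and return
 * zero to satisfy the xc_sync() interface.
 */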
static int
dtrace_xcall_func(dtrace_xcall_t func, void *arg)
{
	(*func)(arg);

	return (0);
}

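/*
 * Execute func(arg) on the specified CPU, or on every CPU if cpu is
 * DTRACE_CPUALL, via a synchronous high-priority cross-call.
 */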
/*ARGSUSED*/
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
{
	cpuset_t set;

	CPUSET_ZERO(set);

	if (cpu == DTRACE_CPUALL) {
		CPUSET_ALL(set);
	} else {
		CPUSET_ADD(set, cpu);
	}

	kpreempt_disable();
	xc_sync((xc_arg_t)func, (xc_arg_t)arg, 0, X_CALL_HIPRI, set,
	    (xc_func_t)dtrace_xcall_func);
	kpreempt_enable();
}

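/*
 * dtrace_sync() cross-calls every CPU with an empty function; once the
 * synchronous cross-call completes, no CPU can still be executing in the
 * probe context it was in when the call was issued.
 */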
void
dtrace_sync_func(void)
{}

void
dtrace_sync(void)
{
	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}

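/*
 * Hooks used by the fasttrap provider to handle user-level probe traps;
 * these remain NULL until the provider installs its handlers.
 */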
int (*dtrace_fasttrap_probe_ptr)(struct regs *);
int (*dtrace_pid_probe_ptr)(struct regs *);
int (*dtrace_return_probe_ptr)(struct regs *);

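/*
 * Handle a trap induced by user-level tracing: return-probe traps
 * (T_DTRACE_RET), fasttrap probe traps (T_DTRACE_PROBE), and breakpoint
 * traps (T_BPTFLT) placed by the pid provider.  Any trap that DTrace does
 * not claim is handed to trap() for normal processing.
 */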
void
dtrace_user_probe(struct regs *rp, caddr_t addr, processorid_t cpuid)
{
	krwlock_t *rwp;
	proc_t *p = curproc;
	extern void trap(struct regs *, caddr_t, processorid_t);

	if (USERMODE(rp->r_cs) || (rp->r_ps & PS_VM)) {
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
	}

	if (rp->r_trapno == T_DTRACE_RET) {
		uint8_t step = curthread->t_dtrace_step;
		uint8_t ret = curthread->t_dtrace_ret;
		uintptr_t npc = curthread->t_dtrace_npc;

		if (curthread->t_dtrace_ast) {
			aston(curthread);
			curthread->t_sig_check = 1;
		}

		/*
		 * Clear all user tracing flags.
		 */
		curthread->t_dtrace_ft = 0;

		/*
		 * If we weren't expecting to take a return probe trap, kill
		 * the process as though it had just executed an unassigned
		 * trap instruction.
		 */
		if (step == 0) {
			tsignal(curthread, SIGILL);
			return;
		}

		/*
		 * If we hit this trap unrelated to a return probe, we're
		 * just here to reset the AST flag since we deferred a signal
		 * until after we logically single-stepped the instruction we
		 * copied out.
		 */
		if (ret == 0) {
			rp->r_pc = npc;
			return;
		}

		/*
		 * We need to wait until after we've called the
		 * dtrace_return_probe_ptr function pointer to set %pc.
		 */
		rwp = &CPU->cpu_ft_lock;
		rw_enter(rwp, RW_READER);
		if (dtrace_return_probe_ptr != NULL)
			(void) (*dtrace_return_probe_ptr)(rp);
		rw_exit(rwp);
		rp->r_pc = npc;

	} else if (rp->r_trapno == T_DTRACE_PROBE) {
		rwp = &CPU->cpu_ft_lock;
		rw_enter(rwp, RW_READER);
		if (dtrace_fasttrap_probe_ptr != NULL)
			(void) (*dtrace_fasttrap_probe_ptr)(rp);
		rw_exit(rwp);

	} else if (rp->r_trapno == T_BPTFLT) {
		uint8_t instr;
		rwp = &CPU->cpu_ft_lock;

		/*
		 * The DTrace fasttrap provider uses the breakpoint trap
		 * (int 3). We let DTrace take the first crack at handling
		 * this trap; if it's not a probe that DTrace knows about,
		 * we call into the trap() routine to handle it like a
		 * breakpoint placed by a conventional debugger.
		 */
		rw_enter(rwp, RW_READER);
		if (dtrace_pid_probe_ptr != NULL &&
		    (*dtrace_pid_probe_ptr)(rp) == 0) {
			rw_exit(rwp);
			return;
		}
		rw_exit(rwp);

		/*
		 * If the instruction that caused the breakpoint trap doesn't
		 * look like an int 3 anymore, it may be that this tracepoint
		 * was removed just after the user thread executed it. In
		 * that case, return to user land to retry the instruction.
		 */
		if (fuword8((void *)(rp->r_pc - 1), &instr) == 0 &&
		    instr != FASTTRAP_INSTR) {
			rp->r_pc--;
			return;
		}

		trap(rp, addr, cpuid);

	} else {
		trap(rp, addr, cpuid);
	}
}

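/*
 * Adjust thread state when a synchronous signal is taken while user-level
 * tracing is active: if the fault was raised at the copied-out instruction
 * in the per-thread scratch space, reset %pc to the original traced
 * instruction and clear the tracing flags.
 */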
void
dtrace_safe_synchronous_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions so turn off the flags. If the instruction
	 * we copied out caused a synchronous trap, reset the pc back to its
	 * original value and turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
	} else if (rp->r_pc == t->t_dtrace_scrpc ||
	    rp->r_pc == t->t_dtrace_astpc) {
		rp->r_pc = t->t_dtrace_pc;
		t->t_dtrace_ft = 0;
	}
}

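/*
 * Decide whether a pending signal may be taken immediately or must be
 * deferred until the copied-out instruction has been executed.  Returns
 * non-zero if the signal should be deferred.
 */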
int
dtrace_safe_defer_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions so turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * If we've executed the original instruction, but haven't performed
	 * the jmp back to t->t_dtrace_npc or the clean up of any registers
	 * used to emulate %rip-relative instructions in 64-bit mode, do that
	 * here and take the signal right away. We detect this condition by
	 * seeing if the program counter is in the range [scrpc + isz, astpc).
	 */
	if (t->t_dtrace_astpc - rp->r_pc <
	    t->t_dtrace_astpc - t->t_dtrace_scrpc - isz) {
#ifdef __amd64
		/*
		 * If there is a scratch register and we're on the
		 * instruction immediately after the modified instruction,
		 * restore the value of that scratch register.
		 */
		if (t->t_dtrace_reg != 0 &&
		    rp->r_pc == t->t_dtrace_scrpc + isz) {
			switch (t->t_dtrace_reg) {
			case REG_RAX:
				rp->r_rax = t->t_dtrace_regv;
				break;
			case REG_RCX:
				rp->r_rcx = t->t_dtrace_regv;
				break;
			case REG_R8:
				rp->r_r8 = t->t_dtrace_regv;
				break;
			case REG_R9:
				rp->r_r9 = t->t_dtrace_regv;
				break;
			}
		}
#endif
		rp->r_pc = t->t_dtrace_npc;
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * Otherwise, make sure we'll return to the kernel after executing
	 * the copied out instruction and defer the signal.
	 */
	if (!t->t_dtrace_step) {
		ASSERT(rp->r_pc < t->t_dtrace_astpc);
		rp->r_pc += t->t_dtrace_astpc - t->t_dtrace_scrpc;
		t->t_dtrace_step = 1;
	}

	t->t_dtrace_ast = 1;

	return (1);
}