xref: /netbsd-src/external/cddl/osnet/dev/dtrace/i386/dtrace_isa.c (revision 179b12252ecaf3553d9c2b7458ce62b6a2203d0c)
1 /*	$NetBSD: dtrace_isa.c,v 1.3 2010/03/18 10:57:58 tron Exp $	*/
2 
3 /*
4  * CDDL HEADER START
5  *
6  * The contents of this file are subject to the terms of the
7  * Common Development and Distribution License, Version 1.0 only
8  * (the "License").  You may not use this file except in compliance
9  * with the License.
10  *
11  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
12  * or http://www.opensolaris.org/os/licensing.
13  * See the License for the specific language governing permissions
14  * and limitations under the License.
15  *
16  * When distributing Covered Code, include this CDDL HEADER in each
17  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
18  * If applicable, add the following below this CDDL HEADER, with the
19  * fields enclosed by brackets "[]" replaced with your own identifying
20  * information: Portions Copyright [yyyy] [name of copyright owner]
21  *
22  * CDDL HEADER END
23  *
24  * $FreeBSD: src/sys/cddl/dev/dtrace/i386/dtrace_isa.c,v 1.1.4.1 2009/08/03 08:13:06 kensmith Exp $
25  */
26 /*
27  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
28  * Use is subject to license terms.
29  */
30 #include <sys/cdefs.h>
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 //#include <sys/pcpu.h>
36 
37 //#include <machine/md_var.h>
38 //#include <machine/stack.h>
39 
40 //#include <vm/vm.h>
41 #include <machine/vmparam.h>
42 #include <machine/pmap.h>
43 
/* Lowest kernel virtual address; user pointers must fall below this. */
uintptr_t kernelbase = (uintptr_t)KERNBASE;

/*
 * True iff `va' lies in the kernel portion of the address space:
 * at or above the top of the user stack and below the kernel map limit.
 */
#define INKERNEL(va) (((vm_offset_t)(va)) >= USRSTACK && \
	 ((vm_offset_t)(va)) < VM_MAX_KERNEL_ADDRESS)

/* Layout of a conventional i386 stack frame built by the prologue. */
struct i386_frame {
	struct i386_frame	*f_frame;	/* caller's frame (saved %ebp) */
	int			 f_retaddr;	/* return address */
	int			 f_arg0;	/* first argument slot */
};

/* FreeBSD-compatibility type used by INKERNEL() and the stack walkers. */
typedef	unsigned long	vm_offset_t;

/* Unchecked user-memory fetches; implemented in assembly elsewhere. */
uint8_t dtrace_fuword8_nocheck(void *);
uint16_t dtrace_fuword16_nocheck(void *);
uint32_t dtrace_fuword32_nocheck(void *);
uint64_t dtrace_fuword64_nocheck(void *);
61 
/*
 * Walk the kernel stack via saved frame pointers, recording up to
 * pcstack_limit return addresses into pcstack[].  `aframes' artificial
 * (DTrace-internal) frames are skipped; if an interrupt pc was supplied
 * it becomes the first entry.  Unused trailing slots are zero-filled.
 */
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	int depth = 0;
	register_t ebp;
	struct i386_frame *frame;
	vm_offset_t callpc;
#if 0	/* XXX TBD needs solaris_cpu (for fbt) */
	pc_t caller = (pc_t) solaris_cpu[cpu_number()].cpu_dtrace_caller;
#else
	pc_t caller = (pc_t) 0;
#endif

	/* Probe fired from interrupt context: its pc heads the trace. */
	if (intrpc != 0)
		pcstack[depth++] = (pc_t) intrpc;

	/* Account for this function's own frame as well. */
	aframes++;

	/* Start the walk from our current frame pointer. */
	__asm __volatile("movl %%ebp,%0" : "=r" (ebp));

	frame = (struct i386_frame *)ebp;
	while (depth < pcstack_limit) {
		if (!INKERNEL(frame))
			break;

		callpc = frame->f_retaddr;

		if (!INKERNEL(callpc))
			break;

		if (aframes > 0) {
			/* Still inside DTrace's own frames: discard. */
			aframes--;
			if ((aframes == 0) && (caller != 0)) {
				pcstack[depth++] = caller;
			}
		}
		else {
			pcstack[depth++] = callpc;
		}

		/*
		 * Sanity check: frames must grow toward higher addresses
		 * and stay within one kernel stack (KSTACK_SIZE bound —
		 * assumes the walk never leaves the current stack).
		 */
		if (frame->f_frame <= frame ||
		    (vm_offset_t)frame->f_frame >=
		    (vm_offset_t)ebp + KSTACK_SIZE)
			break;
		frame = frame->f_frame;
	}

	/* Zero any slots we did not fill. */
	for (; depth < pcstack_limit; depth++) {
		pcstack[depth] = 0;
	}
}
114 
115 #ifdef notyet
/*
 * Core user-stack walker shared by the ustack routines (currently under
 * "#ifdef notyet": depends on Solaris klwp/ucontext machinery not yet
 * mapped onto NetBSD).  Follows saved frame pointers starting at
 * (pc, sp), storing pcs into pcstack[] when it is non-NULL, and crossing
 * signal-handler boundaries via the saved ucontext when `oldcontext'
 * matches a signal-frame layout.  Returns the number of frames seen
 * (even when pcstack is NULL, so it doubles as a depth counter).
 */
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = curproc;
	uintptr_t oldcontext = lwp->lwp_oldcontext;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[cpu_number()].cpuc_dtrace_flags;
	size_t s1, s2;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	/*
	 * s1/s2 are the expected distances from sp to a saved ucontext
	 * for a plain signal frame vs. one carrying a siginfo —
	 * presumably matching the Solaris sendsig() frame layout; verify
	 * against the NetBSD signal trampoline before enabling.
	 */
	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	while (pc != 0 && sp != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			/* Signal frame: resume the walk from the saved context. */
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			/* Ordinary frame: follow the saved pc/fp pair. */
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return (ret);
}
192 
193 void
194 dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
195 {
196 	klwp_t *lwp = ttolwp(curthread);
197 	proc_t *p = curproc;
198 	struct regs *rp;
199 	uintptr_t pc, sp;
200 	volatile uint16_t *flags =
201 	    (volatile uint16_t *)&cpu_core[cpu_number()].cpuc_dtrace_flags;
202 	int n;
203 
204 	if (*flags & CPU_DTRACE_FAULT)
205 		return;
206 
207 	if (pcstack_limit <= 0)
208 		return;
209 
210 	/*
211 	 * If there's no user context we still need to zero the stack.
212 	 */
213 	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
214 		goto zero;
215 
216 	*pcstack++ = (uint64_t)p->p_pid;
217 	pcstack_limit--;
218 
219 	if (pcstack_limit <= 0)
220 		return;
221 
222 	pc = rp->r_pc;
223 	sp = rp->r_fp;
224 
225 	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
226 		*pcstack++ = (uint64_t)pc;
227 		pcstack_limit--;
228 		if (pcstack_limit <= 0)
229 			return;
230 
231 		if (p->p_model == DATAMODEL_NATIVE)
232 			pc = dtrace_fulword((void *)rp->r_sp);
233 		else
234 			pc = dtrace_fuword32((void *)rp->r_sp);
235 	}
236 
237 	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
238 	ASSERT(n >= 0);
239 	ASSERT(n <= pcstack_limit);
240 
241 	pcstack += n;
242 	pcstack_limit -= n;
243 
244 zero:
245 	while (pcstack_limit-- > 0)
246 		*pcstack++ = NULL;
247 }
248 
/*
 * Return the depth of the current user stack.
 *
 * XXX: not yet implemented on NetBSD (this region is under
 * "#ifdef notyet").  Return 0 explicitly rather than falling off the
 * end of a non-void function, which is undefined behavior when the
 * caller uses the result.
 */
int
dtrace_getustackdepth(void)
{
	return (0);
}
253 
254 void
255 dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
256 {
257 	klwp_t *lwp = ttolwp(curthread);
258 	proc_t *p = curproc;
259 	struct regs *rp;
260 	uintptr_t pc, sp, oldcontext;
261 	volatile uint16_t *flags =
262 	    (volatile uint16_t *)&cpu_core[cpu_number()].cpuc_dtrace_flags;
263 	size_t s1, s2;
264 
265 	if (*flags & CPU_DTRACE_FAULT)
266 		return;
267 
268 	if (pcstack_limit <= 0)
269 		return;
270 
271 	/*
272 	 * If there's no user context we still need to zero the stack.
273 	 */
274 	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
275 		goto zero;
276 
277 	*pcstack++ = (uint64_t)p->p_pid;
278 	pcstack_limit--;
279 
280 	if (pcstack_limit <= 0)
281 		return;
282 
283 	pc = rp->r_pc;
284 	sp = rp->r_fp;
285 	oldcontext = lwp->lwp_oldcontext;
286 
287 	if (p->p_model == DATAMODEL_NATIVE) {
288 		s1 = sizeof (struct frame) + 2 * sizeof (long);
289 		s2 = s1 + sizeof (siginfo_t);
290 	} else {
291 		s1 = sizeof (struct frame32) + 3 * sizeof (int);
292 		s2 = s1 + sizeof (siginfo32_t);
293 	}
294 
295 	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
296 		*pcstack++ = (uint64_t)pc;
297 		*fpstack++ = 0;
298 		pcstack_limit--;
299 		if (pcstack_limit <= 0)
300 			return;
301 
302 		if (p->p_model == DATAMODEL_NATIVE)
303 			pc = dtrace_fulword((void *)rp->r_sp);
304 		else
305 			pc = dtrace_fuword32((void *)rp->r_sp);
306 	}
307 
308 	while (pc != 0 && sp != 0) {
309 		*pcstack++ = (uint64_t)pc;
310 		*fpstack++ = sp;
311 		pcstack_limit--;
312 		if (pcstack_limit <= 0)
313 			break;
314 
315 		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
316 			if (p->p_model == DATAMODEL_NATIVE) {
317 				ucontext_t *ucp = (ucontext_t *)oldcontext;
318 				greg_t *gregs = ucp->uc_mcontext.gregs;
319 
320 				sp = dtrace_fulword(&gregs[REG_FP]);
321 				pc = dtrace_fulword(&gregs[REG_PC]);
322 
323 				oldcontext = dtrace_fulword(&ucp->uc_link);
324 			} else {
325 				ucontext_t *ucp = (ucontext_t *)oldcontext;
326 				greg_t *gregs = ucp->uc_mcontext.gregs;
327 
328 				sp = dtrace_fuword32(&gregs[EBP]);
329 				pc = dtrace_fuword32(&gregs[EIP]);
330 
331 				oldcontext = dtrace_fuword32(&ucp->uc_link);
332 			}
333 		} else {
334 			if (p->p_model == DATAMODEL_NATIVE) {
335 				struct frame *fr = (struct frame *)sp;
336 
337 				pc = dtrace_fulword(&fr->fr_savpc);
338 				sp = dtrace_fulword(&fr->fr_savfp);
339 			} else {
340 				struct frame32 *fr = (struct frame32 *)sp;
341 
342 				pc = dtrace_fuword32(&fr->fr_savpc);
343 				sp = dtrace_fuword32(&fr->fr_savfp);
344 			}
345 		}
346 
347 		/*
348 		 * This is totally bogus:  if we faulted, we're going to clear
349 		 * the fault and break.  This is to deal with the apparently
350 		 * broken Java stacks on x86.
351 		 */
352 		if (*flags & CPU_DTRACE_FAULT) {
353 			*flags &= ~CPU_DTRACE_FAULT;
354 			break;
355 		}
356 	}
357 
358 zero:
359 	while (pcstack_limit-- > 0)
360 		*pcstack++ = NULL;
361 }
362 #endif
363 
/*
 * Fetch probe argument `arg', `aframes' frames above us.  The trap-entry
 * path (#if 0 below) is not yet wired up on NetBSD, so only the
 * direct-call case is handled: the provider called dtrace_probe()
 * directly and its arguments sit just above our caller's frame.
 */
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct i386_frame *fp = (struct i386_frame *)dtrace_getfp();
	uintptr_t *stack;

#if 0 /* XXX TBD needs ALTENTRY in dtrace_asm.S */
	int i;
	for (i = 1; i <= aframes; i++) {
		fp = fp->f_frame;

		if (fp->f_retaddr == (long)dtrace_invop_callsite) {
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed to the stack as the
			 * second argument to dtrace_invop() as the pointer to
			 * the stack.  When using this stack, we must step
			 * beyond the EIP/RIP that was pushed when the trap was
			 * taken -- hence the "+ 1" below.
			 */
			stack = ((uintptr_t **)&fp[1])[1] + 1;
			goto load;
		}
	}
#endif

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

	/* Arguments begin immediately above the saved frame. */
	stack = (uintptr_t *)&fp[1];

#if 0
load:
#endif
	/* Tolerate a bad pointer rather than faulting in probe context. */
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}
412 
413 int
414 dtrace_getstackdepth(int aframes)
415 {
416 	int depth = 0;
417 	struct i386_frame *frame;
418 	vm_offset_t ebp;
419 
420 	aframes++;
421 	ebp = dtrace_getfp();
422 	frame = (struct i386_frame *)ebp;
423 	depth++;
424 	for(;;) {
425 		if (!INKERNEL((long) frame))
426 			break;
427 		if (!INKERNEL((long) frame->f_frame))
428 			break;
429 		depth++;
430 		if (frame->f_frame <= frame ||
431 		    (vm_offset_t)frame->f_frame >=
432 		    (vm_offset_t)ebp + KSTACK_SIZE)
433 			break;
434 		frame = frame->f_frame;
435 	}
436 	if (depth < aframes)
437 		return 0;
438 	else
439 		return depth - aframes;
440 }
441 
442 #ifdef notyet
/*
 * Return the value of register `reg' from saved user registers `rp'.
 * (Under "#ifdef notyet" — depends on the Solaris struct regs layout.)
 * `reg' arrives as an i386 register number; on amd64 it is first mapped
 * through regmap[] to the 64-bit register index.
 */
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
#if defined(__amd64)
	/* i386 register number -> amd64 register index. */
	int regmap[] = {
		REG_GS,		/* GS */
		REG_FS,		/* FS */
		REG_ES,		/* ES */
		REG_DS,		/* DS */
		REG_RDI,	/* EDI */
		REG_RSI,	/* ESI */
		REG_RBP,	/* EBP */
		REG_RSP,	/* ESP */
		REG_RBX,	/* EBX */
		REG_RDX,	/* EDX */
		REG_RCX,	/* ECX */
		REG_RAX,	/* EAX */
		REG_TRAPNO,	/* TRAPNO */
		REG_ERR,	/* ERR */
		REG_RIP,	/* EIP */
		REG_CS,		/* CS */
		REG_RFL,	/* EFL */
		REG_RSP,	/* UESP */
		REG_SS		/* SS */
	};

	if (reg <= SS) {
		/* 32-bit register number: translate via regmap[]. */
		if (reg >= sizeof (regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = regmap[reg];
	} else {
		/* Values above SS address 64-bit-only registers directly. */
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->r_rdi);
	case REG_RSI:
		return (rp->r_rsi);
	case REG_RDX:
		return (rp->r_rdx);
	case REG_RCX:
		return (rp->r_rcx);
	case REG_R8:
		return (rp->r_r8);
	case REG_R9:
		return (rp->r_r9);
	case REG_RAX:
		return (rp->r_rax);
	case REG_RBX:
		return (rp->r_rbx);
	case REG_RBP:
		return (rp->r_rbp);
	case REG_R10:
		return (rp->r_r10);
	case REG_R11:
		return (rp->r_r11);
	case REG_R12:
		return (rp->r_r12);
	case REG_R13:
		return (rp->r_r13);
	case REG_R14:
		return (rp->r_r14);
	case REG_R15:
		return (rp->r_r15);
	case REG_DS:
		return (rp->r_ds);
	case REG_ES:
		return (rp->r_es);
	case REG_FS:
		return (rp->r_fs);
	case REG_GS:
		return (rp->r_gs);
	case REG_TRAPNO:
		return (rp->r_trapno);
	case REG_ERR:
		return (rp->r_err);
	case REG_RIP:
		return (rp->r_rip);
	case REG_CS:
		return (rp->r_cs);
	case REG_SS:
		return (rp->r_ss);
	case REG_RFL:
		return (rp->r_rfl);
	case REG_RSP:
		return (rp->r_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

#else
	/* i386: registers are laid out contiguously starting at r_gs. */
	if (reg > SS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	return ((&rp->r_gs)[reg]);
#endif
}
547 #endif
548 
549 static int
550 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
551 {
552 	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);
553 
554 	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
555 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
556 		cpu_core[cpu_number()].cpuc_dtrace_illval = uaddr;
557 		return (0);
558 	}
559 
560 	return (1);
561 }
562 
/*
 * Copy `size' bytes from user address `uaddr' to kernel address
 * `kaddr', after range-checking the user side.  `flags' is unused here;
 * failures are reported via dtrace_copycheck()'s CPU flag state.
 */
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (!dtrace_copycheck(uaddr, kaddr, size))
		return;

	dtrace_copy(uaddr, kaddr, size);
}
570 
/*
 * Copy `size' bytes from kernel address `kaddr' to user address
 * `uaddr', after range-checking the user side.  `flags' is unused here;
 * failures are reported via dtrace_copycheck()'s CPU flag state.
 */
void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (!dtrace_copycheck(uaddr, kaddr, size))
		return;

	dtrace_copy(kaddr, uaddr, size);
}
578 
/*
 * Copy a NUL-terminated string of at most `size' bytes from user
 * address `uaddr' to kernel address `kaddr', after range-checking the
 * user side.  `flags' is forwarded to dtrace_copystr() for fault
 * reporting.
 */
void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (!dtrace_copycheck(uaddr, kaddr, size))
		return;

	dtrace_copystr(uaddr, kaddr, size, flags);
}
586 
/*
 * Copy a NUL-terminated string of at most `size' bytes from kernel
 * address `kaddr' to user address `uaddr', after range-checking the
 * user side.  `flags' is forwarded to dtrace_copystr() for fault
 * reporting.
 */
void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (!dtrace_copycheck(uaddr, kaddr, size))
		return;

	dtrace_copystr(kaddr, uaddr, size, flags);
}
594 
595 uint8_t
596 dtrace_fuword8(void *uaddr)
597 {
598 	if ((uintptr_t)uaddr >= kernelbase) {
599 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
600 		cpu_core[cpu_number()].cpuc_dtrace_illval = (uintptr_t)uaddr;
601 		return (0);
602 	}
603 	return (dtrace_fuword8_nocheck(uaddr));
604 }
605 
606 uint16_t
607 dtrace_fuword16(void *uaddr)
608 {
609 	if ((uintptr_t)uaddr >= kernelbase) {
610 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
611 		cpu_core[cpu_number()].cpuc_dtrace_illval = (uintptr_t)uaddr;
612 		return (0);
613 	}
614 	return (dtrace_fuword16_nocheck(uaddr));
615 }
616 
617 uint32_t
618 dtrace_fuword32(void *uaddr)
619 {
620 	if ((uintptr_t)uaddr >= kernelbase) {
621 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
622 		cpu_core[cpu_number()].cpuc_dtrace_illval = (uintptr_t)uaddr;
623 		return (0);
624 	}
625 	return (dtrace_fuword32_nocheck(uaddr));
626 }
627 
628 uint64_t
629 dtrace_fuword64(void *uaddr)
630 {
631 	if ((uintptr_t)uaddr >= kernelbase) {
632 		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
633 		cpu_core[cpu_number()].cpuc_dtrace_illval = (uintptr_t)uaddr;
634 		return (0);
635 	}
636 	return (dtrace_fuword64_nocheck(uaddr));
637 }
638