1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 *
22 * $FreeBSD$
23 */
24 /*
25 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
26 * Use is subject to license terms.
27 */
28 #include <sys/cdefs.h>
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33
34 #include <machine/frame.h>
35 #include <machine/reg.h>
36
37 #include <machine/db_machdep.h>
38 #include <machine/vmparam.h>
39 #include <ddb/db_sym.h>
40 #include <ddb/ddb.h>
41
42 #include "regset.h"
43
/* True iff virtual address "va" lies inside the kernel's VA range. */
#define INKERNEL(va) ((VM_MIN_KERNEL_ADDRESS <= (va)) && ((va) < VM_MAX_KERNEL_ADDRESS))
/* Index of the CPU we are currently running on. */
#define CURRENT_CPU cpu_index(curcpu())
46
/*
 * State carried between steps of a kernel stack unwind: the current
 * frame pointer, stack pointer and program counter.
 */
struct unwind_state {
	register_t fp;	/* frame pointer (x29) */
	register_t sp;	/* stack pointer */
	register_t pc;	/* program counter */
};
52
/*
 * Layout of an AArch64 frame record: the caller's frame pointer
 * followed by the return address.
 */
struct arm64_frame {
	struct arm64_frame *f_frame;	/* saved x29: previous frame */
	uintptr_t f_retaddr;		/* saved x30: return address */
};
57
58 static int
unwind_frame(struct unwind_state * frame)59 unwind_frame(struct unwind_state *frame)
60 {
61 uint64_t fp = frame->fp;
62
63 if (!INKERNEL(fp))
64 return (-1);
65
66 frame->sp = fp + 0x10;
67 /* FP to previous frame (X29) */
68 frame->fp = *(uint64_t *)(fp);
69 /* LR (X30) */
70 frame->pc = *(uint64_t *)(fp + 8) - 4;
71
72 return (0);
73 }
74
/*
 * We need some reasonable default to prevent backtrace code
 * from wandering too far.
 */
#define MAX_FUNCTION_SIZE 0x10000	/* assumed upper bound on a function's size */
#define MAX_PROLOGUE_SIZE 0x100		/* assumed upper bound on a prologue's size */
#define MAX_USTACK_DEPTH 2048		/* max frames walked on a user stack */
82
/* Unchecked user-memory fetch helpers; defined elsewhere (not in this file). */
uint8_t dtrace_fuword8_nocheck(void *);
uint16_t dtrace_fuword16_nocheck(void *);
uint32_t dtrace_fuword32_nocheck(void *);
uint64_t dtrace_fuword64_nocheck(void *);
87
/*
 * Record up to pcstack_limit kernel return addresses into pcstack,
 * starting from our own frame and following saved frame pointers,
 * crossing kernel trap frames (marked by el1_trap_exit as the return
 * address).  The first "aframes" frames (DTrace's own) are skipped;
 * a non-NULL "intrpc" is recorded first as the interrupted pc.
 * Remaining slots are zeroed.
 */
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	extern const char el1_trap_exit[];
	const register_t *fp;
	int i = 0;

	if (intrpc) {
		if (i < pcstack_limit)
			pcstack[i++] = (pc_t)intrpc;
	}

	/*
	 * fp[0] = x29 (saved frame pointer)
	 * fp[1] = x30 (saved link register == return address)
	 */
	fp = __builtin_frame_address(0);
	while (i < pcstack_limit && INKERNEL(fp[0]) && INKERNEL(fp[1])) {
		/* Skip the specified number of artificial frames. */
		if (aframes > 0)
			aframes--;
		else
			pcstack[i++] = fp[1];

		/* Check whether this frame is handling a trap. */
		if (fp[1] == (register_t)el1_trap_exit) {
			/*
			 * Trap from kernel. The trapframe is the
			 * saved frame pointer of the call to the trap
			 * handler whose return address is
			 * el1_trap_exit. The frame pointer of the
			 * interrupted code is in x29 stashed in the
			 * trapframe, alongside its pc.
			 */
			const struct trapframe *tf = (const void *)fp[0];
			/* x29 = frame pointer */
			fp = (const void *)tf->tf_regs.r_reg[29];
			if (INKERNEL(tf->tf_pc)) {
				if (i >= pcstack_limit)
					break;
				if (aframes > 0)
					aframes--;
				else
					pcstack[i++] = tf->tf_pc;
			}
		} else {
			/*
			 * Not a trap. Keep going with fp[0] as the
			 * parent frame pointer.
			 */
			fp = (const void *)fp[0];
		}
	}

	/* Zero the rest of the return address stack. (Paranoia?) */
	while (i < pcstack_limit)
		pcstack[i++] = 0;
}
147
/*
 * Walk a user stack starting at "pc" with frame pointer "fp",
 * storing each pc into "pcstack" when it is non-NULL (bounded by
 * pcstack_limit).  Returns the number of frames examined.  Circular
 * stacks and faults are reported through the per-CPU dtrace flags.
 */
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t fp)
{
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CURRENT_CPU].cpuc_dtrace_flags;
	int ret = 0;
	uintptr_t oldfp = fp;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= MAX_USTACK_DEPTH) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[CURRENT_CPU].cpuc_dtrace_illval = fp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (fp == 0)
			break;

		/* Fetch the next frame record from user memory. */
		pc = dtrace_fuword64((void *)(fp +
		    offsetof(struct arm64_frame, f_retaddr)));
		fp = dtrace_fuword64((void *)fp);

		/* A frame pointing back at itself means a loop; bail. */
		if (fp == oldfp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[CURRENT_CPU].cpuc_dtrace_illval = fp;
			break;
		}

		/*
		 * ARM64TODO:
		 * This workaround might not be necessary. It needs to be
		 * revised and removed from all architectures if found
		 * unwanted. Leaving the original x86 comment for reference.
		 *
		 * This is totally bogus: if we faulted, we're going to clear
		 * the fault and break. This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}

		oldfp = fp;
	}

	return (ret);
}
210
/*
 * Record the calling user process's stack into "pcstack": the pid
 * first, then the interrupted pc and the chain of return addresses,
 * zero-padding any remaining slots.
 */
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp, fp;	/* NOTE(review): sp is set but never used below */
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CURRENT_CPU].cpuc_dtrace_flags;
	int n;

	/* Don't touch user memory if a previous access already faulted. */
	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curlwp->l_md.md_utf) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	/* Start from the user state saved at kernel entry. */
	pc = tf->tf_pc;
	sp = tf->tf_sp;
	fp = tf->tf_regs.r_reg[29];

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet
		 * been pushed (that happens in the function prologue),
		 * so record the current pc as the missing top of
		 * stack and continue the walk from the caller's pc,
		 * which on arm64 is still in the link register.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = tf->tf_lr;
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
272
/*
 * Return the depth of the current user stack.
 * ARM64TODO: not implemented yet; always reports a depth of zero.
 */
int
dtrace_getustackdepth(void)
{

	printf("IMPLEMENT ME: %s\n", __func__);

	return (0);
}
281
/*
 * Record the user stack's pc/fp pairs into pcstack and fpstack.
 * ARM64TODO: not implemented yet; leaves both arrays untouched.
 */
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{

	printf("IMPLEMENT ME: %s\n", __func__);
}
288
/*ARGSUSED*/
/*
 * Fetch argument "arg" (0..7) of the probed function: walk up past
 * "aframes" artificial frames to the trap frame pushed on probe
 * entry (identified by el1_trap_exit as the return address) and read
 * x0..x7 from it.  Returns 0 for arg >= 8 or when no trap frame is
 * found within 1000 frames.
 */
uint64_t
dtrace_getarg(int arg, int aframes)
{
	extern const char el1_trap_exit[];
	const register_t *fp;
	const struct trapframe *tf = NULL;
	int i = 0;

	/*
	 * The first arguments are passed in x0,...,x7. The rest are
	 * on the stack, too much trouble to figure out.
	 *
	 * XXX Shouldn't we ask ctf or dwarf or something to figure
	 * this stuff out for us?
	 */
	KASSERT(arg >= 0);
	if (arg >= 8)
		return 0;

	fp = __builtin_frame_address(0);
	while (i < 1000 && INKERNEL(fp[0]) && INKERNEL(fp[1])) {
		if (aframes > 0)
			aframes--;
		else
			i++;
		/* fp[1] == el1_trap_exit marks the trap handler's frame. */
		if (fp[1] == (register_t)el1_trap_exit) {
			tf = (const void *)fp[0];
			break;
		} else {
			fp = (const void *)fp[0];
		}
	}

	/* If we didn't find a trap frame, give up. */
	if (tf == NULL)
		return 0;

	/* Arg0, arg1, ..., arg7 are in registers x0, x1, ..., x7. */
	return tf->tf_regs.r_reg[arg];
}
330
/*
 * Count the kernel stack frames below "aframes" artificial frames,
 * walking saved frame pointers (and crossing kernel trap frames)
 * the same way dtrace_getpcstack() does, capped at 1000 iterations.
 */
int
dtrace_getstackdepth(int aframes)
{
	extern const char el1_trap_exit[];
	const register_t *fp;
	int i = 0;

	fp = __builtin_frame_address(0);
	while (i < 1000 && INKERNEL(fp[0]) && INKERNEL(fp[1])) {
		if (aframes > 0)
			aframes--;
		else
			i++;
		/* A trap frame counts as one extra frame (the interrupted pc). */
		if (fp[1] == (register_t)el1_trap_exit) {
			const struct trapframe *tf = (const void *)fp[0];
			fp = (const void *)tf->tf_regs.r_reg[29];
			if (aframes > 0)
				aframes--;
			else
				i++;
		} else {
			fp = (const void *)fp[0];
		}
	}

	return i;
}
358
359 ulong_t
dtrace_getreg(struct trapframe * tf,uint_t reg)360 dtrace_getreg(struct trapframe *tf, uint_t reg)
361 {
362
363 if (reg < 32)
364 return tf->tf_regs.r_reg[reg];
365
366 return (0);
367 }
368
369 static int
dtrace_copycheck(uintptr_t uaddr,uintptr_t kaddr,size_t size)370 dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
371 {
372
373 if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
374 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
375 cpu_core[CURRENT_CPU].cpuc_dtrace_illval = uaddr;
376 return (0);
377 }
378
379 return (1);
380 }
381
382 void
dtrace_copyin(uintptr_t uaddr,uintptr_t kaddr,size_t size,volatile uint16_t * flags)383 dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
384 volatile uint16_t *flags)
385 {
386
387 if (dtrace_copycheck(uaddr, kaddr, size))
388 dtrace_copy(uaddr, kaddr, size);
389 }
390
391 void
dtrace_copyout(uintptr_t kaddr,uintptr_t uaddr,size_t size,volatile uint16_t * flags)392 dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
393 volatile uint16_t *flags)
394 {
395
396 if (dtrace_copycheck(uaddr, kaddr, size))
397 dtrace_copy(kaddr, uaddr, size);
398 }
399
400 void
dtrace_copyinstr(uintptr_t uaddr,uintptr_t kaddr,size_t size,volatile uint16_t * flags)401 dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
402 volatile uint16_t *flags)
403 {
404
405 if (dtrace_copycheck(uaddr, kaddr, size))
406 dtrace_copystr(uaddr, kaddr, size, flags);
407 }
408
409 void
dtrace_copyoutstr(uintptr_t kaddr,uintptr_t uaddr,size_t size,volatile uint16_t * flags)410 dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
411 volatile uint16_t *flags)
412 {
413
414 if (dtrace_copycheck(uaddr, kaddr, size))
415 dtrace_copystr(kaddr, uaddr, size, flags);
416 }
417
418 uint8_t
dtrace_fuword8(void * uaddr)419 dtrace_fuword8(void *uaddr)
420 {
421
422 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
423 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
424 cpu_core[CURRENT_CPU].cpuc_dtrace_illval = (uintptr_t)uaddr;
425 return (0);
426 }
427
428 return (dtrace_fuword8_nocheck(uaddr));
429 }
430
431 uint16_t
dtrace_fuword16(void * uaddr)432 dtrace_fuword16(void *uaddr)
433 {
434
435 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
436 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
437 cpu_core[CURRENT_CPU].cpuc_dtrace_illval = (uintptr_t)uaddr;
438 return (0);
439 }
440
441 return (dtrace_fuword16_nocheck(uaddr));
442 }
443
444 uint32_t
dtrace_fuword32(void * uaddr)445 dtrace_fuword32(void *uaddr)
446 {
447
448 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
449 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
450 cpu_core[CURRENT_CPU].cpuc_dtrace_illval = (uintptr_t)uaddr;
451 return (0);
452 }
453
454 return (dtrace_fuword32_nocheck(uaddr));
455 }
456
457 uint64_t
dtrace_fuword64(void * uaddr)458 dtrace_fuword64(void *uaddr)
459 {
460
461 if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
462 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
463 cpu_core[CURRENT_CPU].cpuc_dtrace_illval = (uintptr_t)uaddr;
464 return (0);
465 }
466
467 return (dtrace_fuword64_nocheck(uaddr));
468 }
469