1 /*	$OpenBSD: trap.c,v 1.174 2024/11/07 16:02:29 miod Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department and Ralph Campbell.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  * from: Utah Hdr: trap.c 1.32 91/04/06
41  *
42  *	from: @(#)trap.c	8.5 (Berkeley) 1/11/94
43  */
44 
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/exec.h>
49 #include <sys/proc.h>
50 #include <sys/kernel.h>
51 #include <sys/signalvar.h>
52 #include <sys/user.h>
53 #include <sys/stacktrace.h>
54 #include <sys/syscall.h>
55 #include <sys/syscall_mi.h>
56 #include <sys/buf.h>
57 #include <sys/device.h>
58 #include <sys/atomic.h>
59 #ifdef PTRACE
60 #include <sys/ptrace.h>
61 #endif
62 
63 #include <uvm/uvm_extern.h>
64 
65 #include <machine/autoconf.h>
66 #include <machine/cpu.h>
67 #include <mips64/mips_cpu.h>
68 #include <machine/fpu.h>
69 #include <machine/frame.h>
70 #include <machine/mips_opcode.h>
71 #include <machine/regnum.h>
72 #include <machine/tcb.h>
73 #include <machine/trap.h>
74 
75 #ifdef DDB
76 #include <mips64/db_machdep.h>
77 #include <ddb/db_access.h>
78 #include <ddb/db_output.h>
79 #include <ddb/db_sym.h>
80 #endif
81 
82 #include <sys/syslog.h>
83 
84 #define	USERMODE(ps)	(((ps) & SR_KSU_MASK) == SR_KSU_USER)
85 
86 const char *trap_type[] = {
87 	"external interrupt",
88 	"TLB modification",
89 	"TLB miss (load or instr. fetch)",
90 	"TLB miss (store)",
91 	"address error (load or I-fetch)",
92 	"address error (store)",
93 	"bus error (I-fetch)",
94 	"bus error (load or store)",
95 	"system call",
96 	"breakpoint",
97 	"reserved instruction",
98 	"coprocessor unusable",
99 	"arithmetic overflow",
100 	"trap",
101 	"virtual coherency instruction",
102 	"floating point",
103 	"reserved 16",
104 	"reserved 17",
105 	"reserved 18",
106 	"reserved 19",
107 	"reserved 20",
108 	"reserved 21",
109 	"reserved 22",
110 	"watch",
111 	"reserved 24",
112 	"reserved 25",
113 	"reserved 26",
114 	"reserved 27",
115 	"reserved 28",
116 	"reserved 29",
117 	"reserved 30",
118 	"virtual coherency data"
119 };
120 
121 #if defined(DDB) || defined(DEBUG)
122 struct trapdebug trapdebug[MAXCPUS * TRAPSIZE];
123 uint trppos[MAXCPUS];
124 
125 void	stacktrace(struct trapframe *);
126 uint32_t kdbpeek(vaddr_t);
127 uint64_t kdbpeekd(vaddr_t);
128 #endif	/* DDB || DEBUG */
129 
130 #if defined(DDB)
131 extern int db_ktrap(int, db_regs_t *);
132 #endif
133 
134 void	ast(void);
135 extern void interrupt(struct trapframe *);
136 void	itsa(struct trapframe *, struct cpu_info *, struct proc *, int);
137 void	trap(struct trapframe *);
138 #ifdef PTRACE
139 int	ptrace_read_insn(struct proc *, vaddr_t, uint32_t *);
140 int	ptrace_write_insn(struct proc *, vaddr_t, uint32_t);
141 int	process_sstep(struct proc *, int);
142 #endif
143 
144 /*
145  * Handle an AST for the current process.
146  */
147 void
148 ast(void)
149 {
150 	struct proc *p = curproc;
151 
152 	p->p_md.md_astpending = 0;
153 
154 	/*
155 	 * Make sure the AST flag gets cleared before handling the AST.
156 	 * Otherwise there is a risk of losing an AST that was sent
157 	 * by another CPU.
158 	 */
159 	membar_enter();
160 
161 	refreshcreds(p);
162 	atomic_inc_int(&uvmexp.softs);
163 	mi_ast(p, curcpu()->ci_want_resched);
164 	userret(p);
165 }
166 
167 /*
168  * Handle an exception.
169  * In the case of a kernel trap, execution resumes at the pcb_onfault
170  * recovery address if one is set; otherwise it resumes at the old pc.
171  */
172 void
173 trap(struct trapframe *trapframe)
174 {
175 	struct cpu_info *ci = curcpu();
176 	struct proc *p = ci->ci_curproc;
177 	int type;
178 
179 	type = (trapframe->cause & CR_EXC_CODE) >> CR_EXC_CODE_SHIFT;
180 
181 	trapdebug_enter(ci, trapframe, -1);
182 
183 	if (type != T_SYSCALL)
184 		atomic_inc_int(&uvmexp.traps);
185 	if (USERMODE(trapframe->sr))
186 		type |= T_USER;
187 
188 	/*
189 	 * Enable hardware interrupts if they were on before the trap;
190 	 * otherwise, enable only IPI interrupts.
191 	 */
192 	switch (type) {
193 	case T_BREAK:	/* leave interrupts alone; itsa() re-enables them if needed */
194 		break;
195 	default:
196 		if (ISSET(trapframe->sr, SR_INT_ENAB))
197 			enableintr();
198 		else {
199 #ifdef MULTIPROCESSOR
200 			ENABLEIPI();
201 #endif
202 		}
203 		break;
204 	}
205 
206 	if (type & T_USER)
207 		refreshcreds(p);
208 
209 	itsa(trapframe, ci, p, type);
210 
211 	if (type & T_USER)
212 		userret(p);
213 }
214 
215 /*
216  * Handle a single exception.
217  */
218 void
219 itsa(struct trapframe *trapframe, struct cpu_info *ci, struct proc *p,
220     int type)
221 {
222 	unsigned ucode = 0;
223 	vm_prot_t access_type;
224 	extern vaddr_t onfault_table[];
225 	int onfault;
226 	int signal, sicode;
227 	union sigval sv;
228 	struct pcb *pcb;
229 
230 	switch (type) {
231 	case T_TLB_MOD:
232 		/* check for kernel address */
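		/* (on mips64, all kernel virtual addresses have the sign bit set) */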
233 		if (trapframe->badvaddr < 0) {
234 			if (pmap_emulate_modify(pmap_kernel(),
235 			    trapframe->badvaddr)) {
236 				/* write to read only page in the kernel */
237 				access_type = PROT_WRITE;
238 				pcb = &p->p_addr->u_pcb;
239 				goto kernel_fault;
240 			}
241 			return;
242 		}
243 		/* FALLTHROUGH */
244 
245 	case T_TLB_MOD+T_USER:
246 		if (pmap_emulate_modify(p->p_vmspace->vm_map.pmap,
247 		    trapframe->badvaddr)) {
248 			/* write to read only page */
249 			access_type = PROT_WRITE;
250 			pcb = &p->p_addr->u_pcb;
251 			goto fault_common_no_miss;
252 		}
253 		return;
254 
255 	case T_TLB_LD_MISS:
256 	case T_TLB_ST_MISS:
257 		if (type == T_TLB_LD_MISS) {
258 			vaddr_t pc;
259 
260 			/*
261 			 * Check if the fault was caused by
262 			 * an instruction fetch.
263 			 */
264 			pc = trapframe->pc;
265 			if (trapframe->cause & CR_BR_DELAY)
266 				pc += 4;
267 			if (pc == trapframe->badvaddr)
268 				access_type = PROT_EXEC;
269 			else
270 				access_type = PROT_READ;
271 		} else
272 			access_type = PROT_WRITE;
273 
274 		pcb = &p->p_addr->u_pcb;
275 		/* check for kernel address */
276 		if (trapframe->badvaddr < 0) {
277 			vaddr_t va;
278 			int rv;
279 
280 	kernel_fault:
281 			va = trunc_page((vaddr_t)trapframe->badvaddr);
282 			onfault = pcb->pcb_onfault;
283 			pcb->pcb_onfault = 0;
284 			rv = uvm_fault(kernel_map, va, 0, access_type);
285 			pcb->pcb_onfault = onfault;
286 			if (rv == 0)
287 				return;
288 			if (onfault != 0) {
289 				pcb->pcb_onfault = 0;
290 				trapframe->pc = onfault_table[onfault];
291 				return;
292 			}
293 			goto err;
294 		}
295 		/*
296 		 * It is an error for the kernel to access user space except
297 		 * through the copyin/copyout routines.
298 		 */
299 		if (pcb->pcb_onfault != 0) {
300 			/*
301 			 * We want to resolve the TLB fault before invoking
302 			 * pcb_onfault if necessary.
303 			 */
304 			goto fault_common;
305 		} else {
306 			goto err;
307 		}
308 
309 	case T_TLB_LD_MISS+T_USER: {
310 		vaddr_t pc;
311 
312 		/* Check if the fault was caused by an instruction fetch. */
313 		pc = trapframe->pc;
314 		if (trapframe->cause & CR_BR_DELAY)
315 			pc += 4;
316 		if (pc == trapframe->badvaddr)
317 			access_type = PROT_EXEC;
318 		else
319 			access_type = PROT_READ;
320 		pcb = &p->p_addr->u_pcb;
321 		goto fault_common;
322 	}
323 
324 	case T_TLB_ST_MISS+T_USER:
325 		access_type = PROT_WRITE;
326 		pcb = &p->p_addr->u_pcb;
327 fault_common:
328 		if ((type & T_USER) &&
329 		    !uvm_map_inentry(p, &p->p_spinentry, PROC_STACK(p),
330 		    "[%s]%d/%d sp=%lx inside %lx-%lx: not MAP_STACK\n",
331 		    uvm_map_inentry_sp, p->p_vmspace->vm_map.sserial))
332 			return;
333 
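	/*
	 * fault_common performs the MAP_STACK stack pointer check above;
	 * fault_common_no_miss, reached from the TLB modified cases, skips
	 * it and goes straight to the shared uvm_fault() code below.
	 */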
334 fault_common_no_miss:
335 	    {
336 		vaddr_t va;
337 		struct vmspace *vm;
338 		vm_map_t map;
339 		int rv;
340 
341 		vm = p->p_vmspace;
342 		map = &vm->vm_map;
343 		va = trunc_page((vaddr_t)trapframe->badvaddr);
344 
345 		onfault = pcb->pcb_onfault;
346 		pcb->pcb_onfault = 0;
347 		rv = uvm_fault(map, va, 0, access_type);
348 		pcb->pcb_onfault = onfault;
349 
350 		/*
351 		 * If this was a stack access we keep track of the maximum
352 		 * accessed stack size.  Also, if vm_fault gets a protection
353 		 * failure it is due to accessing the stack region outside
354 		 * the current limit and we need to reflect that as an access
355 		 * error.
356 		 */
357 		if (rv == 0) {
358 			uvm_grow(p, va);
359 			return;
360 		}
361 
362 		if (!USERMODE(trapframe->sr)) {
363 			if (onfault != 0) {
364 				pcb->pcb_onfault = 0;
365 				trapframe->pc = onfault_table[onfault];
366 				return;
367 			}
368 			goto err;
369 		}
370 
371 		ucode = access_type;
372 		signal = SIGSEGV;
373 		sicode = SEGV_MAPERR;
374 		if (rv == EACCES)
375 			sicode = SEGV_ACCERR;
376 		if (rv == EIO) {
377 			signal = SIGBUS;
378 			sicode = BUS_OBJERR;
379 		}
380 		break;
381 	    }
382 
383 	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
384 	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
385 		ucode = 0;		/* XXX should be PROT_something */
386 		signal = SIGBUS;
387 		sicode = BUS_ADRALN;
388 		break;
389 	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
390 	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
391 		ucode = 0;		/* XXX should be PROT_something */
392 		signal = SIGBUS;
393 		sicode = BUS_OBJERR;
394 		break;
395 
396 	case T_SYSCALL+T_USER:
397 	    {
398 		struct trapframe *locr0 = p->p_md.md_regs;
399 		const struct sysent *callp = sysent;
400 		unsigned int code;
401 		register_t tpc;
402 		uint32_t branch = 0;
403 		int error;
404 		register_t *args, rval[2];
405 
406 		atomic_inc_int(&uvmexp.syscalls);
407 
408 		/* compute next PC after syscall instruction */
409 		tpc = trapframe->pc; /* remember, in case of restart */
410 		if (trapframe->cause & CR_BR_DELAY) {
411 			/* Get the branch instruction. */
412 			if (copyinsn(p, locr0->pc, &branch) != 0) {
413 				signal = SIGBUS;
414 				sicode = BUS_OBJERR;
415 				break;
416 			}
417 
418 			locr0->pc = MipsEmulateBranch(locr0,
419 			    trapframe->pc, 0, branch);
420 		} else
421 			locr0->pc += 4;
422 		code = locr0->v0;
423 
424 		/* XXX out of range stays on syscall0, which we assume is enosys */
425 		if (code > 0 && code < SYS_MAXSYSCALL)
426 			callp += code;
427 
428 		/*
429 		 * This relies upon a0-a5 being contiguous in struct trapframe.
430 		 */
431 		args = &locr0->a0;
432 
433 		rval[0] = 0;
434 		rval[1] = 0;
435 
436 #if defined(DDB) || defined(DEBUG)
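		/*
		 * Stash the syscall code in the trapdebug slot most recently
		 * filled for this CPU: trppos[] is the index of the next slot
		 * to use, so step back one entry, wrapping around the
		 * per-CPU ring.
		 */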
437 		trapdebug[TRAPSIZE * ci->ci_cpuid + (trppos[ci->ci_cpuid] == 0 ?
438 		    TRAPSIZE : trppos[ci->ci_cpuid]) - 1].code = code;
439 #endif
440 
441 		error = mi_syscall(p, code, callp, args, rval);
442 
443 		switch (error) {
444 		case 0:
445 			locr0->v0 = rval[0];
446 			locr0->a3 = 0;
447 			break;
448 		case ERESTART:
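			/* rewind the pc so the syscall instruction is re-executed */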
449 			locr0->pc = tpc;
450 			break;
451 		case EJUSTRETURN:
452 			break;	/* nothing to do */
453 		default:
454 			locr0->v0 = error;
455 			locr0->a3 = 1;
456 		}
457 
458 		mi_syscall_return(p, code, error, rval);
459 		return;
460 	    }
461 
462 	case T_BREAK:
463 #ifdef DDB
464 		db_ktrap(type, trapframe);
465 #endif
466 		/* Reenable interrupts if necessary */
467 		if (trapframe->sr & SR_INT_ENAB) {
468 			enableintr();
469 		}
470 		return;
471 
472 	case T_BREAK+T_USER:
473 	    {
474 		struct trapframe *locr0 = p->p_md.md_regs;
475 		vaddr_t va;
476 		uint32_t branch = 0;
477 		uint32_t instr;
478 
479 		/* compute address of break instruction */
480 		va = trapframe->pc;
481 		if (trapframe->cause & CR_BR_DELAY) {
482 			va += 4;
483 
484 			/* Read branch instruction. */
485 			if (copyinsn(p, trapframe->pc, &branch) != 0) {
486 				signal = SIGBUS;
487 				sicode = BUS_OBJERR;
488 				break;
489 			}
490 		}
491 
492 		/* read break instruction */
493 		if (copyinsn(p, va, &instr) != 0) {
494 			signal = SIGBUS;
495 			sicode = BUS_OBJERR;
496 			break;
497 		}
498 
499 		switch ((instr & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT) {
500 		case 6:	/* gcc range error */
501 			signal = SIGFPE;
502 			sicode = FPE_FLTSUB;
503 			/* skip instruction */
504 			if (trapframe->cause & CR_BR_DELAY)
505 				locr0->pc = MipsEmulateBranch(locr0,
506 				    trapframe->pc, 0, branch);
507 			else
508 				locr0->pc += 4;
509 			break;
510 		case 7:	/* gcc3 divide by zero */
511 			signal = SIGFPE;
512 			sicode = FPE_INTDIV;
513 			/* skip instruction */
514 			if (trapframe->cause & CR_BR_DELAY)
515 				locr0->pc = MipsEmulateBranch(locr0,
516 				    trapframe->pc, 0, branch);
517 			else
518 				locr0->pc += 4;
519 			break;
520 #ifdef PTRACE
521 		case BREAK_SSTEP_VAL:
522 			if (p->p_md.md_ss_addr == (long)va) {
523 #ifdef DEBUG
524 				printf("trap: %s (%d): breakpoint at %p "
525 				    "(insn %08x)\n",
526 				    p->p_p->ps_comm, p->p_p->ps_pid,
527 				    (void *)p->p_md.md_ss_addr,
528 				    p->p_md.md_ss_instr);
529 #endif
530 
531 				/* Restore original instruction and clear BP */
532 				KERNEL_LOCK();
533 				process_sstep(p, 0);
534 				KERNEL_UNLOCK();
535 				sicode = TRAP_BRKPT;
536 			} else {
537 				sicode = TRAP_TRACE;
538 			}
539 			signal = SIGTRAP;
540 			break;
541 #endif
542 #ifdef FPUEMUL
543 		case BREAK_FPUEMUL_VAL:
544 			/*
545 			 * If this is a genuine FP emulation break,
546 			 * resume execution at our branch destination.
547 			 */
548 			if (!CPU_HAS_FPU(ci) &&
549 			    (p->p_md.md_flags & MDP_FPUSED) != 0 &&
550 			    p->p_md.md_fppgva + 4 == (vaddr_t)va) {
551 				struct vm_map *map = &p->p_vmspace->vm_map;
552 
553 				p->p_md.md_flags &= ~MDP_FPUSED;
554 				locr0->pc = p->p_md.md_fpbranchva;
555 
556 				/*
557 				 * Prevent access to the relocation page.
558 				 * XXX needs to be fixed to work with rthreads
559 				 */
560 				KERNEL_LOCK();
561 				uvm_fault_unwire(map, p->p_md.md_fppgva,
562 				    p->p_md.md_fppgva + PAGE_SIZE);
563 				KERNEL_UNLOCK();
564 				(void)uvm_map_protect(map, p->p_md.md_fppgva,
565 				    p->p_md.md_fppgva + PAGE_SIZE,
566 				    PROT_NONE, 0, FALSE, FALSE);
567 				return;
568 			}
569 			/* FALLTHROUGH */
570 #endif
571 		default:
572 			signal = SIGTRAP;
573 			sicode = TRAP_TRACE;
574 			break;
575 		}
576 		break;
577 	    }
578 
579 	case T_IWATCH+T_USER:
580 	case T_DWATCH+T_USER:
581 	    {
582 		caddr_t va;
583 		/* compute address of trapped instruction */
584 		va = (caddr_t)trapframe->pc;
585 		if (trapframe->cause & CR_BR_DELAY)
586 			va += 4;
587 		printf("watch exception @ %p\n", va);
588 		signal = SIGTRAP;
589 		sicode = TRAP_BRKPT;
590 		break;
591 	    }
592 
593 	case T_TRAP+T_USER:
594 	    {
595 		struct trapframe *locr0 = p->p_md.md_regs;
596 		vaddr_t va;
597 		uint32_t branch = 0;
598 		uint32_t instr;
599 
600 		/* compute address of trap instruction */
601 		va = trapframe->pc;
602 		if (trapframe->cause & CR_BR_DELAY) {
603 			va += 4;
604 
605 			/* Read branch instruction. */
606 			if (copyinsn(p, trapframe->pc, &branch) != 0) {
607 				signal = SIGBUS;
608 				sicode = BUS_OBJERR;
609 				break;
610 			}
611 		}
612 
613 		/* read trap instruction */
614 		if (copyinsn(p, va, &instr) != 0) {
615 			signal = SIGBUS;
616 			sicode = BUS_OBJERR;
617 			break;
618 		}
619 
620 		if (trapframe->cause & CR_BR_DELAY)
621 			locr0->pc = MipsEmulateBranch(locr0,
622 			    trapframe->pc, 0, branch);
623 		else
624 			locr0->pc += 4;
625 		/*
626 		 * GCC 4 uses teq with code 7 to signal divide by
627 		 * zero at runtime. This is one instruction shorter
628 		 * than the BEQ + BREAK combination used by gcc 3.
629 		 */
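		/*
		 * The masks below match the SPECIAL opcode with the teq/tne
		 * function codes; bits 6-15 hold the 10-bit trap code and
		 * bits 16-20 the rt register field.
		 */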
630 		if ((instr & 0xfc00003f) == 0x00000034 /* teq */ &&
631 		    (instr & 0x001fffc0) == ((ZERO << 16) | (7 << 6))) {
632 			signal = SIGFPE;
633 			sicode = FPE_INTDIV;
634 		} else if (instr == (0x00000034 | (0x52 << 6)) /* teq */) {
635 			/* trap used by sigfill and similar */
636 			KERNEL_LOCK();
637 			sigexit(p, SIGABRT);
638 			/* NOTREACHED */
639 		} else if ((instr & 0xfc00003f) == 0x00000036 /* tne */ &&
640 		    (instr & 0x0000ffc0) == (0x52 << 6)) {
641 			KERNEL_LOCK();
642 			log(LOG_ERR, "%s[%d]: retguard trap\n",
643 			    p->p_p->ps_comm, p->p_p->ps_pid);
644 			/* Send uncatchable SIGABRT for coredump */
645 			sigexit(p, SIGABRT);
646 			/* NOTREACHED */
647 		} else {
648 			signal = SIGTRAP;
649 			sicode = TRAP_BRKPT;
650 		}
651 		break;
652 	    }
653 
654 	case T_RES_INST+T_USER:
655 	    {
656 		register_t *regs = (register_t *)trapframe;
657 		vaddr_t va;
658 		uint32_t branch = 0;
659 		InstFmt inst;
660 
661 		/* Compute the instruction's address. */
662 		va = trapframe->pc;
663 		if (trapframe->cause & CR_BR_DELAY) {
664 			va += 4;
665 
666 			/* Get the branch instruction. */
667 			if (copyinsn(p, trapframe->pc, &branch) != 0) {
668 				signal = SIGBUS;
669 				sicode = BUS_OBJERR;
670 				break;
671 			}
672 		}
673 
674 		/* Get the faulting instruction. */
675 		if (copyinsn(p, va, &inst.word) != 0) {
676 			signal = SIGBUS;
677 			sicode = BUS_OBJERR;
678 			break;
679 		}
680 
681 		/* Emulate "RDHWR rt, UserLocal". */
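		/*
		 * Userland fetches its TLS pointer with
		 *	rdhwr	rt, $29		# UserLocal
		 * which raises a reserved instruction exception on CPUs that
		 * do not implement (or do not enable) RDHWR.
		 */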
682 		if (inst.RType.op == OP_SPECIAL3 &&
683 		    inst.RType.rs == 0 &&
684 		    inst.RType.rd == 29 &&
685 		    inst.RType.shamt == 0 &&
686 		    inst.RType.func == OP_RDHWR) {
687 			regs[inst.RType.rt] = (register_t)TCB_GET(p);
688 
689 			/* Figure out where to continue. */
690 			if (trapframe->cause & CR_BR_DELAY)
691 				trapframe->pc = MipsEmulateBranch(trapframe,
692 				    trapframe->pc, 0, branch);
693 			else
694 				trapframe->pc += 4;
695 			return;
696 		}
697 
698 		signal = SIGILL;
699 		sicode = ILL_ILLOPC;
700 		break;
701 	    }
702 
703 	case T_COP_UNUSABLE+T_USER:
704 		/*
705 		 * Note MIPS IV COP1X instructions issued with FPU
706 		 * disabled correctly report coprocessor 1 as the
707 		 * unusable coprocessor number.
708 		 */
709 		if ((trapframe->cause & CR_COP_ERR) != CR_COP1_ERR) {
710 			signal = SIGILL; /* only FPU instructions allowed */
711 			sicode = ILL_ILLOPC;
712 			break;
713 		}
714 		if (CPU_HAS_FPU(ci))
715 			enable_fpu(p);
716 		else
717 			MipsFPTrap(trapframe);
718 		return;
719 
720 	case T_FPE:
721 		printf("FPU Trap: PC %lx CR %lx SR %lx\n",
722 			trapframe->pc, trapframe->cause, trapframe->sr);
723 		goto err;
724 
725 	case T_FPE+T_USER:
726 		MipsFPTrap(trapframe);
727 		return;
728 
729 	case T_OVFLOW+T_USER:
730 		signal = SIGFPE;
731 		sicode = FPE_FLTOVF;
732 		break;
733 
734 	case T_ADDR_ERR_LD:	/* misaligned access */
735 	case T_ADDR_ERR_ST:	/* misaligned access */
736 	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
737 		pcb = &p->p_addr->u_pcb;
738 		if ((onfault = pcb->pcb_onfault) != 0) {
739 			pcb->pcb_onfault = 0;
740 			trapframe->pc = onfault_table[onfault];
741 			return;
742 		}
743 		goto err;
744 
745 	default:
746 	err:
747 		disableintr();
748 #if !defined(DDB) && defined(DEBUG)
749 		trapDump("trap", printf);
750 #endif
751 		printf("\nTrap cause = %d Frame %p\n", type, trapframe);
752 		printf("Trap PC %p RA %p fault %p\n",
753 		    (void *)trapframe->pc, (void *)trapframe->ra,
754 		    (void *)trapframe->badvaddr);
755 #ifdef DDB
756 		stacktrace(!USERMODE(trapframe->sr) ? trapframe : p->p_md.md_regs);
757 		db_ktrap(type, trapframe);
758 #endif
759 		panic("trap");
760 	}
761 
762 #ifdef FPUEMUL
763 	/*
764 	 * If a relocated delay slot causes an exception, blame the
765 	 * original delay slot address - userland is not supposed to
766 	 * know anything about emulation bowels.
767 	 */
768 	if (!CPU_HAS_FPU(ci) && (p->p_md.md_flags & MDP_FPUSED) != 0 &&
769 	    trapframe->badvaddr == p->p_md.md_fppgva)
770 		trapframe->badvaddr = p->p_md.md_fpslotva;
771 #endif
772 	p->p_md.md_regs->pc = trapframe->pc;
773 	p->p_md.md_regs->cause = trapframe->cause;
774 	p->p_md.md_regs->badvaddr = trapframe->badvaddr;
775 	sv.sival_ptr = (void *)trapframe->badvaddr;
776 	trapsignal(p, signal, ucode, sicode, sv);
777 }
778 
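/*
 * Return path of a newly forked child: make the fork-like system call
 * appear to return 0 with no error (v0 is the return value, a3 the error
 * flag, as in the syscall return code above).
 */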
779 void
780 child_return(void *arg)
781 {
782 	struct proc *p = arg;
783 	struct trapframe *trapframe;
784 
785 	trapframe = p->p_md.md_regs;
786 	trapframe->v0 = 0;
787 	trapframe->a3 = 0;
788 
789 	KERNEL_UNLOCK();
790 
791 	mi_child_return(p);
792 }
793 
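/*
 * Fetch an instruction word from user space.  pmap_copyinsn() copies the
 * word if the text page is accessible through the pmap; otherwise fault
 * the page in for execute access and retry.
 */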
794 int
795 copyinsn(struct proc *p, vaddr_t uva, uint32_t *insn)
796 {
797 	struct vm_map *map = &p->p_vmspace->vm_map;
798 	int error = 0;
799 
800 	if (__predict_false(uva >= VM_MAXUSER_ADDRESS || (uva & 3) != 0))
801 		return EFAULT;
802 
803 	do {
804 		if (pmap_copyinsn(map->pmap, uva, insn))
805 			break;
806 		error = uvm_fault(map, trunc_page(uva), 0, PROT_EXEC);
807 	} while (error == 0);
808 
809 	return error;
810 }
811 
812 #if defined(DDB) || defined(DEBUG)
813 void
814 trapDump(const char *msg, int (*pr)(const char *, ...))
815 {
816 #ifdef MULTIPROCESSOR
817 	CPU_INFO_ITERATOR cii;
818 #endif
819 	struct cpu_info *ci;
820 	struct trapdebug *base, *ptrp;
821 	int i;
822 	uint pos;
823 	int s;
824 
825 	s = splhigh();
826 	(*pr)("trapDump(%s)\n", msg);
827 #ifndef MULTIPROCESSOR
828 	ci = curcpu();
829 #else
830 	CPU_INFO_FOREACH(cii, ci)
831 #endif
832 	{
833 #ifdef MULTIPROCESSOR
834 		(*pr)("cpu%d\n", ci->ci_cpuid);
835 #endif
836 		/* walk in reverse order */
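		/* (newest entry first: trppos[] indexes the next slot to fill) */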
837 		pos = trppos[ci->ci_cpuid];
838 		base = trapdebug + ci->ci_cpuid * TRAPSIZE;
839 		for (i = TRAPSIZE - 1; i >= 0; i--) {
840 			if (pos + i >= TRAPSIZE)
841 				ptrp = base + pos + i - TRAPSIZE;
842 			else
843 				ptrp = base + pos + i;
844 
845 			if (ptrp->cause == 0)
846 				break;
847 
848 			(*pr)("%s: PC %p CR 0x%08lx SR 0x%08lx\n",
849 			    trap_type[(ptrp->cause & CR_EXC_CODE) >>
850 			      CR_EXC_CODE_SHIFT],
851 			    ptrp->pc, ptrp->cause & 0xffffffff,
852 			    ptrp->status & 0xffffffff);
853 			(*pr)(" RA %p SP %p ADR %p\n",
854 			    ptrp->ra, ptrp->sp, ptrp->vadr);
855 		}
856 	}
857 
858 	splx(s);
859 }
860 #endif
861 
862 
863 /*
864  * Return the resulting PC as if the branch was executed.
865  */
866 register_t
867 MipsEmulateBranch(struct trapframe *tf, vaddr_t instPC, uint32_t fsr,
868     uint32_t curinst)
869 {
870 	register_t *regsPtr = (register_t *)tf;
871 	InstFmt inst;
872 	vaddr_t retAddr;
873 	int condition;
874 	uint cc;
875 
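/*
 * Branch targets are relative to the delay slot (InstPtr + 4), with the
 * 16-bit immediate sign-extended and shifted left by two.
 */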
876 #define	GetBranchDest(InstPtr, inst) \
877 	    (InstPtr + 4 + ((short)inst.IType.imm << 2))
878 
879 	inst.word = curinst;
880 
881 	regsPtr[ZERO] = 0;	/* Make sure zero is 0x0 */
882 
883 	switch ((int)inst.JType.op) {
884 	case OP_SPECIAL:
885 		switch ((int)inst.RType.func) {
886 		case OP_JR:
887 		case OP_JALR:
888 			retAddr = (vaddr_t)regsPtr[inst.RType.rs];
889 			break;
890 		default:
891 			retAddr = instPC + 4;
892 			break;
893 		}
894 		break;
895 	case OP_BCOND:
896 		switch ((int)inst.IType.rt) {
897 		case OP_BLTZ:
898 		case OP_BLTZL:
899 		case OP_BLTZAL:
900 		case OP_BLTZALL:
901 			if ((int64_t)(regsPtr[inst.RType.rs]) < 0)
902 				retAddr = GetBranchDest(instPC, inst);
903 			else
904 				retAddr = instPC + 8;
905 			break;
906 		case OP_BGEZ:
907 		case OP_BGEZL:
908 		case OP_BGEZAL:
909 		case OP_BGEZALL:
910 			if ((int64_t)(regsPtr[inst.RType.rs]) >= 0)
911 				retAddr = GetBranchDest(instPC, inst);
912 			else
913 				retAddr = instPC + 8;
914 			break;
915 		default:
916 			retAddr = instPC + 4;
917 			break;
918 		}
919 		break;
920 	case OP_J:
921 	case OP_JAL:
922 		retAddr = (inst.JType.target << 2) | (instPC & ~0x0fffffffUL);
923 		break;
924 	case OP_BEQ:
925 	case OP_BEQL:
926 		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
927 			retAddr = GetBranchDest(instPC, inst);
928 		else
929 			retAddr = instPC + 8;
930 		break;
931 	case OP_BNE:
932 	case OP_BNEL:
933 		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
934 			retAddr = GetBranchDest(instPC, inst);
935 		else
936 			retAddr = instPC + 8;
937 		break;
938 	case OP_BLEZ:
939 	case OP_BLEZL:
940 		if ((int64_t)(regsPtr[inst.RType.rs]) <= 0)
941 			retAddr = GetBranchDest(instPC, inst);
942 		else
943 			retAddr = instPC + 8;
944 		break;
945 	case OP_BGTZ:
946 	case OP_BGTZL:
947 		if ((int64_t)(regsPtr[inst.RType.rs]) > 0)
948 			retAddr = GetBranchDest(instPC, inst);
949 		else
950 			retAddr = instPC + 8;
951 		break;
952 	case OP_COP1:
953 		switch (inst.RType.rs) {
954 		case OP_BC:
955 			cc = (inst.RType.rt & COPz_BC_CC_MASK) >>
956 			    COPz_BC_CC_SHIFT;
957 			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
958 				condition = fsr & FPCSR_CONDVAL(cc);
959 			else
960 				condition = !(fsr & FPCSR_CONDVAL(cc));
961 			if (condition)
962 				retAddr = GetBranchDest(instPC, inst);
963 			else
964 				retAddr = instPC + 8;
965 			break;
966 		default:
967 			retAddr = instPC + 4;
968 		}
969 		break;
970 	default:
971 		retAddr = instPC + 4;
972 	}
973 
974 	return (register_t)retAddr;
975 #undef	GetBranchDest
976 }
977 
978 #ifdef PTRACE
979 
980 int
981 ptrace_read_insn(struct proc *p, vaddr_t va, uint32_t *insn)
982 {
983 	struct iovec iov;
984 	struct uio uio;
985 
986 	iov.iov_base = (caddr_t)insn;
987 	iov.iov_len = sizeof(uint32_t);
988 	uio.uio_iov = &iov;
989 	uio.uio_iovcnt = 1;
990 	uio.uio_offset = (off_t)va;
991 	uio.uio_resid = sizeof(uint32_t);
992 	uio.uio_segflg = UIO_SYSSPACE;
993 	uio.uio_rw = UIO_READ;
994 	uio.uio_procp = curproc;
995 	return process_domem(curproc, p->p_p, &uio, PT_READ_I);
996 }
997 
998 int
999 ptrace_write_insn(struct proc *p, vaddr_t va, uint32_t insn)
1000 {
1001 	struct iovec iov;
1002 	struct uio uio;
1003 
1004 	iov.iov_base = (caddr_t)&insn;
1005 	iov.iov_len = sizeof(uint32_t);
1006 	uio.uio_iov = &iov;
1007 	uio.uio_iovcnt = 1;
1008 	uio.uio_offset = (off_t)va;
1009 	uio.uio_resid = sizeof(uint32_t);
1010 	uio.uio_segflg = UIO_SYSSPACE;
1011 	uio.uio_rw = UIO_WRITE;
1012 	uio.uio_procp = curproc;
1013 	return process_domem(curproc, p->p_p, &uio, PT_WRITE_I);
1014 }
1015 
1016 /*
1017  * This routine is called by procxmt() to single step one instruction.
1018  * We do this by storing a break instruction at the instruction that will
1019  * execute next, resuming execution, and then restoring the old instruction.
1020  */
1021 int
1022 process_sstep(struct proc *p, int sstep)
1023 {
1024 	struct trapframe *locr0 = p->p_md.md_regs;
1025 	int rc;
1026 	uint32_t curinstr;
1027 	vaddr_t va;
1028 
1029 	if (sstep == 0) {
1030 		/* clear the breakpoint */
1031 		if (p->p_md.md_ss_addr != 0) {
1032 			rc = ptrace_write_insn(p, p->p_md.md_ss_addr,
1033 			    p->p_md.md_ss_instr);
1034 #ifdef DIAGNOSTIC
1035 			if (rc != 0)
1036 				printf("WARNING: %s (%d): can't restore "
1037 				    "instruction at %p: %08x\n",
1038 				    p->p_p->ps_comm, p->p_p->ps_pid,
1039 				    (void *)p->p_md.md_ss_addr,
1040 				    p->p_md.md_ss_instr);
1041 #endif
1042 			p->p_md.md_ss_addr = 0;
1043 		} else
1044 			rc = 0;
1045 		return rc;
1046 	}
1047 
1048 	/* read current instruction */
1049 	rc = ptrace_read_insn(p, locr0->pc, &curinstr);
1050 	if (rc != 0)
1051 		return rc;
1052 
1053 	/* compute next address after current location */
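	/*
	 * MipsEmulateBranch() returns pc + 4 for non-branch instructions,
	 * so this handles straight-line code as well as taken branches.
	 */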
1054 	if (curinstr != 0 /* nop */)
1055 		va = (vaddr_t)MipsEmulateBranch(locr0,
1056 		    locr0->pc, locr0->fsr, curinstr);
1057 	else
1058 		va = locr0->pc + 4;
1059 #ifdef DIAGNOSTIC
1060 	/* should not happen */
1061 	if (p->p_md.md_ss_addr != 0) {
1062 		printf("WARNING: %s (%d): breakpoint request "
1063 		    "at %p, already set at %p\n",
1064 		    p->p_p->ps_comm, p->p_p->ps_pid, (void *)va,
1065 		    (void *)p->p_md.md_ss_addr);
1066 		return EFAULT;
1067 	}
1068 #endif
1069 
1070 	/* read next instruction */
1071 	rc = ptrace_read_insn(p, va, &p->p_md.md_ss_instr);
1072 	if (rc != 0)
1073 		return rc;
1074 
1075 	/* replace with a breakpoint instruction */
1076 	rc = ptrace_write_insn(p, va, BREAK_SSTEP);
1077 	if (rc != 0)
1078 		return rc;
1079 
1080 	p->p_md.md_ss_addr = va;
1081 
1082 #ifdef DEBUG
1083 	printf("%s (%d): breakpoint set at %p: %08x (pc %p %08x)\n",
1084 		p->p_p->ps_comm, p->p_p->ps_pid, (void *)p->p_md.md_ss_addr,
1085 		p->p_md.md_ss_instr, (void *)locr0->pc, curinstr);
1086 #endif
1087 	return 0;
1088 }
1089 
1090 #endif /* PTRACE */
1091 
1092 #if defined(DDB) || defined(DEBUG)
1093 #define MIPS_JR_RA	0x03e00008	/* instruction code for jr ra */
1094 
1095 /* forward */
1096 #if !defined(DDB)
1097 const char *fn_name(vaddr_t);
1098 #endif
1099 void stacktrace_subr(struct trapframe *, int, int (*)(const char*, ...));
1100 
1101 extern char kernel_text[];
1102 extern char etext[];
1103 
1104 /*
1105  * Print a stack backtrace.
1106  */
1107 void
1108 stacktrace(struct trapframe *regs)
1109 {
1110 	stacktrace_subr(regs, 6, printf);
1111 }
1112 
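/* kernel addresses that are safe to dereference while unwinding */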
1113 #define	VALID_ADDRESS(va) \
1114 	(((va) >= VM_MIN_KERNEL_ADDRESS && (va) < VM_MAX_KERNEL_ADDRESS) || \
1115 	 IS_XKPHYS(va) || ((va) >= CKSEG0_BASE && (va) < CKSEG1_BASE))
1116 
1117 void
1118 stacktrace_subr(struct trapframe *regs, int count,
1119     int (*pr)(const char*, ...))
1120 {
1121 	vaddr_t pc, sp, ra, va, subr;
1122 	register_t a0, a1, a2, a3;
1123 	uint32_t instr, mask;
1124 	InstFmt i;
1125 	int more, stksize;
1126 	extern char k_intr[];
1127 	extern char k_general[];
1128 #ifdef DDB
1129 	db_expr_t diff;
1130 	Elf_Sym *sym;
1131 	const char *symname;
1132 #endif
1133 
1134 	/* get initial values from the exception frame */
1135 	sp = (vaddr_t)regs->sp;
1136 	pc = (vaddr_t)regs->pc;
1137 	ra = (vaddr_t)regs->ra;		/* May be a 'leaf' function */
1138 	a0 = regs->a0;
1139 	a1 = regs->a1;
1140 	a2 = regs->a2;
1141 	a3 = regs->a3;
1142 
1143 /* Jump here when done with a frame, to start a new one */
1144 loop:
1145 #ifdef DDB
1146 	symname = NULL;
1147 #endif
1148 	subr = 0;
1149 	stksize = 0;
1150 
1151 	if (count-- == 0) {
1152 		ra = 0;
1153 		goto end;
1154 	}
1155 
1156 	/* check for bad SP: could foul up next frame */
1157 	if (sp & 3 || !VALID_ADDRESS(sp)) {
1158 		(*pr)("SP %p: not in kernel\n", sp);
1159 		ra = 0;
1160 		goto end;
1161 	}
1162 
1163 	/* check for bad PC */
1164 	if (pc & 3 || !VALID_ADDRESS(pc)) {
1165 		(*pr)("PC %p: not in kernel\n", pc);
1166 		ra = 0;
1167 		goto end;
1168 	}
1169 
1170 #ifdef DDB
1171 	/*
1172 	 * Dig out the function from the symbol table.
1173 	 * Watch out for function tail optimizations.
1174 	 */
1175 	sym = db_search_symbol(pc, DB_STGY_PROC, &diff);
1176 	if (sym != NULL && diff == 0) {
1177 		instr = kdbpeek(pc - 2 * sizeof(int));
1178 		i.word = instr;
1179 		if (i.JType.op == OP_JAL) {
1180 			sym = db_search_symbol(pc - sizeof(int),
1181 			    DB_STGY_PROC, &diff);
1182 			if (sym != NULL && diff != 0)
1183 				diff += sizeof(int);
1184 		}
1185 	}
1186 	if (sym != NULL) {
1187 		db_symbol_values(sym, &symname, 0);
1188 		subr = pc - (vaddr_t)diff;
1189 	}
1190 #endif
1191 
1192 	/*
1193 	 * Find the beginning of the current subroutine by scanning backwards
1194 	 * from the current PC for the end of the previous subroutine.
1195 	 */
1196 	if (!subr) {
1197 		va = pc - sizeof(int);
1198 		while ((instr = kdbpeek(va)) != MIPS_JR_RA)
1199 			va -= sizeof(int);
1200 		va += 2 * sizeof(int);	/* skip back over branch & delay slot */
1201 		/* skip over nulls which might separate .o files */
1202 		while ((instr = kdbpeek(va)) == 0)
1203 			va += sizeof(int);
1204 		subr = va;
1205 	}
1206 
1207 	/*
1208 	 * Jump here for locore entry points for which the preceding
1209 	 * function doesn't end in "j ra"
1210 	 */
1211 	/* scan forwards to find stack size and any saved registers */
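	/*
	 * 'more' stays at 3 for ordinary instructions, drops to 2 when a
	 * branch or jump is seen (so that its delay slot is still scanned)
	 * and to 1 at a syscall/break to stop the scan immediately.
	 */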
1212 	stksize = 0;
1213 	more = 3;
1214 	mask = 0;
1215 	for (va = subr; more; va += sizeof(int),
1216 	    more = (more == 3) ? 3 : more - 1) {
1217 		/* stop if hit our current position */
1218 		if (va >= pc)
1219 			break;
1220 		instr = kdbpeek(va);
1221 		i.word = instr;
1222 		switch (i.JType.op) {
1223 		case OP_SPECIAL:
1224 			switch (i.RType.func) {
1225 			case OP_JR:
1226 			case OP_JALR:
1227 				more = 2; /* stop after next instruction */
1228 				break;
1229 
1230 			case OP_SYSCALL:
1231 			case OP_BREAK:
1232 				more = 1; /* stop now */
1233 			}
1234 			break;
1235 
1236 		case OP_BCOND:
1237 		case OP_J:
1238 		case OP_JAL:
1239 		case OP_BEQ:
1240 		case OP_BNE:
1241 		case OP_BLEZ:
1242 		case OP_BGTZ:
1243 			more = 2; /* stop after next instruction */
1244 			break;
1245 
1246 		case OP_COP0:
1247 		case OP_COP1:
1248 		case OP_COP2:
1249 		case OP_COP3:
1250 			switch (i.RType.rs) {
1251 			case OP_BC:
1252 				more = 2; /* stop after next instruction */
1253 			}
1254 			break;
1255 
1256 		case OP_SD:
1257 			/* look for saved registers on the stack */
1258 			if (i.IType.rs != SP)
1259 				break;
1260 			/* only restore the first one */
1261 			if (mask & (1 << i.IType.rt))
1262 				break;
1263 			mask |= (1 << i.IType.rt);
1264 			switch (i.IType.rt) {
1265 			case A0:
1266 				a0 = kdbpeekd(sp + (int16_t)i.IType.imm);
1267 				break;
1268 			case A1:
1269 				a1 = kdbpeekd(sp + (int16_t)i.IType.imm);
1270 				break;
1271 			case A2:
1272 				a2 = kdbpeekd(sp + (int16_t)i.IType.imm);
1273 				break;
1274 			case A3:
1275 				a3 = kdbpeekd(sp + (int16_t)i.IType.imm);
1276 				break;
1277 			case RA:
1278 				ra = kdbpeekd(sp + (int16_t)i.IType.imm);
1279 				break;
1280 			}
1281 			break;
1282 
1283 		case OP_DADDI:
1284 		case OP_DADDIU:
1285 			/* look for stack pointer adjustment */
1286 			if (i.IType.rs != SP || i.IType.rt != SP)
1287 				break;
1288 			stksize = -((int16_t)i.IType.imm);
1289 		}
1290 	}
1291 
1292 #ifdef DDB
1293 	if (symname == NULL)
1294 		(*pr)("%p ", subr);
1295 	else
1296 		(*pr)("%s+%p ", symname, diff);
1297 #else
1298 	(*pr)("%s+%p ", fn_name(subr), pc - subr);
1299 #endif
1300 	(*pr)("(%llx,%llx,%llx,%llx) ", a0, a1, a2, a3);
1301 	(*pr)(" ra %p sp %p, sz %d\n", ra, sp, stksize);
1302 
1303 	if (subr == (vaddr_t)k_intr || subr == (vaddr_t)k_general) {
1304 		if (subr == (vaddr_t)k_general)
1305 			(*pr)("(KERNEL TRAP)\n");
1306 		else
1307 			(*pr)("(KERNEL INTERRUPT)\n");
1308 		sp = *(register_t *)sp;
1309 		pc = ((struct trapframe *)sp)->pc;
1310 		ra = ((struct trapframe *)sp)->ra;
1311 		sp = ((struct trapframe *)sp)->sp;
1312 		goto loop;
1313 	}
1314 
1315 end:
1316 	if (ra) {
1317 		if (pc == ra && stksize == 0)
1318 			(*pr)("stacktrace: loop!\n");
1319 		else if (ra < (vaddr_t)kernel_text || ra > (vaddr_t)etext)
1320 			(*pr)("stacktrace: ra corrupted!\n");
1321 		else {
1322 			pc = ra;
1323 			sp += stksize;
1324 			ra = 0;
1325 			goto loop;
1326 		}
1327 	} else {
1328 		if (curproc)
1329 			(*pr)("User-level: pid %d\n", curproc->p_p->ps_pid);
1330 		else
1331 			(*pr)("User-level: curproc NULL\n");
1332 	}
1333 }
1334 
1335 #ifdef DDB
1336 void
1337 stacktrace_save_at(struct stacktrace *st, unsigned int skip)
1338 {
1339 	extern char k_general[];
1340 	extern char u_general[];
1341 	extern char k_intr[];
1342 	extern char u_intr[];
1343 	db_expr_t diff;
1344 	Elf_Sym *sym;
1345 	struct trapframe *tf;
1346 	vaddr_t pc, ra, sp, subr, va;
1347 	InstFmt inst;
1348 	int first = 1;
1349 	int done, framesize;
1350 
1351 	/* Get a pc that comes after the prologue in this subroutine. */
1352 	__asm__ volatile ("1: dla %0, 1b" : "=r" (pc));
1353 
1354 	ra = (vaddr_t)__builtin_return_address(0);
1355 	sp = (vaddr_t)__builtin_frame_address(0);
1356 
1357 	st->st_count = 0;
1358 	while (st->st_count < STACKTRACE_MAX && pc != 0) {
1359 		if ((pc & 0x3) != 0 ||
1360 		    pc < (vaddr_t)kernel_text || pc >= (vaddr_t)etext)
1361 			break;
1362 		if ((sp & 0x7) != 0 || !VALID_ADDRESS(sp))
1363 			break;
1364 
1365 		if (!first) {
1366 			if (skip == 0)
1367 				st->st_pc[st->st_count++] = pc;
1368 			else
1369 				skip--;
1370 		}
1371 		first = 0;
1372 
1373 		/* Determine the start address of the current subroutine. */
1374 		sym = db_search_symbol(pc, DB_STGY_PROC, &diff);
1375 		if (sym == NULL)
1376 			break;
1377 		subr = pc - (vaddr_t)diff;
1378 
1379 		if ((subr & 0x3) != 0)
1380 			break;
1381 		if (subr == (vaddr_t)u_general || subr == (vaddr_t)u_intr)
1382 			break;
1383 		if (subr == (vaddr_t)k_general || subr == (vaddr_t)k_intr) {
1384 			tf = (struct trapframe *)*(register_t *)sp;
1385 			pc = tf->pc;
1386 			ra = tf->ra;
1387 			sp = tf->sp;
1388 			continue;
1389 		}
1390 
1391 		/*
1392 		 * Figure out the return address and the size of the current
1393 		 * stack frame by analyzing the subroutine's prologue.
1394 		 */
1395 		done = 0;
1396 		framesize = 0;
1397 		for (va = subr; va < pc && !done; va += 4) {
1398 			inst.word = *(uint32_t *)va;
1399 			if (inst_call(inst.word) || inst_return(inst.word)) {
1400 				/* Check the delay slot and stop. */
1401 				va += 4;
1402 				inst.word = *(uint32_t *)va;
1403 				done = 1;
1404 			}
1405 			switch (inst.JType.op) {
1406 			case OP_SPECIAL:
1407 				switch (inst.RType.func) {
1408 				case OP_SYSCALL:
1409 				case OP_BREAK:
1410 					done = 1;
1411 				}
1412 				break;
1413 			case OP_SD:
1414 				if (inst.IType.rs == SP &&
1415 				    inst.IType.rt == RA && ra == 0)
1416 					ra = *(uint64_t *)(sp +
1417 					    (int16_t)inst.IType.imm);
1418 				break;
1419 			case OP_DADDI:
1420 			case OP_DADDIU:
1421 				if (inst.IType.rs == SP &&
1422 				    inst.IType.rt == SP &&
1423 				    (int16_t)inst.IType.imm < 0 &&
1424 				    framesize == 0)
1425 					framesize = -(int16_t)inst.IType.imm;
1426 				break;
1427 			}
1428 
1429 			if (framesize != 0 && ra != 0)
1430 				break;
1431 		}
1432 
1433 		pc = ra;
1434 		ra = 0;
1435 		sp += framesize;
1436 	}
1437 }
1438 
1439 void
1440 stacktrace_save_utrace(struct stacktrace *st)
1441 {
1442 	st->st_count = 0;
1443 }
1444 #endif
1445 
1446 #undef	VALID_ADDRESS
1447 
1448 #if !defined(DDB)
1449 /*
1450  * Functions ``special'' enough to print by name
1451  */
1452 #ifdef __STDC__
1453 #define Name(_fn)  { (void*)_fn, # _fn }
1454 #else
1455 #define Name(_fn) { _fn, "_fn"}
1456 #endif
1457 static const struct { void *addr; const char *name;} names[] = {
1458 	Name(trap),
1459 	{ 0, NULL }
1460 };
1461 
1462 /*
1463  * Map a function address to a string name, if known; or a hex string.
1464  */
1465 const char *
1466 fn_name(vaddr_t addr)
1467 {
1468 	static char buf[19];
1469 	int i = 0;
1470 
1471 	for (i = 0; names[i].name != NULL; i++)
1472 		if (names[i].addr == (void*)addr)
1473 			return (names[i].name);
1474 	snprintf(buf, sizeof(buf), "%p", (void *)addr);
1475 	return (buf);
1476 }
1477 #endif	/* !DDB */
1478 
1479 #endif /* DDB || DEBUG */
1480 
1481 #ifdef FPUEMUL
1482 /*
1483  * Set up a successful branch emulation.
1484  * The delay slot instruction is copied to a reserved page, followed by a
1485  * trap instruction to get control back, after which execution resumes at
1486  * the branch destination.
1487  */
1488 int
1489 fpe_branch_emulate(struct proc *p, struct trapframe *tf, uint32_t insn,
1490     vaddr_t dest)
1491 {
1492 	struct vm_map *map = &p->p_vmspace->vm_map;
1493 	InstFmt inst;
1494 	int rc;
1495 
1496 	/*
1497 	 * Check the delay slot instruction: since it will run as a
1498 	 * non-delay slot instruction, we want to reject branch instructions
1499 	 * (whose behaviour, when in a delay slot, is undefined anyway).
1500 	 */
1501 
1502 	inst = *(InstFmt *)&insn;
1503 	rc = 0;
1504 	switch ((int)inst.JType.op) {
1505 	case OP_SPECIAL:
1506 		switch ((int)inst.RType.func) {
1507 		case OP_JR:
1508 		case OP_JALR:
1509 			rc = EINVAL;
1510 			break;
1511 		}
1512 		break;
1513 	case OP_BCOND:
1514 		switch ((int)inst.IType.rt) {
1515 		case OP_BLTZ:
1516 		case OP_BLTZL:
1517 		case OP_BLTZAL:
1518 		case OP_BLTZALL:
1519 		case OP_BGEZ:
1520 		case OP_BGEZL:
1521 		case OP_BGEZAL:
1522 		case OP_BGEZALL:
1523 			rc = EINVAL;
1524 			break;
1525 		}
1526 		break;
1527 	case OP_J:
1528 	case OP_JAL:
1529 	case OP_BEQ:
1530 	case OP_BEQL:
1531 	case OP_BNE:
1532 	case OP_BNEL:
1533 	case OP_BLEZ:
1534 	case OP_BLEZL:
1535 	case OP_BGTZ:
1536 	case OP_BGTZL:
1537 		rc = EINVAL;
1538 		break;
1539 	case OP_COP1:
1540 		if (inst.RType.rs == OP_BC)	/* oh the irony */
1541 			rc = EINVAL;
1542 		break;
1543 	}
1544 
1545 	if (rc != 0) {
1546 #ifdef DEBUG
1547 		printf("%s: bogus delay slot insn %08x\n", __func__, insn);
1548 #endif
1549 		return rc;
1550 	}
1551 
1552 	/*
1553 	 * Temporarily change protection over the page used to relocate
1554 	 * the delay slot, and fault it in.
1555 	 */
1556 
1557 	rc = uvm_map_protect(map, p->p_md.md_fppgva,
1558 	    p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_WRITE, 0, FALSE,
1559 	    FALSE);
1560 	if (rc != 0) {
1561 #ifdef DEBUG
1562 		printf("%s: uvm_map_protect on %p failed: %d\n",
1563 		    __func__, (void *)p->p_md.md_fppgva, rc);
1564 #endif
1565 		return rc;
1566 	}
1567 	KERNEL_LOCK();
1568 	rc = uvm_fault_wire(map, p->p_md.md_fppgva,
1569 	    p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_WRITE);
1570 	KERNEL_UNLOCK();
1571 	if (rc != 0) {
1572 #ifdef DEBUG
1573 		printf("%s: uvm_fault_wire on %p failed: %d\n",
1574 		    __func__, (void *)p->p_md.md_fppgva, rc);
1575 #endif
1576 		goto err2;
1577 	}
1578 
1579 	rc = copyout(&insn, (void *)p->p_md.md_fppgva, sizeof insn);
1580 	if (rc != 0) {
1581 #ifdef DEBUG
1582 		printf("%s: copyout %p failed %d\n",
1583 		    __func__, (void *)p->p_md.md_fppgva, rc);
1584 #endif
1585 		goto err;
1586 	}
1587 	insn = BREAK_FPUEMUL;
1588 	rc = copyout(&insn, (void *)(p->p_md.md_fppgva + 4), sizeof insn);
1589 	if (rc != 0) {
1590 #ifdef DEBUG
1591 		printf("%s: copyout %p failed %d\n",
1592 		    __func__, (void *)(p->p_md.md_fppgva + 4), rc);
1593 #endif
1594 		goto err;
1595 	}
1596 
1597 	(void)uvm_map_protect(map, p->p_md.md_fppgva,
1598 	    p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_EXEC, 0, FALSE, FALSE);
1599 	p->p_md.md_fpbranchva = dest;
1600 	p->p_md.md_fpslotva = (vaddr_t)tf->pc + 4;
1601 	p->p_md.md_flags |= MDP_FPUSED;
1602 	tf->pc = p->p_md.md_fppgva;
1603 
1604 	return 0;
1605 
1606 err:
1607 	KERNEL_LOCK();
1608 	uvm_fault_unwire(map, p->p_md.md_fppgva, p->p_md.md_fppgva + PAGE_SIZE);
1609 	KERNEL_UNLOCK();
1610 err2:
1611 	(void)uvm_map_protect(map, p->p_md.md_fppgva,
1612 	    p->p_md.md_fppgva + PAGE_SIZE, PROT_NONE, 0, FALSE, FALSE);
1613 	return rc;
1614 }
1615 #endif
1616