xref: /openbsd-src/sys/arch/mips64/mips64/trap.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /*	$OpenBSD: trap.c,v 1.118 2016/08/16 13:03:58 visa Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department and Ralph Campbell.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  * from: Utah Hdr: trap.c 1.32 91/04/06
41  *
42  *	from: @(#)trap.c	8.5 (Berkeley) 1/11/94
43  */
44 
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/exec.h>
49 #include <sys/proc.h>
50 #include <sys/kernel.h>
51 #include <sys/signalvar.h>
52 #include <sys/user.h>
53 #include <sys/syscall.h>
54 #include <sys/syscall_mi.h>
55 #include <sys/buf.h>
56 #include <sys/device.h>
57 #include <sys/atomic.h>
58 #ifdef PTRACE
59 #include <sys/ptrace.h>
60 #endif
61 
62 #include <uvm/uvm_extern.h>
63 
64 #include <machine/autoconf.h>
65 #include <machine/cpu.h>
66 #include <mips64/mips_cpu.h>
67 #include <machine/fpu.h>
68 #include <machine/frame.h>
69 #include <machine/mips_opcode.h>
70 #include <machine/regnum.h>
71 #include <machine/trap.h>
72 
73 #ifdef DDB
74 #include <mips64/db_machdep.h>
75 #include <ddb/db_output.h>
76 #include <ddb/db_sym.h>
77 #endif
78 
79 #include <sys/syslog.h>
80 
81 #define	USERMODE(ps)	(((ps) & SR_KSU_MASK) == SR_KSU_USER)
82 
83 const char *trap_type[] = {
84 	"external interrupt",
85 	"TLB modification",
86 	"TLB miss (load or instr. fetch)",
87 	"TLB miss (store)",
88 	"address error (load or I-fetch)",
89 	"address error (store)",
90 	"bus error (I-fetch)",
91 	"bus error (load or store)",
92 	"system call",
93 	"breakpoint",
94 	"reserved instruction",
95 	"coprocessor unusable",
96 	"arithmetic overflow",
97 	"trap",
98 	"virtual coherency instruction",
99 	"floating point",
100 	"reserved 16",
101 	"reserved 17",
102 	"reserved 18",
103 	"reserved 19",
104 	"reserved 20",
105 	"reserved 21",
106 	"reserved 22",
107 	"watch",
108 	"reserved 24",
109 	"reserved 25",
110 	"reserved 26",
111 	"reserved 27",
112 	"reserved 28",
113 	"reserved 29",
114 	"reserved 30",
115 	"virtual coherency data"
116 };
117 
118 #if defined(DDB) || defined(DEBUG)
119 struct trapdebug trapdebug[MAXCPUS * TRAPSIZE];
120 uint trppos[MAXCPUS];
121 
122 void	stacktrace(struct trapframe *);
123 uint32_t kdbpeek(vaddr_t);
124 uint64_t kdbpeekd(vaddr_t);
125 #endif	/* DDB || DEBUG */
126 
127 #if defined(DDB)
128 extern int db_ktrap(int, db_regs_t *);
129 #endif
130 
131 void	ast(void);
132 extern void interrupt(struct trapframe *);
133 void	itsa(struct trapframe *, struct cpu_info *, struct proc *, int);
134 void	trap(struct trapframe *);
135 #ifdef PTRACE
136 int	ptrace_read_insn(struct proc *, vaddr_t, uint32_t *);
137 int	ptrace_write_insn(struct proc *, vaddr_t, uint32_t);
138 int	process_sstep(struct proc *, int);
139 #endif
140 
141 /*
142  * Handle an AST for the current process.
143  */
144 void
145 ast(void)
146 {
147 	struct cpu_info *ci = curcpu();
148 	struct proc *p = ci->ci_curproc;
149 
150 	p->p_md.md_astpending = 0;
151 
152 	atomic_inc_int(&uvmexp.softs);
153 	mi_ast(p, ci->ci_want_resched);
154 	userret(p);
155 }
156 
157 /*
158  * Handle an exception.
159  * In the case of a kernel trap, we return the pc where to resume if
160  * pcb_onfault is set, otherwise, return old pc.
161  */
162 void
163 trap(struct trapframe *trapframe)
164 {
165 	struct cpu_info *ci = curcpu();
166 	struct proc *p = ci->ci_curproc;
167 	int type;
168 
	/* Extract the exception code from the saved CP0 cause register. */
169 	type = (trapframe->cause & CR_EXC_CODE) >> CR_EXC_CODE_SHIFT;
170 
171 #if defined(CPU_R8000) && !defined(DEBUG_INTERRUPT)
172 	if (type != T_INT)
173 #endif
174 		trapdebug_enter(ci, trapframe, -1);
175 
	/* Syscalls are counted separately (uvmexp.syscalls, in itsa()). */
176 #ifdef CPU_R8000
177 	if (type != T_INT && type != T_SYSCALL)
178 #else
179 	if (type != T_SYSCALL)
180 #endif
181 		atomic_inc_int(&uvmexp.traps);
	/* Tag the exception type if it was raised from userland. */
182 	if (USERMODE(trapframe->sr))
183 		type |= T_USER;
184 
185 	/*
186 	 * Enable hardware interrupts if they were on before the trap;
187 	 * enable IPI interrupts only otherwise.
188 	 */
189 	switch (type) {
190 #ifdef CPU_R8000
191 	case T_INT:
192 	case T_INT | T_USER:
193 #endif
194 	case T_BREAK:
195 		break;
196 	default:
197 		if (ISSET(trapframe->sr, SR_INT_ENAB))
198 			enableintr();
199 		else {
200 #ifdef MULTIPROCESSOR
201 			ENABLEIPI();
202 #endif
203 		}
204 		break;
205 	}
206 
207 #ifdef CPU_R8000
208 	/*
209 	 * Some exception causes on R8000 are actually detected by external
210 	 * circuitry, and as such are reported as external interrupts.
211 	 * On R8000 kernels, external interrupts vector to trap() instead of
212 	 * interrupt(), so that we can process these particular exceptions
213 	 * as if they were triggered as regular exceptions.
214 	 */
215 	if ((type & ~T_USER) == T_INT) {
216 		if (trapframe->cause & CR_VCE) {
217 #ifndef DEBUG_INTERRUPT
218 			trapdebug_enter(ci, trapframe, -1);
219 #endif
220 			panic("VCE or TLBX");
221 		}
222 
223 		if (trapframe->cause & CR_FPE) {
224 #ifndef DEBUG_INTERRUPT
225 			trapdebug_enter(ci, trapframe, -1);
226 #endif
227 			atomic_inc_int(&uvmexp.traps);
228 			if (type & T_USER)
229 				refreshcreds(p);
230 			itsa(trapframe, ci, p, T_FPE | (type & T_USER));
231 			cp0_reset_cause(CR_FPE);
232 		}
233 
234 		if (trapframe->cause & CR_INT_MASK) {
235 			/*
236 			 * Similar reality check as done in interrupt(), in
237 			 * case an interrupt occured between a write to
238 			 * COP_0_STATUS_REG and it taking effect.
239 			 * (I have never seen this occuring on R8000 but
240 			 *  this is cheap)
241 			 */
242 			if (ISSET(trapframe->sr, SR_INT_ENAB))
243 				interrupt(trapframe);
244 		}
245 
246 		if ((trapframe->cause & CR_FPE) && (type & T_USER))
247 			userret(p);
248 
249 		return;
250 	}
251 #endif
252 
253 	if (type & T_USER)
254 		refreshcreds(p);
255 
	/* Handle the exception proper. */
256 	itsa(trapframe, ci, p, type);
257 
	/* For user faults, deliver signals/reschedule before returning. */
258 	if (type & T_USER)
259 		userret(p);
260 }
261 
262 /*
263  * Handle a single exception.
264  */
/*
 * Dispatch a single exception by type.  Cases that fully handle the
 * exception return directly; cases that fall off the switch deliver
 * signal `i' (with si_code `typ' and code `ucode') to the process.
 * Unhandled kernel faults end up at `err' and panic.
 */
265 void
266 itsa(struct trapframe *trapframe, struct cpu_info *ci, struct proc *p,
267     int type)
268 {
269 	int i;
270 	unsigned ucode = 0;
271 	vm_prot_t ftype;
272 	extern vaddr_t onfault_table[];
273 	int onfault;
274 	int typ = 0;
275 	union sigval sv;
276 	struct pcb *pcb;
277 
278 	switch (type) {
279 	case T_TLB_MOD:
280 		/* check for kernel address */
281 		if (trapframe->badvaddr < 0) {
282 			if (pmap_emulate_modify(pmap_kernel(),
283 			    trapframe->badvaddr)) {
284 				/* write to read only page in the kernel */
285 				ftype = PROT_WRITE;
286 				pcb = &p->p_addr->u_pcb;
287 				goto kernel_fault;
288 			}
289 			return;
290 		}
291 		/* FALLTHROUGH */
292 
293 	case T_TLB_MOD+T_USER:
294 		if (pmap_emulate_modify(p->p_vmspace->vm_map.pmap,
295 		    trapframe->badvaddr)) {
296 			/* write to read only page */
297 			ftype = PROT_WRITE;
298 			pcb = &p->p_addr->u_pcb;
299 			goto fault_common_no_miss;
300 		}
301 		return;
302 
303 	case T_TLB_LD_MISS:
304 	case T_TLB_ST_MISS:
305 		if (type == T_TLB_LD_MISS) {
306 #ifdef CPU_OCTEON
307 			vaddr_t pc;
308 
309 			/*
310 			 * Check if the fault was caused by
311 			 * an instruction fetch.
312 			 */
313 			pc = trapframe->pc;
314 			if (trapframe->cause & CR_BR_DELAY)
315 				pc += 4;
316 			if (pc == trapframe->badvaddr)
317 				ftype = PROT_EXEC;
318 			else
319 #endif
320 			ftype = PROT_READ;
321 		} else
322 			ftype = PROT_WRITE;
323 
324 		pcb = &p->p_addr->u_pcb;
325 		/* check for kernel address */
326 		if (trapframe->badvaddr < 0) {
327 			vaddr_t va;
328 			int rv;
329 
330 	kernel_fault:
331 			va = trunc_page((vaddr_t)trapframe->badvaddr);
			/* Clear pcb_onfault so a nested fault panics cleanly. */
332 			onfault = pcb->pcb_onfault;
333 			pcb->pcb_onfault = 0;
334 			KERNEL_LOCK();
335 			rv = uvm_fault(kernel_map, va, 0, ftype);
336 			KERNEL_UNLOCK();
337 			pcb->pcb_onfault = onfault;
338 			if (rv == 0)
339 				return;
340 			if (onfault != 0) {
341 				pcb->pcb_onfault = 0;
342 				trapframe->pc = onfault_table[onfault];
343 				return;
344 			}
345 			goto err;
346 		}
347 		/*
348 		 * It is an error for the kernel to access user space except
349 		 * through the copyin/copyout routines.
350 		 */
351 		if (pcb->pcb_onfault != 0) {
352 			/*
353 			 * We want to resolve the TLB fault before invoking
354 			 * pcb_onfault if necessary.
355 			 */
356 			goto fault_common;
357 		} else {
358 			goto err;
359 		}
360 
361 	case T_TLB_LD_MISS+T_USER: {
362 #ifdef CPU_OCTEON
363 		vaddr_t pc;
364 
365 		/* Check if the fault was caused by an instruction fetch. */
366 		pc = trapframe->pc;
367 		if (trapframe->cause & CR_BR_DELAY)
368 			pc += 4;
369 		if (pc == trapframe->badvaddr)
370 			ftype = PROT_EXEC;
371 		else
372 #endif
373 		ftype = PROT_READ;
374 		pcb = &p->p_addr->u_pcb;
375 		goto fault_common;
376 	}
377 
378 	case T_TLB_ST_MISS+T_USER:
379 		ftype = PROT_WRITE;
380 		pcb = &p->p_addr->u_pcb;
381 fault_common:
382 
383 #ifdef CPU_R4000
384 		if (r4000_errata != 0) {
385 			if (eop_tlb_miss_handler(trapframe, ci, p) != 0)
386 				return;
387 		}
388 #endif
389 
390 fault_common_no_miss:
391 
392 #ifdef CPU_R4000
393 		if (r4000_errata != 0) {
394 			eop_cleanup(trapframe, p);
395 		}
396 #endif
397 
398 	    {
399 		vaddr_t va;
400 		struct vmspace *vm;
401 		vm_map_t map;
402 		int rv;
403 
404 		vm = p->p_vmspace;
405 		map = &vm->vm_map;
406 		va = trunc_page((vaddr_t)trapframe->badvaddr);
407 
408 		onfault = pcb->pcb_onfault;
409 		pcb->pcb_onfault = 0;
410 		KERNEL_LOCK();
411 
412 		rv = uvm_fault(map, va, 0, ftype);
413 		pcb->pcb_onfault = onfault;
414 
415 		/*
416 		 * If this was a stack access we keep track of the maximum
417 		 * accessed stack size.  Also, if vm_fault gets a protection
418 		 * failure it is due to accessing the stack region outside
419 		 * the current limit and we need to reflect that as an access
420 		 * error.
421 		 */
422 		if ((caddr_t)va >= vm->vm_maxsaddr) {
423 			if (rv == 0)
424 				uvm_grow(p, va);
425 			else if (rv == EACCES)
426 				rv = EFAULT;
427 		}
428 		KERNEL_UNLOCK();
429 		if (rv == 0)
430 			return;
431 		if (!USERMODE(trapframe->sr)) {
432 			if (onfault != 0) {
433 				pcb->pcb_onfault = 0;
434 				trapframe->pc =  onfault_table[onfault];
435 				return;
436 			}
437 			goto err;
438 		}
439 
		/* Unresolved user fault: deliver SIGSEGV below. */
440 		ucode = ftype;
441 		i = SIGSEGV;
442 		typ = SEGV_MAPERR;
443 		break;
444 	    }
445 
446 	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
447 	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
448 		ucode = 0;		/* XXX should be PROT_something */
449 		i = SIGBUS;
450 		typ = BUS_ADRALN;
451 		break;
452 	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
453 	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
454 		ucode = 0;		/* XXX should be PROT_something */
455 		i = SIGBUS;
456 		typ = BUS_OBJERR;
457 		break;
458 
459 	case T_SYSCALL+T_USER:
460 	    {
461 		struct trapframe *locr0 = p->p_md.md_regs;
462 		struct sysent *callp;
463 		unsigned int code;
464 		register_t tpc;
465 		int numsys, error;
466 		struct args {
467 			register_t i[8];
468 		} args;
469 		register_t rval[2];
470 
471 		atomic_inc_int(&uvmexp.syscalls);
472 
473 		/* compute next PC after syscall instruction */
474 		tpc = trapframe->pc; /* Remember if restart */
475 		if (trapframe->cause & CR_BR_DELAY)
476 			locr0->pc = MipsEmulateBranch(locr0,
477 			    trapframe->pc, 0, 0);
478 		else
479 			locr0->pc += 4;
480 		callp = p->p_p->ps_emul->e_sysent;
481 		numsys = p->p_p->ps_emul->e_nsysent;
		/* Syscall number is passed in v0; arguments in a0..a7. */
482 		code = locr0->v0;
483 		switch (code) {
484 		case SYS_syscall:
485 		case SYS___syscall:
486 			/*
487 			 * Code is first argument, followed by actual args.
488 			 * __syscall provides the code as a quad to maintain
489 			 * proper alignment of 64-bit arguments on 32-bit
490 			 * platforms, which doesn't change anything here.
491 			 */
492 			code = locr0->a0;
493 			if (code >= numsys)
494 				callp += p->p_p->ps_emul->e_nosys; /* (illegal) */
495 			else
496 				callp += code;
497 			i = callp->sy_argsize / sizeof(register_t);
498 			args.i[0] = locr0->a1;
499 			args.i[1] = locr0->a2;
500 			args.i[2] = locr0->a3;
501 			if (i > 3) {
502 				args.i[3] = locr0->a4;
503 				args.i[4] = locr0->a5;
504 				args.i[5] = locr0->a6;
505 				args.i[6] = locr0->a7;
				/* 8th argument spills to the user stack. */
506 				if (i > 7)
507 					if ((error = copyin((void *)locr0->sp,
508 					    &args.i[7], sizeof(register_t))))
509 						goto bad;
510 			}
511 			break;
512 		default:
513 			if (code >= numsys)
514 				callp += p->p_p->ps_emul->e_nosys; /* (illegal) */
515 			else
516 				callp += code;
517 
518 			i = callp->sy_narg;
519 			args.i[0] = locr0->a0;
520 			args.i[1] = locr0->a1;
521 			args.i[2] = locr0->a2;
522 			args.i[3] = locr0->a3;
523 			if (i > 4) {
524 				args.i[4] = locr0->a4;
525 				args.i[5] = locr0->a5;
526 				args.i[6] = locr0->a6;
527 				args.i[7] = locr0->a7;
528 			}
529 		}
530 
531 		rval[0] = 0;
532 		rval[1] = locr0->v1;
533 
534 #if defined(DDB) || defined(DEBUG)
		/* Record the syscall code in the most recent trapdebug slot. */
535 		trapdebug[TRAPSIZE * ci->ci_cpuid + (trppos[ci->ci_cpuid] == 0 ?
536 		    TRAPSIZE : trppos[ci->ci_cpuid]) - 1].code = code;
537 #endif
538 
539 		error = mi_syscall(p, code, callp, args.i, rval);
540 
		/* Return convention: a3 = error flag, v0 = result or errno. */
541 		switch (error) {
542 		case 0:
543 			locr0->v0 = rval[0];
544 			locr0->v1 = rval[1];
545 			locr0->a3 = 0;
546 			break;
547 
548 		case ERESTART:
549 			locr0->pc = tpc;
550 			break;
551 
552 		case EJUSTRETURN:
553 			break;	/* nothing to do */
554 
555 		default:
556 		bad:
557 			locr0->v0 = error;
558 			locr0->a3 = 1;
559 		}
560 
561 		mi_syscall_return(p, code, error, rval);
562 
563 		return;
564 	    }
565 
566 	case T_BREAK:
567 #ifdef DDB
568 		db_ktrap(type, trapframe);
569 #endif
570 		/* Reenable interrupts if necessary */
571 		if (trapframe->sr & SR_INT_ENAB) {
572 			enableintr();
573 		}
574 		return;
575 
576 	case T_BREAK+T_USER:
577 	    {
578 		caddr_t va;
579 		u_int32_t instr;
580 		struct trapframe *locr0 = p->p_md.md_regs;
581 
582 		/* compute address of break instruction */
583 		va = (caddr_t)trapframe->pc;
584 		if (trapframe->cause & CR_BR_DELAY)
585 			va += 4;
586 
587 		/* read break instruction */
588 		copyin(va, &instr, sizeof(int32_t));
589 
590 		switch ((instr & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT) {
591 		case 6:	/* gcc range error */
592 			i = SIGFPE;
593 			typ = FPE_FLTSUB;
594 			/* skip instruction */
595 			if (trapframe->cause & CR_BR_DELAY)
596 				locr0->pc = MipsEmulateBranch(locr0,
597 				    trapframe->pc, 0, 0);
598 			else
599 				locr0->pc += 4;
600 			break;
601 		case 7:	/* gcc3 divide by zero */
602 			i = SIGFPE;
603 			typ = FPE_INTDIV;
604 			/* skip instruction */
605 			if (trapframe->cause & CR_BR_DELAY)
606 				locr0->pc = MipsEmulateBranch(locr0,
607 				    trapframe->pc, 0, 0);
608 			else
609 				locr0->pc += 4;
610 			break;
611 #ifdef PTRACE
612 		case BREAK_SSTEP_VAL:
613 			if (p->p_md.md_ss_addr == (long)va) {
614 #ifdef DEBUG
615 				printf("trap: %s (%d): breakpoint at %p "
616 				    "(insn %08x)\n",
617 				    p->p_comm, p->p_pid,
618 				    (void *)p->p_md.md_ss_addr,
619 				    p->p_md.md_ss_instr);
620 #endif
621 
622 				/* Restore original instruction and clear BP */
623 				KERNEL_LOCK();
624 				process_sstep(p, 0);
625 				KERNEL_UNLOCK();
626 				typ = TRAP_BRKPT;
627 			} else {
628 				typ = TRAP_TRACE;
629 			}
630 			i = SIGTRAP;
631 			break;
632 #endif
633 #ifdef FPUEMUL
634 		case BREAK_FPUEMUL_VAL:
635 			/*
636 			 * If this is a genuine FP emulation break,
637 			 * resume execution to our branch destination.
638 			 */
639 			if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
640 			    p->p_md.md_fppgva + 4 == (vaddr_t)va) {
641 				struct vm_map *map = &p->p_vmspace->vm_map;
642 
643 				p->p_md.md_flags &= ~MDP_FPUSED;
644 				locr0->pc = p->p_md.md_fpbranchva;
645 
646 				/*
647 				 * Prevent access to the relocation page.
648 				 * XXX needs to be fixed to work with rthreads
649 				 */
650 				KERNEL_LOCK();
651 				uvm_fault_unwire(map, p->p_md.md_fppgva,
652 				    p->p_md.md_fppgva + PAGE_SIZE);
653 				KERNEL_UNLOCK();
654 				(void)uvm_map_protect(map, p->p_md.md_fppgva,
655 				    p->p_md.md_fppgva + PAGE_SIZE,
656 				    PROT_NONE, FALSE);
657 				return;
658 			}
659 			/* FALLTHROUGH */
660 #endif
661 		default:
662 			typ = TRAP_TRACE;
663 			i = SIGTRAP;
664 			break;
665 		}
666 		break;
667 	    }
668 
669 	case T_IWATCH+T_USER:
670 	case T_DWATCH+T_USER:
671 	    {
672 		caddr_t va;
673 		/* compute address of trapped instruction */
674 		va = (caddr_t)trapframe->pc;
675 		if (trapframe->cause & CR_BR_DELAY)
676 			va += 4;
677 		printf("watch exception @ %p\n", va);
678 		i = SIGTRAP;
679 		typ = TRAP_BRKPT;
680 		break;
681 	    }
682 
683 	case T_TRAP+T_USER:
684 	    {
685 		caddr_t va;
686 		u_int32_t instr;
687 		struct trapframe *locr0 = p->p_md.md_regs;
688 
689 		/* compute address of trap instruction */
690 		va = (caddr_t)trapframe->pc;
691 		if (trapframe->cause & CR_BR_DELAY)
692 			va += 4;
693 		/* read break instruction */
694 		copyin(va, &instr, sizeof(int32_t));
695 
696 		if (trapframe->cause & CR_BR_DELAY)
697 			locr0->pc = MipsEmulateBranch(locr0,
698 			    trapframe->pc, 0, 0);
699 		else
700 			locr0->pc += 4;
701 		/*
702 		 * GCC 4 uses teq with code 7 to signal divide by
703 	 	 * zero at runtime. This is one instruction shorter
704 		 * than the BEQ + BREAK combination used by gcc 3.
705 		 */
706 		if ((instr & 0xfc00003f) == 0x00000034 /* teq */ &&
707 		    (instr & 0x001fffc0) == ((ZERO << 16) | (7 << 6))) {
708 			i = SIGFPE;
709 			typ = FPE_INTDIV;
710 		} else {
711 			i = SIGEMT;	/* Stuff it with something for now */
712 			typ = 0;
713 		}
714 		break;
715 	    }
716 
717 	case T_RES_INST+T_USER:
718 		i = SIGILL;
719 		typ = ILL_ILLOPC;
720 		break;
721 
722 	case T_COP_UNUSABLE+T_USER:
723 		/*
724 		 * Note MIPS IV COP1X instructions issued with FPU
725 		 * disabled correctly report coprocessor 1 as the
726 		 * unusable coprocessor number.
727 		 */
728 		if ((trapframe->cause & CR_COP_ERR) != CR_COP1_ERR) {
729 			i = SIGILL;	/* only FPU instructions allowed */
730 			typ = ILL_ILLOPC;
731 			break;
732 		}
733 #ifdef FPUEMUL
734 		MipsFPTrap(trapframe);
735 #else
736 		enable_fpu(p);
737 #endif
738 		return;
739 
740 	case T_FPE:
741 		printf("FPU Trap: PC %lx CR %lx SR %lx\n",
742 			trapframe->pc, trapframe->cause, trapframe->sr);
743 		goto err;
744 
745 	case T_FPE+T_USER:
746 		MipsFPTrap(trapframe);
747 		return;
748 
749 	case T_OVFLOW+T_USER:
750 		i = SIGFPE;
751 		typ = FPE_FLTOVF;
752 		break;
753 
754 	case T_ADDR_ERR_LD:	/* misaligned access */
755 	case T_ADDR_ERR_ST:	/* misaligned access */
756 	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
757 		pcb = &p->p_addr->u_pcb;
758 		if ((onfault = pcb->pcb_onfault) != 0) {
759 			pcb->pcb_onfault = 0;
760 			trapframe->pc = onfault_table[onfault];
761 			return;
762 		}
763 		goto err;
764 
765 #ifdef CPU_R10000
766 	case T_BUS_ERR_IFETCH:
767 		/*
768 		 * At least R16000 processor have been found triggering
769 		 * reproduceable bus error on instruction fetch in the
770 		 * kernel code, which are trivially recoverable (and
771 		 * look like an obscure errata to me).
772 		 *
773 		 * Thus, ignore these exceptions if the faulting address
774 		 * is in the kernel.
775 		 */
776 	    {
777 		extern void *kernel_text;
778 		extern void *etext;
779 		vaddr_t va;
780 
781 		va = (vaddr_t)trapframe->pc;
782 		if (trapframe->cause & CR_BR_DELAY)
783 			va += 4;
784 		if (va > (vaddr_t)&kernel_text && va < (vaddr_t)&etext)
785 			return;
786 	    }
787 		goto err;
788 #endif
789 
790 	default:
791 	err:
		/* Unrecoverable kernel fault: dump state and panic. */
792 		disableintr();
793 #if !defined(DDB) && defined(DEBUG)
794 		trapDump("trap", printf);
795 #endif
796 		printf("\nTrap cause = %d Frame %p\n", type, trapframe);
797 		printf("Trap PC %p RA %p fault %p\n",
798 		    (void *)trapframe->pc, (void *)trapframe->ra,
799 		    (void *)trapframe->badvaddr);
800 #ifdef DDB
801 		stacktrace(!USERMODE(trapframe->sr) ? trapframe : p->p_md.md_regs);
802 		db_ktrap(type, trapframe);
803 #endif
804 		panic("trap");
805 	}
806 
807 #ifdef FPUEMUL
808 	/*
809 	 * If a relocated delay slot causes an exception, blame the
810 	 * original delay slot address - userland is not supposed to
811 	 * know anything about emulation bowels.
812 	 */
813 	if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
814 	    trapframe->badvaddr == p->p_md.md_fppgva)
815 		trapframe->badvaddr = p->p_md.md_fpslotva;
816 #endif
	/* Deliver the signal chosen by the switch above. */
817 	p->p_md.md_regs->pc = trapframe->pc;
818 	p->p_md.md_regs->cause = trapframe->cause;
819 	p->p_md.md_regs->badvaddr = trapframe->badvaddr;
820 	sv.sival_ptr = (void *)trapframe->badvaddr;
821 	KERNEL_LOCK();
822 	trapsignal(p, i, ucode, typ, sv);
823 	KERNEL_UNLOCK();
824 }
825 
826 void
827 child_return(void *arg)
828 {
829 	struct proc *p = arg;
830 	struct trapframe *trapframe;
831 
832 	trapframe = p->p_md.md_regs;
833 	trapframe->v0 = 0;
834 	trapframe->v1 = 1;
835 	trapframe->a3 = 0;
836 
837 	KERNEL_UNLOCK();
838 
839 	mi_child_return(p);
840 }
841 
842 #if defined(DDB) || defined(DEBUG)
/*
 * Dump the per-CPU trap debug ring buffers through the given printf-like
 * routine, newest entry first, stopping at the first unused slot.
 */
843 void
844 trapDump(const char *msg, int (*pr)(const char *, ...))
845 {
846 #ifdef MULTIPROCESSOR
847 	CPU_INFO_ITERATOR cii;
848 #endif
849 	struct cpu_info *ci;
850 	struct trapdebug *base, *ptrp;
851 	int i;
852 	uint pos;
853 	int s;
854 
	/* Block interrupts so the ring buffers stay consistent. */
855 	s = splhigh();
856 	(*pr)("trapDump(%s)\n", msg);
857 #ifndef MULTIPROCESSOR
858 	ci = curcpu();
859 #else
860 	CPU_INFO_FOREACH(cii, ci)
861 #endif
862 	{
863 #ifdef MULTIPROCESSOR
864 		(*pr)("cpu%d\n", ci->ci_cpuid);
865 #endif
866 		/* walk in reverse order */
867 		pos = trppos[ci->ci_cpuid];
868 		base = trapdebug + ci->ci_cpuid * TRAPSIZE;
869 		for (i = TRAPSIZE - 1; i >= 0; i--) {
			/* Wrap around the circular buffer. */
870 			if (pos + i >= TRAPSIZE)
871 				ptrp = base + pos + i - TRAPSIZE;
872 			else
873 				ptrp = base + pos + i;
874 
			/* A zero cause marks a never-used slot; stop here. */
875 			if (ptrp->cause == 0)
876 				break;
877 
878 #ifdef CPU_R8000
879 			(*pr)("%s: PC %p CR 0x%016lx SR 0x%011lx\n",
880 			    trap_type[(ptrp->cause & CR_EXC_CODE) >>
881 			      CR_EXC_CODE_SHIFT],
882 			    ptrp->pc, ptrp->cause, ptrp->status);
883 #else
884 			(*pr)("%s: PC %p CR 0x%08lx SR 0x%08lx\n",
885 			    trap_type[(ptrp->cause & CR_EXC_CODE) >>
886 			      CR_EXC_CODE_SHIFT],
887 			    ptrp->pc, ptrp->cause & 0xffffffff,
888 			    ptrp->status & 0xffffffff);
889 #endif
890 			(*pr)(" RA %p SP %p ADR %p\n",
891 			    ptrp->ra, ptrp->sp, ptrp->vadr);
892 		}
893 	}
894 
895 	splx(s);
896 }
897 #endif
898 
899 
900 /*
901  * Return the resulting PC as if the branch was executed.
902  */
/*
 * Decode the (branch) instruction at instPC — or `curinst' if nonzero —
 * and compute the address execution resumes at, using the register
 * values saved in the trap frame (and `fsr' for FP condition branches).
 * Non-branch instructions simply yield instPC + 4.
 */
903 register_t
904 MipsEmulateBranch(struct trapframe *tf, vaddr_t instPC, uint32_t fsr,
905     uint32_t curinst)
906 {
907 	register_t *regsPtr = (register_t *)tf;
908 	InstFmt inst;
909 	vaddr_t retAddr;
910 	int condition;
911 	uint cc;
912 
913 #define	GetBranchDest(InstPtr, inst) \
914 	    (InstPtr + 4 + ((short)inst.IType.imm << 2))
915 
	/* Caller may supply the instruction word, else fetch it. */
916 	if (curinst != 0)
917 		inst = *(InstFmt *)&curinst;
918 	else
919 		inst = *(InstFmt *)instPC;
920 
921 	regsPtr[ZERO] = 0;	/* Make sure zero is 0x0 */
922 
923 	switch ((int)inst.JType.op) {
924 	case OP_SPECIAL:
925 		switch ((int)inst.RType.func) {
926 		case OP_JR:
927 		case OP_JALR:
			/* Register jump: target comes from rs. */
928 			retAddr = (vaddr_t)regsPtr[inst.RType.rs];
929 			break;
930 		default:
931 			retAddr = instPC + 4;
932 			break;
933 		}
934 		break;
935 	case OP_BCOND:
936 		switch ((int)inst.IType.rt) {
937 		case OP_BLTZ:
938 		case OP_BLTZL:
939 		case OP_BLTZAL:
940 		case OP_BLTZALL:
941 			if ((int64_t)(regsPtr[inst.RType.rs]) < 0)
942 				retAddr = GetBranchDest(instPC, inst);
943 			else
944 				retAddr = instPC + 8;
945 			break;
946 		case OP_BGEZ:
947 		case OP_BGEZL:
948 		case OP_BGEZAL:
949 		case OP_BGEZALL:
950 			if ((int64_t)(regsPtr[inst.RType.rs]) >= 0)
951 				retAddr = GetBranchDest(instPC, inst);
952 			else
953 				retAddr = instPC + 8;
954 			break;
955 		default:
956 			retAddr = instPC + 4;
957 			break;
958 		}
959 		break;
960 	case OP_J:
961 	case OP_JAL:
		/* Absolute jump within the current 256MB segment. */
962 		retAddr = (inst.JType.target << 2) | (instPC & ~0x0fffffffUL);
963 		break;
964 	case OP_BEQ:
965 	case OP_BEQL:
966 		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
967 			retAddr = GetBranchDest(instPC, inst);
968 		else
969 			retAddr = instPC + 8;
970 		break;
971 	case OP_BNE:
972 	case OP_BNEL:
973 		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
974 			retAddr = GetBranchDest(instPC, inst);
975 		else
976 			retAddr = instPC + 8;
977 		break;
978 	case OP_BLEZ:
979 	case OP_BLEZL:
980 		if ((int64_t)(regsPtr[inst.RType.rs]) <= 0)
981 			retAddr = GetBranchDest(instPC, inst);
982 		else
983 			retAddr = instPC + 8;
984 		break;
985 	case OP_BGTZ:
986 	case OP_BGTZL:
987 		if ((int64_t)(regsPtr[inst.RType.rs]) > 0)
988 			retAddr = GetBranchDest(instPC, inst);
989 		else
990 			retAddr = instPC + 8;
991 		break;
992 	case OP_COP1:
993 		switch (inst.RType.rs) {
994 		case OP_BC:
			/* FP branch: test the selected condition bit in fsr. */
995 			cc = (inst.RType.rt & COPz_BC_CC_MASK) >>
996 			    COPz_BC_CC_SHIFT;
997 			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
998 				condition = fsr & FPCSR_CONDVAL(cc);
999 			else
1000 				condition = !(fsr & FPCSR_CONDVAL(cc));
1001 			if (condition)
1002 				retAddr = GetBranchDest(instPC, inst);
1003 			else
1004 				retAddr = instPC + 8;
1005 			break;
1006 		default:
1007 			retAddr = instPC + 4;
1008 		}
1009 		break;
1010 	default:
1011 		retAddr = instPC + 4;
1012 	}
1013 
1014 	return (register_t)retAddr;
1015 #undef	GetBranchDest
1016 }
1017 
1018 #ifdef PTRACE
1019 
1020 int
1021 ptrace_read_insn(struct proc *p, vaddr_t va, uint32_t *insn)
1022 {
1023 	struct iovec iov;
1024 	struct uio uio;
1025 
1026 	iov.iov_base = (caddr_t)insn;
1027 	iov.iov_len = sizeof(uint32_t);
1028 	uio.uio_iov = &iov;
1029 	uio.uio_iovcnt = 1;
1030 	uio.uio_offset = (off_t)va;
1031 	uio.uio_resid = sizeof(uint32_t);
1032 	uio.uio_segflg = UIO_SYSSPACE;
1033 	uio.uio_rw = UIO_READ;
1034 	uio.uio_procp = p;
1035 	return process_domem(p, p, &uio, PT_READ_I);
1036 }
1037 
1038 int
1039 ptrace_write_insn(struct proc *p, vaddr_t va, uint32_t insn)
1040 {
1041 	struct iovec iov;
1042 	struct uio uio;
1043 
1044 	iov.iov_base = (caddr_t)&insn;
1045 	iov.iov_len = sizeof(uint32_t);
1046 	uio.uio_iov = &iov;
1047 	uio.uio_iovcnt = 1;
1048 	uio.uio_offset = (off_t)va;
1049 	uio.uio_resid = sizeof(uint32_t);
1050 	uio.uio_segflg = UIO_SYSSPACE;
1051 	uio.uio_rw = UIO_WRITE;
1052 	uio.uio_procp = p;
1053 	return process_domem(p, p, &uio, PT_WRITE_I);
1054 }
1055 
1056 /*
1057  * This routine is called by procxmt() to single step one instruction.
1058  * We do this by storing a break instruction after the current instruction,
1059  * resuming execution, and then restoring the old instruction.
1060  */
1061 int
1062 process_sstep(struct proc *p, int sstep)
1063 {
1064 	struct trapframe *locr0 = p->p_md.md_regs;
1065 	int rc;
1066 	uint32_t curinstr;
1067 	vaddr_t va;
1068 
	/* sstep == 0: remove a previously planted breakpoint, if any. */
1069 	if (sstep == 0) {
1070 		/* clear the breakpoint */
1071 		if (p->p_md.md_ss_addr != 0) {
1072 			rc = ptrace_write_insn(p, p->p_md.md_ss_addr,
1073 			    p->p_md.md_ss_instr);
1074 #ifdef DIAGNOSTIC
1075 			if (rc != 0)
1076 				printf("WARNING: %s (%d): can't restore "
1077 				    "instruction at %p: %08x\n",
1078 				    p->p_comm, p->p_pid,
1079 				    (void *)p->p_md.md_ss_addr,
1080 				    p->p_md.md_ss_instr);
1081 #endif
1082 			p->p_md.md_ss_addr = 0;
1083 		} else
1084 			rc = 0;
1085 		return rc;
1086 	}
1087 
1088 	/* read current instruction */
1089 	rc = ptrace_read_insn(p, locr0->pc, &curinstr);
1090 	if (rc != 0)
1091 		return rc;
1092 
1093 	/* compute next address after current location */
	/* For branches, plant the breakpoint at the branch target. */
1094 	if (curinstr != 0 /* nop */)
1095 		va = (vaddr_t)MipsEmulateBranch(locr0,
1096 		    locr0->pc, locr0->fsr, curinstr);
1097 	else
1098 		va = locr0->pc + 4;
1099 #ifdef DIAGNOSTIC
1100 	/* should not happen */
1101 	if (p->p_md.md_ss_addr != 0) {
1102 		printf("WARNING: %s (%d): breakpoint request "
1103 		    "at %p, already set at %p\n",
1104 		    p->p_comm, p->p_pid, (void *)va, (void *)p->p_md.md_ss_addr);
1105 		return EFAULT;
1106 	}
1107 #endif
1108 
1109 	/* read next instruction */
	/* Save the original instruction so it can be restored later. */
1110 	rc = ptrace_read_insn(p, va, &p->p_md.md_ss_instr);
1111 	if (rc != 0)
1112 		return rc;
1113 
1114 	/* replace with a breakpoint instruction */
1115 	rc = ptrace_write_insn(p, va, BREAK_SSTEP);
1116 	if (rc != 0)
1117 		return rc;
1118 
1119 	p->p_md.md_ss_addr = va;
1120 
1121 #ifdef DEBUG
1122 	printf("%s (%d): breakpoint set at %p: %08x (pc %p %08x)\n",
1123 		p->p_comm, p->p_pid, (void *)p->p_md.md_ss_addr,
1124 		p->p_md.md_ss_instr, (void *)locr0->pc, curinstr);
1125 #endif
1126 	return 0;
1127 }
1128 
1129 #endif /* PTRACE */
1130 
1131 #if defined(DDB) || defined(DEBUG)
1132 #define MIPS_JR_RA	0x03e00008	/* instruction code for jr ra */
1133 
1134 /* forward */
1135 #if !defined(DDB)
1136 const char *fn_name(vaddr_t);
1137 #endif
1138 void stacktrace_subr(struct trapframe *, int, int (*)(const char*, ...));
1139 
1140 /*
1141  * Print a stack backtrace.
1142  */
1143 void
1144 stacktrace(struct trapframe *regs)
1145 {
	/* Limit the backtrace to 6 frames; output goes to the console. */
1146 	stacktrace_subr(regs, 6, printf);
1147 }
1148 
/*
 * Addresses the stack walker may safely dereference: the kernel map and
 * XKPHYS.  The R8000 variant omits the CKSEG0/CKSEG1 range —
 * NOTE(review): presumably because the kernel does not use these
 * compatibility segments on that CPU; confirm.
 */
#ifdef CPU_R8000
#define	VALID_ADDRESS(va) \
	(((va) >= VM_MIN_KERNEL_ADDRESS && (va) < VM_MAX_KERNEL_ADDRESS) || \
	 IS_XKPHYS(va))
#else
#define	VALID_ADDRESS(va) \
	(((va) >= VM_MIN_KERNEL_ADDRESS && (va) < VM_MAX_KERNEL_ADDRESS) || \
	 IS_XKPHYS(va) || ((va) >= CKSEG0_BASE && (va) < CKSEG1_BASE))
#endif

/*
 * Walk the kernel stack starting from the state saved in `regs',
 * printing at most `count' frames through `pr'.
 *
 * MIPS has no frame pointer, so each frame is reconstructed by
 * disassembling the current function: the prologue is scanned forward
 * for the stack pointer adjustment (daddi/daddiu sp,sp,-size) and for
 * the `sd' instructions that spill ra and a0..a3 to the stack.
 */
void
stacktrace_subr(struct trapframe *regs, int count,
    int (*pr)(const char*, ...))
{
	vaddr_t pc, sp, ra, va, subr;
	register_t a0, a1, a2, a3;
	uint32_t instr, mask;
	InstFmt i;
	int more, stksize;
	extern char k_intr[];
	extern char k_general[];
#ifdef DDB
	db_expr_t diff;
	db_sym_t sym;
	char *symname;
#endif

	/* get initial values from the exception frame */
	sp = (vaddr_t)regs->sp;
	pc = (vaddr_t)regs->pc;
	ra = (vaddr_t)regs->ra;		/* May be a 'leaf' function */
	a0 = regs->a0;
	a1 = regs->a1;
	a2 = regs->a2;
	a3 = regs->a3;

/* Jump here when done with a frame, to start a new one */
loop:
#ifdef DDB
	symname = NULL;
#endif
	subr = 0;
	stksize = 0;

	if (count-- == 0) {
		ra = 0;
		goto end;
	}

	/* check for bad SP: could foul up next frame */
	if (sp & 3 || !VALID_ADDRESS(sp)) {
		(*pr)("SP %p: not in kernel\n", sp);
		ra = 0;
		goto end;
	}

	/* check for bad PC */
	if (pc & 3 || !VALID_ADDRESS(pc)) {
		(*pr)("PC %p: not in kernel\n", pc);
		ra = 0;
		goto end;
	}

#ifdef DDB
	/*
	 * Dig out the function from the symbol table.
	 * Watch out for function tail optimizations.
	 */
	sym = db_search_symbol(pc, DB_STGY_ANY, &diff);
	if (sym != NULL && diff == 0) {
		/*
		 * pc sits exactly on a symbol boundary; it may actually be
		 * the delay slot of a trailing `jal' in the previous
		 * function, so check the instruction two words back and,
		 * if it is a jal, attribute pc to that function instead.
		 */
		instr = kdbpeek(pc - 2 * sizeof(int));
		i.word = instr;
		if (i.JType.op == OP_JAL) {
			sym = db_search_symbol(pc - sizeof(int),
			    DB_STGY_ANY, &diff);
			if (sym != NULL && diff != 0)
				diff += sizeof(int);
		}
	}
	if (sym != NULL) {
		db_symbol_values(sym, &symname, 0);
		subr = pc - (vaddr_t)diff;
	}
#endif

	/*
	 * Find the beginning of the current subroutine by scanning backwards
	 * from the current PC for the end of the previous subroutine.
	 */
	if (!subr) {
		va = pc - sizeof(int);
		while ((instr = kdbpeek(va)) != MIPS_JR_RA)
			va -= sizeof(int);
		va += 2 * sizeof(int);	/* skip back over branch & delay slot */
		/* skip over nulls which might separate .o files */
		while ((instr = kdbpeek(va)) == 0)
			va += sizeof(int);
		subr = va;
	}

	/*
	 * Jump here for locore entry points for which the preceding
	 * function doesn't end in "j ra"
	 */
	/* scan forwards to find stack size and any saved registers */
	/*
	 * `more' encodes the scan state: 3 = keep scanning, 2 = a branch
	 * was seen, so process exactly one more instruction (its delay
	 * slot) before stopping, 1 = stop now.
	 */
	stksize = 0;
	more = 3;
	mask = 0;	/* bitmask of registers already seen spilled */
	for (va = subr; more; va += sizeof(int),
	    more = (more == 3) ? 3 : more - 1) {
		/* stop if hit our current position */
		if (va >= pc)
			break;
		instr = kdbpeek(va);
		i.word = instr;
		switch (i.JType.op) {
		case OP_SPECIAL:
			switch (i.RType.func) {
			case OP_JR:
			case OP_JALR:
				more = 2; /* stop after next instruction */
				break;

			case OP_SYSCALL:
			case OP_BREAK:
				more = 1; /* stop now */
			};
			break;

		case OP_BCOND:
		case OP_J:
		case OP_JAL:
		case OP_BEQ:
		case OP_BNE:
		case OP_BLEZ:
		case OP_BGTZ:
			more = 2; /* stop after next instruction */
			break;

		case OP_COP0:
		case OP_COP1:
		case OP_COP2:
		case OP_COP3:
			switch (i.RType.rs) {
			case OP_BC:
				more = 2; /* stop after next instruction */
			};
			break;

		case OP_SD:
			/* look for saved registers on the stack */
			if (i.IType.rs != SP)
				break;
			/* only restore the first one */
			if (mask & (1 << i.IType.rt))
				break;
			mask |= (1 << i.IType.rt);
			switch (i.IType.rt) {
			case A0:
				a0 = kdbpeekd(sp + (int16_t)i.IType.imm);
				break;
			case A1:
				a1 = kdbpeekd(sp + (int16_t)i.IType.imm);
				break;
			case A2:
				a2 = kdbpeekd(sp + (int16_t)i.IType.imm);
				break;
			case A3:
				a3 = kdbpeekd(sp + (int16_t)i.IType.imm);
				break;
			case RA:
				ra = kdbpeekd(sp + (int16_t)i.IType.imm);
				break;
			}
			break;

		case OP_DADDI:
		case OP_DADDIU:
			/* look for stack pointer adjustment */
			if (i.IType.rs != SP || i.IType.rt != SP)
				break;
			stksize = -((int16_t)i.IType.imm);
		}
	}

	/*
	 * Print this frame: function name (or address), a0..a3 as they
	 * were on entry (only accurate if the callee spilled them), and
	 * the return address, stack pointer and frame size.
	 */
#ifdef DDB
	if (symname == NULL)
		(*pr)("%p ", subr);
	else
		(*pr)("%s+%p ", symname, diff);
#else
	(*pr)("%s+%p ", fn_name(subr), pc - subr);
#endif
	(*pr)("(%llx,%llx,%llx,%llx) ", a0, a1, a2, a3);
	(*pr)(" ra %p sp %p, sz %d\n", ra, sp, stksize);

	/*
	 * Interrupt or trap entry point: the interrupted context's
	 * trapframe lets us continue the walk across the exception.
	 * NOTE(review): this assumes the first word of the entry's stack
	 * holds a pointer to the saved trapframe, as set up in locore —
	 * verify against k_intr/k_general.
	 */
	if (subr == (vaddr_t)k_intr || subr == (vaddr_t)k_general) {
		if (subr == (vaddr_t)k_general)
			(*pr)("(KERNEL TRAP)\n");
		else
			(*pr)("(KERNEL INTERRUPT)\n");
		sp = *(register_t *)sp;
		pc = ((struct trapframe *)sp)->pc;
		ra = ((struct trapframe *)sp)->ra;
		sp = ((struct trapframe *)sp)->sp;
		goto loop;
	}

end:
	if (ra) {
		extern void *kernel_text;
		extern void *etext;

		/* a frame returning to itself with no sp adjustment would
		 * walk forever */
		if (pc == ra && stksize == 0)
			(*pr)("stacktrace: loop!\n");
		else if (ra < (vaddr_t)&kernel_text || ra > (vaddr_t)&etext)
			(*pr)("stacktrace: ra corrupted!\n");
		else {
			/* unwind: continue from the caller's frame */
			pc = ra;
			sp += stksize;
			ra = 0;
			goto loop;
		}
	} else {
		if (curproc)
			(*pr)("User-level: pid %d\n", curproc->p_pid);
		else
			(*pr)("User-level: curproc NULL\n");
	}
}
1379 
1380 #undef	VALID_ADDRESS
1381 
1382 #if !defined(DDB)
1383 /*
1384  * Functions ``special'' enough to print by name
1385  */
#ifdef __STDC__
#define Name(_fn)  { (void*)_fn, # _fn }	/* ANSI `#' stringizes */
#else
/*
 * Pre-ANSI cpp substituted macro parameters even inside string
 * literals, so "_fn" expands to the function's name here.
 */
#define Name(_fn) { _fn, "_fn"}
#endif
/* Address-to-name table used when no DDB symbol table is available. */
static const struct { void *addr; const char *name;} names[] = {
	Name(trap),
	{ 0, NULL }
};
1395 
1396 /*
1397  * Map a function address to a string name, if known; or a hex string.
1398  */
1399 const char *
1400 fn_name(vaddr_t addr)
1401 {
1402 	static char buf[19];
1403 	int i = 0;
1404 
1405 	for (i = 0; names[i].name != NULL; i++)
1406 		if (names[i].addr == (void*)addr)
1407 			return (names[i].name);
1408 	snprintf(buf, sizeof(buf), "%p", addr);
1409 	return (buf);
1410 }
1411 #endif	/* !DDB */
1412 
1413 #endif /* DDB || DEBUG */
1414 
1415 #ifdef FPUEMUL
1416 /*
1417  * Set up a successful branch emulation.
1418  * The delay slot instruction is copied to a reserved page, followed by a
1419  * trap instruction to get control back, and resume at the branch
1420  * destination.
1421  */
int
fpe_branch_emulate(struct proc *p, struct trapframe *tf, uint32_t insn,
    vaddr_t dest)
{
	struct vm_map *map = &p->p_vmspace->vm_map;
	InstFmt inst;
	int rc;

	/*
	 * Check the delay slot instruction: since it will run as a
	 * non-delay slot instruction, we want to reject branch instructions
	 * (which behaviour, when in a delay slot, is undefined anyway).
	 */

	inst = *(InstFmt *)&insn;
	rc = 0;
	switch ((int)inst.JType.op) {
	case OP_SPECIAL:
		/* register jumps */
		switch ((int)inst.RType.func) {
		case OP_JR:
		case OP_JALR:
			rc = EINVAL;
			break;
		}
		break;
	case OP_BCOND:
		/* REGIMM conditional branches, selected by the rt field */
		switch ((int)inst.IType.rt) {
		case OP_BLTZ:
		case OP_BLTZL:
		case OP_BLTZAL:
		case OP_BLTZALL:
		case OP_BGEZ:
		case OP_BGEZL:
		case OP_BGEZAL:
		case OP_BGEZALL:
			rc = EINVAL;
			break;
		}
		break;
	case OP_J:
	case OP_JAL:
	case OP_BEQ:
	case OP_BEQL:
	case OP_BNE:
	case OP_BNEL:
	case OP_BLEZ:
	case OP_BLEZL:
	case OP_BGTZ:
	case OP_BGTZL:
		rc = EINVAL;
		break;
	case OP_COP1:
		/* a coprocessor 1 branch in an FPU branch's delay slot */
		if (inst.RType.rs == OP_BC)	/* oh the irony */
			rc = EINVAL;
		break;
	}

	if (rc != 0) {
#ifdef DEBUG
		printf("%s: bogus delay slot insn %08x\n", __func__, insn);
#endif
		return rc;
	}

	/*
	 * Temporarily change protection over the page used to relocate
	 * the delay slot, and fault it in.
	 */

	rc = uvm_map_protect(map, p->p_md.md_fppgva,
	    p->p_md.md_fppgva + PAGE_SIZE, PROT_MASK, FALSE);
	if (rc != 0) {
#ifdef DEBUG
		printf("%s: uvm_map_protect on %p failed: %d\n",
		    __func__, (void *)p->p_md.md_fppgva, rc);
#endif
		return rc;
	}
	/* wire the page so it is resident while we write to it */
	KERNEL_LOCK();
	rc = uvm_fault_wire(map, p->p_md.md_fppgva,
	    p->p_md.md_fppgva + PAGE_SIZE, PROT_MASK);
	KERNEL_UNLOCK();
	if (rc != 0) {
#ifdef DEBUG
		printf("%s: uvm_fault_wire on %p failed: %d\n",
		    __func__, (void *)p->p_md.md_fppgva, rc);
#endif
		goto err2;
	}

	/*
	 * Write the two-instruction sequence: the relocated delay slot
	 * instruction, followed by a breakpoint to regain control once
	 * it has executed.
	 */
	rc = copyout(&insn, (void *)p->p_md.md_fppgva, sizeof insn);
	if (rc != 0) {
#ifdef DEBUG
		printf("%s: copyout %p failed %d\n",
		    __func__, (void *)p->p_md.md_fppgva, rc);
#endif
		goto err;
	}
	insn = BREAK_FPUEMUL;
	rc = copyout(&insn, (void *)(p->p_md.md_fppgva + 4), sizeof insn);
	if (rc != 0) {
#ifdef DEBUG
		printf("%s: copyout %p failed %d\n",
		    __func__, (void *)(p->p_md.md_fppgva + 4), rc);
#endif
		goto err;
	}

	/*
	 * Success: drop the page back to read/exec, remember the branch
	 * destination to resume at and the address of the original delay
	 * slot, redirect the process to the relocated sequence and flush
	 * the icache for its two instructions.
	 */
	(void)uvm_map_protect(map, p->p_md.md_fppgva,
	    p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_EXEC, FALSE);
	p->p_md.md_fpbranchva = dest;
	p->p_md.md_fpslotva = (vaddr_t)tf->pc + 4;
	p->p_md.md_flags |= MDP_FPUSED;
	tf->pc = p->p_md.md_fppgva;
	pmap_proc_iflush(p, tf->pc, 2 * 4);

	return 0;

	/* error unwinding: undo the wire, then revoke access entirely */
err:
	KERNEL_LOCK();
	uvm_fault_unwire(map, p->p_md.md_fppgva, p->p_md.md_fppgva + PAGE_SIZE);
	KERNEL_UNLOCK();
err2:
	(void)uvm_map_protect(map, p->p_md.md_fppgva,
	    p->p_md.md_fppgva + PAGE_SIZE, PROT_NONE, FALSE);
	return rc;
}
1549 #endif
1550