xref: /netbsd-src/sys/arch/mips/mips/trap.c (revision 26afd233bd356043a102a2b8cf86dbb0058cc8fa)
1 /*	$NetBSD: trap.c,v 1.265 2023/10/24 18:08:16 andvar Exp $	*/
2 
3 /*
4  * Copyright (c) 1988 University of Utah.
5  * Copyright (c) 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department and Ralph Campbell.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * from: Utah Hdr: trap.c 1.32 91/04/06
37  *
38  *	@(#)trap.c	8.5 (Berkeley) 1/11/94
39  */
40 
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.265 2023/10/24 18:08:16 andvar Exp $");
43 
44 #include "opt_cputype.h"	/* which mips CPU levels do we support? */
45 #include "opt_ddb.h"
46 #include "opt_dtrace.h"
47 #include "opt_kgdb.h"
48 #include "opt_multiprocessor.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/kernel.h>
53 #include <sys/cpu.h>
54 #include <sys/proc.h>
55 #include <sys/ras.h>
56 #include <sys/signalvar.h>
57 #include <sys/syscall.h>
58 #include <sys/buf.h>
59 #include <sys/ktrace.h>
60 #include <sys/kauth.h>
61 #include <sys/atomic.h>
62 
63 #include <mips/cache.h>
64 #include <mips/locore.h>
65 #include <mips/mips_opcode.h>
66 
67 #include <uvm/uvm.h>
68 
69 #include <mips/trap.h>
70 #include <mips/reg.h>
71 #include <mips/regnum.h>			/* symbolic register indices */
72 #include <mips/pcb.h>
73 #include <mips/pte.h>
74 #include <mips/psl.h>
75 #include <mips/userret.h>
76 
77 #ifdef DDB
78 #include <machine/db_machdep.h>
79 #include <ddb/db_sym.h>
80 #endif
81 
82 #ifdef KGDB
83 #include <sys/kgdb.h>
84 #endif
85 
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

/* Not used for now, but needed for dtrace/fbt modules */
dtrace_doubletrap_func_t	dtrace_doubletrap_func = NULL;
dtrace_trap_func_t		dtrace_trap_func = NULL;

/*
 * Filled in by the dtrace fbt module; called from the T_BREAK case in
 * trap() to let dtrace claim breakpoints it planted.  Returns 0 when
 * the breakpoint was handled (trap() then returns immediately).
 */
int				(* dtrace_invop_jump_addr)(struct trapframe *);
#endif /* KDTRACE_HOOKS */
95 
/*
 * Human-readable exception names, indexed by the CP0 Cause register's
 * ExcCode field as extracted by TRAPTYPE() (values 0..31).  Used only
 * for panic/debug messages; entries mention which CPU families use a
 * given code where the meaning differs (r3k vs. r4k vs. mipsNN).
 */
const char * const trap_names[] = {
	"external interrupt",
	"TLB modification",
	"TLB miss (load or instr. fetch)",
	"TLB miss (store)",
	"address error (load or I-fetch)",
	"address error (store)",
	"bus error (I-fetch)",
	"bus error (load or store)",
	"system call",
	"breakpoint",
	"reserved instruction",
	"coprocessor unusable",
	"arithmetic overflow",
	"r4k trap/r3k reserved 13",
	"r4k virtual coherency instruction/r3k reserved 14",
	"r4k floating point/ r3k reserved 15",
	"mips NMI",
	"reserved 17",
	"mipsNN cp2 exception",
	"mipsNN TLBRI",
	"mipsNN TLBXI",
	"reserved 21",
	"mips64 MDMX",
	"r4k watch",
	"mipsNN machine check",
	"mipsNN thread",
	"DSP exception",
	"reserved 27",
	"reserved 28",
	"reserved 29",
	"mipsNN cache error",
	"r4000 virtual coherency data",
};
130 
131 void trap(uint32_t, uint32_t, vaddr_t, vaddr_t, struct trapframe *);
132 void ast(void);
133 
/*
 * SIGDEBUG(tf, ksi, error, pc): log details of a signal about to be
 * delivered from trap().  Only active under TRAP_SIGDEBUG; otherwise
 * the macro expands to nothing.
 */
#ifdef TRAP_SIGDEBUG
static void sigdebug(const struct trapframe *, const ksiginfo_t *, int,
    vaddr_t);
#define SIGDEBUG(a, b, c, d) sigdebug(a, b, c, d)
#else
#define SIGDEBUG(a, b, c, d)
#endif
141 
142 /*
143  * fork syscall returns directly to user process via lwp_trampoline(),
144  * which will be called the very first time when child gets running.
145  */
146 void
md_child_return(struct lwp * l)147 md_child_return(struct lwp *l)
148 {
149 	struct trapframe *utf = l->l_md.md_utf;
150 
151 	utf->tf_regs[_R_V0] = 0;
152 	utf->tf_regs[_R_V1] = 1;
153 	utf->tf_regs[_R_A3] = 0;
154 	userret(l);
155 }
156 
/*
 * TRAPTYPE(cause): extract the ExcCode field from the CP0 Cause
 * register value; the field mask differs between MIPS1 and MIPS3+.
 */
#ifdef MIPS3_PLUS
#define TRAPTYPE(x) (((x) & MIPS3_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT)
#else
#define TRAPTYPE(x) (((x) & MIPS1_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT)
#endif
/* True iff the address has its sign bit set, i.e. lies in kernel space. */
#define KERNLAND_P(x) ((intptr_t)(x) < 0)
163 
164 /*
165  * Trap is called from locore to handle most types of processor traps.
166  * System calls are broken out for efficiency.  MIPS can handle software
167  * interrupts as a part of real interrupt processing.
168  */
/*
 * status - CP0 status register at the time of the exception
 * cause  - CP0 cause register (ExcCode selects the case below)
 * vaddr  - CP0 bad virtual address (meaningful for TLB/address errors)
 * pc     - exception PC
 * tf     - kernel trapframe; replaced by the lwp's user trapframe when
 *          the exception came from user mode
 */
void
trap(uint32_t status, uint32_t cause, vaddr_t vaddr, vaddr_t pc,
    struct trapframe *tf)
{
	struct lwp * const l = curlwp;
	struct proc * const p = curproc;
	struct trapframe * const utf = l->l_md.md_utf;
	struct pcb * const pcb = lwp_getpcb(l);
	vm_prot_t ftype;
	ksiginfo_t ksi;
	extern void fswintrberr(void);
	void *onfault;
	InstFmt insn;
	uint32_t instr;
	int type;
	int rv = 0;

	KSI_INIT_TRAP(&ksi);

	curcpu()->ci_data.cpu_ntrap++;
	if (CPUISMIPS3 && (status & MIPS3_SR_NMI)) {
		type = T_NMI;
	} else {
		type = TRAPTYPE(cause);
	}
	/*
	 * Traps from user mode are marked with T_USER and handled via the
	 * user trapframe so that modifications (PC advance, V0, ...) are
	 * visible on return to user space.
	 */
	if (USERMODE(status)) {
		tf = utf;
		type |= T_USER;
	}

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	/*
	 * XXXDTRACE: add pid probe handler here (if ever)
	 */
	if (!USERMODE(status)) {
		if ((dtrace_trap_func != NULL) &&
		    ((*dtrace_trap_func)(tf, type) != 0)) {
			return;
		}
	}
#endif /* KDTRACE_HOOKS */

	switch (type) {
	default:
	dopanic:
		/* Unhandled trap: dump state, then drop into ddb/kgdb/panic. */
		(void)splhigh();

		/*
		 * use snprintf to allow a single, idempotent, readable printf
		 */
		char strbuf[256], *str = strbuf;
		int n, sz = sizeof(strbuf);

		n = snprintf(str, sz, "pid %d(%s): ", p->p_pid, p->p_comm);
		sz -= n;
		str += n;
		n = snprintf(str, sz, "trap: cpu%d, %s in %s mode\n",
			cpu_number(), trap_names[TRAPTYPE(cause)],
			USERMODE(status) ? "user" : "kernel");
		sz -= n;
		str += n;
		n = snprintf(str, sz, "status=%#x, cause=%#x, epc=%#"
			PRIxVADDR ", vaddr=%#" PRIxVADDR "\n",
			status, cause, pc, vaddr);
		sz -= n;
		str += n;
		if (USERMODE(status)) {
			KASSERT(tf == utf);
			n = snprintf(str, sz, "frame=%p usp=%#" PRIxREGISTER
			    " ra=%#" PRIxREGISTER "\n",
			    tf, tf->tf_regs[_R_SP], tf->tf_regs[_R_RA]);
			sz -= n;
			str += n;
		} else {
			/*
			 * For a kernel trap the kernel SP is just past the
			 * trapframe (tf+1), except for an NMI where the
			 * saved SP register is the real one.
			 */
			n = snprintf(str, sz, "tf=%p ksp=%p ra=%#"
			    PRIxREGISTER " ppl=%#x\n", tf,
			    type == T_NMI
				? (void*)(uintptr_t)tf->tf_regs[_R_SP]
				: tf+1,
			    tf->tf_regs[_R_RA], tf->tf_ppl);
			sz -= n;
			str += n;
		}
		printf("%s", strbuf);

		if (type == T_BUS_ERR_IFETCH || type == T_BUS_ERR_LD_ST)
			(void)(*mips_locoresw.lsw_bus_error)(cause);

#if defined(DDB)
		kdb_trap(type, &tf->tf_registers);
		/* XXX force halt XXX */
#elif defined(KGDB)
		{
			extern mips_reg_t kgdb_cause, kgdb_vaddr;
			struct reg *regs = &ddb_regs;
			kgdb_cause = cause;
			kgdb_vaddr = vaddr;

			/*
			 * init global ddb_regs, used in db_interface.c routines
			 * shared between ddb and gdb. Send ddb_regs to gdb so
			 * that db_machdep.h macros will work with it, and
			 * allow gdb to alter the PC.
			 */
			db_set_ddb_regs(type, &tf->tf_registers);
			PC_BREAK_ADVANCE(regs);
			if (kgdb_trap(type, regs)) {
				tf->tf_regs[_R_PC] = regs->r_regs[_R_PC];
				return;
			}
		}
#else
		panic("trap");
#endif
		/*NOTREACHED*/
	case T_TLB_MOD:
	case T_TLB_MOD+T_USER: {
		/*
		 * Write hit a TLB entry whose dirty (modified) bit is clear.
		 * Either the page really is read-only (take a fault below)
		 * or we just need to set the PTE's modified bit and reload
		 * the TLB entry.
		 */
		const bool user_p = (type & T_USER) || !KERNLAND_P(vaddr);
		pmap_t pmap = user_p
		    ? p->p_vmspace->vm_map.pmap
		    : pmap_kernel();

		kpreempt_disable();

		pt_entry_t * const ptep = pmap_pte_lookup(pmap, vaddr);
		if (!ptep)
			panic("%ctlbmod: %#"PRIxVADDR": no pte",
			    user_p ? 'u' : 'k', vaddr);
		pt_entry_t pte = *ptep;
		if (!pte_valid_p(pte)) {
			panic("%ctlbmod: %#"PRIxVADDR": invalid pte %#"PRIx32
			    " @ ptep %p", user_p ? 'u' : 'k', vaddr,
			    pte_value(pte), ptep);
		}
		if (pte_readonly_p(pte)) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			kpreempt_enable();
			if (user_p) {
				goto pagefault;
			} else {
				goto kernelfault;
			}
		}
		UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
		UVMHIST_LOG(maphist, "%ctlbmod(va=%#lx, pc=%#lx, tf=%#jx)",
		    user_p ? 'u' : 'k', vaddr, pc, (uintptr_t)tf);
		if (!pte_modified_p(pte)) {
			pte |= mips_pg_m_bit();
#ifdef MULTIPROCESSOR
			atomic_or_32(ptep, mips_pg_m_bit());
#else
			*ptep = pte;
#endif
		}
		// We got a TLB MOD exception so we must have a valid ASID
		// and there must be a matching entry in the TLB.  So when
		// we try to update it, we better have done it.
		KASSERTMSG(pte_valid_p(pte), "%#"PRIx32, pte_value(pte));
		vaddr = trunc_page(vaddr);
		int ok = pmap_tlb_update_addr(pmap, vaddr, pte, 0);
		kpreempt_enable();
		if (ok != 1) {
#if 0 /* PMAP_FAULTINFO? */
			/*
			 * Since we don't block interrupts here,
			 * this can legitimately happen if we get
			 * a TLB miss that's serviced in an interrupt
			 * handler that happens to randomly evict the
			 * TLB entry we're concerned about.
			 */
			printf("pmap_tlb_update_addr(%p,%#"
			    PRIxVADDR",%#"PRIxPTE", 0) returned %d\n",
			    pmap, vaddr, pte_value(pte), ok);
#endif
		}
		paddr_t pa = pte_to_paddr(pte);
		KASSERTMSG(uvm_pageismanaged(pa),
		    "%#"PRIxVADDR" pa %#"PRIxPADDR, vaddr, pa);
		pmap_set_modified(pa);
		if (type & T_USER)
			userret(l);
		UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
		return; /* GEN */
	}
	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		/* Kernel-mode TLB miss: kernel address or copyin/copyout. */
		ftype = (type == T_TLB_LD_MISS) ? VM_PROT_READ : VM_PROT_WRITE;
		if (KERNLAND_P(vaddr))
			goto kernelfault;
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if (pcb->pcb_onfault == NULL) {
			goto dopanic;
		}
		goto pagefault;
	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto pagefault;
	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
	pagefault: {
		/* Fault in the current process's address space. */
		const vaddr_t va = trunc_page(vaddr);
		struct vmspace * const vm = p->p_vmspace;
		struct vm_map * const map = &vm->vm_map;
#ifdef PMAP_FAULTINFO
		struct pcb_faultinfo * const pfi = &pcb->pcb_faultinfo;
#endif

		kpreempt_disable();
#ifdef _LP64
		/*
		 * If the pmap has been activated and we allocated the segtab
		 * for the low 4GB, seg0tab may still be NULL.  We can't
		 * really fix this in pmap_enter (we can only update the local
		 * cpu's cpu_info but not other cpu's) so we need to detect
		 * and fix this here.
		 */
		struct cpu_info * const ci = curcpu();
		if ((va >> XSEGSHIFT) == 0 &&
		    __predict_false(ci->ci_pmap_user_seg0tab == NULL
				&& ci->ci_pmap_user_segtab->seg_seg[0] != NULL)) {
			ci->ci_pmap_user_seg0tab =
			    ci->ci_pmap_user_segtab->seg_seg[0];
			kpreempt_enable();
			if (type & T_USER) {
				userret(l);
			}
			return; /* GEN */
		}
#endif
		KASSERT(KERNLAND_P(va) || curcpu()->ci_pmap_asid_cur != 0);
		pmap_tlb_asid_check();
		kpreempt_enable();

#ifdef PMAP_FAULTINFO
		/*
		 * Debug aid: detect the same address faulting repeatedly
		 * for the same pid, which usually indicates a pmap/TLB bug.
		 */
		if (p->p_pid == pfi->pfi_lastpid && va == pfi->pfi_faultaddr) {
			if (++pfi->pfi_repeats > 4) {
				tlb_asid_t asid = tlb_get_asid();
				pt_entry_t *ptep = pfi->pfi_faultptep;
				printf("trap: fault #%u (%s/%s) for %#"
				    PRIxVADDR" (%#"PRIxVADDR") at pc %#"
				    PRIxVADDR" curpid=%u/%u ptep@%p=%#"
				    PRIxPTE")\n", pfi->pfi_repeats,
				    trap_names[TRAPTYPE(cause)],
				    trap_names[pfi->pfi_faulttype], va,
				    vaddr, pc, map->pmap->pm_pai[0].pai_asid,
				    asid, ptep, ptep ? pte_value(*ptep) : 0);
				/*
				 * NOTE(review): pfi_repeats > 4 above implies
				 * >= 4 here, so the else branch appears
				 * unreachable — confirm intended threshold.
				 */
				if (pfi->pfi_repeats >= 4) {
					cpu_Debugger();
				} else {
					pfi->pfi_faulttype = TRAPTYPE(cause);
				}
			}
		} else {
			pfi->pfi_lastpid = p->p_pid;
			pfi->pfi_faultaddr = va;
			pfi->pfi_repeats = 0;
			pfi->pfi_faultptep = NULL;
			pfi->pfi_faulttype = TRAPTYPE(cause);
		}
#endif /* PMAP_FAULTINFO */

		/*
		 * Clear pcb_onfault around uvm_fault so a nested fault
		 * inside UVM panics instead of being "handled".
		 */
		onfault = pcb->pcb_onfault;
		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, va, ftype);
		pcb->pcb_onfault = onfault;

#if defined(VMFAULT_TRACE)
		if (!KERNLAND_P(va))
			printf(
			    "uvm_fault(%p (pmap %p), %#"PRIxVADDR
			    " (%"PRIxVADDR"), %d) -> %d at pc %#"PRIxVADDR"\n",
			    map, vm->vm_map.pmap, va, vaddr, ftype, rv, pc);
#endif
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((void *)va >= vm->vm_maxsaddr) {
			if (rv == 0)
				uvm_grow(p, va);
			else if (rv == EACCES)
				rv = EFAULT;
		}
		if (rv == 0) {
#ifdef PMAP_FAULTINFO
			if (pfi->pfi_repeats == 0) {
				pfi->pfi_faultptep =
				    pmap_pte_lookup(map->pmap, va);
			}
			/*
			 * NOTE(review): dereferences pfi_faultptep without a
			 * NULL check — relies on the lookup succeeding after
			 * a successful fault; verify.
			 */
			KASSERT(*(pt_entry_t *)pfi->pfi_faultptep);
#endif
			if (type & T_USER) {
				userret(l);
			}
			return; /* GEN */
		}
		if ((type & T_USER) == 0)
			goto copyfault;

		/* Fault not resolved: translate the errno into a signal. */
		KSI_INIT_TRAP(&ksi);
		switch (rv) {
		case EINVAL:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRERR;
			break;
		case EACCES:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			break;
		case ENOMEM:
			ksi.ksi_signo = SIGKILL;
			printf("UVM: pid %d.%d (%s), uid %d killed: "
			    "out of swap\n", p->p_pid, l->l_lid, p->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
			break;
		default:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_MAPERR;
			break;
		}
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_addr = (void *)vaddr;
		break; /* SIGNAL */
	}
	kernelfault: {
		/* Fault in the kernel's own address space. */
		onfault = pcb->pcb_onfault;

		pcb->pcb_onfault = NULL;
		rv = uvm_fault(kernel_map, trunc_page(vaddr), ftype);
		pcb->pcb_onfault = onfault;
		if (rv == 0)
			return; /* KERN */
		goto copyfault;
	}
	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to CPU */
		onfault = pcb->pcb_onfault;
		rv = EFAULT;
	copyfault:
		/*
		 * Unresolvable kernel fault: if a copyin/copyout-style
		 * handler is registered, resume there with V0 = error;
		 * otherwise this is fatal.
		 */
		if (onfault == NULL) {
			goto dopanic;
		}
		tf->tf_regs[_R_PC] = (intptr_t)onfault;
		tf->tf_regs[_R_V0] = rv;
		return; /* KERN */

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to CPU */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to CPU */
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_addr = (void *)vaddr;
		if (KERNLAND_P(vaddr)) {
			/* User touched a kernel address: map error. */
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_MAPERR;
		} else {
			ksi.ksi_signo = SIGBUS;
			if (type == T_BUS_ERR_IFETCH+T_USER
			    || type == T_BUS_ERR_LD_ST+T_USER)
				ksi.ksi_code = BUS_OBJERR;
			else
				ksi.ksi_code = BUS_ADRALN;
		}
		break; /* SIGNAL */

	case T_BREAK:
#ifdef KDTRACE_HOOKS
		/* Give dtrace (fbt) first claim on kernel breakpoints. */
		if ((dtrace_invop_jump_addr != NULL) &&
		    (dtrace_invop_jump_addr(tf) == 0)) {
			return;
		}
#endif /* KDTRACE_HOOKS */
		/* FALLTHROUGH */
	case T_WATCH:
#if defined(DDB)
		kdb_trap(type, &tf->tf_registers);
		return;	/* KERN */
#elif defined(KGDB)
		{
			extern mips_reg_t kgdb_cause, kgdb_vaddr;
			struct reg *regs = &ddb_regs;
			kgdb_cause = cause;
			kgdb_vaddr = vaddr;

			/*
			 * init global ddb_regs, used in db_interface.c routines
			 * shared between ddb and gdb. Send ddb_regs to gdb so
			 * that db_machdep.h macros will work with it, and
			 * allow gdb to alter the PC.
			 */
			db_set_ddb_regs(type, &tf->tf_registers);
			PC_BREAK_ADVANCE(regs);
			if (!kgdb_trap(type, regs))
				printf("kgdb: ignored %s\n",
				       trap_names[TRAPTYPE(cause)]);
			else
				tf->tf_regs[_R_PC] = regs->r_regs[_R_PC];

			return;
		}
#else
		goto dopanic;
#endif
	case T_BREAK+T_USER: {
		/* compute address of break instruction */
		vaddr_t va = pc + (cause & MIPS_CR_BR_DELAY ? sizeof(int) : 0);

		/* read break instruction */
		instr = mips_ufetch32((void *)va);
		insn.word = instr;

		/*
		 * Not our single-step breakpoint: decode the break code
		 * and deliver SIGTRAP (or SIGFPE for compiler-emitted
		 * overflow/divide-by-zero checks).
		 */
		if (l->l_md.md_ss_addr != va || instr != MIPS_BREAK_SSTEP) {
			bool advance_pc = false;

			ksi.ksi_trap = type & ~T_USER;
			ksi.ksi_signo = SIGTRAP;
			ksi.ksi_addr = (void *)va;
			ksi.ksi_code = TRAP_BRKPT;

			if ((insn.JType.op == OP_SPECIAL) &&
			    (insn.RType.func == OP_BREAK)) {
				int code = (insn.RType.rs << 5) | insn.RType.rt;
				switch (code) {
				case 0:
					/* we broke, skip it to avoid infinite loop */
					advance_pc = true;
					break;
				case MIPS_BREAK_INTOVERFLOW:
					ksi.ksi_signo = SIGFPE;
					ksi.ksi_code = FPE_INTOVF;
					advance_pc = true;
					break;
				case MIPS_BREAK_INTDIVZERO:
					ksi.ksi_signo = SIGFPE;
					ksi.ksi_code = FPE_INTDIV;
					advance_pc = true;
					break;
				default:
					/* do nothing */
					break;
				}
			}

			if (advance_pc)
				tf->tf_regs[_R_PC] += 4;
			break;
		}
		/*
		 * Restore original instruction and clear BP
		 */
		rv = mips_ustore32_isync((void *)va, l->l_md.md_ss_instr);
		if (rv != 0) {
			/* Text page is read-only: open it up and retry. */
			vaddr_t sa, ea;
			sa = trunc_page(va);
			ea = round_page(va + sizeof(int) - 1);
			rv = uvm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_ALL, false);
			if (rv == 0) {
				rv = mips_ustore32_isync((void *)va,
				    l->l_md.md_ss_instr);
				(void)uvm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, false);
			}
		}
		mips_icache_sync_all();		/* XXXJRT -- necessary? */
		mips_dcache_wbinv_all();	/* XXXJRT -- necessary? */

		if (rv != 0)
			printf("Warning: can't restore instruction"
			    " at %#"PRIxVADDR": 0x%x\n",
			    l->l_md.md_ss_addr, l->l_md.md_ss_instr);
		l->l_md.md_ss_addr = 0;
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_addr = (void *)va;
		ksi.ksi_code = TRAP_TRACE;
		break; /* SIGNAL */
	}
	case T_DSP+T_USER:
#if (MIPS32R2 + MIPS64R2) > 0
		if (MIPS_HAS_DSP) {
			dsp_load();
			userret(l);
			return; /* GEN */
		}
#endif /* (MIPS32R2 + MIPS64R2) > 0 */
		/* FALLTHROUGH */
	case T_RES_INST+T_USER:
	case T_COP_UNUSABLE+T_USER:
#if !defined(FPEMUL) && !defined(NOFPU)
		/* CP1 (FPU) unusable: lazily load the FPU state. */
		if (__SHIFTOUT(cause, MIPS_CR_COP_ERR) == MIPS_CR_COP_ERR_CU1) {
			fpu_load();          	/* load FPA */
		} else
#endif
		{
			/* Otherwise try to emulate the instruction. */
			mips_emul_inst(status, cause, pc, utf);
		}
		userret(l);
		return; /* GEN */
	case T_FPE+T_USER:
#if defined(FPEMUL)
		mips_emul_inst(status, cause, pc, utf);
#elif !defined(NOFPU)
		utf->tf_regs[_R_CAUSE] = cause;
		mips_fpu_trap(pc, utf);
#endif
		userret(l);
		return; /* GEN */
	case T_OVFLOW+T_USER:
	case T_TRAP+T_USER: {
		/* compute address of trap/faulting instruction */
		vaddr_t va = pc + (cause & MIPS_CR_BR_DELAY ? sizeof(int) : 0);
		bool advance_pc = false;

		/* read break instruction */
		instr = mips_ufetch32((void *)va);
		insn.word = instr;

		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGFPE;
		ksi.ksi_addr = (void *)(intptr_t)pc /*utf->tf_regs[_R_PC]*/;
		ksi.ksi_code = FPE_FLTOVF; /* XXX */

		/* Refine the code for compiler-emitted TEQ checks. */
		if ((insn.JType.op == OP_SPECIAL) &&
		    (insn.RType.func == OP_TEQ)) {
			int code = (insn.RType.rd << 5) | insn.RType.shamt;
			switch (code) {
			case MIPS_BREAK_INTOVERFLOW:
				ksi.ksi_code = FPE_INTOVF;
				advance_pc = true;
				break;
			case MIPS_BREAK_INTDIVZERO:
				ksi.ksi_code = FPE_INTDIV;
				advance_pc = true;
				break;
			}
		}

		/* XXX when else do we advance the PC? */
		if (advance_pc)
			tf->tf_regs[_R_PC] += 4;
		break; /* SIGNAL */
	 }
	}
	/* Common signal-delivery path reached via "break; / * SIGNAL * /". */
	utf->tf_regs[_R_CAUSE] = cause;
	utf->tf_regs[_R_BADVADDR] = vaddr;
	SIGDEBUG(utf, &ksi, rv, pc);
	(*p->p_emul->e_trapsignal)(l, &ksi);
	if ((type & T_USER) == 0) {
#ifdef DDB
		Debugger();
#endif
		panic("trapsignal");
	}
	userret(l);
	return;
}
746 
747 /*
748  * Handle asynchronous software traps.
749  * This is called from MachUserIntr() either to deliver signals or
750  * to make involuntary context switch (preemption).
751  */
void
ast(void)
{
	struct lwp * const l = curlwp;
	u_int astpending;

	/*
	 * Clear md_astpending before doing the work, then loop: anything
	 * that sets it again while we are in here (or during userret)
	 * will be picked up by the next iteration.
	 */
	while ((astpending = l->l_md.md_astpending) != 0) {
		//curcpu()->ci_data.cpu_nast++;
		l->l_md.md_astpending = 0;

#ifdef MULTIPROCESSOR
		{
			/* Perform any deferred icache synchronization. */
			kpreempt_disable();
			struct cpu_info * const ci = l->l_cpu;
			if (ci->ci_tlb_info->ti_synci_page_bitmap != 0)
				pmap_tlb_syncicache_ast(ci);
			kpreempt_enable();
		}
#endif

		/* Post deferred profiling ticks. */
		if (l->l_pflag & LP_OWEUPC) {
			l->l_pflag &= ~LP_OWEUPC;
			ADDUPROF(l);
		}

		userret(l);

		if (l->l_cpu->ci_want_resched) {
			/*
			 * We are being preempted.
			 */
			preempt();
		}
	}
}
787 
788 
789 /* XXX need to rewrite ancient comment XXX
790  * This routine is called by procxmt() to single step one instruction.
791  * We do this by storing a break instruction after the current instruction,
792  * resuming execution, and then restoring the old instruction.
793  */
794 int
mips_singlestep(struct lwp * l)795 mips_singlestep(struct lwp *l)
796 {
797 	struct trapframe * const tf = l->l_md.md_utf;
798 	struct proc * const p = l->l_proc;
799 	vaddr_t pc, va;
800 	int rv;
801 
802 	if (l->l_md.md_ss_addr) {
803 		printf("SS %s (%d): breakpoint already set at %#"PRIxVADDR"\n",
804 			p->p_comm, p->p_pid, l->l_md.md_ss_addr);
805 		return EFAULT;
806 	}
807 	pc = (vaddr_t)tf->tf_regs[_R_PC];
808 	if (mips_ufetch32((void *)pc) != 0) { /* not a NOP instruction */
809 		struct pcb * const pcb = lwp_getpcb(l);
810 		va = mips_emul_branch(tf, pc, PCB_FSR(pcb), true);
811 	} else {
812 		va = pc + sizeof(int);
813 	}
814 
815 	/*
816 	 * We can't single-step into a RAS.  Check if we're in
817 	 * a RAS, and set the breakpoint just past it.
818 	 */
819 	if (p->p_raslist != NULL) {
820 		while (ras_lookup(p, (void *)va) != (void *)-1)
821 			va += sizeof(int);
822 	}
823 
824 	l->l_md.md_ss_addr = va;
825 	l->l_md.md_ss_instr = mips_ufetch32((void *)va);
826 	rv = mips_ustore32_isync((void *)va, MIPS_BREAK_SSTEP);
827 	if (rv != 0) {
828 		vaddr_t sa, ea;
829 		sa = trunc_page(va);
830 		ea = round_page(va + sizeof(int) - 1);
831 		rv = uvm_map_protect(&p->p_vmspace->vm_map,
832 		    sa, ea, VM_PROT_ALL, false);
833 		if (rv == 0) {
834 			rv = mips_ustore32_isync((void *)va,
835 			    MIPS_BREAK_SSTEP);
836 			(void)uvm_map_protect(&p->p_vmspace->vm_map,
837 			    sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, false);
838 		}
839 	}
840 #if 0
841 	printf("SS %s (%d): breakpoint set at %x: %x (pc %x) br %x\n",
842 		p->p_comm, p->p_pid, p->p_md.md_ss_addr,
843 		p->p_md.md_ss_instr, pc, mips_ufetch32((void *)va)); /* XXX */
844 #endif
845 	return 0;
846 }
847 
848 #ifdef TRAP_SIGDEBUG
/*
 * Dump every saved register of a trapframe for TRAP_SIGDEBUG output,
 * followed by a hex dump of the trapframe memory itself.
 *
 * NOTE(review): the %#018lx conversions print tf_regs entries as
 * unsigned long; elsewhere in this file tf_regs is printed with
 * PRIxREGISTER — confirm the widths match on N32 kernels.
 */
static void
frame_dump(const struct trapframe *tf, struct pcb *pcb)
{

	printf("trapframe %p\n", tf);
	printf("ast %#018lx   v0 %#018lx   v1 %#018lx\n",
	    tf->tf_regs[_R_AST], tf->tf_regs[_R_V0], tf->tf_regs[_R_V1]);
	printf(" a0 %#018lx   a1 %#018lx   a2 %#018lx\n",
	    tf->tf_regs[_R_A0], tf->tf_regs[_R_A1], tf->tf_regs[_R_A2]);
#if defined(__mips_n32) || defined(__mips_n64)
	/* N32/N64 ABIs have a4-a7 where O32 has t0-t3. */
	printf(" a3 %#018lx   a4  %#018lx  a5  %#018lx\n",
	    tf->tf_regs[_R_A3], tf->tf_regs[_R_A4], tf->tf_regs[_R_A5]);
	printf(" a6 %#018lx   a7  %#018lx  t0  %#018lx\n",
	    tf->tf_regs[_R_A6], tf->tf_regs[_R_A7], tf->tf_regs[_R_T0]);
	printf(" t1 %#018lx   t2  %#018lx  t3  %#018lx\n",
	    tf->tf_regs[_R_T1], tf->tf_regs[_R_T2], tf->tf_regs[_R_T3]);
#else
	printf(" a3 %#018lx   t0  %#018lx  t1  %#018lx\n",
	    tf->tf_regs[_R_A3], tf->tf_regs[_R_T0], tf->tf_regs[_R_T1]);
	printf(" t2 %#018lx   t3  %#018lx  t4  %#018lx\n",
	    tf->tf_regs[_R_T2], tf->tf_regs[_R_T3], tf->tf_regs[_R_T4]);
	printf(" t5 %#018lx   t6  %#018lx  t7  %#018lx\n",
	    tf->tf_regs[_R_T5], tf->tf_regs[_R_T6], tf->tf_regs[_R_T7]);
#endif
	printf(" s0 %#018lx   s1  %#018lx  s2  %#018lx\n",
	    tf->tf_regs[_R_S0], tf->tf_regs[_R_S1], tf->tf_regs[_R_S2]);
	printf(" s3 %#018lx   s4  %#018lx  s5  %#018lx\n",
	    tf->tf_regs[_R_S3], tf->tf_regs[_R_S4], tf->tf_regs[_R_S5]);
	printf(" s6 %#018lx   s7  %#018lx  t8  %#018lx\n",
	    tf->tf_regs[_R_S6], tf->tf_regs[_R_S7], tf->tf_regs[_R_T8]);
	printf(" t9 %#018lx   k0  %#018lx  k1  %#018lx\n",
	    tf->tf_regs[_R_T9], tf->tf_regs[_R_K0], tf->tf_regs[_R_K1]);
	printf(" gp %#018lx   sp  %#018lx  s8  %#018lx\n",
	    tf->tf_regs[_R_GP], tf->tf_regs[_R_SP], tf->tf_regs[_R_S8]);
	printf(" ra %#018lx   sr  %#018lx  pc  %#018lx\n",
	    tf->tf_regs[_R_RA], tf->tf_regs[_R_SR], tf->tf_regs[_R_PC]);
	printf(" mullo     %#018lx mulhi %#018lx\n",
	    tf->tf_regs[_R_MULLO], tf->tf_regs[_R_MULHI]);
	printf(" badvaddr  %#018lx cause %#018lx\n",
	    tf->tf_regs[_R_BADVADDR], tf->tf_regs[_R_CAUSE]);
	printf("\n");
	/* Note: this dumps the trapframe bytes, not the user stack. */
	hexdump(printf, "Stack dump", tf, 256);
}
892 
/*
 * Log a one-line summary of a signal about to be delivered from trap()
 * (pid, signal number/code, cause, pc, bad vaddr, uvm_fault error),
 * followed by a full trapframe dump.  TRAP_SIGDEBUG only.
 */
static void
sigdebug(const struct trapframe *tf, const ksiginfo_t *ksi, int e,
    vaddr_t pc)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;

	printf("pid %d.%d (%s): signal %d code=%d (trap %#lx) "
	    "@pc %#lx addr %#lx error=%d\n",
	    p->p_pid, l->l_lid, p->p_comm, ksi->ksi_signo, ksi->ksi_code,
	    tf->tf_regs[_R_CAUSE], (unsigned long)pc, tf->tf_regs[_R_BADVADDR],
	    e);
	frame_dump(tf, lwp_getpcb(l));
}
907 #endif /* TRAP_SIGDEBUG */
908