/*	$NetBSD: trap.c,v 1.13 2011/09/27 01:02:35 jym Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_sa.h"

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.13 2011/09/27 01:02:35 jym Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/siginfo.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#ifdef KERN_SA
#include <sys/savar.h>
#endif
#include <sys/kauth.h>
#include <sys/ras.h>

#include <uvm/uvm_extern.h>

#include <powerpc/pcb.h>
#include <powerpc/userret.h>
#include <powerpc/psl.h>
#include <powerpc/instr.h>
#include <powerpc/altivec.h>		/* use same interface for SPE */

#include <powerpc/spr.h>
#include <powerpc/booke/spr.h>
#include <powerpc/booke/cpuvar.h>

#include <powerpc/db_machdep.h>
#include <ddb/db_interface.h>

#include <powerpc/trap.h>
#include <powerpc/booke/trap.h>
#include <powerpc/booke/pte.h>

void trap(enum ppc_booke_exceptions, struct trapframe *);
static void dump_trapframe(const struct trapframe *);

static const char trap_names[][8] = {
	[T_CRITIAL_INPUT] = "CRIT",
	[T_EXTERNAL_INPUT] = "EXT",
	[T_DECREMENTER] = "DECR",
	[T_FIXED_INTERVAL] = "FIT",
	[T_WATCHDOG] = "WDOG",
	[T_SYSTEM_CALL] = "SC",
	[T_MACHINE_CHECK] = "MCHK",
	[T_DSI] = "DSI",
	[T_ISI] = "ISI",
	[T_ALIGNMENT] = "ALN",
	[T_PROGRAM] = "PGM",
	[T_FP_UNAVAILABLE] = "FP",
	[T_AP_UNAVAILABLE] = "AP",
	[T_DATA_TLB_ERROR] = "DTLB",
	[T_INSTRUCTION_TLB_ERROR] = "ITLB",
	[T_DEBUG] = "DEBUG",
	[T_SPE_UNAVAILABLE] = "SPE",
	[T_EMBEDDED_FP_DATA] = "FPDATA",
	[T_EMBEDDED_FP_ROUND] = "FPROUND",
	[T_EMBEDDED_PERF_MONITOR] = "PERFMON",
	[T_AST] = "AST",
};

static inline bool
usertrap_p(struct trapframe *tf)
{
	return (tf->tf_srr1 & PSL_PR) != 0;
}

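/*
 * Machine check exception.  Count the event and, for faults taken in
 * user mode, fill in a SIGSEGV.  Note that rv is currently hard-wired
 * to EFAULT, so a user-mode machine check is always fatal to the
 * offending lwp and a kernel-mode one is left for trap() to resolve
 * through onfault handling or panic.
 */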
static int
mchk_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const bool usertrap = usertrap_p(tf);
	const vaddr_t faultva = tf->tf_mcar;
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	if (usertrap)
		ci->ci_ev_umchk.ev_count++;

	if (rv != 0 && usertrap) {
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_DSI;
		ksi->ksi_code = SEGV_ACCERR;
		ksi->ksi_addr = (void *)faultva;
	}

	return rv;
}

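/*
 * Derive the access type of a data fault from ESR[ST]: stores fault
 * with VM_PROT_WRITE in addition to VM_PROT_READ, loads with just
 * VM_PROT_READ.
 */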
static inline vm_prot_t
get_faulttype(const struct trapframe * const tf)
{
	return VM_PROT_READ | (tf->tf_esr & ESR_ST ? VM_PROT_WRITE : 0);
}

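/*
 * Pick the map to fault on: if the saved MSR has the given address
 * space bit (PSL_DS or PSL_IS) set, the access was made in the user
 * address space, otherwise it was made in the kernel's.
 */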
static inline struct vm_map *
get_faultmap(const struct trapframe * const tf, register_t psl_mask)
{
	return (tf->tf_srr1 & psl_mask)
	    ? &curlwp->l_proc->p_vmspace->vm_map
	    : kernel_map;
}

/*
 * We could use pmap_pte_lookup, but this is slightly faster since we
 * already have the segtab pointers in cpu_info.
 */
static inline pt_entry_t *
trap_pte_lookup(struct trapframe *tf, vaddr_t va, register_t psl_mask)
{
	struct pmap_segtab ** const stps = &curcpu()->ci_pmap_kern_segtab;
	struct pmap_segtab * const stp = stps[(tf->tf_srr1 / psl_mask) & 1];
	if (__predict_false(stp == NULL))
		return NULL;
	pt_entry_t *ptep = stp->seg_tab[va >> SEGSHIFT];
	if (__predict_false(ptep == NULL))
		return NULL;
	return ptep + ((va & SEGOFSET) >> PAGE_SHIFT);
}

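/*
 * Common UVM fault path for the DSI/ISI and TLB-miss handlers.  User
 * faults go straight to uvm_fault() (growing the stack on success).
 * Kernel faults are refused in interrupt context, and pcb_onfault is
 * cleared for the duration of the uvm_fault() call, presumably so a
 * nested fault is not short-circuited by copyin/copyout error
 * handling.  EACCES is mapped to EFAULT for the callers.
 */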
static int
pagefault(struct vm_map *map, vaddr_t va, vm_prot_t ftype, bool usertrap)
{
	struct lwp * const l = curlwp;
	int rv;

//	printf("%s(%p,%#lx,%u,%u)\n", __func__, map, va, ftype, usertrap);

	if (usertrap) {
#ifdef KERN_SA
		if (l->l_flag & LW_SA) {
			l->l_savp->savp_faultaddr = va;
			l->l_pflag |= LP_SA_PAGEFAULT;
		}
#endif
		rv = uvm_fault(map, trunc_page(va), ftype);
		if (rv == 0)
			uvm_grow(l->l_proc, trunc_page(va));
		if (rv == EACCES)
			rv = EFAULT;
#ifdef KERN_SA
		l->l_pflag &= ~LP_SA_PAGEFAULT;
#endif
	} else {
		if (cpu_intr_p())
			return EFAULT;

		struct pcb * const pcb = lwp_getpcb(l);
		struct faultbuf * const fb = pcb->pcb_onfault;
		pcb->pcb_onfault = NULL;
		rv = uvm_fault(map, trunc_page(va), ftype);
		pcb->pcb_onfault = fb;
		if (map != kernel_map) {
			if (rv == 0)
				uvm_grow(l->l_proc, trunc_page(va));
#ifdef KERN_SA
			l->l_pflag &= ~LP_SA_PAGEFAULT;
#endif
		}
		if (rv == EACCES)
			rv = EFAULT;
	}
	return rv;
}

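/*
 * Data storage interrupt (data access/protection fault).  First handle
 * the common case of a write to a writable but not-yet-modified page
 * by setting the modified attribute and updating the TLB entry in
 * place; otherwise fall back to pagefault().
 */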
static int
dsi_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = tf->tf_dear;
	const vm_prot_t ftype = get_faulttype(tf);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_DS);
	const bool usertrap = usertrap_p(tf);

	kpreempt_disable();
	struct cpu_info * const ci = curcpu();

	if (usertrap)
		ci->ci_ev_udsi.ev_count++;
	else
		ci->ci_ev_kdsi.ev_count++;

	/*
	 * If we had a TLB entry (which we must have had to get this exception),
	 * we certainly have a PTE.
	 */
	pt_entry_t * const ptep = trap_pte_lookup(tf, trunc_page(faultva),
	    PSL_DS);
	KASSERT(ptep != NULL);
	pt_entry_t pte = *ptep;

	if ((ftype & VM_PROT_WRITE)
	    && ((pte & (PTE_xW|PTE_UNMODIFIED)) == (PTE_xW|PTE_UNMODIFIED))) {
		const paddr_t pa = pte_to_paddr(pte);
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

		if (!VM_PAGEMD_MODIFIED_P(mdpg)) {
			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED);
		}
		pte &= ~PTE_UNMODIFIED;
		*ptep = pte;
		pmap_tlb_update_addr(faultmap->pmap, trunc_page(faultva),
		    pte, 0);
		kpreempt_enable();
		return 0;
	}
	kpreempt_enable();

	int rv = pagefault(faultmap, faultva, ftype, usertrap);

	/*
	 * We can't get a MAPERR here since that's a different exception.
	 */
	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_udsi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_DSI;
		ksi->ksi_code = SEGV_ACCERR;
		ksi->ksi_addr = (void *)faultva;
	}
	return rv;
}

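/*
 * Instruction storage interrupt (instruction access fault).  If the
 * page is merely marked PTE_UNSYNCED, synchronize the caches for it,
 * grant execute permission in the PTE, and update the TLB; otherwise
 * fall back to pagefault().
 */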
static int
isi_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = trunc_page(tf->tf_srr0);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_IS);
	const bool usertrap = usertrap_p(tf);

	kpreempt_disable();
	struct cpu_info * const ci = curcpu();

	if (usertrap)
		ci->ci_ev_isi.ev_count++;
	else
		ci->ci_ev_kisi.ev_count++;

	/*
	 * If we had a TLB entry (which we must have had to get this exception),
	 * we certainly have a PTE.
	 */
	pt_entry_t * const ptep = trap_pte_lookup(tf, trunc_page(faultva),
	    PSL_IS);
	if (ptep == NULL)
		dump_trapframe(tf);
	KASSERT(ptep != NULL);
	pt_entry_t pte = *ptep;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmapexechist);

	if ((pte & PTE_UNSYNCED) == PTE_UNSYNCED) {
		const paddr_t pa = pte_to_paddr(pte);
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

		UVMHIST_LOG(pmapexechist,
		    "srr0=%#x pg=%p (pa %#"PRIxPADDR"): %s",
		    tf->tf_srr0, pg, pa,
		    (VM_PAGEMD_EXECPAGE_P(mdpg)
			? "no syncicache (already execpage)"
			: "performed syncicache (now execpage)"));

		if (!VM_PAGEMD_EXECPAGE_P(mdpg)) {
			ci->ci_softc->cpu_ev_exec_trap_sync.ev_count++;
			dcache_wb_page(pa);
			icache_inv_page(pa);
			pmap_page_set_attributes(mdpg, VM_PAGEMD_EXECPAGE);
		}
		pte &= ~PTE_UNSYNCED;
		pte |= PTE_xX;
		*ptep = pte;

		pmap_tlb_update_addr(faultmap->pmap, trunc_page(faultva),
		    pte, 0);
		kpreempt_enable();
		UVMHIST_LOG(pmapexechist, "<- 0", 0,0,0,0);
		return 0;
	}
	kpreempt_enable();

	int rv = pagefault(faultmap, faultva, VM_PROT_READ|VM_PROT_EXECUTE,
	    usertrap);

	if (__predict_false(rv != 0 && usertrap)) {
		/*
		 * We can't get a MAPERR here since
		 * that's a different exception.
		 */
		ci->ci_ev_isi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_ISI;
		ksi->ksi_code = SEGV_ACCERR;
		ksi->ksi_addr = (void *)tf->tf_srr0; /* not truncated */
	}
	UVMHIST_LOG(pmapexechist, "<- %d", rv, 0,0,0);
	return rv;
}

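/*
 * Data TLB error (a TLB miss that the pte_load fast path in
 * trap_subr.S could not satisfy).  Resolve it through pagefault() and
 * deliver SIGSEGV for unrecoverable user faults.
 */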
static int
dtlb_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	const vaddr_t faultva = tf->tf_dear;
	const vm_prot_t ftype = get_faulttype(tf);
	struct vm_map * const faultmap = get_faultmap(tf, PSL_DS);
	struct cpu_info * const ci = curcpu();
	const bool usertrap = usertrap_p(tf);

#if 0
	/*
	 * This is what pte_load in trap_subr.S does for us.
	 */
	const pt_entry_t * const ptep =
	    trap_pte_lookup(tf, trunc_page(faultva), PSL_DS);
	if (ptep != NULL && !usertrap && pte_valid_p(*ptep)) {
		tlb_update_addr(trunc_page(faultva), KERNEL_PID, *ptep, true);
		ci->ci_ev_tlbmiss_soft.ev_count++;
		return 0;
	}
#endif

	ci->ci_ev_dtlbmiss_hard.ev_count++;

//	printf("pagefault(%p,%#lx,%u,%u)", faultmap, faultva, ftype, usertrap);
	int rv = pagefault(faultmap, faultva, ftype, usertrap);
//	printf(": %d\n", rv);

	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_udsi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_DSI;
		ksi->ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
		ksi->ksi_addr = (void *)faultva;
	}
	return rv;
}

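/*
 * Instruction TLB error.  Same idea as dtlb_exception(), but for
 * instruction fetches: fault the page in via pagefault() and deliver
 * SIGSEGV for unrecoverable user faults.
 */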
static int
itlb_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct vm_map * const faultmap = get_faultmap(tf, PSL_IS);
	const vaddr_t faultva = tf->tf_srr0;
	struct cpu_info * const ci = curcpu();
	const bool usertrap = usertrap_p(tf);

	ci->ci_ev_itlbmiss_hard.ev_count++;

	int rv = pagefault(faultmap, faultva, VM_PROT_READ|VM_PROT_EXECUTE,
	    usertrap);

	if (__predict_false(rv != 0 && usertrap)) {
		ci->ci_ev_isi_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGSEGV;
		ksi->ksi_trap = EXC_ISI;
		ksi->ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

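/*
 * SPE unavailable.  Only meaningful from user mode: if the kernel was
 * built with SPE support, lazily load the vector/SPE state through the
 * shared AltiVec interface; otherwise the process gets SIGILL.
 */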
static int
spe_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();

	if (!usertrap_p(tf))
		return EPERM;

	ci->ci_ev_vec.ev_count++;

#ifdef PPC_HAVE_SPE
	vec_load();
	return 0;
#else
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGILL;
	ksi->ksi_trap = EXC_PGM;
	ksi->ksi_code = ILL_ILLOPC;
	ksi->ksi_addr = (void *)tf->tf_srr0;
	return EPERM;
#endif
}

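/*
 * Try to emulate an instruction that raised a program exception.
 * Currently this recognizes lwsync (simply skipped) and mfspr from
 * SPR_PVR (satisfied by reading the real PVR); anything else,
 * including floating point, is not emulated.
 */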
static bool
emulate_opcode(struct trapframe *tf, ksiginfo_t *ksi)
{
	uint32_t opcode;
	if (copyin((void *)tf->tf_srr0, &opcode, sizeof(opcode)) != 0)
		return false;

	if (opcode == OPC_LWSYNC)
		return true;

	if (OPC_MFSPR_P(opcode, SPR_PVR)) {
		__asm ("mfpvr %0" : "=r"(tf->tf_fixreg[OPC_MFSPR_REG(opcode)]));
		return true;
	}

	/*
	 * If we bothered to emulate FP, we would try to do so here.
	 */
	return false;
}

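/*
 * Program exception (illegal/privileged instruction or trap
 * instruction).  Kernel-mode occurrences are rejected outright.  For
 * user mode, a trap inside a registered restartable atomic sequence is
 * simply skipped, illegal/privileged opcodes get one shot at
 * emulation, and everything else turns into SIGILL or SIGTRAP with a
 * code chosen from the ESR bits.
 */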
static int
pgm_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EPERM;

	if (!usertrap_p(tf))
		return rv;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmapexechist);

	UVMHIST_LOG(pmapexechist, " srr0/1=%#x/%#x esr=%#x pte=%#x",
	    tf->tf_srr0, tf->tf_srr1, tf->tf_esr,
	    *trap_pte_lookup(tf, trunc_page(tf->tf_srr0), PSL_IS));

	ci->ci_ev_pgm.ev_count++;

	if (tf->tf_esr & ESR_PTR) {
		struct proc *p = curlwp->l_proc;
		if (p->p_raslist != NULL
		    && ras_lookup(p, (void *)tf->tf_srr0) != (void *) -1) {
			tf->tf_srr0 += 4;
			return 0;
		}
	}

	if (tf->tf_esr & (ESR_PIL|ESR_PPR)) {
		if (emulate_opcode(tf, ksi)) {
			tf->tf_srr0 += 4;
			return 0;
		}
	}

	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGILL;
	ksi->ksi_trap = EXC_PGM;
	if (tf->tf_esr & ESR_PIL) {
		ksi->ksi_code = ILL_ILLOPC;
	} else if (tf->tf_esr & ESR_PPR) {
		ksi->ksi_code = ILL_PRVOPC;
	} else if (tf->tf_esr & ESR_PTR) {
		ksi->ksi_signo = SIGTRAP;
		ksi->ksi_code = TRAP_BRKPT;
	} else {
		ksi->ksi_code = 0;
	}
	ksi->ksi_addr = (void *)tf->tf_srr0;
	return rv;
}

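/*
 * Debug exception (instruction address compare hit).  Acknowledge the
 * event in the DBSR, disable further debug events, and report the hit
 * to the user process as a SIGTRAP.
 */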
static int
debug_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EPERM;

	if (!usertrap_p(tf))
		return rv;

	ci->ci_ev_debug.ev_count++;

	/*
	 * Ack the interrupt.
	 */
	mtspr(SPR_DBSR, tf->tf_esr);
	KASSERT(tf->tf_esr & (DBSR_IAC1|DBSR_IAC2));
	KASSERT((tf->tf_srr1 & PSL_SE) == 0);

	/*
	 * Disable debug events
	 */
	mtspr(SPR_DBCR1, 0);
	mtspr(SPR_DBCR0, 0);

	/*
	 * Tell the debugger ...
	 */
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGTRAP;
	ksi->ksi_trap = EXC_TRC;
	ksi->ksi_addr = (void *)tf->tf_srr0;
	ksi->ksi_code = TRAP_TRACE;
	return rv;
}

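/*
 * Alignment exception.  Unaligned accesses are not emulated; user-mode
 * offenders get a SIGILL with a code derived from the ESR bits, and
 * kernel-mode faults are left for trap() to resolve through onfault
 * handling or panic.
 */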
static int
ali_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	ci->ci_ev_ali.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		ci->ci_ev_ali_fatal.ev_count++;
		KSI_INIT_TRAP(ksi);
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		if (tf->tf_esr & ESR_PIL)
			ksi->ksi_code = ILL_ILLOPC;
		else if (tf->tf_esr & ESR_PPR)
			ksi->ksi_code = ILL_PRVOPC;
		else if (tf->tf_esr & ESR_PTR)
			ksi->ksi_code = ILL_ILLTRP;
		else
			ksi->ksi_code = 0;
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

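/*
 * Embedded floating-point data exception.  With SPE support this is
 * reported as a SIGFPE whose code is derived from the vector unit
 * state; without it, the instruction is simply illegal.
 */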
static int
embedded_fp_data_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EFAULT;

	ci->ci_ev_fpu.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		KSI_INIT_TRAP(ksi);
#ifdef PPC_HAVE_SPE
		ksi->ksi_signo = SIGFPE;
		ksi->ksi_trap = tf->tf_exc;
		ksi->ksi_code = vec_siginfo_code(tf);
#else
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		ksi->ksi_code = ILL_ILLOPC;
#endif
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

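/*
 * Embedded floating-point round exception.  Handled the same way as
 * the data exception above; the error returned for the kernel path is
 * EDOM rather than EFAULT.
 */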
static int
embedded_fp_round_exception(struct trapframe *tf, ksiginfo_t *ksi)
{
	struct cpu_info * const ci = curcpu();
	int rv = EDOM;

	ci->ci_ev_fpu.ev_count++;

	if (rv != 0 && usertrap_p(tf)) {
		KSI_INIT_TRAP(ksi);
#ifdef PPC_HAVE_SPE
		ksi->ksi_signo = SIGFPE;
		ksi->ksi_trap = tf->tf_exc;
		ksi->ksi_code = vec_siginfo_code(tf);
#else
		ksi->ksi_signo = SIGILL;
		ksi->ksi_trap = EXC_PGM;
		ksi->ksi_code = ILL_ILLOPC;
#endif
		ksi->ksi_addr = (void *)tf->tf_srr0;
	}
	return rv;
}

static void
dump_trapframe(const struct trapframe *tf)
{
	printf("trapframe %p (exc=%x srr0/1=%#lx/%#lx esr/dear=%#x/%#lx)\n",
	    tf, tf->tf_exc, tf->tf_srr0, tf->tf_srr1, tf->tf_esr, tf->tf_dear);
	printf("lr =%08lx ctr=%08lx cr =%08x xer=%08x\n",
	    tf->tf_lr, tf->tf_ctr, tf->tf_cr, tf->tf_xer);
	for (u_int r = 0; r < 32; r += 4) {
		printf("r%02u=%08lx r%02u=%08lx r%02u=%08lx r%02u=%08lx\n",
		    r+0, tf->tf_fixreg[r+0], r+1, tf->tf_fixreg[r+1],
		    r+2, tf->tf_fixreg[r+2], r+3, tf->tf_fixreg[r+3]);
	}
}
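
/*
 * Hand a kernel-mode trap to DDB.  If the debugger handles it, skip
 * the trapping instruction and resume.  The disabled (#if 0) variants
 * appear to be earlier approaches that only caught breakpoints planted
 * by cpu_Debugger() or that dumped the CPL stack first.
 */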
static bool
ddb_exception(struct trapframe *tf)
{
#if 0
	const register_t ddb_trapfunc = (uintptr_t) cpu_Debugger;
	if ((tf->tf_esr & ESR_PTR) == 0)
		return false;
	if (ddb_trapfunc <= tf->tf_srr0 && tf->tf_srr0 <= ddb_trapfunc+16) {
		register_t srr0 = tf->tf_srr0;
		if (kdb_trap(tf->tf_exc, tf)) {
			if (srr0 == tf->tf_srr0)
				tf->tf_srr0 += 4;
			return true;
		}
	}
	return false;
#else
#if 0
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	printf("CPL stack:");
	if (ci->ci_idepth >= 0) {
		for (u_int i = 0; i <= ci->ci_idepth; i++) {
			printf(" [%u]=%u", i, cpu->cpu_pcpls[i]);
		}
	}
	printf(" %u\n", ci->ci_cpl);
	dump_trapframe(tf);
#endif
	if (kdb_trap(tf->tf_exc, tf)) {
		tf->tf_srr0 += 4;
		return true;
	}
	return false;
#endif
}

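/*
 * If the current lwp has a fault handler registered (pcb_onfault, as
 * set up by copyin/copyout and friends), rewrite the trapframe so that
 * execution resumes at the handler with the error code in r3, and
 * report that the fault was handled.
 */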
static bool
onfaulted(struct trapframe *tf, register_t rv)
{
	struct lwp * const l = curlwp;
	struct pcb * const pcb = lwp_getpcb(l);
	struct faultbuf * const fb = pcb->pcb_onfault;
	if (fb == NULL)
		return false;
	tf->tf_srr0 = fb->fb_pc;
	tf->tf_srr1 = fb->fb_msr;
	tf->tf_cr = fb->fb_cr;
	tf->tf_fixreg[1] = fb->fb_sp;
	tf->tf_fixreg[2] = fb->fb_r2;
	tf->tf_fixreg[3] = rv;
	pcb->pcb_onfault = NULL;
	return true;
}

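/*
 * Common trap dispatcher, entered from the exception vectors in
 * trap_subr.S for every exception not handled entirely in assembly.
 * Each handler returns 0 if it resolved the trap, or an errno-style
 * error otherwise; on error, a kernel trap is resolved through
 * onfaulted() or panics, while a user trap delivers the signal
 * prepared in the ksiginfo before returning via userret().
 */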
void
trap(enum ppc_booke_exceptions trap_code, struct trapframe *tf)
{
	const bool usertrap = usertrap_p(tf);
	struct cpu_info * const ci = curcpu();
	struct lwp * const l = curlwp;
	struct proc * const p = l->l_proc;
	ksiginfo_t ksi;
	int rv = EACCES;

	ci->ci_ev_traps.ev_count++;
	ci->ci_data.cpu_ntrap++;

	KASSERTMSG(!usertrap || tf == trapframe(l),
	    "trap: tf=%p is invalid: trapframe(%p)=%p", tf, l, trapframe(l));

#if 0
	if (trap_code != T_PROGRAM || usertrap)
		printf("trap(enter): %s (tf=%p, esr/dear=%#x/%#lx, srr0/1=%#lx/%#lx, lr=%#lx)\n",
		    trap_names[trap_code], tf, tf->tf_esr, tf->tf_dear,
		    tf->tf_srr0, tf->tf_srr1, tf->tf_lr);
#endif
#if 0
	if ((register_t)tf >= (register_t)l->l_addr + USPACE
	    || (register_t)tf < (register_t)l->l_addr + PAGE_SIZE) {
		printf("%s(entry): pid %d.%d (%s): invalid tf addr %p\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm, tf);
		dump_trapframe(tf);
		Debugger();
	}
#endif
#if 0
	if ((mfmsr() & PSL_CE) == 0) {
		printf("%s(entry): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], mfmsr());
		dump_trapframe(tf);
	}
#endif

	if (usertrap && (tf->tf_fixreg[1] & 0x80000000)) {
		printf("%s(entry): pid %d.%d (%s): %s invalid sp %#lx (sprg1=%#lx)\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], tf->tf_fixreg[1], mfspr(SPR_SPRG1));
		dump_trapframe(tf);
		Debugger();
	}

	if (usertrap && (tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
		printf("%s(entry): pid %d.%d (%s): %s invalid PSL %#lx\n",
		    __func__, p->p_pid, l->l_lid, p->p_comm,
		    trap_names[trap_code], tf->tf_srr1);
		dump_trapframe(tf);
		Debugger();
	}

	switch (trap_code) {
	case T_CRITIAL_INPUT:
	case T_EXTERNAL_INPUT:
	case T_DECREMENTER:
	case T_FIXED_INTERVAL:
	case T_WATCHDOG:
	case T_SYSTEM_CALL:
	default:
		panic("trap: unexpected trap code %d! (tf=%p, srr0/1=%#lx/%#lx)",
		    trap_code, tf, tf->tf_srr0, tf->tf_srr1);
	case T_MACHINE_CHECK:
		rv = mchk_exception(tf, &ksi);
		break;
	case T_DSI:
		rv = dsi_exception(tf, &ksi);
		break;
	case T_ISI:
		rv = isi_exception(tf, &ksi);
		break;
	case T_ALIGNMENT:
		rv = ali_exception(tf, &ksi);
		break;
	case T_SPE_UNAVAILABLE:
		rv = spe_exception(tf, &ksi);
		break;
	case T_PROGRAM:
#ifdef DDB
		if (!usertrap && ddb_exception(tf))
			return;
#endif
		rv = pgm_exception(tf, &ksi);
		break;
	case T_FP_UNAVAILABLE:
	case T_AP_UNAVAILABLE:
		panic("trap: unexpected trap code %d! (tf=%p, srr0/1=%#lx/%#lx)",
		    trap_code, tf, tf->tf_srr0, tf->tf_srr1);
	case T_DATA_TLB_ERROR:
		rv = dtlb_exception(tf, &ksi);
		break;
	case T_INSTRUCTION_TLB_ERROR:
		rv = itlb_exception(tf, &ksi);
		break;
	case T_DEBUG:
#ifdef DDB
		if (!usertrap && ddb_exception(tf))
			return;
#endif
		rv = debug_exception(tf, &ksi);
		break;
	case T_EMBEDDED_FP_DATA:
		rv = embedded_fp_data_exception(tf, &ksi);
		break;
	case T_EMBEDDED_FP_ROUND:
		rv = embedded_fp_round_exception(tf, &ksi);
		break;
	case T_EMBEDDED_PERF_MONITOR:
		//db_stack_trace_print(tf->tf_fixreg[1], true, 40, "", printf);
		dump_trapframe(tf);
		rv = EPERM;
		break;
	case T_AST:
		KASSERT(usertrap);
		cpu_ast(l, ci);
		if (tf->tf_fixreg[1] & 0x80000000) {
			printf("%s(ast-exit): pid %d.%d (%s): invalid sp %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    tf->tf_fixreg[1]);
			dump_trapframe(tf);
			Debugger();
		}
		if ((tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
			printf("%s(entry): pid %d.%d (%s): %s invalid PSL %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], tf->tf_srr1);
			dump_trapframe(tf);
			Debugger();
		}
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			dump_trapframe(tf);
		}
#endif
		userret(l, tf);
		return;
	}
	if (!usertrap) {
		if (rv != 0) {
			if (!onfaulted(tf, rv)) {
				db_stack_trace_print(tf->tf_fixreg[1], true, 40, "", printf);
				dump_trapframe(tf);
				panic("%s: pid %d.%d (%s): %s exception in kernel mode"
				    " (tf=%p, dear=%#lx, esr=%#x,"
				    " srr0/1=%#lx/%#lx)",
				    __func__, p->p_pid, l->l_lid, p->p_comm,
				    trap_names[trap_code], tf, tf->tf_dear,
				    tf->tf_esr, tf->tf_srr0, tf->tf_srr1);
			}
		}
#if 0
		if (tf->tf_fixreg[1] >= (register_t)l->l_addr + USPACE
		    || tf->tf_fixreg[1] < (register_t)l->l_addr + PAGE_SIZE) {
			printf("%s(exit): pid %d.%d (%s): invalid kern sp %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    tf->tf_fixreg[1]);
			dump_trapframe(tf);
			Debugger();
		}
#endif
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			mtmsr(mfmsr()|PSL_CE);
			dump_trapframe(tf);
		}
#endif
	} else {
		if (rv == ENOMEM) {
			printf("UVM: pid %d.%d (%s), uid %d killed: "
			    "out of swap\n",
			    p->p_pid, l->l_lid, p->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
		}
		if (rv != 0) {
			if (cpu_printfataltraps) {
				printf("%s: pid %d.%d (%s):"
				    " %s exception in user mode\n",
				    __func__, p->p_pid, l->l_lid, p->p_comm,
				    trap_names[trap_code]);
				if (cpu_printfataltraps > 1)
					dump_trapframe(tf);
			}
			(*p->p_emul->e_trapsignal)(l, &ksi);
		}
#ifdef DEBUG
		if ((tf->tf_srr1 & (PSL_DS|PSL_IS)) != (PSL_DS|PSL_IS)) {
			printf("%s(exit): pid %d.%d (%s): %s invalid PSL %#lx\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], tf->tf_srr1);
			dump_trapframe(tf);
			Debugger();
		}
#endif
#if 0
		if ((mfmsr() & PSL_CE) == 0) {
			printf("%s(exit): pid %d.%d (%s): %s: PSL_CE (%#lx) not set\n",
			    __func__, p->p_pid, l->l_lid, p->p_comm,
			    trap_names[trap_code], mfmsr());
			dump_trapframe(tf);
		}
#endif
		userret(l, tf);
	}
}