/*	$NetBSD: exception.c,v 1.75 2023/10/05 19:41:05 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2019 The NetBSD Foundation, Inc. All rights reserved.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)trap.c	7.4 (Berkeley) 5/13/91
 */

/*-
 * Copyright (c) 1995 Charles M. Hannum.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)trap.c	7.4 (Berkeley) 5/13/91
 */

/*
 * SH3 Trap and System call handling
 *
 * T.Horiuchi 1998.06.8
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: exception.c,v 1.75 2023/10/05 19:41:05 ad Exp $");

#include "opt_ddb.h"
#include "opt_kgdb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/intr.h>

#ifdef DDB
#include <sh3/db_machdep.h>
#endif
#ifdef KGDB
#include <sys/kgdb.h>
#endif

#include <uvm/uvm_extern.h>

#include <sh3/cpu.h>
#include <sh3/mmu.h>
#include <sh3/pcb.h>
#include <sh3/exception.h>
#include <sh3/userret.h>

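/*
 * Human-readable exception names, indexed by EXPEVT >> 5; used for
 * the diagnostic messages printed before panicking on a fatal trap.
 */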
const char * const exp_type[] = {
	"--",					/* 0x000 (reset vector) */
	"--",					/* 0x020 (reset vector) */
	"TLB miss/invalid (load)",		/* 0x040 EXPEVT_TLB_MISS_LD */
	"TLB miss/invalid (store)",		/* 0x060 EXPEVT_TLB_MISS_ST */
	"initial page write",			/* 0x080 EXPEVT_TLB_MOD */
	"TLB protection violation (load)",	/* 0x0a0 EXPEVT_TLB_PROT_LD */
	"TLB protection violation (store)",	/* 0x0c0 EXPEVT_TLB_PROT_ST */
	"address error (load)",			/* 0x0e0 EXPEVT_ADDR_ERR_LD */
	"address error (store)",		/* 0x100 EXPEVT_ADDR_ERR_ST */
	"FPU",					/* 0x120 EXPEVT_FPU */
	"--",					/* 0x140 (reset vector) */
	"unconditional trap (TRAPA)",		/* 0x160 EXPEVT_TRAPA */
	"reserved instruction code exception",	/* 0x180 EXPEVT_RES_INST */
	"illegal slot instruction exception",	/* 0x1a0 EXPEVT_SLOT_INST */
	"--",					/* 0x1c0 (external interrupt) */
	"user break point trap",		/* 0x1e0 EXPEVT_BREAK */
};
const int exp_types = __arraycount(exp_type);

void general_exception(struct lwp *, struct trapframe *, uint32_t);
void tlb_exception(struct lwp *, struct trapframe *, uint32_t);
void ast(struct lwp *, struct trapframe *);

/*
 * void general_exception(struct lwp *l, struct trapframe *tf, uint32_t va):
 *	l  ... curlwp at the time the exception occurred.
 *	tf ... full user context.
 *	va ... faulting virtual address for user mode EXPEVT_ADDR_ERR_{LD,ST}.
 */
void
general_exception(struct lwp *l, struct trapframe *tf, uint32_t va)
{
	int expevt = tf->tf_expevt;
	bool usermode = !KERNELMODE(tf->tf_ssr);
	struct pcb *pcb;
	ksiginfo_t ksi;
	uint32_t trapcode;
#ifdef DDB
	uint32_t code;
#endif

	curcpu()->ci_data.cpu_ntrap++;

	/*
	 * Read trap code from TRA before enabling interrupts,
	 * otherwise it can be clobbered by a ddb breakpoint in an
	 * interrupt handler.
	 */
	trapcode = _reg_read_4(SH_(TRA)) >> 2;

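	/* Restore the interrupt priority level of the interrupted context. */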
	splx(tf->tf_ssr & PSL_IMASK);

	if (l == NULL)
		goto do_panic;

	if (usermode) {
		KDASSERT(l->l_md.md_regs == tf); /* check exception depth */
		expevt |= EXP_USER;
	}

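	/*
	 * Dispatch on the exception cause; EXP_USER marks exceptions
	 * taken from user mode.
	 */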
	switch (expevt) {
	case EXPEVT_TRAPA | EXP_USER:
		/* Check for debugger break */
		if (trapcode == _SH_TRA_BREAK) {
			tf->tf_spc -= 2; /* back to the breakpoint address */
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGTRAP;
			ksi.ksi_code = TRAP_BRKPT;
			ksi.ksi_addr = (void *)tf->tf_spc;
			goto trapsignal;
		} else {
			/* XXX: we shouldn't treat *any* TRAPA as a syscall */
			(*l->l_proc->p_md.md_syscall)(l, tf);
			return;
		}
		break;

	case EXPEVT_BREAK | EXP_USER:
		l->l_md.md_flags &= ~MDL_SSTEP;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_code = TRAP_TRACE;
		ksi.ksi_addr = (void *)tf->tf_spc;
		goto trapsignal;

	case EXPEVT_ADDR_ERR_LD: /* FALLTHROUGH */
	case EXPEVT_ADDR_ERR_ST:
		pcb = lwp_getpcb(l);
		if (__predict_false(pcb->pcb_onfault == NULL))
			goto do_panic;
		tf->tf_spc = (int)pcb->pcb_onfault;
		tf->tf_r0 = EFAULT;
		break;

	case EXPEVT_ADDR_ERR_LD | EXP_USER: /* FALLTHROUGH */
	case EXPEVT_ADDR_ERR_ST | EXP_USER:
		KSI_INIT_TRAP(&ksi);
		if (((int)va) < 0) {
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
		} else {
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRALN;
		}
		ksi.ksi_addr = (void *)va;
		goto trapsignal;

	case EXPEVT_RES_INST | EXP_USER: /* FALLTHROUGH */
	case EXPEVT_SLOT_INST | EXP_USER:
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = ILL_ILLOPC; /* XXX: could be ILL_PRVOPC */
		ksi.ksi_addr = (void *)tf->tf_spc;
		goto trapsignal;

	default:
		goto do_panic;
	}

	if (usermode)
		userret(l);
	return;

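	/* Post the signal described by ksi and return to user mode. */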
 trapsignal:
	KASSERT(usermode);
	ksi.ksi_trap = tf->tf_expevt;
	trapsignal(l, &ksi);
	userret(l);
	return;

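	/* Unhandled exception: give the kernel debugger a chance, then panic. */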
 do_panic:
#ifdef DDB
	switch (expevt & ~EXP_USER) {
	case EXPEVT_TRAPA:
		code = trapcode;
		break;
	default:
		code = 0;
		break;
	}
	if (kdb_trap(expevt, code, tf))
		return;
#endif
#ifdef KGDB
	if (kgdb_trap(EXPEVT_BREAK, tf))
		return;
#endif
	if (expevt >> 5 < exp_types)
		printf("fatal %s", exp_type[expevt >> 5]);
	else
		printf("EXPEVT 0x%03x", expevt);
	printf(" in %s mode\n", usermode ? "user" : "kernel");
	printf(" spc %x ssr %x \n", tf->tf_spc, tf->tf_ssr);

	panic("general_exception");
	/* NOTREACHED */
}


/*
 * void tlb_exception(struct lwp *l, struct trapframe *tf, uint32_t va):
 *	l  ... curlwp at the time the exception occurred.
 *	tf ... full user context.
 *	va ... faulting virtual address.
 */
void
tlb_exception(struct lwp *l, struct trapframe *tf, uint32_t va)
{
	struct vm_map *map;
	struct pcb *pcb;
	pmap_t pmap;
	void *onfault;
	ksiginfo_t ksi;
	bool usermode;
	int err, track, ftype;
	const char *panic_msg;

	pcb = lwp_getpcb(l);
	onfault = pcb->pcb_onfault;

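	/* Record a message and panic below if an invariant is violated. */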
#define TLB_ASSERT(assert, msg)				\
		do {					\
			if (!(assert)) {		\
				panic_msg =  msg;	\
				goto tlb_panic;		\
			}				\
		} while(/*CONSTCOND*/0)

	usermode = !KERNELMODE(tf->tf_ssr);
	if (usermode) {
		KDASSERT(l->l_md.md_regs == tf);
	} else {
#if 0 /* FIXME: probably wrong for yamt-idlelwp */
		KDASSERT(l == NULL ||		/* idle */
		    l == &lwp0 ||		/* kthread */
		    l->l_md.md_regs != tf);	/* other */
#endif
	}

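	/*
	 * Decide which reference/modify bits to record and which access
	 * type to hand to uvm_fault() if no resident mapping is found.
	 */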
	switch (tf->tf_expevt) {
	case EXPEVT_TLB_MISS_LD:
		track = PVH_REFERENCED;
		ftype = VM_PROT_READ;
		break;
	case EXPEVT_TLB_MISS_ST:
		track = PVH_REFERENCED;
		ftype = VM_PROT_WRITE;
		break;
	case EXPEVT_TLB_MOD:
		track = PVH_REFERENCED | PVH_MODIFIED;
		ftype = VM_PROT_WRITE;
		break;
	case EXPEVT_TLB_PROT_LD:
		TLB_ASSERT((int)va > 0,
		    "kernel virtual protection fault (load)");
		if (usermode) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			ksi.ksi_addr = (void *)va;
			splx(tf->tf_ssr & PSL_IMASK);
			goto user_fault;
		} else {
			TLB_ASSERT(l && onfault != NULL,
			    "no copyin/out fault handler (load protection)");
			tf->tf_spc = (int)onfault;
			tf->tf_r0 = EFAULT;
		}
		return;

	case EXPEVT_TLB_PROT_ST:
		track = 0;	/* call uvm_fault first. (COW) */
		ftype = VM_PROT_WRITE;
		break;

	default:
		TLB_ASSERT(0, "impossible expevt");
	}

	/* Select address space */
	if (usermode) {
		TLB_ASSERT(l != NULL, "no curlwp");
		map = &l->l_proc->p_vmspace->vm_map;
		pmap = map->pmap;
	} else {
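		/* Addresses with the top bit set lie in kernel virtual space. */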
		if ((int)va < 0) {
			map = kernel_map;
			pmap = pmap_kernel();
		} else {
			TLB_ASSERT(l != NULL && onfault != NULL,
			    "invalid user-space access from kernel mode");
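			/*
			 * Kernel access to address zero is a NULL
			 * dereference; fail it with EFAULT via the
			 * onfault handler instead of calling uvm_fault().
			 */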
			if (va == 0) {
				tf->tf_spc = (int)onfault;
				tf->tf_r0 = EFAULT;
				return;
			}
			map = &l->l_proc->p_vmspace->vm_map;
			pmap = map->pmap;
		}
	}

	/* Look up the page table; if an entry is found, load it. */
	if (track && __pmap_pte_load(pmap, va, track)) {
		return;
	}

	/* Page not found; call the fault handler. */
	splx(tf->tf_ssr & PSL_IMASK);
	pcb->pcb_onfault = NULL;
	err = uvm_fault(map, va, ftype);
	pcb->pcb_onfault = onfault;

	/* User stack extension */
	if (map != kernel_map &&
	    (va >= (vaddr_t)l->l_proc->p_vmspace->vm_maxsaddr) &&
	    (va <  (vaddr_t)l->l_proc->p_vmspace->vm_minsaddr)) {
		if (err == 0) {
			struct vmspace *vm = l->l_proc->p_vmspace;
			uint32_t nss;
			nss = btoc((vaddr_t)vm->vm_minsaddr - va);
			if (nss > vm->vm_ssize)
				vm->vm_ssize = nss;
		} else if (err == EACCES) {
			err = EFAULT;
		}
	}

	/* Paged in; load the PTE into the TLB. */
	if (err == 0) {
		bool loaded;
		if (usermode)
			userret(l);
		loaded = __pmap_pte_load(pmap, va, track);
#if 0
		/*
		 * XXXAD I don't think you should do this - consider
		 * a multithreaded program where another thread got
		 * switched to during UVM fault and it unmapped the
		 * page. I think you should just let the fault happen
		 * again.
		 */
		TLB_ASSERT(loaded, "page table entry not found");
#else
		__USE(loaded);
#endif
		return;
	}

	/* Page not found. */
	if (usermode) {
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_addr = (void *)va;

		switch (err) {
		case ENOMEM:
			ksi.ksi_signo = SIGKILL;
			break;
		case EINVAL:
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_ADRERR;
			break;
		case EACCES:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			break;
		default:
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_MAPERR;
			break;
		}
		goto user_fault;
	} else {
		TLB_ASSERT(onfault,
		    "no copyin/out fault handler (page not found)");
		tf->tf_spc = (int)onfault;
		tf->tf_r0 = err;
	}
	return;

 user_fault:
	ksi.ksi_trap = tf->tf_expevt;
	trapsignal(l, &ksi);
	userret(l);
	return;

 tlb_panic:
	panic("tlb_exception: %s\n"
	      "expevt=%x va=%08x ssr=%08x spc=%08x lwp=%p onfault=%p",
	      panic_msg, tf->tf_expevt, va, tf->tf_ssr, tf->tf_spc,
	      l, pcb->pcb_onfault);
#undef	TLB_ASSERT
}


/*
 * void ast(struct lwp *l, struct trapframe *tf):
 *	l  ... curlwp at the time the exception occurred.
 *	tf ... full user context.
 *	Called on exception return.  When returning from kernel to user
 *	mode, handle asynchronous software traps and context switch if
 *	needed.  Interrupts are blocked on entry.
 */
void
ast(struct lwp *l, struct trapframe *tf)
{
	int s;

	if (__predict_true(l->l_md.md_astpending == 0)) {
		return;
	}
	if (__predict_false(KERNELMODE(tf->tf_ssr))) {
		/* should not occur but leave it here to be safe */
		return;
	}

	KDASSERT(l != NULL);
	KDASSERT(l->l_md.md_regs == tf);

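	/*
	 * Drop to the interrupted interrupt level, let userret() handle
	 * pending ASTs, then re-check for newly posted ASTs with
	 * interrupts blocked before returning.
	 */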
	s = tf->tf_ssr & PSL_IMASK;
	do {
		splx(s);
		/* userret() clears l_md.md_astpending */
		userret(l);
		s = splhigh();
	} while (__predict_false(l->l_md.md_astpending));
}