xref: /openbsd-src/sys/arch/powerpc64/powerpc64/trap.c (revision 68cc39830aa48a2182a4a2976238be1f40888669)
1 /*	$OpenBSD: trap.c,v 1.54 2023/04/13 02:19:05 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/proc.h>
22 #include <sys/signalvar.h>
23 #include <sys/user.h>
24 #include <sys/syscall.h>
25 #include <sys/syscall_mi.h>
26 
27 #include <uvm/uvm_extern.h>
28 
29 #include <machine/fpu.h>
30 #include <machine/pte.h>
31 #include <machine/trap.h>
32 
33 #ifdef DDB
34 #include <machine/db_machdep.h>
35 #include <ddb/db_output.h>
36 #endif
37 
38 void	decr_intr(struct trapframe *); /* clock.c */
39 void	exi_intr(struct trapframe *);  /* intr.c */
40 void	hvi_intr(struct trapframe *);  /* intr.c */
41 void	syscall(struct trapframe *);   /* syscall.c */
42 
43 #ifdef TRAP_DEBUG
44 void	dumpframe(struct trapframe *);
45 #endif
46 
/*
 * trap --
 *	Common trap/exception dispatcher.  Called with a trapframe
 *	describing the interrupted context; dispatches on frame->exc
 *	(the exception vector).  Hardware-interrupt cases return
 *	directly; user-mode traps fall through to userret() at the
 *	"out" label for signal delivery and AST processing.
 */
void
trap(struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct proc *p = curproc;
	int type = frame->exc;
	union sigval sv;
	struct vm_map *map;
	struct vm_map_entry *entry;
	pmap_t pm;
	vaddr_t va;
	int access_type;
	int error, sig, code;

	/* Disable access to floating-point and vector registers. */
	mtmsr(mfmsr() & ~(PSL_FPU|PSL_VEC|PSL_VSX));

	/*
	 * Hardware interrupts (decrementer, external, hypervisor
	 * virtualization) are handled right away, bracketed by the
	 * per-CPU interrupt depth counter, and return without going
	 * through userret().
	 */
	switch (type) {
	case EXC_DECR:
		uvmexp.intrs++;
		ci->ci_idepth++;
		decr_intr(frame);
		ci->ci_idepth--;
		return;
	case EXC_EXI:
		uvmexp.intrs++;
		ci->ci_idepth++;
		exi_intr(frame);
		ci->ci_idepth--;
		return;
	case EXC_HVI:
		uvmexp.intrs++;
		ci->ci_idepth++;
		hvi_intr(frame);
		ci->ci_idepth--;
		return;
	case EXC_SC:
		uvmexp.syscalls++;
		break;
	default:
		uvmexp.traps++;
		break;
	}

	/* Re-enable interrupts if the interrupted context had them on. */
	if (frame->srr1 & PSL_EE)
		intr_enable();

	/*
	 * Trap taken from user mode (PSL_PR set): tag the type with
	 * EXC_USER so the switch below selects the user-mode handling,
	 * and record the frame for signal delivery.
	 */
	if (frame->srr1 & PSL_PR) {
		type |= EXC_USER;
		p->p_md.md_regs = frame;
		refreshcreds(p);
	}

	switch (type) {
#ifdef DDB
	case EXC_PGM:
		/* At a trap instruction, enter the debugger. */
		if (frame->srr1 & EXC_PGM_TRAP) {
			/* Return from db_enter(). */
			if (frame->srr0 == (register_t)db_enter)
				frame->srr0 = frame->lr;
			db_ktrap(T_BREAKPOINT, frame);
			return;
		}
		goto fatal;
	case EXC_TRC:
		db_ktrap(T_BREAKPOINT, frame); /* single-stepping */
		return;
#endif

	case EXC_DSI:
		/*
		 * Kernel-mode data storage interrupt (page fault).
		 * A fault on the user segment while pcb_onfault is set
		 * (NOTE(review): presumably a copyin/copyout-style
		 * access — confirm against the pmap/copy code that
		 * sets pcb_userva) is redirected to the process's map
		 * at the saved user address.
		 */
		map = kernel_map;
		va = frame->dar;
		if (curpcb->pcb_onfault &&
		    (va >> ADDR_ESID_SHIFT) == USER_ESID) {
			map = &p->p_vmspace->vm_map;
			va = curpcb->pcb_userva | (va & SEGMENT_MASK);
		}
		if (frame->dsisr & DSISR_STORE)
			access_type = PROT_WRITE;
		else
			access_type = PROT_READ;
		error = uvm_fault(map, trunc_page(va), 0, access_type);
		if (error == 0)
			return;

		/* Unresolvable fault: resume at the recovery handler. */
		if (curpcb->pcb_onfault) {
			frame->srr0 = curpcb->pcb_onfault;
			return;
		}

		printf("dar 0x%lx dsisr 0x%lx\n", frame->dar, frame->dsisr);
		goto fatal;

	case EXC_DSE:
		/*
		 * If we sleep while handling a fault, we may lose our
		 * SLB entry.  Enter it again.
		 */
		va = frame->dar;
		if (curpcb->pcb_onfault &&
		    (va >> ADDR_ESID_SHIFT) == USER_ESID) {
			map = &p->p_vmspace->vm_map;
			va = curpcb->pcb_userva | (va & SEGMENT_MASK);
			if (pmap_set_user_slb(map->pmap, va, NULL, NULL) == 0)
				return;
		}

		/* Can't re-enter the SLB entry: resume at the handler. */
		if (curpcb->pcb_onfault) {
			frame->srr0 = curpcb->pcb_onfault;
			return;
		}

		printf("dar 0x%lx dsisr 0x%lx\n", frame->dar, frame->dsisr);
		goto fatal;

	case EXC_ALI:
	{
		/*
		 * In general POWER allows unaligned loads and stores
		 * and executes those instructions in an efficient
		 * way.  As a result compilers may combine word-sized
		 * stores into a single doubleword store instruction
		 * even if the address is not guaranteed to be
		 * doubleword aligned.  Such unaligned stores are not
		 * supported in storage that is Caching Inibited.
		 * Access to such storage should be done through
		 * volatile pointers which inhibit the aforementioned
		 * optimizations.  Unfortunately code in the amdgpu(4)
		 * and radeondrm(4) drivers happens to run into such
		 * unaligned access because pointers aren't always
		 * marked as volatile.  For that reason we emulate
		 * certain store instructions here.
		 */
		uint32_t insn = *(uint32_t *)frame->srr0;

		/* std and stdu */
		if ((insn & 0xfc000002) == 0xf8000000) {
			/* Decode the DS-form fields: RS, RA, DS. */
			uint32_t rs = (insn >> 21) & 0x1f;
			uint32_t ra = (insn >> 16) & 0x1f;
			uint64_t ds = insn & 0xfffc;
			uint64_t ea;

			if ((insn & 0x00000001) == 0 && ra == 0)
				panic("invalid stdu instruction form");

			if (ds & 0x8000)
				ds |= ~0x7fff; /* sign extend */
			if (ra == 0)
				ea = ds;
			else
				ea = frame->fixreg[ra] + ds;

			/*
			 * If the effective address isn't 32-bit
			 * aligned, or if data access cannot be
			 * performed because of the access violates
			 * storage protection, this will trigger
			 * another trap, which we can handle.
			 */
			*(volatile uint32_t *)ea = frame->fixreg[rs] >> 32;
			*(volatile uint32_t *)(ea + 4) = frame->fixreg[rs];
			/* stdu updates RA with the effective address. */
			if (insn & 0x00000001)
				frame->fixreg[ra] = ea;
			/* Skip the emulated instruction. */
			frame->srr0 += 4;
			return;
		}
		printf("dar 0x%lx dsisr 0x%lx\n", frame->dar, frame->dsisr);
		goto fatal;
	}

	case EXC_DSE|EXC_USER:
		/* Let pmap try to resolve the segment fault first. */
		pm = p->p_vmspace->vm_map.pmap;
		error = pmap_slbd_fault(pm, frame->dar);
		if (error == 0)
			break;

		/* The stack pointer must lie within a MAP_STACK entry. */
		if (!uvm_map_inentry(p, &p->p_spinentry, PROC_STACK(p),
		    "[%s]%d/%d sp=%lx inside %lx-%lx: not MAP_STACK\n",
		    uvm_map_inentry_sp, p->p_vmspace->vm_map.sserial))
			goto out;

		/*
		 * Unfortunately, the hardware doesn't tell us whether
		 * this was a read or a write fault.  So we check
		 * whether there is a mapping at the fault address and
		 * insert a new SLB entry.  Executing the faulting
		 * instruction again should result in a Data Storage
		 * Interrupt that does indicate whether we're dealing
		 * with a read or a write fault.
		 */
		map = &p->p_vmspace->vm_map;
		vm_map_lock_read(map);
		if (uvm_map_lookup_entry(map, frame->dar, &entry))
			error = pmap_slbd_enter(pm, frame->dar);
		else
			error = EFAULT;
		vm_map_unlock_read(map);
		if (error) {
			sv.sival_ptr = (void *)frame->dar;
			trapsignal(p, SIGSEGV, 0, SEGV_MAPERR, sv);
		}
		break;

	case EXC_DSI|EXC_USER:
		/* The stack pointer must lie within a MAP_STACK entry. */
		if (!uvm_map_inentry(p, &p->p_spinentry, PROC_STACK(p),
		    "[%s]%d/%d sp=%lx inside %lx-%lx: not MAP_STACK\n",
		    uvm_map_inentry_sp, p->p_vmspace->vm_map.sserial))
			goto out;

		map = &p->p_vmspace->vm_map;
		va = frame->dar;
		if (frame->dsisr & DSISR_STORE)
			access_type = PROT_WRITE;
		else
			access_type = PROT_READ;
		error = uvm_fault(map, trunc_page(va), 0, access_type);
		if (error == 0)
			uvm_grow(p, va);

		if (error) {
#ifdef TRAP_DEBUG
			printf("type %x dar 0x%lx dsisr 0x%lx %s\r\n",
			    type, frame->dar, frame->dsisr, p->p_p->ps_comm);
			dumpframe(frame);
#endif

			/* Map the uvm_fault() error to a signal/code. */
			if (error == ENOMEM) {
				sig = SIGKILL;
				code = 0;
			} else if (error == EIO) {
				sig = SIGBUS;
				code = BUS_OBJERR;
			} else if (error == EACCES) {
				sig = SIGSEGV;
				code = SEGV_ACCERR;
			} else {
				sig = SIGSEGV;
				code = SEGV_MAPERR;
			}
			sv.sival_ptr = (void *)va;
			trapsignal(p, sig, 0, code, sv);
		}
		break;

	case EXC_ISE|EXC_USER:
		/* Instruction segment fault: let pmap try first. */
		pm = p->p_vmspace->vm_map.pmap;
		error = pmap_slbd_fault(pm, frame->srr0);
		if (error == 0)
			break;
		/* FALLTHROUGH */

	case EXC_ISI|EXC_USER:
		/* The stack pointer must lie within a MAP_STACK entry. */
		if (!uvm_map_inentry(p, &p->p_spinentry, PROC_STACK(p),
		    "[%s]%d/%d sp=%lx inside %lx-%lx: not MAP_STACK\n",
		    uvm_map_inentry_sp, p->p_vmspace->vm_map.sserial))
			goto out;

		/* Instruction fetch fault at srr0. */
		map = &p->p_vmspace->vm_map;
		va = frame->srr0;
		access_type = PROT_EXEC;
		error = uvm_fault(map, trunc_page(va), 0, access_type);
		if (error == 0)
			uvm_grow(p, va);

		if (error) {
#ifdef TRAP_DEBUG
			printf("type %x srr0 0x%lx %s\r\n",
			    type, frame->srr0, p->p_p->ps_comm);
			dumpframe(frame);
#endif

			/* Map the uvm_fault() error to a signal/code. */
			if (error == ENOMEM) {
				sig = SIGKILL;
				code = 0;
			} else if (error == EIO) {
				sig = SIGBUS;
				code = BUS_OBJERR;
			} else if (error == EACCES) {
				sig = SIGSEGV;
				code = SEGV_ACCERR;
			} else {
				sig = SIGSEGV;
				code = SEGV_MAPERR;
			}
			sv.sival_ptr = (void *)va;
			trapsignal(p, sig, 0, code, sv);
		}
		break;

	case EXC_SC|EXC_USER:
		/* System call; syscall() handles its own return path. */
		syscall(frame);
		return;

	case EXC_AST|EXC_USER:
		/* Asynchronous system trap: run pending MI AST work. */
		p->p_md.md_astpending = 0;
		uvmexp.softs++;
		mi_ast(p, curcpu()->ci_want_resched);
		break;

	case EXC_ALI|EXC_USER:
		/* Unaligned access from user mode: deliver SIGBUS. */
		sv.sival_ptr = (void *)frame->dar;
		trapsignal(p, SIGBUS, 0, BUS_ADRALN, sv);
		break;

	case EXC_PGM|EXC_USER:
		/*
		 * Program interrupt: distinguish FP-enabled exception,
		 * trap instruction, and privileged/illegal instruction
		 * via srr1 status bits.
		 */
		sv.sival_ptr = (void *)frame->srr0;
		if (frame->srr1 & EXC_PGM_FPENABLED)
			trapsignal(p, SIGFPE, 0, fpu_sigcode(p), sv);
		else if (frame->srr1 & EXC_PGM_TRAP)
			trapsignal(p, SIGTRAP, 0, TRAP_BRKPT, sv);
		else
			trapsignal(p, SIGILL, 0, ILL_PRVOPC, sv);
		break;

	case EXC_FPU|EXC_USER:
		/* Lazy FPU: restore state on first use and enable it. */
		if ((frame->srr1 & (PSL_FP|PSL_VEC|PSL_VSX)) == 0)
			restore_vsx(p);
		curpcb->pcb_flags |= PCB_FPU;
		frame->srr1 |= PSL_FPU;
		break;

	case EXC_TRC|EXC_USER:
		/* Single-step trace from user mode. */
		sv.sival_ptr = (void *)frame->srr0;
		trapsignal(p, SIGTRAP, 0, TRAP_TRACE, sv);
		break;

	case EXC_HEA|EXC_USER:
		/* Hypervisor emulation assistance: illegal opcode. */
		sv.sival_ptr = (void *)frame->srr0;
		trapsignal(p, SIGILL, 0, ILL_ILLOPC, sv);
		break;

	case EXC_VEC|EXC_USER:
		/* Lazy AltiVec: restore state on first use, enable. */
		if ((frame->srr1 & (PSL_FP|PSL_VEC|PSL_VSX)) == 0)
			restore_vsx(p);
		curpcb->pcb_flags |= PCB_VEC;
		frame->srr1 |= PSL_VEC;
		break;

	case EXC_VSX|EXC_USER:
		/* Lazy VSX: restore state on first use, enable. */
		if ((frame->srr1 & (PSL_FP|PSL_VEC|PSL_VSX)) == 0)
			restore_vsx(p);
		curpcb->pcb_flags |= PCB_VSX;
		frame->srr1 |= PSL_VSX;
		break;

	case EXC_FAC|EXC_USER:
		/* Facility unavailable: treat as privileged opcode. */
		sv.sival_ptr = (void *)frame->srr0;
		trapsignal(p, SIGILL, 0, ILL_PRVOPC, sv);
		break;

	default:
	fatal:
		/* Unhandled trap: enter ddb if present, then panic. */
#ifdef DDB
		db_printf("trap type %x srr1 %lx at %lx lr %lx\n",
		    type, frame->srr1, frame->srr0, frame->lr);
		db_ktrap(0, frame);
#endif
		panic("trap type %x srr1 %lx at %lx lr %lx",
		    type, frame->srr1, frame->srr0, frame->lr);
	}
out:
	userret(p);
}
411 
412 #ifdef TRAP_DEBUG
413 
414 #include <machine/opal.h>
415 
416 void
dumpframe(struct trapframe * frame)417 dumpframe(struct trapframe *frame)
418 {
419 	int i;
420 
421 	for (i = 0; i < 32; i++)
422 		opal_printf("r%d 0x%lx\r\n", i, frame->fixreg[i]);
423 	opal_printf("ctr 0x%lx\r\n", frame->ctr);
424 	opal_printf("xer 0x%lx\r\n", frame->xer);
425 	opal_printf("cr 0x%lx\r\n", frame->cr);
426 	opal_printf("lr 0x%lx\r\n", frame->lr);
427 	opal_printf("srr0 0x%lx\r\n", frame->srr0);
428 	opal_printf("srr1 0x%lx\r\n", frame->srr1);
429 }
430 
431 #endif
432