/* $NetBSD: trap.c,v 1.51 2024/02/18 09:03:44 andvar Exp $ */

/*-
 * Copyright (c) 2014, 2023 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.51 2024/02/18 09:03:44 andvar Exp $");

#include "opt_arm_intr_impl.h"
#include "opt_compat_netbsd32.h"
#include "opt_dtrace.h"

#include <sys/param.h>
#include <sys/kauth.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#ifdef KGDB
#include <sys/kgdb.h>
#endif
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/siginfo.h>
#include <sys/xcall.h>

#ifdef ARM_INTR_IMPL
#include ARM_INTR_IMPL
#else
#error ARM_INTR_IMPL not defined
#endif

#ifndef ARM_IRQ_HANDLER
#error ARM_IRQ_HANDLER not defined
#endif

#include <arm/cpufunc.h>

#include <aarch64/userret.h>
#include <aarch64/frame.h>
#include <aarch64/machdep.h>
#include <aarch64/armreg.h>
#include <aarch64/locore.h>

#ifdef KGDB
#include <machine/db_machdep.h>
#endif
#ifdef DDB
#include <ddb/db_output.h>
#include <machine/db_machdep.h>
#endif
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
#endif

#ifdef DDB
int sigill_debug = 0;
#endif

#ifdef KDTRACE_HOOKS
dtrace_doubletrap_func_t dtrace_doubletrap_func = NULL;
dtrace_trap_func_t dtrace_trap_func = NULL;
int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

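/*
 * Results of the instruction emulators below.  EMUL_ARM_SUCCESS: the trap
 * was fully handled (the instruction was emulated, or a signal was already
 * posted).  EMUL_ARM_UNKNOWN: the instruction was not recognized; the
 * caller sends SIGILL.  EMUL_ARM_FAULT: a memory fault occurred during
 * emulation and tf_far was set; the caller sends SIGSEGV.
 */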
enum emul_arm_result {
	EMUL_ARM_SUCCESS = 0,
	EMUL_ARM_UNKNOWN,
	EMUL_ARM_FAULT,
};

const char * const trap_names[] = {
	[ESR_EC_UNKNOWN] = "Unknown Reason (Illegal Instruction)",
	[ESR_EC_SERROR] = "SError Interrupt",
	[ESR_EC_WFX] = "WFI or WFE instruction execution",
	[ESR_EC_ILL_STATE] = "Illegal Execution State",

	[ESR_EC_BTE_A64] = "Branch Target Exception",

	[ESR_EC_SYS_REG] = "MSR/MRS/SYS instruction",
	[ESR_EC_SVC_A64] = "SVC Instruction Execution",
	[ESR_EC_HVC_A64] = "HVC Instruction Execution",
	[ESR_EC_SMC_A64] = "SMC Instruction Execution",

	[ESR_EC_INSN_ABT_EL0] = "Instruction Abort (EL0)",
	[ESR_EC_INSN_ABT_EL1] = "Instruction Abort (EL1)",
	[ESR_EC_DATA_ABT_EL0] = "Data Abort (EL0)",
	[ESR_EC_DATA_ABT_EL1] = "Data Abort (EL1)",

	[ESR_EC_PC_ALIGNMENT] = "Misaligned PC",
	[ESR_EC_SP_ALIGNMENT] = "Misaligned SP",

	[ESR_EC_FP_ACCESS] = "Access to SIMD/FP Registers",
	[ESR_EC_FP_TRAP_A64] = "FP Exception",

	[ESR_EC_BRKPNT_EL0] = "Breakpoint Exception (EL0)",
	[ESR_EC_BRKPNT_EL1] = "Breakpoint Exception (EL1)",
	[ESR_EC_SW_STEP_EL0] = "Software Step (EL0)",
	[ESR_EC_SW_STEP_EL1] = "Software Step (EL1)",
	[ESR_EC_WTCHPNT_EL0] = "Watchpoint (EL0)",
	[ESR_EC_WTCHPNT_EL1] = "Watchpoint (EL1)",
	[ESR_EC_BKPT_INSN_A64] = "BKPT Instruction Execution",

	[ESR_EC_CP15_RT] = "A32: MCR/MRC access to CP15",
	[ESR_EC_CP15_RRT] = "A32: MCRR/MRRC access to CP15",
	[ESR_EC_CP14_RT] = "A32: MCR/MRC access to CP14",
	[ESR_EC_CP14_DT] = "A32: LDC/STC access to CP14",
	[ESR_EC_CP14_RRT] = "A32: MRRC access to CP14",
	[ESR_EC_SVC_A32] = "A32: SVC Instruction Execution",
	[ESR_EC_HVC_A32] = "A32: HVC Instruction Execution",
	[ESR_EC_SMC_A32] = "A32: SMC Instruction Execution",
	[ESR_EC_FPID] = "A32: MCR/MRC access to CP10",
	[ESR_EC_FP_TRAP_A32] = "A32: FP Exception",
	[ESR_EC_BKPT_INSN_A32] = "A32: BKPT Instruction Execution",
	[ESR_EC_VECTOR_CATCH] = "A32: Vector Catch Exception"
};

const char *
eclass_trapname(uint32_t eclass)
{
	static char trapnamebuf[sizeof("Unknown trap 0x????????")];

	if (eclass >= __arraycount(trap_names) || trap_names[eclass] == NULL) {
		snprintf(trapnamebuf, sizeof(trapnamebuf),
		    "Unknown trap %#02x", eclass);
		return trapnamebuf;
	}
	return trap_names[eclass];
}

void
userret(struct lwp *l)
{
	mi_userret(l);
}

void
trap_doast(struct trapframe *tf)
{
	struct lwp * const l = curlwp;

	/*
	 * Give the LWP a chance to be context-switched just before
	 * the exception returns to user mode.
	 */
#ifdef __HAVE_PREEMPTION
	kpreempt_disable();
#endif
	struct cpu_info * const ci = curcpu();

	ci->ci_data.cpu_ntrap++;

	KDASSERT(ci->ci_cpl == IPL_NONE);
#ifdef __HAVE_PREEMPTION
	kpreempt_enable();
#endif

	if (l->l_pflag & LP_OWEUPC) {
		l->l_pflag &= ~LP_OWEUPC;
		ADDUPROF(l);
	}

	userret(l);
}

void
trap_el1h_sync(struct trapframe *tf)
{
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

	/*
	 * Re-enable traps; re-enable interrupts only if they were
	 * enabled when the exception was taken, preserving the
	 * interrupted context's IRQ/FIQ mask.
	 */
	if (!(tf->tf_spsr & SPSR_I))
		daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);
	else
		daif_enable(DAIF_D|DAIF_A);

#ifdef KDTRACE_HOOKS
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, eclass))
		return;
#endif

	switch (eclass) {
	case ESR_EC_INSN_ABT_EL1:
	case ESR_EC_DATA_ABT_EL1:
		data_abort_handler(tf, eclass);
		break;

	case ESR_EC_BKPT_INSN_A64:
#ifdef KDTRACE_HOOKS
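		/*
		 * For a BRK instruction the ESR ISS field holds the
		 * instruction's 16-bit immediate; 0x40d is the immediate
		 * expected here for dtrace invop probes.
		 */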
		if (__SHIFTOUT(esr, ESR_ISS) == 0x40d &&
		    dtrace_invop_jump_addr != 0) {
			(*dtrace_invop_jump_addr)(tf);
			break;
		}
		/* FALLTHROUGH */
#endif
	case ESR_EC_BRKPNT_EL1:
	case ESR_EC_SW_STEP_EL1:
	case ESR_EC_WTCHPNT_EL1:
#ifdef DDB
		if (eclass == ESR_EC_BRKPNT_EL1)
			kdb_trap(DB_TRAP_BREAKPOINT, tf);
		else if (eclass == ESR_EC_BKPT_INSN_A64)
			kdb_trap(DB_TRAP_BKPT_INSN, tf);
		else if (eclass == ESR_EC_WTCHPNT_EL1)
			kdb_trap(DB_TRAP_WATCHPOINT, tf);
		else if (eclass == ESR_EC_SW_STEP_EL1)
			kdb_trap(DB_TRAP_SW_STEP, tf);
		else
			kdb_trap(DB_TRAP_UNKNOWN, tf);
#else
		panic("No debugger in kernel");
#endif
		break;

	case ESR_EC_FP_ACCESS:
		if ((curlwp->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) ==
		    (LW_SYSTEM|LW_SYSTEM_FPU)) {
			fpu_load(curlwp);
			break;
		}
		/*FALLTHROUGH*/
	case ESR_EC_FP_TRAP_A64:
	case ESR_EC_PC_ALIGNMENT:
	case ESR_EC_SP_ALIGNMENT:
	case ESR_EC_ILL_STATE:
	case ESR_EC_BTE_A64:
	default:
		panic("Trap: fatal %s: pc=%016" PRIx64 " sp=%016" PRIx64
		    " esr=%08x", eclass_trapname(eclass), tf->tf_pc, tf->tf_sp,
		    esr);
		break;
	}
}

/*
 * Some systems have CPUs with different cache line sizes.  Userland
 * programs can be migrated between CPUs at any time, so on such a
 * system the minimum cache line size of all CPUs is the one that must
 * be made visible to userland.
 */
#define CTR_EL0_USR_MASK \
	(CTR_EL0_DIC | CTR_EL0_IDC | CTR_EL0_DMIN_LINE | CTR_EL0_IMIN_LINE)
uint64_t ctr_el0_usr __read_mostly;
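
/*
 * For example (illustrative only), userland cache-maintenance loops
 * typically derive their line stride from CTR_EL0 like this:
 *
 *	mrs	x1, ctr_el0
 *	ubfx	x1, x1, #16, #4		// DminLine: log2(words per line)
 *	mov	x0, #4
 *	lsl	x1, x0, x1		// stride = 4 << DminLine, in bytes
 *
 * which is why every CPU must report a DminLine/IminLine no larger than
 * the smallest line actually present in the system.
 */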

static void
configure_cpu_traps0(void *arg1, void *arg2)
{
	struct cpu_info * const ci = curcpu();
	uint64_t sctlr;
	uint64_t ctr_el0_raw = reg_ctr_el0_read();

#ifdef DEBUG_FORCE_TRAP_CTR_EL0
	goto need_ctr_trap;
#endif

	if ((__SHIFTOUT(ctr_el0_raw, CTR_EL0_DMIN_LINE) >
	    __SHIFTOUT(ctr_el0_usr, CTR_EL0_DMIN_LINE)) ||
	    (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IMIN_LINE) >
	    __SHIFTOUT(ctr_el0_usr, CTR_EL0_IMIN_LINE)))
		goto need_ctr_trap;

	if ((__SHIFTOUT(ctr_el0_raw, CTR_EL0_DIC) == 1 &&
	    __SHIFTOUT(ctr_el0_usr, CTR_EL0_DIC) == 0) ||
	    (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IDC) == 1 &&
	    __SHIFTOUT(ctr_el0_usr, CTR_EL0_IDC) == 0))
		goto need_ctr_trap;

#if 0 /* XXX: To do or not to do */
	/*
	 * IDC==0, but (LoC==0 || LoUIS==LoUU==0)?
	 * Would it be better to show IDC=1 to userland?
	 */
	if (__SHIFTOUT(ctr_el0_raw, CTR_EL0_IDC) == 0 &&
	    __SHIFTOUT(ctr_el0_usr, CTR_EL0_IDC) == 1)
		goto need_ctr_trap;
#endif

	return;

 need_ctr_trap:
	evcnt_attach_dynamic(&ci->ci_uct_trap, EVCNT_TYPE_MISC, NULL,
	    ci->ci_cpuname, "ctr_el0 trap");

	/*
	 * Trap EL0 accesses to CTR_EL0 on this cpu (clear SCTLR_EL1.UCT);
	 * the trapped reads are emulated in emul_aarch64_insn().
	 */
	sctlr = reg_sctlr_el1_read();
	sctlr &= ~SCTLR_UCT;
	reg_sctlr_el1_write(sctlr);
}

void
configure_cpu_traps(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	uint64_t where;

	/* remember minimum cache line size out of all CPUs */
	for (CPU_INFO_FOREACH(cii, ci)) {
		uint64_t ctr_el0_cpu = ci->ci_id.ac_ctr;
		uint64_t clidr = ci->ci_id.ac_clidr;

		if (__SHIFTOUT(clidr, CLIDR_LOC) == 0 ||
		    (__SHIFTOUT(clidr, CLIDR_LOUIS) == 0 &&
		    __SHIFTOUT(clidr, CLIDR_LOUU) == 0)) {
			/* this means the same as IDC=1 */
			ctr_el0_cpu |= CTR_EL0_IDC;
		}

		/*
		 * If DIC==1 there is no need to sync the icache; but for
		 * the purpose of computing the minimum cache line size,
		 * treat this CPU's IminLine as the maximum value.
		 */
		if (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_DIC) == 1)
			ctr_el0_cpu |= CTR_EL0_IMIN_LINE;

		/* Neoverse N1 erratum 1542419 */
		if (CPU_ID_NEOVERSEN1_P(ci->ci_id.ac_midr) &&
		    __SHIFTOUT(ctr_el0_cpu, CTR_EL0_DIC) == 1)
			ctr_el0_cpu &= ~CTR_EL0_DIC;

		if (cii == 0) {
			ctr_el0_usr = ctr_el0_cpu;
			continue;
		}

		/* keep minimum cache line size, and worst DIC/IDC */
		ctr_el0_usr &= (ctr_el0_cpu & CTR_EL0_DIC) | ~CTR_EL0_DIC;
		ctr_el0_usr &= (ctr_el0_cpu & CTR_EL0_IDC) | ~CTR_EL0_IDC;
		if (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_DMIN_LINE) <
		    __SHIFTOUT(ctr_el0_usr, CTR_EL0_DMIN_LINE)) {
			ctr_el0_usr &= ~CTR_EL0_DMIN_LINE;
			ctr_el0_usr |= ctr_el0_cpu & CTR_EL0_DMIN_LINE;
		}
		if ((ctr_el0_cpu & CTR_EL0_DIC) == 0 &&
		    (__SHIFTOUT(ctr_el0_cpu, CTR_EL0_IMIN_LINE) <
		    __SHIFTOUT(ctr_el0_usr, CTR_EL0_IMIN_LINE))) {
			ctr_el0_usr &= ~CTR_EL0_IMIN_LINE;
			ctr_el0_usr |= ctr_el0_cpu & CTR_EL0_IMIN_LINE;
		}
	}

	where = xc_broadcast(0, configure_cpu_traps0, NULL, NULL);
	xc_wait(where);
}

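/*
 * Emulate AArch64 instructions trapped from EL0 with ESR_EC_SYS_REG;
 * currently only "mrs Xt, ctr_el0", which traps when SCTLR_EL1.UCT is
 * cleared by configure_cpu_traps0() above.
 */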
static enum emul_arm_result
emul_aarch64_insn(struct trapframe *tf)
{
	uint32_t insn;

	if (ufetch_32((uint32_t *)tf->tf_pc, &insn)) {
		tf->tf_far = reg_far_el1_read();
		return EMUL_ARM_FAULT;
	}

	LE32TOH(insn);
	if ((insn & 0xffffffe0) == 0xd53b0020) {
		/* mrs x?,ctr_el0 */
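		/*
		 * 0xd53b0020|Rt encodes "mrs xRt, ctr_el0"
		 * (op0=3, op1=3, CRn=c0, CRm=c0, op2=1).
		 */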
		unsigned int Xt = insn & 31;
		if (Xt != 31) {	/* !xzr */
			uint64_t ctr_el0 = reg_ctr_el0_read();
			ctr_el0 &= ~CTR_EL0_USR_MASK;
			ctr_el0 |= (ctr_el0_usr & CTR_EL0_USR_MASK);
			tf->tf_reg[Xt] = ctr_el0;
		}
		curcpu()->ci_uct_trap.ev_count++;

	} else {
		return EMUL_ARM_UNKNOWN;
	}

	tf->tf_pc += 4;
	return EMUL_ARM_SUCCESS;
}

void
trap_el0_sync(struct trapframe *tf)
{
	struct lwp * const l = curlwp;
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

#ifdef DDB
	/* disable trace, and enable hardware breakpoint/watchpoint */
	reg_mdscr_el1_write(
	    (reg_mdscr_el1_read() & ~MDSCR_SS) | MDSCR_KDE);
#else
	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
#endif
	/* enable traps and interrupts */
	daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);

	switch (eclass) {
	case ESR_EC_INSN_ABT_EL0:
	case ESR_EC_DATA_ABT_EL0:
		data_abort_handler(tf, eclass);
		userret(l);
		break;

	case ESR_EC_SVC_A64:
		(*l->l_proc->p_md.md_syscall)(tf);
		break;
	case ESR_EC_FP_ACCESS:
		fpu_load(l);
		userret(l);
		break;
	case ESR_EC_FP_TRAP_A64:
		do_trapsignal(l, SIGFPE, FPE_FLTUND, NULL, esr); /* XXX */
		userret(l);
		break;

	case ESR_EC_PC_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	case ESR_EC_SP_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_sp, esr);
		userret(l);
		break;

	case ESR_EC_BKPT_INSN_A64:
	case ESR_EC_BRKPNT_EL0:
	case ESR_EC_WTCHPNT_EL0:
		do_trapsignal(l, SIGTRAP, TRAP_BRKPT, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	case ESR_EC_SW_STEP_EL0:
		/* disable trace, and send trace trap */
		tf->tf_spsr &= ~SPSR_SS;
		do_trapsignal(l, SIGTRAP, TRAP_TRACE, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_SYS_REG:
		switch (emul_aarch64_insn(tf)) {
		case EMUL_ARM_SUCCESS:
			break;
		case EMUL_ARM_UNKNOWN:
			goto unknown;
		case EMUL_ARM_FAULT:
			do_trapsignal(l, SIGSEGV, SEGV_MAPERR,
			    (void *)tf->tf_far, esr);
			break;
		}
		userret(l);
		break;

	default:
	case ESR_EC_UNKNOWN:
 unknown:
#ifdef DDB
		if (sigill_debug) {
			/* show illegal instruction */
			printf("TRAP: pid %d (%s), uid %d: %s:"
			    " esr=0x%lx: pc=0x%lx: %s\n",
			    curlwp->l_proc->p_pid, curlwp->l_proc->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1,
			    eclass_trapname(eclass), tf->tf_esr, tf->tf_pc,
			    strdisasm(tf->tf_pc, tf->tf_spsr));
		}
#endif
		/* illegal or not implemented instruction */
		do_trapsignal(l, SIGILL, ILL_ILLTRP, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	}
}

void
cpu_irq(struct trapframe *tf)
{
	struct cpu_info * const ci = curcpu();

#ifdef STACKCHECKS
	struct lwp *l = curlwp;
	void *sp = (void *)reg_sp_read();
	if (l->l_addr >= sp) {
		panic("lwp/interrupt stack overflow detected."
		    " lwp=%p, sp=%p, l_addr=%p", l, sp, l->l_addr);
	}
#endif

#ifdef DDB
	/* disable trace, and enable hardware breakpoint/watchpoint */
	reg_mdscr_el1_write(
	    (reg_mdscr_el1_read() & ~MDSCR_SS) | MDSCR_KDE);
#else
	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
#endif

	/*
	 * Prevent preemption once we enable traps, until we have
	 * finished running hard and soft interrupt handlers.  This
	 * guarantees ci = curcpu() remains stable and we don't
	 * accidentally try to run its pending soft interrupts on
	 * another CPU.
	 */
	kpreempt_disable();

	/* enable traps */
	daif_enable(DAIF_D|DAIF_A);

	/* run hard interrupt handlers */
	ci->ci_intr_depth++;
	ARM_IRQ_HANDLER(tf);
	ci->ci_intr_depth--;

	/* run soft interrupt handlers */
	cpu_dosoftints();

	/* all done, preempt as you please */
	kpreempt_enable();
}

void
cpu_fiq(struct trapframe *tf)
{
	struct cpu_info * const ci = curcpu();

#ifdef STACKCHECKS
	struct lwp *l = curlwp;
	void *sp = (void *)reg_sp_read();
	if (l->l_addr >= sp) {
		panic("lwp/interrupt stack overflow detected."
		    " lwp=%p, sp=%p, l_addr=%p", l, sp, l->l_addr);
	}
#endif

	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);

	/*
	 * Prevent preemption once we enable traps, until we have
	 * finished running hard and soft interrupt handlers.  This
	 * guarantees ci = curcpu() remains stable and we don't
	 * accidentally try to run its pending soft interrupts on
	 * another CPU.
	 */
	kpreempt_disable();

	/* enable traps */
	daif_enable(DAIF_D|DAIF_A);

	/* run hard interrupt handlers */
	ci->ci_intr_depth++;
	ARM_FIQ_HANDLER(tf);
	ci->ci_intr_depth--;

	/* run soft interrupt handlers */
	cpu_dosoftints();

	/* all done, preempt as you please */
	kpreempt_enable();
}

#ifdef COMPAT_NETBSD32

/*
 * Does this halfword begin a 32-bit Thumb instruction?
 * See ARMv7 ARM, DDI0406A, A6.3.
 */
#define THUMB_32BIT(hi) (((hi) & 0xe000) == 0xe000 && ((hi) & 0x1800))
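/*
 * For example, 0xf000 (the first halfword of a BL) is 32-bit, while
 * 0xdefe (the permanently undefined encoding GDB uses as a breakpoint;
 * see emul_thumb_insn() below) is 16-bit.
 */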

int
fetch_arm_insn(uint64_t pc, uint64_t spsr, uint32_t *insn)
{
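	/*
	 * Returns the instruction's size in bytes (2 or 4) with the
	 * instruction stored in *insn, or -1 if the fetch from user
	 * memory faulted.
	 */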

	/*
	 * Instructions are stored little-endian in BE8, the only valid
	 * binary format for ILP32EB.  Therefore they must be byte-swapped
	 * before decoding on aarch64eb.
	 */

	/* THUMB? */
	if (spsr & SPSR_A32_T) {
		uint16_t *p = (uint16_t *)(pc & ~1UL); /* XXX */
		uint16_t hi, lo;

		if (ufetch_16(p, &hi))
			return -1;
		LE16TOH(hi);

		if (!THUMB_32BIT(hi)) {
			/* 16-bit Thumb instruction */
			*insn = hi;
			return 2;
		}

		/* 32-bit Thumb instruction */
		if (ufetch_16(p + 1, &lo))
			return -1;
		LE16TOH(lo);

		*insn = ((uint32_t)hi << 16) | lo;
		return 4;
	}

	if (ufetch_32((uint32_t *)pc, insn))
		return -1;
	LE32TOH(*insn);

	return 4;
}

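/*
 * Evaluate an A32 condition code field (insn[31:28]) against the NZCV
 * flags in spsr: bits [31:29] select the base condition and bit 28
 * inverts the result.
 */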
static bool
arm_cond_match(uint32_t insn, uint64_t spsr)
{
	bool invert = (insn >> 28) & 1;
	bool match;

	switch (insn >> 29) {
	case 0:	/* EQ or NE */
		match = spsr & SPSR_Z;
		break;
	case 1:	/* CS/HS or CC/LO */
		match = spsr & SPSR_C;
		break;
	case 2:	/* MI or PL */
		match = spsr & SPSR_N;
		break;
	case 3:	/* VS or VC */
		match = spsr & SPSR_V;
		break;
	case 4:	/* HI or LS */
		match = ((spsr & (SPSR_C | SPSR_Z)) == SPSR_C);
		break;
	case 5:	/* GE or LT */
		match = (!(spsr & SPSR_N) == !(spsr & SPSR_V));
		break;
	case 6:	/* GT or LE */
		match = !(spsr & SPSR_Z) &&
		    (!(spsr & SPSR_N) == !(spsr & SPSR_V));
		break;
	case 7:	/* AL */
		match = true;
		break;
	}
	return (!match != !invert);
}

uint8_t atomic_swap_8(volatile uint8_t *, uint8_t);

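/*
 * Emulate SWP/SWPB, which were removed from the instruction set in
 * ARMv8, using an atomic swap on the user address.
 */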
static int
emul_arm_swp(uint32_t insn, struct trapframe *tf)
{
	struct faultbuf fb;
	vaddr_t vaddr;
	uint32_t val;
	int Rn, Rd, Rm, error;

	Rn = __SHIFTOUT(insn, 0x000f0000);
	Rd = __SHIFTOUT(insn, 0x0000f000);
	Rm = __SHIFTOUT(insn, 0x0000000f);

	vaddr = tf->tf_reg[Rn] & 0xffffffff;
	val = tf->tf_reg[Rm];

	/* fault if the insn is swp (not swpb) and the access is unaligned */
	if ((insn & 0x00400000) == 0 && (vaddr & 3) != 0) {
		tf->tf_far = vaddr;
		return EFAULT;
	}

	/* vaddr always points into userspace, since it is only 32 bits */
	if ((error = cpu_set_onfault(&fb)) == 0) {
		if (aarch64_pan_enabled)
			reg_pan_write(0); /* disable PAN */
		if (insn & 0x00400000) {
			/* swpb */
			val = atomic_swap_8((uint8_t *)vaddr, val);
		} else {
			/* swp */
			val = atomic_swap_32((uint32_t *)vaddr, val);
		}
		cpu_unset_onfault();
		tf->tf_reg[Rd] = val;
	} else {
		tf->tf_far = reg_far_el1_read();
	}
	if (aarch64_pan_enabled)
		reg_pan_write(1); /* enable PAN */
	return error;
}

static enum emul_arm_result
emul_thumb_insn(struct trapframe *tf, uint32_t insn, int insn_size)
{
	/* T32 16-bit or 32-bit instructions */
	switch (insn_size) {
	case 2:
		/* Breakpoint used by GDB */
		if (insn == 0xdefe) {
			do_trapsignal(curlwp, SIGTRAP, TRAP_BRKPT,
			    (void *)tf->tf_pc, 0);
			return EMUL_ARM_SUCCESS;
		}
		/* XXX: some deprecated T32 IT instructions should be emulated */
		break;
	case 4:
		break;
	default:
		return EMUL_ARM_FAULT;
	}
	return EMUL_ARM_UNKNOWN;
}

static enum emul_arm_result
emul_arm_insn(struct trapframe *tf)
{
	uint32_t insn;
	int insn_size;

	insn_size = fetch_arm_insn(tf->tf_pc, tf->tf_spsr, &insn);
	tf->tf_far = reg_far_el1_read();

	if (tf->tf_spsr & SPSR_A32_T)
		return emul_thumb_insn(tf, insn, insn_size);
	if (insn_size != 4)
		return EMUL_ARM_FAULT;

	/* Breakpoint used by GDB */
	if (insn == 0xe6000011 || insn == 0xe7ffdefe) {
		do_trapsignal(curlwp, SIGTRAP, TRAP_BRKPT,
		    (void *)tf->tf_pc, 0);
		return EMUL_ARM_SUCCESS;
	}

	/* Unconditional instruction extension space? */
	if ((insn & 0xf0000000) == 0xf0000000)
		goto unknown_insn;

	/* swp,swpb */
	if ((insn & 0x0fb00ff0) == 0x01000090) {
		if (arm_cond_match(insn, tf->tf_spsr)) {
			if (emul_arm_swp(insn, tf) != 0)
				return EMUL_ARM_FAULT;
		}
		goto emulated;
	}

	/*
	 * Emulate the ARMv6 cache-operation instructions (CP15
	 * register c7) that are usable from user mode.
	 */
	switch (insn & 0x0fff0fff) {
	case 0x0e070f95:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c5, 4
			 * (flush prefetch buffer)
			 */
			isb();
		}
		goto emulated;
	case 0x0e070f9a:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c10, 4
			 * (data synchronization barrier)
			 */
			dsb(sy);
		}
		goto emulated;
	case 0x0e070fba:
		if (arm_cond_match(insn, tf->tf_spsr)) {
			/*
			 * mcr p15, 0, <Rd>, c7, c10, 5
			 * (data memory barrier)
			 */
			dmb(sy);
		}
		goto emulated;
	default:
		break;
	}

 unknown_insn:
	/* unknown, or unsupported instruction */
	return EMUL_ARM_UNKNOWN;

 emulated:
	tf->tf_pc += insn_size;
	return EMUL_ARM_SUCCESS;
}
#endif /* COMPAT_NETBSD32 */

void
trap_el0_32sync(struct trapframe *tf)
{
	struct lwp * const l = curlwp;
	const uint32_t esr = tf->tf_esr;
	const uint32_t eclass = __SHIFTOUT(esr, ESR_EC); /* exception class */

#ifdef DDB
	/* disable trace, and enable hardware breakpoint/watchpoint */
	reg_mdscr_el1_write(
	    (reg_mdscr_el1_read() & ~MDSCR_SS) | MDSCR_KDE);
#else
	/* disable trace */
	reg_mdscr_el1_write(reg_mdscr_el1_read() & ~MDSCR_SS);
#endif
	/* enable traps and interrupts */
	daif_enable(DAIF_D|DAIF_A|DAIF_I|DAIF_F);

	switch (eclass) {
#ifdef COMPAT_NETBSD32
	case ESR_EC_INSN_ABT_EL0:
	case ESR_EC_DATA_ABT_EL0:
		data_abort_handler(tf, eclass);
		userret(l);
		break;

	case ESR_EC_SVC_A32:
		(*l->l_proc->p_md.md_syscall)(tf);
		break;

	case ESR_EC_FP_ACCESS:
		fpu_load(l);
		userret(l);
		break;

	case ESR_EC_FP_TRAP_A32:
		do_trapsignal(l, SIGFPE, FPE_FLTUND, NULL, esr); /* XXX */
		userret(l);
		break;

	case ESR_EC_PC_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_SP_ALIGNMENT:
		do_trapsignal(l, SIGBUS, BUS_ADRALN,
		    (void *)tf->tf_reg[13], esr); /* sp is r13 on AArch32 */
		userret(l);
		break;

	case ESR_EC_BKPT_INSN_A32:
		do_trapsignal(l, SIGTRAP, TRAP_BRKPT, (void *)tf->tf_pc, esr);
		userret(l);
		break;

	case ESR_EC_UNKNOWN:
		switch (emul_arm_insn(tf)) {
		case EMUL_ARM_SUCCESS:
			break;
		case EMUL_ARM_UNKNOWN:
			goto unknown;
		case EMUL_ARM_FAULT:
			do_trapsignal(l, SIGSEGV, SEGV_MAPERR,
			    (void *)tf->tf_far, esr);
			break;
		}
		userret(l);
		break;

	case ESR_EC_CP15_RT:
	case ESR_EC_CP15_RRT:
	case ESR_EC_CP14_RT:
	case ESR_EC_CP14_DT:
	case ESR_EC_CP14_RRT:
 unknown:
#endif /* COMPAT_NETBSD32 */
	default:
#ifdef DDB
		if (sigill_debug) {
			/* show illegal instruction */
			printf("TRAP: pid %d (%s), uid %d: %s:"
			    " esr=0x%lx: pc=0x%lx: %s\n",
			    curlwp->l_proc->p_pid, curlwp->l_proc->p_comm,
			    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1,
			    eclass_trapname(eclass), tf->tf_esr, tf->tf_pc,
			    strdisasm(tf->tf_pc, tf->tf_spsr));
		}
#endif
		/* illegal or not implemented instruction */
		do_trapsignal(l, SIGILL, ILL_ILLTRP, (void *)tf->tf_pc, esr);
		userret(l);
		break;
	}
}

void
trap_el1h_error(struct trapframe *tf)
{
	/*
	 * Normally we should panic unconditionally, but an SError
	 * interrupt may occur on access to unmapped(?) I/O space, and
	 * bus_space_{peek,poke}_{1,2,4,8}() must be able to catch that
	 * case, so try an onfault recovery first.
	 */
	struct faultbuf *fb;

	if (curcpu()->ci_intr_depth == 0) {
		fb = cpu_disable_onfault();
		if (fb != NULL) {
			cpu_jump_onfault(tf, fb, EFAULT);
			return;
		}
	}
	panic("%s", __func__);
}

#define bad_trap_panic(trapfunc) \
void \
trapfunc(struct trapframe *tf) \
{ \
	panic("%s", __func__); \
}
bad_trap_panic(trap_el1t_sync)
bad_trap_panic(trap_el1t_irq)
bad_trap_panic(trap_el1t_fiq)
bad_trap_panic(trap_el1t_error)
bad_trap_panic(trap_el1h_fiq)
bad_trap_panic(trap_el0_fiq)
bad_trap_panic(trap_el0_error)
bad_trap_panic(trap_el0_32fiq)
bad_trap_panic(trap_el0_32error)

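/*
 * Resume at the recovery point recorded by cpu_set_onfault(): restore
 * the callee-saved registers, sp and lr from the faultbuf, so that the
 * interrupted cpu_set_onfault() call appears to return val.
 */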
void
cpu_jump_onfault(struct trapframe *tf, const struct faultbuf *fb, int val)
{
	tf->tf_reg[19] = fb->fb_reg[FB_X19];
	tf->tf_reg[20] = fb->fb_reg[FB_X20];
	tf->tf_reg[21] = fb->fb_reg[FB_X21];
	tf->tf_reg[22] = fb->fb_reg[FB_X22];
	tf->tf_reg[23] = fb->fb_reg[FB_X23];
	tf->tf_reg[24] = fb->fb_reg[FB_X24];
	tf->tf_reg[25] = fb->fb_reg[FB_X25];
	tf->tf_reg[26] = fb->fb_reg[FB_X26];
	tf->tf_reg[27] = fb->fb_reg[FB_X27];
	tf->tf_reg[28] = fb->fb_reg[FB_X28];
	tf->tf_reg[29] = fb->fb_reg[FB_X29];
	tf->tf_sp = fb->fb_reg[FB_SP];
	tf->tf_pc = fb->fb_reg[FB_LR];
	tf->tf_reg[0] = val;
}

#ifdef TRAP_SIGDEBUG
static void
frame_dump(const struct trapframe *tf)
{
	const struct reg *r = &tf->tf_regs;

	printf("trapframe %p\n", tf);
	for (size_t i = 0; i < __arraycount(r->r_reg); i++) {
		printf(" r%.2zu %#018" PRIx64 "%c", i, r->r_reg[i],
		    " \n"[i && (i & 1) == 0]);
	}

	printf("\n");
	printf(" sp %#018" PRIx64 " pc %#018" PRIx64 "\n",
	    r->r_sp, r->r_pc);
	printf(" spsr %#018" PRIx64 " tpidr %#018" PRIx64 "\n",
	    r->r_spsr, r->r_tpidr);
	printf(" esr %#018" PRIx64 " far %#018" PRIx64 "\n",
	    tf->tf_esr, tf->tf_far);

	printf("\n");
	hexdump(printf, "Stack dump", tf, 256);
}

static void
sigdebug(const struct trapframe *tf, const ksiginfo_t *ksi)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	const uint32_t eclass = __SHIFTOUT(ksi->ksi_trap, ESR_EC);

	printf("pid %d.%d (%s): signal %d (trap %#x) "
	    "@pc %#" PRIx64 ", addr %p, error=%s\n",
	    p->p_pid, l->l_lid, p->p_comm, ksi->ksi_signo, ksi->ksi_trap,
	    tf->tf_regs.r_pc, ksi->ksi_addr, eclass_trapname(eclass));
	frame_dump(tf);
}
#endif

void
do_trapsignal1(
#ifdef TRAP_SIGDEBUG
    const char *func,
    size_t line,
    struct trapframe *tf,
#endif
    struct lwp *l, int signo, int code, void *addr, int trap)
{
	ksiginfo_t ksi;

	KSI_INIT_TRAP(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = code;
	ksi.ksi_addr = addr;
	ksi.ksi_trap = trap;
#ifdef TRAP_SIGDEBUG
	printf("%s, %zu: ", func, line);
	sigdebug(tf, &ksi);
#endif
	(*l->l_proc->p_emul->e_trapsignal)(l, &ksi);
}

bool
cpu_intr_p(void)
{
	int idepth;
	long pctr;
	lwp_t *l;

#ifdef __HAVE_PIC_FAST_SOFTINTS
	/* XXX Copied from cpu.h.  Looks incomplete - needs fixing. */
	if (ci->ci_cpl < IPL_VM)
		return false;
#endif

	l = curlwp;
	if (__predict_false(l->l_cpu == NULL)) {
		KASSERT(l == &lwp0);
		return false;
	}
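	/*
	 * Take a consistent snapshot of the interrupt depth:
	 * lwp_pctr() changes whenever this LWP is preempted or
	 * migrated, so if it is unchanged across the read, the
	 * idepth we read belongs to the CPU we were actually on.
	 */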
	do {
		pctr = lwp_pctr();
		idepth = l->l_cpu->ci_intr_depth;
	} while (__predict_false(pctr != lwp_pctr()));

	return idepth > 0;
}