1 /* $NetBSD: fault.c,v 1.103 2015/03/02 13:36:36 martin Exp $ */ 2 3 /* 4 * Copyright 2003 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Steve C. Woodford for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 /* 38 * Copyright (c) 1994-1997 Mark Brinicombe. 39 * Copyright (c) 1994 Brini. 40 * All rights reserved. 41 * 42 * This code is derived from software written for Brini by Mark Brinicombe 43 * 44 * Redistribution and use in source and binary forms, with or without 45 * modification, are permitted provided that the following conditions 46 * are met: 47 * 1. Redistributions of source code must retain the above copyright 48 * notice, this list of conditions and the following disclaimer. 49 * 2. Redistributions in binary form must reproduce the above copyright 50 * notice, this list of conditions and the following disclaimer in the 51 * documentation and/or other materials provided with the distribution. 52 * 3. All advertising materials mentioning features or use of this software 53 * must display the following acknowledgement: 54 * This product includes software developed by Brini. 55 * 4. The name of the company nor the name of the author may be used to 56 * endorse or promote products derived from this software without specific 57 * prior written permission. 58 * 59 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED 60 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 61 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 62 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 63 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 64 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 65 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 69 * SUCH DAMAGE. 70 * 71 * RiscBSD kernel project 72 * 73 * fault.c 74 * 75 * Fault handlers 76 * 77 * Created : 28/11/94 78 */ 79 80 #include "opt_ddb.h" 81 #include "opt_kgdb.h" 82 83 #include <sys/types.h> 84 __KERNEL_RCSID(0, "$NetBSD: fault.c,v 1.103 2015/03/02 13:36:36 martin Exp $"); 85 86 #include <sys/param.h> 87 #include <sys/systm.h> 88 #include <sys/proc.h> 89 #include <sys/kernel.h> 90 #include <sys/kauth.h> 91 #include <sys/cpu.h> 92 #include <sys/intr.h> 93 94 #include <uvm/uvm_extern.h> 95 #include <uvm/uvm_stat.h> 96 #ifdef UVMHIST 97 #include <uvm/uvm.h> 98 #endif 99 100 #include <arm/locore.h> 101 102 #include <machine/pcb.h> 103 #if defined(DDB) || defined(KGDB) 104 #include <machine/db_machdep.h> 105 #ifdef KGDB 106 #include <sys/kgdb.h> 107 #endif 108 #if !defined(DDB) 109 #define kdb_trap kgdb_trap 110 #endif 111 #endif 112 113 #include <arch/arm/arm/disassem.h> 114 #include <arm/arm32/machdep.h> 115 116 extern char fusubailout[]; 117 118 #ifdef DEBUG 119 int last_fault_code; /* For the benefit of pmap_fault_fixup() */ 120 #endif 121 122 #if defined(CPU_ARM3) || defined(CPU_ARM6) || \ 123 defined(CPU_ARM7) || defined(CPU_ARM7TDMI) 124 /* These CPUs may need data/prefetch abort fixups */ 125 #define CPU_ABORT_FIXUP_REQUIRED 126 #endif 127 128 struct data_abort { 129 int (*func)(trapframe_t *, u_int, u_int, struct lwp *, ksiginfo_t *); 130 const char *desc; 131 }; 132 133 static int dab_fatal(trapframe_t *, u_int, u_int, struct lwp *, ksiginfo_t *); 134 static int dab_align(trapframe_t *, u_int, u_int, struct lwp *, ksiginfo_t *); 135 static int dab_buserr(trapframe_t *, u_int, u_int, struct lwp *, ksiginfo_t *); 136 137 static const struct data_abort data_aborts[] = { 138 {dab_fatal, "Vector Exception"}, 139 {dab_align, "Alignment Fault 1"}, 140 {dab_fatal, "Terminal Exception"}, 141 {dab_align, "Alignment Fault 3"}, 142 {dab_buserr, "External Linefetch Abort (S)"}, 143 {NULL, "Translation Fault (S)"}, 144 {dab_buserr, "External Linefetch Abort (P)"}, 145 {NULL, "Translation Fault (P)"}, 146 {dab_buserr, "External Non-Linefetch Abort (S)"}, 147 {NULL, "Domain Fault (S)"}, 148 {dab_buserr, "External Non-Linefetch Abort (P)"}, 149 {NULL, "Domain Fault (P)"}, 150 {dab_buserr, "External Translation Abort (L1)"}, 151 {NULL, "Permission Fault (S)"}, 152 {dab_buserr, "External Translation Abort (L2)"}, 153 {NULL, "Permission Fault (P)"} 154 }; 155 156 /* Determine if 'x' is a permission fault */ 157 #define IS_PERMISSION_FAULT(x) \ 158 (((1 << ((x) & FAULT_TYPE_MASK)) & \ 159 ((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0) 160 161 #if 0 162 /* maybe one day we'll do emulations */ 163 #define TRAPSIGNAL(l,k) (*(l)->l_proc->p_emul->e_trapsignal)((l), (k)) 164 #else 165 #define TRAPSIGNAL(l,k) trapsignal((l), (k)) 166 #endif 167 168 static inline void 169 call_trapsignal(struct lwp *l, const struct trapframe *tf, ksiginfo_t *ksi) 170 { 171 if (l->l_proc->p_pid == 1 || cpu_printfataltraps) { 172 printf("%d.%d(%s): trap: signo=%d code=%d addr=%p trap=%#x\n", 173 l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, 174 ksi->ksi_signo, ksi->ksi_code, ksi->ksi_addr, 175 ksi->ksi_trap); 176 printf("r0=%08x r1=%08x r2=%08x r3=%08x\n", 177 tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3); 178 printf("r4=%08x r5=%08x r6=%08x r7=%08x\n", 179 tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7); 180 printf("r8=%08x r9=%08x rA=%08x rB=%08x\n", 181 tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11); 182 printf("ip=%08x sp=%08x lr=%08x pc=%08x spsr=%08x\n", 183 tf->tf_r12, tf->tf_usr_sp, tf->tf_usr_lr, tf->tf_pc, 184 tf->tf_spsr); 185 } 186 187 TRAPSIGNAL(l, ksi); 188 } 189 190 static inline int 191 data_abort_fixup(trapframe_t *tf, u_int fsr, u_int far, struct lwp *l) 192 { 193 #ifdef CPU_ABORT_FIXUP_REQUIRED 194 int error; 195 196 /* Call the CPU specific data abort fixup routine */ 197 error = cpu_dataabt_fixup(tf); 198 if (__predict_true(error != ABORT_FIXUP_FAILED)) 199 return (error); 200 201 /* 202 * Oops, couldn't fix up the instruction 203 */ 204 printf("%s: fixup for %s mode data abort failed.\n", __func__, 205 TRAP_USERMODE(tf) ? "user" : "kernel"); 206 #ifdef THUMB_CODE 207 if (tf->tf_spsr & PSR_T_bit) { 208 printf("pc = 0x%08x, opcode 0x%04x, 0x%04x, insn = ", 209 tf->tf_pc, *((uint16 *)(tf->tf_pc & ~1)), 210 *((uint16 *)((tf->tf_pc + 2) & ~1))); 211 } 212 else 213 #endif 214 { 215 printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc, 216 *((u_int *)tf->tf_pc)); 217 } 218 disassemble(tf->tf_pc); 219 220 /* Die now if this happened in kernel mode */ 221 if (!TRAP_USERMODE(tf)) 222 dab_fatal(tf, fsr, far, l, NULL); 223 224 return (error); 225 #else 226 return (ABORT_FIXUP_OK); 227 #endif /* CPU_ABORT_FIXUP_REQUIRED */ 228 } 229 230 void 231 data_abort_handler(trapframe_t *tf) 232 { 233 struct vm_map *map; 234 struct lwp * const l = curlwp; 235 struct cpu_info * const ci = curcpu(); 236 u_int far, fsr; 237 vm_prot_t ftype; 238 void *onfault; 239 vaddr_t va; 240 int error; 241 ksiginfo_t ksi; 242 243 UVMHIST_FUNC(__func__); 244 UVMHIST_CALLED(maphist); 245 246 /* Grab FAR/FSR before enabling interrupts */ 247 far = cpu_faultaddress(); 248 fsr = cpu_faultstatus(); 249 250 /* Update vmmeter statistics */ 251 ci->ci_data.cpu_ntrap++; 252 253 /* Re-enable interrupts if they were enabled previously */ 254 KASSERT(!TRAP_USERMODE(tf) || VALID_R15_PSR(tf->tf_pc, tf->tf_spsr)); 255 #ifdef __NO_FIQ 256 if (__predict_true((tf->tf_spsr & I32_bit) != I32_bit)) 257 restore_interrupts(tf->tf_spsr & IF32_bits); 258 #else 259 if (__predict_true((tf->tf_spsr & IF32_bits) != IF32_bits)) 260 restore_interrupts(tf->tf_spsr & IF32_bits); 261 #endif 262 263 /* Get the current lwp structure */ 264 265 UVMHIST_LOG(maphist, " (l=%#x, far=%#x, fsr=%#x", 266 l, far, fsr, 0); 267 UVMHIST_LOG(maphist, " tf=%#x, pc=%#x)", 268 tf, tf->tf_pc, 0, 0); 269 270 /* Data abort came from user mode? */ 271 bool user = (TRAP_USERMODE(tf) != 0); 272 if (user) 273 LWP_CACHE_CREDS(l, l->l_proc); 274 275 /* Grab the current pcb */ 276 struct pcb * const pcb = lwp_getpcb(l); 277 278 curcpu()->ci_abt_evs[fsr & FAULT_TYPE_MASK].ev_count++; 279 280 /* Invoke the appropriate handler, if necessary */ 281 if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) { 282 #ifdef DIAGNOSTIC 283 printf("%s: data_aborts fsr=0x%x far=0x%x\n", 284 __func__, fsr, far); 285 #endif 286 if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far, 287 l, &ksi)) 288 goto do_trapsignal; 289 goto out; 290 } 291 292 /* 293 * At this point, we're dealing with one of the following data aborts: 294 * 295 * FAULT_TRANS_S - Translation -- Section 296 * FAULT_TRANS_P - Translation -- Page 297 * FAULT_DOMAIN_S - Domain -- Section 298 * FAULT_DOMAIN_P - Domain -- Page 299 * FAULT_PERM_S - Permission -- Section 300 * FAULT_PERM_P - Permission -- Page 301 * 302 * These are the main virtual memory-related faults signalled by 303 * the MMU. 304 */ 305 306 /* fusubailout is used by [fs]uswintr to avoid page faulting */ 307 if (__predict_false(pcb->pcb_onfault == fusubailout)) { 308 tf->tf_r0 = EFAULT; 309 tf->tf_pc = (intptr_t) pcb->pcb_onfault; 310 return; 311 } 312 313 if (user) { 314 lwp_settrapframe(l, tf); 315 } 316 317 /* 318 * Make sure the Program Counter is sane. We could fall foul of 319 * someone executing Thumb code, in which case the PC might not 320 * be word-aligned. This would cause a kernel alignment fault 321 * further down if we have to decode the current instruction. 322 */ 323 #ifdef THUMB_CODE 324 /* 325 * XXX: It would be nice to be able to support Thumb in the kernel 326 * at some point. 327 */ 328 if (__predict_false(!user && (tf->tf_pc & 3) != 0)) { 329 printf("\n%s: Misaligned Kernel-mode Program Counter\n", 330 __func__); 331 dab_fatal(tf, fsr, far, l, NULL); 332 } 333 #else 334 if (__predict_false((tf->tf_pc & 3) != 0)) { 335 if (user) { 336 /* 337 * Give the user an illegal instruction signal. 338 */ 339 /* Deliver a SIGILL to the process */ 340 KSI_INIT_TRAP(&ksi); 341 ksi.ksi_signo = SIGILL; 342 ksi.ksi_code = ILL_ILLOPC; 343 ksi.ksi_addr = (uint32_t *)(intptr_t) far; 344 ksi.ksi_trap = fsr; 345 goto do_trapsignal; 346 } 347 348 /* 349 * The kernel never executes Thumb code. 350 */ 351 printf("\n%s: Misaligned Kernel-mode Program Counter\n", 352 __func__); 353 dab_fatal(tf, fsr, far, l, NULL); 354 } 355 #endif 356 357 /* See if the CPU state needs to be fixed up */ 358 switch (data_abort_fixup(tf, fsr, far, l)) { 359 case ABORT_FIXUP_RETURN: 360 return; 361 case ABORT_FIXUP_FAILED: 362 /* Deliver a SIGILL to the process */ 363 KSI_INIT_TRAP(&ksi); 364 ksi.ksi_signo = SIGILL; 365 ksi.ksi_code = ILL_ILLOPC; 366 ksi.ksi_addr = (uint32_t *)(intptr_t) far; 367 ksi.ksi_trap = fsr; 368 goto do_trapsignal; 369 default: 370 break; 371 } 372 373 va = trunc_page((vaddr_t)far); 374 375 /* 376 * It is only a kernel address space fault iff: 377 * 1. user == 0 and 378 * 2. pcb_onfault not set or 379 * 3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction. 380 */ 381 if (!user && (va >= VM_MIN_KERNEL_ADDRESS || 382 (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) && 383 __predict_true((pcb->pcb_onfault == NULL || 384 (read_insn(tf->tf_pc, false) & 0x05200000) != 0x04200000))) { 385 map = kernel_map; 386 387 /* Was the fault due to the FPE/IPKDB ? */ 388 if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) { 389 KSI_INIT_TRAP(&ksi); 390 ksi.ksi_signo = SIGSEGV; 391 ksi.ksi_code = SEGV_ACCERR; 392 ksi.ksi_addr = (uint32_t *)(intptr_t) far; 393 ksi.ksi_trap = fsr; 394 395 /* 396 * Force exit via userret() 397 * This is necessary as the FPE is an extension to 398 * userland that actually runs in a priveledged mode 399 * but uses USR mode permissions for its accesses. 400 */ 401 user = true; 402 goto do_trapsignal; 403 } 404 } else { 405 map = &l->l_proc->p_vmspace->vm_map; 406 } 407 408 /* 409 * We need to know whether the page should be mapped as R or R/W. 410 * Before ARMv6, the MMU did not give us the info as to whether the 411 * fault was caused by a read or a write. 412 * 413 * However, we know that a permission fault can only be the result of 414 * a write to a read-only location, so we can deal with those quickly. 415 * 416 * Otherwise we need to disassemble the instruction responsible to 417 * determine if it was a write. 418 */ 419 if (CPU_IS_ARMV6_P() || CPU_IS_ARMV7_P()) { 420 ftype = (fsr & FAULT_WRITE) ? VM_PROT_WRITE : VM_PROT_READ; 421 } else if (IS_PERMISSION_FAULT(fsr)) { 422 ftype = VM_PROT_WRITE; 423 } else { 424 #ifdef THUMB_CODE 425 /* Fast track the ARM case. */ 426 if (__predict_false(tf->tf_spsr & PSR_T_bit)) { 427 u_int insn = read_thumb_insn(tf->tf_pc, user); 428 u_int insn_f8 = insn & 0xf800; 429 u_int insn_fe = insn & 0xfe00; 430 431 if (insn_f8 == 0x6000 || /* STR(1) */ 432 insn_f8 == 0x7000 || /* STRB(1) */ 433 insn_f8 == 0x8000 || /* STRH(1) */ 434 insn_f8 == 0x9000 || /* STR(3) */ 435 insn_f8 == 0xc000 || /* STM */ 436 insn_fe == 0x5000 || /* STR(2) */ 437 insn_fe == 0x5200 || /* STRH(2) */ 438 insn_fe == 0x5400) /* STRB(2) */ 439 ftype = VM_PROT_WRITE; 440 else 441 ftype = VM_PROT_READ; 442 } 443 else 444 #endif 445 { 446 u_int insn = read_insn(tf->tf_pc, user); 447 448 if (((insn & 0x0c100000) == 0x04000000) || /* STR[B] */ 449 ((insn & 0x0e1000b0) == 0x000000b0) || /* STR[HD]*/ 450 ((insn & 0x0a100000) == 0x08000000) || /* STM/CDT*/ 451 ((insn & 0x0f9000f0) == 0x01800090)) /* STREX[BDH] */ 452 ftype = VM_PROT_WRITE; 453 else if ((insn & 0x0fb00ff0) == 0x01000090)/* SWP */ 454 ftype = VM_PROT_READ | VM_PROT_WRITE; 455 else 456 ftype = VM_PROT_READ; 457 } 458 } 459 460 /* 461 * See if the fault is as a result of ref/mod emulation, 462 * or domain mismatch. 463 */ 464 #ifdef DEBUG 465 last_fault_code = fsr; 466 #endif 467 if (pmap_fault_fixup(map->pmap, va, ftype, user)) { 468 UVMHIST_LOG(maphist, " <- ref/mod emul", 0, 0, 0, 0); 469 goto out; 470 } 471 472 if (__predict_false(curcpu()->ci_intr_depth > 0)) { 473 if (pcb->pcb_onfault) { 474 tf->tf_r0 = EINVAL; 475 tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; 476 return; 477 } 478 printf("\nNon-emulated page fault with intr_depth > 0\n"); 479 dab_fatal(tf, fsr, far, l, NULL); 480 } 481 482 onfault = pcb->pcb_onfault; 483 pcb->pcb_onfault = NULL; 484 error = uvm_fault(map, va, ftype); 485 pcb->pcb_onfault = onfault; 486 487 if (__predict_true(error == 0)) { 488 if (user) 489 uvm_grow(l->l_proc, va); /* Record any stack growth */ 490 else 491 ucas_ras_check(tf); 492 UVMHIST_LOG(maphist, " <- uvm", 0, 0, 0, 0); 493 goto out; 494 } 495 496 if (user == 0) { 497 if (pcb->pcb_onfault) { 498 tf->tf_r0 = error; 499 tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; 500 return; 501 } 502 503 printf("\nuvm_fault(%p, %lx, %x) -> %x\n", map, va, ftype, 504 error); 505 dab_fatal(tf, fsr, far, l, NULL); 506 } 507 508 KSI_INIT_TRAP(&ksi); 509 510 switch (error) { 511 case ENOMEM: 512 printf("UVM: pid %d (%s), uid %d killed: " 513 "out of swap\n", l->l_proc->p_pid, l->l_proc->p_comm, 514 l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1); 515 ksi.ksi_signo = SIGKILL; 516 break; 517 case EACCES: 518 ksi.ksi_signo = SIGSEGV; 519 ksi.ksi_code = SEGV_ACCERR; 520 break; 521 case EINVAL: 522 ksi.ksi_signo = SIGBUS; 523 ksi.ksi_code = BUS_ADRERR; 524 break; 525 default: 526 ksi.ksi_signo = SIGSEGV; 527 ksi.ksi_code = SEGV_MAPERR; 528 break; 529 } 530 ksi.ksi_addr = (uint32_t *)(intptr_t) far; 531 ksi.ksi_trap = fsr; 532 UVMHIST_LOG(maphist, " <- error (%d)", error, 0, 0, 0); 533 534 do_trapsignal: 535 call_trapsignal(l, tf, &ksi); 536 out: 537 /* If returning to user mode, make sure to invoke userret() */ 538 if (user) 539 userret(l); 540 } 541 542 /* 543 * dab_fatal() handles the following data aborts: 544 * 545 * FAULT_WRTBUF_0 - Vector Exception 546 * FAULT_WRTBUF_1 - Terminal Exception 547 * 548 * We should never see these on a properly functioning system. 549 * 550 * This function is also called by the other handlers if they 551 * detect a fatal problem. 552 * 553 * Note: If 'l' is NULL, we assume we're dealing with a prefetch abort. 554 */ 555 static int 556 dab_fatal(trapframe_t *tf, u_int fsr, u_int far, struct lwp *l, ksiginfo_t *ksi) 557 { 558 const char * const mode = TRAP_USERMODE(tf) ? "user" : "kernel"; 559 560 if (l != NULL) { 561 printf("Fatal %s mode data abort: '%s'\n", mode, 562 data_aborts[fsr & FAULT_TYPE_MASK].desc); 563 printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr); 564 if ((fsr & FAULT_IMPRECISE) == 0) 565 printf("%08x, ", far); 566 else 567 printf("Invalid, "); 568 printf("spsr=%08x\n", tf->tf_spsr); 569 } else { 570 printf("Fatal %s mode prefetch abort at 0x%08x\n", 571 mode, tf->tf_pc); 572 printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr); 573 } 574 575 printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n", 576 tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3); 577 printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n", 578 tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7); 579 printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n", 580 tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11); 581 printf("r12=%08x, ", tf->tf_r12); 582 583 if (TRAP_USERMODE(tf)) 584 printf("usp=%08x, ulr=%08x", 585 tf->tf_usr_sp, tf->tf_usr_lr); 586 else 587 printf("ssp=%08x, slr=%08x", 588 tf->tf_svc_sp, tf->tf_svc_lr); 589 printf(", pc =%08x\n\n", tf->tf_pc); 590 591 #if defined(DDB) || defined(KGDB) 592 kdb_trap(T_FAULT, tf); 593 #endif 594 panic("Fatal abort"); 595 /*NOTREACHED*/ 596 } 597 598 /* 599 * dab_align() handles the following data aborts: 600 * 601 * FAULT_ALIGN_0 - Alignment fault 602 * FAULT_ALIGN_0 - Alignment fault 603 * 604 * These faults are fatal if they happen in kernel mode. Otherwise, we 605 * deliver a bus error to the process. 606 */ 607 static int 608 dab_align(trapframe_t *tf, u_int fsr, u_int far, struct lwp *l, ksiginfo_t *ksi) 609 { 610 /* Alignment faults are always fatal if they occur in kernel mode */ 611 if (!TRAP_USERMODE(tf)) 612 dab_fatal(tf, fsr, far, l, NULL); 613 614 /* pcb_onfault *must* be NULL at this point */ 615 KDASSERT(((struct pcb *)lwp_getpcb(l))->pcb_onfault == NULL); 616 617 /* See if the CPU state needs to be fixed up */ 618 (void) data_abort_fixup(tf, fsr, far, l); 619 620 /* Deliver a bus error signal to the process */ 621 KSI_INIT_TRAP(ksi); 622 ksi->ksi_signo = SIGBUS; 623 ksi->ksi_code = BUS_ADRALN; 624 ksi->ksi_addr = (uint32_t *)(intptr_t)far; 625 ksi->ksi_trap = fsr; 626 627 lwp_settrapframe(l, tf); 628 629 return (1); 630 } 631 632 /* 633 * dab_buserr() handles the following data aborts: 634 * 635 * FAULT_BUSERR_0 - External Abort on Linefetch -- Section 636 * FAULT_BUSERR_1 - External Abort on Linefetch -- Page 637 * FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section 638 * FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page 639 * FAULT_BUSTRNL1 - External abort on Translation -- Level 1 640 * FAULT_BUSTRNL2 - External abort on Translation -- Level 2 641 * 642 * If pcb_onfault is set, flag the fault and return to the handler. 643 * If the fault occurred in user mode, give the process a SIGBUS. 644 * 645 * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2 646 * can be flagged as imprecise in the FSR. This causes a real headache 647 * since some of the machine state is lost. In this case, tf->tf_pc 648 * may not actually point to the offending instruction. In fact, if 649 * we've taken a double abort fault, it generally points somewhere near 650 * the top of "data_abort_entry" in exception.S. 651 * 652 * In all other cases, these data aborts are considered fatal. 653 */ 654 static int 655 dab_buserr(trapframe_t *tf, u_int fsr, u_int far, struct lwp *l, 656 ksiginfo_t *ksi) 657 { 658 struct pcb *pcb = lwp_getpcb(l); 659 660 #ifdef __XSCALE__ 661 if ((fsr & FAULT_IMPRECISE) != 0 && 662 (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) { 663 /* 664 * Oops, an imprecise, double abort fault. We've lost the 665 * r14_abt/spsr_abt values corresponding to the original 666 * abort, and the spsr saved in the trapframe indicates 667 * ABT mode. 668 */ 669 tf->tf_spsr &= ~PSR_MODE; 670 671 /* 672 * We use a simple heuristic to determine if the double abort 673 * happened as a result of a kernel or user mode access. 674 * If the current trapframe is at the top of the kernel stack, 675 * the fault _must_ have come from user mode. 676 */ 677 if (tf != ((trapframe_t *)pcb->pcb_ksp) - 1) { 678 /* 679 * Kernel mode. We're either about to die a 680 * spectacular death, or pcb_onfault will come 681 * to our rescue. Either way, the current value 682 * of tf->tf_pc is irrelevant. 683 */ 684 tf->tf_spsr |= PSR_SVC32_MODE; 685 if (pcb->pcb_onfault == NULL) 686 printf("\nKernel mode double abort!\n"); 687 } else { 688 /* 689 * User mode. We've lost the program counter at the 690 * time of the fault (not that it was accurate anyway; 691 * it's not called an imprecise fault for nothing). 692 * About all we can do is copy r14_usr to tf_pc and 693 * hope for the best. The process is about to get a 694 * SIGBUS, so it's probably history anyway. 695 */ 696 tf->tf_spsr |= PSR_USR32_MODE; 697 tf->tf_pc = tf->tf_usr_lr; 698 #ifdef THUMB_CODE 699 tf->tf_spsr &= ~PSR_T_bit; 700 if (tf->tf_usr_lr & 1) 701 tf->tf_spsr |= PSR_T_bit; 702 #endif 703 } 704 } 705 706 /* FAR is invalid for imprecise exceptions */ 707 if ((fsr & FAULT_IMPRECISE) != 0) 708 far = 0; 709 #endif /* __XSCALE__ */ 710 711 if (pcb->pcb_onfault) { 712 KDASSERT(TRAP_USERMODE(tf) == 0); 713 tf->tf_r0 = EFAULT; 714 tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; 715 return (0); 716 } 717 718 /* See if the CPU state needs to be fixed up */ 719 (void) data_abort_fixup(tf, fsr, far, l); 720 721 /* 722 * At this point, if the fault happened in kernel mode, we're toast 723 */ 724 if (!TRAP_USERMODE(tf)) 725 dab_fatal(tf, fsr, far, l, NULL); 726 727 /* Deliver a bus error signal to the process */ 728 KSI_INIT_TRAP(ksi); 729 ksi->ksi_signo = SIGBUS; 730 ksi->ksi_code = BUS_ADRERR; 731 ksi->ksi_addr = (uint32_t *)(intptr_t)far; 732 ksi->ksi_trap = fsr; 733 734 lwp_settrapframe(l, tf); 735 736 return (1); 737 } 738 739 static inline int 740 prefetch_abort_fixup(trapframe_t *tf) 741 { 742 #ifdef CPU_ABORT_FIXUP_REQUIRED 743 int error; 744 745 /* Call the CPU specific prefetch abort fixup routine */ 746 error = cpu_prefetchabt_fixup(tf); 747 if (__predict_true(error != ABORT_FIXUP_FAILED)) 748 return (error); 749 750 /* 751 * Oops, couldn't fix up the instruction 752 */ 753 printf("%s: fixup for %s mode prefetch abort failed.\n", __func__, 754 TRAP_USERMODE(tf) ? "user" : "kernel"); 755 #ifdef THUMB_CODE 756 if (tf->tf_spsr & PSR_T_bit) { 757 printf("pc = 0x%08x, opcode 0x%04x, 0x%04x, insn = ", 758 tf->tf_pc, *((uint16 *)(tf->tf_pc & ~1)), 759 *((uint16 *)((tf->tf_pc + 2) & ~1))); 760 } 761 else 762 #endif 763 { 764 printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc, 765 *((u_int *)tf->tf_pc)); 766 } 767 disassemble(tf->tf_pc); 768 769 /* Die now if this happened in kernel mode */ 770 if (!TRAP_USERMODE(tf)) 771 dab_fatal(tf, 0, tf->tf_pc, NULL, NULL); 772 773 return (error); 774 #else 775 return (ABORT_FIXUP_OK); 776 #endif /* CPU_ABORT_FIXUP_REQUIRED */ 777 } 778 779 /* 780 * void prefetch_abort_handler(trapframe_t *tf) 781 * 782 * Abort handler called when instruction execution occurs at 783 * a non existent or restricted (access permissions) memory page. 784 * If the address is invalid and we were in SVC mode then panic as 785 * the kernel should never prefetch abort. 786 * If the address is invalid and the page is mapped then the user process 787 * does no have read permission so send it a signal. 788 * Otherwise fault the page in and try again. 789 */ 790 void 791 prefetch_abort_handler(trapframe_t *tf) 792 { 793 struct lwp *l; 794 struct pcb *pcb __diagused; 795 struct vm_map *map; 796 vaddr_t fault_pc, va; 797 ksiginfo_t ksi; 798 int error, user; 799 800 UVMHIST_FUNC(__func__); 801 UVMHIST_CALLED(maphist); 802 803 /* Update vmmeter statistics */ 804 curcpu()->ci_data.cpu_ntrap++; 805 806 l = curlwp; 807 pcb = lwp_getpcb(l); 808 809 if ((user = TRAP_USERMODE(tf)) != 0) 810 LWP_CACHE_CREDS(l, l->l_proc); 811 812 /* 813 * Enable IRQ's (disabled by the abort) This always comes 814 * from user mode so we know interrupts were not disabled. 815 * But we check anyway. 816 */ 817 KASSERT(!TRAP_USERMODE(tf) || VALID_R15_PSR(tf->tf_pc, tf->tf_spsr)); 818 #ifdef __NO_FIQ 819 if (__predict_true((tf->tf_spsr & I32_bit) != I32_bit)) 820 restore_interrupts(tf->tf_spsr & IF32_bits); 821 #else 822 if (__predict_true((tf->tf_spsr & IF32_bits) != IF32_bits)) 823 restore_interrupts(tf->tf_spsr & IF32_bits); 824 #endif 825 826 /* See if the CPU state needs to be fixed up */ 827 switch (prefetch_abort_fixup(tf)) { 828 case ABORT_FIXUP_RETURN: 829 KASSERT(!TRAP_USERMODE(tf) || VALID_R15_PSR(tf->tf_pc, tf->tf_spsr)); 830 return; 831 case ABORT_FIXUP_FAILED: 832 /* Deliver a SIGILL to the process */ 833 KSI_INIT_TRAP(&ksi); 834 ksi.ksi_signo = SIGILL; 835 ksi.ksi_code = ILL_ILLOPC; 836 ksi.ksi_addr = (uint32_t *)(intptr_t) tf->tf_pc; 837 lwp_settrapframe(l, tf); 838 goto do_trapsignal; 839 default: 840 break; 841 } 842 843 /* Prefetch aborts cannot happen in kernel mode */ 844 if (__predict_false(!user)) 845 dab_fatal(tf, 0, tf->tf_pc, NULL, NULL); 846 847 /* Get fault address */ 848 fault_pc = tf->tf_pc; 849 lwp_settrapframe(l, tf); 850 UVMHIST_LOG(maphist, " (pc=0x%x, l=0x%x, tf=0x%x)", 851 fault_pc, l, tf, 0); 852 853 /* Ok validate the address, can only execute in USER space */ 854 if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS || 855 (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) { 856 KSI_INIT_TRAP(&ksi); 857 ksi.ksi_signo = SIGSEGV; 858 ksi.ksi_code = SEGV_ACCERR; 859 ksi.ksi_addr = (uint32_t *)(intptr_t) fault_pc; 860 ksi.ksi_trap = fault_pc; 861 goto do_trapsignal; 862 } 863 864 map = &l->l_proc->p_vmspace->vm_map; 865 va = trunc_page(fault_pc); 866 867 /* 868 * See if the pmap can handle this fault on its own... 869 */ 870 #ifdef DEBUG 871 last_fault_code = -1; 872 #endif 873 if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ|VM_PROT_EXECUTE, 1)) { 874 UVMHIST_LOG (maphist, " <- emulated", 0, 0, 0, 0); 875 goto out; 876 } 877 878 #ifdef DIAGNOSTIC 879 if (__predict_false(curcpu()->ci_intr_depth > 0)) { 880 printf("\nNon-emulated prefetch abort with intr_depth > 0\n"); 881 dab_fatal(tf, 0, tf->tf_pc, NULL, NULL); 882 } 883 #endif 884 885 KASSERT(pcb->pcb_onfault == NULL); 886 error = uvm_fault(map, va, VM_PROT_READ|VM_PROT_EXECUTE); 887 888 if (__predict_true(error == 0)) { 889 UVMHIST_LOG (maphist, " <- uvm", 0, 0, 0, 0); 890 goto out; 891 } 892 KSI_INIT_TRAP(&ksi); 893 894 UVMHIST_LOG (maphist, " <- fatal (%d)", error, 0, 0, 0); 895 896 if (error == ENOMEM) { 897 printf("UVM: pid %d (%s), uid %d killed: " 898 "out of swap\n", l->l_proc->p_pid, l->l_proc->p_comm, 899 l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1); 900 ksi.ksi_signo = SIGKILL; 901 } else 902 ksi.ksi_signo = SIGSEGV; 903 904 ksi.ksi_code = SEGV_MAPERR; 905 ksi.ksi_addr = (uint32_t *)(intptr_t) fault_pc; 906 ksi.ksi_trap = fault_pc; 907 908 do_trapsignal: 909 call_trapsignal(l, tf, &ksi); 910 911 out: 912 KASSERT(!TRAP_USERMODE(tf) || VALID_R15_PSR(tf->tf_pc, tf->tf_spsr)); 913 userret(l); 914 } 915 916 /* 917 * Tentatively read an 8, 16, or 32-bit value from 'addr'. 918 * If the read succeeds, the value is written to 'rptr' and zero is returned. 919 * Else, return EFAULT. 920 */ 921 int 922 badaddr_read(void *addr, size_t size, void *rptr) 923 { 924 extern int badaddr_read_1(const uint8_t *, uint8_t *); 925 extern int badaddr_read_2(const uint16_t *, uint16_t *); 926 extern int badaddr_read_4(const uint32_t *, uint32_t *); 927 union { 928 uint8_t v1; 929 uint16_t v2; 930 uint32_t v4; 931 } u; 932 int rv, s; 933 934 cpu_drain_writebuf(); 935 936 s = splhigh(); 937 938 /* Read from the test address. */ 939 switch (size) { 940 case sizeof(uint8_t): 941 rv = badaddr_read_1(addr, &u.v1); 942 if (rv == 0 && rptr) 943 *(uint8_t *) rptr = u.v1; 944 break; 945 946 case sizeof(uint16_t): 947 rv = badaddr_read_2(addr, &u.v2); 948 if (rv == 0 && rptr) 949 *(uint16_t *) rptr = u.v2; 950 break; 951 952 case sizeof(uint32_t): 953 rv = badaddr_read_4(addr, &u.v4); 954 if (rv == 0 && rptr) 955 *(uint32_t *) rptr = u.v4; 956 break; 957 958 default: 959 panic("%s: invalid size (%zu)", __func__, size); 960 } 961 962 splx(s); 963 964 /* Return EFAULT if the address was invalid, else zero */ 965 return (rv); 966 } 967