1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (C) 1994, David Greenman 5 * Copyright (c) 1990, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * the University of Utah, and William Jolitz. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the University of 22 * California, Berkeley and its contributors. 23 * 4. Neither the name of the University nor the names of its contributors 24 * may be used to endorse or promote products derived from this software 25 * without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * SUCH DAMAGE. 38 */ 39 40 #include <sys/cdefs.h> 41 /* 42 * AMD64 Trap and System call handling 43 */ 44 45 #include "opt_clock.h" 46 #include "opt_cpu.h" 47 #include "opt_hwpmc_hooks.h" 48 #include "opt_isa.h" 49 #include "opt_kdb.h" 50 51 #include <sys/param.h> 52 #include <sys/asan.h> 53 #include <sys/bus.h> 54 #include <sys/systm.h> 55 #include <sys/proc.h> 56 #include <sys/ptrace.h> 57 #include <sys/kdb.h> 58 #include <sys/kernel.h> 59 #include <sys/ktr.h> 60 #include <sys/lock.h> 61 #include <sys/msan.h> 62 #include <sys/mutex.h> 63 #include <sys/resourcevar.h> 64 #include <sys/signalvar.h> 65 #include <sys/syscall.h> 66 #include <sys/sysctl.h> 67 #include <sys/sysent.h> 68 #include <sys/uio.h> 69 #include <sys/vmmeter.h> 70 #ifdef HWPMC_HOOKS 71 #include <sys/pmckern.h> 72 PMC_SOFT_DEFINE( , , page_fault, all); 73 PMC_SOFT_DEFINE( , , page_fault, read); 74 PMC_SOFT_DEFINE( , , page_fault, write); 75 #endif 76 77 #include <vm/vm.h> 78 #include <vm/vm_param.h> 79 #include <vm/pmap.h> 80 #include <vm/vm_kern.h> 81 #include <vm/vm_map.h> 82 #include <vm/vm_page.h> 83 #include <vm/vm_extern.h> 84 85 #include <machine/cpu.h> 86 #include <machine/intr_machdep.h> 87 #include <x86/mca.h> 88 #include <machine/md_var.h> 89 #include <machine/pcb.h> 90 #ifdef SMP 91 #include <machine/smp.h> 92 #endif 93 #include <machine/stack.h> 94 #include <machine/trap.h> 95 #include <machine/tss.h> 96 97 #ifdef KDTRACE_HOOKS 98 #include <sys/dtrace_bsd.h> 99 #endif 100 101 extern inthand_t IDTVEC(bpt), IDTVEC(bpt_pti), IDTVEC(dbg), 102 IDTVEC(fast_syscall), IDTVEC(fast_syscall_pti), IDTVEC(fast_syscall32), 103 IDTVEC(int0x80_syscall_pti), IDTVEC(int0x80_syscall); 104 105 void __noinline trap(struct trapframe *frame); 106 void trap_check(struct trapframe *frame); 107 void dblfault_handler(struct trapframe *frame); 108 109 static int trap_pfault(struct trapframe *, bool, int *, int *); 110 static void trap_diag(struct trapframe *, vm_offset_t); 111 static void trap_fatal(struct trapframe *, vm_offset_t); 112 #ifdef KDTRACE_HOOKS 113 static bool trap_user_dtrace(struct trapframe *, 114 int (**hook)(struct trapframe *)); 115 #endif 116 117 static const char UNKNOWN[] = "unknown"; 118 static const char *const trap_msg[] = { 119 [0] = UNKNOWN, /* unused */ 120 [T_PRIVINFLT] = "privileged instruction fault", 121 [2] = UNKNOWN, /* unused */ 122 [T_BPTFLT] = "breakpoint instruction fault", 123 [4] = UNKNOWN, /* unused */ 124 [5] = UNKNOWN, /* unused */ 125 [T_ARITHTRAP] = "arithmetic trap", 126 [7] = UNKNOWN, /* unused */ 127 [8] = UNKNOWN, /* unused */ 128 [T_PROTFLT] = "general protection fault", 129 [T_TRCTRAP] = "debug exception", 130 [11] = UNKNOWN, /* unused */ 131 [T_PAGEFLT] = "page fault", 132 [13] = UNKNOWN, /* unused */ 133 [T_ALIGNFLT] = "alignment fault", 134 [15] = UNKNOWN, /* unused */ 135 [16] = UNKNOWN, /* unused */ 136 [17] = UNKNOWN, /* unused */ 137 [T_DIVIDE] = "integer divide fault", 138 [T_NMI] = "non-maskable interrupt trap", 139 [T_OFLOW] = "overflow trap", 140 [T_BOUND] = "FPU bounds check fault", 141 [T_DNA] = "FPU device not available", 142 [T_DOUBLEFLT] = "double fault", 143 [T_FPOPFLT] = "FPU operand fetch fault", 144 [T_TSSFLT] = "invalid TSS fault", 145 [T_SEGNPFLT] = "segment not present fault", 146 [T_STKFLT] = "stack fault", 147 [T_MCHK] = "machine check trap", 148 [T_XMMFLT] = "SIMD floating-point exception", 149 [T_RESERVED] = "reserved (unknown) fault", 150 [31] = UNKNOWN, /* reserved */ 151 [T_DTRACE_RET] = "DTrace pid return trap", 152 }; 153 154 static const char * 155 traptype_to_msg(u_int type) 156 { 157 return (type < nitems(trap_msg) ? trap_msg[type] : 158 "unknown/reserved trap"); 159 } 160 161 static int uprintf_signal; 162 SYSCTL_INT(_machdep, OID_AUTO, uprintf_signal, CTLFLAG_RWTUN, 163 &uprintf_signal, 0, 164 "Print debugging information on trap signal to ctty"); 165 166 /* 167 * Control L1D flush on return from NMI. 168 * 169 * Tunable can be set to the following values: 170 * 0 - only enable flush on return from NMI if required by vmm.ko (default) 171 * >1 - always flush on return from NMI. 172 * 173 * Post-boot, the sysctl indicates if flushing is currently enabled. 174 */ 175 int nmi_flush_l1d_sw; 176 SYSCTL_INT(_machdep, OID_AUTO, nmi_flush_l1d_sw, CTLFLAG_RWTUN, 177 &nmi_flush_l1d_sw, 0, 178 "Flush L1 Data Cache on NMI exit, software bhyve L1TF mitigation assist"); 179 180 /* 181 * Table of handlers for various segment load faults. 182 */ 183 static const struct { 184 uintptr_t faddr; 185 uintptr_t fhandler; 186 } sfhandlers[] = { 187 { 188 .faddr = (uintptr_t)ld_ds, 189 .fhandler = (uintptr_t)ds_load_fault, 190 }, 191 { 192 .faddr = (uintptr_t)ld_es, 193 .fhandler = (uintptr_t)es_load_fault, 194 }, 195 { 196 .faddr = (uintptr_t)ld_fs, 197 .fhandler = (uintptr_t)fs_load_fault, 198 }, 199 { 200 .faddr = (uintptr_t)ld_gs, 201 .fhandler = (uintptr_t)gs_load_fault, 202 }, 203 { 204 .faddr = (uintptr_t)ld_gsbase, 205 .fhandler = (uintptr_t)gsbase_load_fault 206 }, 207 { 208 .faddr = (uintptr_t)ld_fsbase, 209 .fhandler = (uintptr_t)fsbase_load_fault, 210 }, 211 }; 212 213 /* 214 * Exception, fault, and trap interface to the FreeBSD kernel. 215 * This common code is called from assembly language IDT gate entry 216 * routines that prepare a suitable stack frame, and restore this 217 * frame after the exception has been processed. 218 */ 219 220 void 221 trap(struct trapframe *frame) 222 { 223 ksiginfo_t ksi; 224 struct thread *td; 225 struct proc *p; 226 register_t addr, dr6; 227 size_t i; 228 int pf, signo, ucode; 229 u_int type; 230 231 td = curthread; 232 p = td->td_proc; 233 dr6 = 0; 234 235 kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0); 236 kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED); 237 238 VM_CNT_INC(v_trap); 239 type = frame->tf_trapno; 240 241 #ifdef KDB 242 if (kdb_active) { 243 kdb_reenter(); 244 return; 245 } 246 #endif 247 if (type == T_NMI) { 248 nmi_handle_intr(frame); 249 return; 250 } 251 252 if (type == T_RESERVED) { 253 trap_fatal(frame, 0); 254 return; 255 } 256 257 if ((frame->tf_rflags & PSL_I) == 0) { 258 /* 259 * Buggy application or kernel code has disabled 260 * interrupts and then trapped. Enabling interrupts 261 * now is wrong, but it is better than running with 262 * interrupts disabled until they are accidentally 263 * enabled later. 264 */ 265 if (TRAPF_USERMODE(frame)) { 266 uprintf( 267 "pid %ld (%s): trap %d (%s) " 268 "with interrupts disabled\n", 269 (long)curproc->p_pid, curthread->td_name, type, 270 trap_msg[type]); 271 } else { 272 switch (type) { 273 case T_NMI: 274 case T_BPTFLT: 275 case T_TRCTRAP: 276 case T_PROTFLT: 277 case T_SEGNPFLT: 278 case T_STKFLT: 279 break; 280 default: 281 printf( 282 "kernel trap %d with interrupts disabled\n", 283 type); 284 285 /* 286 * We shouldn't enable interrupts while holding a 287 * spin lock. 288 */ 289 if (td->td_md.md_spinlock_count == 0) 290 enable_intr(); 291 } 292 } 293 } 294 295 if (TRAPF_USERMODE(frame)) { 296 /* user trap */ 297 298 td->td_pticks = 0; 299 td->td_frame = frame; 300 addr = frame->tf_rip; 301 if (td->td_cowgen != atomic_load_int(&p->p_cowgen)) 302 thread_cow_update(td); 303 304 switch (type) { 305 case T_PRIVINFLT: /* privileged instruction fault */ 306 signo = SIGILL; 307 ucode = ILL_PRVOPC; 308 break; 309 310 case T_BPTFLT: /* bpt instruction fault */ 311 #ifdef KDTRACE_HOOKS 312 if (trap_user_dtrace(frame, &dtrace_pid_probe_ptr)) 313 return; 314 #else 315 enable_intr(); 316 #endif 317 signo = SIGTRAP; 318 ucode = TRAP_BRKPT; 319 break; 320 321 case T_TRCTRAP: /* debug exception */ 322 enable_intr(); 323 signo = SIGTRAP; 324 ucode = TRAP_TRACE; 325 dr6 = rdr6(); 326 if ((dr6 & DBREG_DR6_BS) != 0) { 327 PROC_LOCK(td->td_proc); 328 if ((td->td_dbgflags & TDB_STEP) != 0) { 329 td->td_frame->tf_rflags &= ~PSL_T; 330 td->td_dbgflags &= ~TDB_STEP; 331 } 332 PROC_UNLOCK(td->td_proc); 333 } 334 break; 335 336 case T_ARITHTRAP: /* arithmetic trap */ 337 ucode = fputrap_x87(); 338 if (ucode == -1) 339 return; 340 signo = SIGFPE; 341 break; 342 343 case T_PROTFLT: /* general protection fault */ 344 signo = SIGBUS; 345 ucode = BUS_OBJERR; 346 break; 347 case T_STKFLT: /* stack fault */ 348 case T_SEGNPFLT: /* segment not present fault */ 349 signo = SIGBUS; 350 ucode = BUS_ADRERR; 351 break; 352 case T_TSSFLT: /* invalid TSS fault */ 353 signo = SIGBUS; 354 ucode = BUS_OBJERR; 355 break; 356 case T_ALIGNFLT: 357 signo = SIGBUS; 358 ucode = BUS_ADRALN; 359 break; 360 case T_DOUBLEFLT: /* double fault */ 361 default: 362 signo = SIGBUS; 363 ucode = BUS_OBJERR; 364 break; 365 366 case T_PAGEFLT: /* page fault */ 367 /* 368 * Can emulator handle this trap? 369 */ 370 if (*p->p_sysent->sv_trap != NULL && 371 (*p->p_sysent->sv_trap)(td) == 0) 372 return; 373 374 pf = trap_pfault(frame, true, &signo, &ucode); 375 if (pf == -1) 376 return; 377 if (pf == 0) 378 goto userret; 379 addr = frame->tf_addr; 380 break; 381 382 case T_DIVIDE: /* integer divide fault */ 383 ucode = FPE_INTDIV; 384 signo = SIGFPE; 385 break; 386 387 case T_OFLOW: /* integer overflow fault */ 388 ucode = FPE_INTOVF; 389 signo = SIGFPE; 390 break; 391 392 case T_BOUND: /* bounds check fault */ 393 ucode = FPE_FLTSUB; 394 signo = SIGFPE; 395 break; 396 397 case T_DNA: 398 /* transparent fault (due to context switch "late") */ 399 KASSERT(PCB_USER_FPU(td->td_pcb), 400 ("kernel FPU ctx has leaked")); 401 fpudna(); 402 return; 403 404 case T_FPOPFLT: /* FPU operand fetch fault */ 405 ucode = ILL_COPROC; 406 signo = SIGILL; 407 break; 408 409 case T_XMMFLT: /* SIMD floating-point exception */ 410 ucode = fputrap_sse(); 411 if (ucode == -1) 412 return; 413 signo = SIGFPE; 414 break; 415 #ifdef KDTRACE_HOOKS 416 case T_DTRACE_RET: 417 (void)trap_user_dtrace(frame, &dtrace_return_probe_ptr); 418 return; 419 #endif 420 } 421 } else { 422 /* kernel trap */ 423 424 KASSERT(cold || td->td_ucred != NULL, 425 ("kernel trap doesn't have ucred")); 426 427 /* 428 * Most likely, EFI RT faulted. This check prevents 429 * kdb from handling breakpoints set on the BIOS text, 430 * if such option is ever needed. 431 */ 432 if ((td->td_pflags & TDP_EFIRT) != 0 && 433 curpcb->pcb_onfault != NULL && type != T_PAGEFLT) { 434 trap_diag(frame, 0); 435 printf("EFI RT fault %s\n", traptype_to_msg(type)); 436 frame->tf_rip = (long)curpcb->pcb_onfault; 437 return; 438 } 439 440 switch (type) { 441 case T_PAGEFLT: /* page fault */ 442 (void)trap_pfault(frame, false, NULL, NULL); 443 return; 444 445 case T_DNA: 446 if (PCB_USER_FPU(td->td_pcb)) 447 panic("Unregistered use of FPU in kernel"); 448 fpudna(); 449 return; 450 451 case T_ARITHTRAP: /* arithmetic trap */ 452 case T_XMMFLT: /* SIMD floating-point exception */ 453 case T_FPOPFLT: /* FPU operand fetch fault */ 454 /* 455 * For now, supporting kernel handler 456 * registration for FPU traps is overkill. 457 */ 458 trap_fatal(frame, 0); 459 return; 460 461 case T_STKFLT: /* stack fault */ 462 case T_PROTFLT: /* general protection fault */ 463 case T_SEGNPFLT: /* segment not present fault */ 464 if (td->td_intr_nesting_level != 0) 465 break; 466 467 /* 468 * Invalid segment selectors and out of bounds 469 * %rip's and %rsp's can be set up in user mode. 470 * This causes a fault in kernel mode when the 471 * kernel tries to return to user mode. We want 472 * to get this fault so that we can fix the 473 * problem here and not have to check all the 474 * selectors and pointers when the user changes 475 * them. 476 * 477 * In case of PTI, the IRETQ faulted while the 478 * kernel used the pti stack, and exception 479 * frame records %rsp value pointing to that 480 * stack. If we return normally to 481 * doreti_iret_fault, the trapframe is 482 * reconstructed on pti stack, and calltrap() 483 * called on it as well. Due to the very 484 * limited pti stack size, kernel does not 485 * survive for too long. Switch to the normal 486 * thread stack for the trap handling. 487 * 488 * Magic '5' is the number of qwords occupied by 489 * the hardware trap frame. 490 */ 491 if (frame->tf_rip == (long)doreti_iret) { 492 KASSERT((read_rflags() & PSL_I) == 0, 493 ("interrupts enabled")); 494 frame->tf_rip = (long)doreti_iret_fault; 495 if ((PCPU_GET(curpmap)->pm_ucr3 != 496 PMAP_NO_CR3) && 497 (frame->tf_rsp == (uintptr_t)PCPU_GET( 498 pti_rsp0) - 5 * sizeof(register_t))) { 499 frame->tf_rsp = PCPU_GET(rsp0) - 5 * 500 sizeof(register_t); 501 } 502 return; 503 } 504 505 for (i = 0; i < nitems(sfhandlers); i++) { 506 if (frame->tf_rip == sfhandlers[i].faddr) { 507 KASSERT((read_rflags() & PSL_I) == 0, 508 ("interrupts enabled")); 509 frame->tf_rip = sfhandlers[i].fhandler; 510 return; 511 } 512 } 513 514 if (curpcb->pcb_onfault != NULL) { 515 frame->tf_rip = (long)curpcb->pcb_onfault; 516 return; 517 } 518 break; 519 520 case T_TSSFLT: 521 /* 522 * PSL_NT can be set in user mode and isn't cleared 523 * automatically when the kernel is entered. This 524 * causes a TSS fault when the kernel attempts to 525 * `iret' because the TSS link is uninitialized. We 526 * want to get this fault so that we can fix the 527 * problem here and not every time the kernel is 528 * entered. 529 */ 530 if (frame->tf_rflags & PSL_NT) { 531 frame->tf_rflags &= ~PSL_NT; 532 return; 533 } 534 break; 535 536 case T_TRCTRAP: /* debug exception */ 537 /* Clear any pending debug events. */ 538 dr6 = rdr6(); 539 load_dr6(0); 540 541 /* 542 * Ignore debug register exceptions due to 543 * accesses in the user's address space, which 544 * can happen under several conditions such as 545 * if a user sets a watchpoint on a buffer and 546 * then passes that buffer to a system call. 547 * We still want to get TRCTRAPS for addresses 548 * in kernel space because that is useful when 549 * debugging the kernel. 550 */ 551 if (user_dbreg_trap(dr6)) 552 return; 553 554 /* 555 * Malicious user code can configure a debug 556 * register watchpoint to trap on data access 557 * to the top of stack and then execute 'pop 558 * %ss; int 3'. Due to exception deferral for 559 * 'pop %ss', the CPU will not interrupt 'int 560 * 3' to raise the DB# exception for the debug 561 * register but will postpone the DB# until 562 * execution of the first instruction of the 563 * BP# handler (in kernel mode). Normally the 564 * previous check would ignore DB# exceptions 565 * for watchpoints on user addresses raised in 566 * kernel mode. However, some CPU errata 567 * include cases where DB# exceptions do not 568 * properly set bits in %dr6, e.g. Haswell 569 * HSD23 and Skylake-X SKZ24. 570 * 571 * A deferred DB# can also be raised on the 572 * first instructions of system call entry 573 * points or single-step traps via similar use 574 * of 'pop %ss' or 'mov xxx, %ss'. 575 */ 576 if (pti) { 577 if (frame->tf_rip == 578 (uintptr_t)IDTVEC(fast_syscall_pti) || 579 #ifdef COMPAT_FREEBSD32 580 frame->tf_rip == 581 (uintptr_t)IDTVEC(int0x80_syscall_pti) || 582 #endif 583 frame->tf_rip == (uintptr_t)IDTVEC(bpt_pti)) 584 return; 585 } else { 586 if (frame->tf_rip == 587 (uintptr_t)IDTVEC(fast_syscall) || 588 #ifdef COMPAT_FREEBSD32 589 frame->tf_rip == 590 (uintptr_t)IDTVEC(int0x80_syscall) || 591 #endif 592 frame->tf_rip == (uintptr_t)IDTVEC(bpt)) 593 return; 594 } 595 if (frame->tf_rip == (uintptr_t)IDTVEC(dbg) || 596 /* Needed for AMD. */ 597 frame->tf_rip == (uintptr_t)IDTVEC(fast_syscall32)) 598 return; 599 /* 600 * FALLTHROUGH (TRCTRAP kernel mode, kernel address) 601 */ 602 case T_BPTFLT: 603 /* 604 * If KDB is enabled, let it handle the debugger trap. 605 * Otherwise, debugger traps "can't happen". 606 */ 607 #ifdef KDB 608 if (kdb_trap(type, dr6, frame)) 609 return; 610 #endif 611 break; 612 } 613 614 trap_fatal(frame, 0); 615 return; 616 } 617 618 ksiginfo_init_trap(&ksi); 619 ksi.ksi_signo = signo; 620 ksi.ksi_code = ucode; 621 ksi.ksi_trapno = type; 622 ksi.ksi_addr = (void *)addr; 623 if (uprintf_signal) { 624 uprintf("pid %d comm %s: signal %d err %#lx code %d type %d " 625 "addr %#lx rsp %#lx rip %#lx rax %#lx " 626 "<%02x %02x %02x %02x %02x %02x %02x %02x>\n", 627 p->p_pid, p->p_comm, signo, frame->tf_err, ucode, type, 628 addr, frame->tf_rsp, frame->tf_rip, frame->tf_rax, 629 fubyte((void *)(frame->tf_rip + 0)), 630 fubyte((void *)(frame->tf_rip + 1)), 631 fubyte((void *)(frame->tf_rip + 2)), 632 fubyte((void *)(frame->tf_rip + 3)), 633 fubyte((void *)(frame->tf_rip + 4)), 634 fubyte((void *)(frame->tf_rip + 5)), 635 fubyte((void *)(frame->tf_rip + 6)), 636 fubyte((void *)(frame->tf_rip + 7))); 637 } 638 KASSERT((read_rflags() & PSL_I) != 0, ("interrupts disabled")); 639 trapsignal(td, &ksi); 640 641 userret: 642 userret(td, frame); 643 KASSERT(PCB_USER_FPU(td->td_pcb), 644 ("Return from trap with kernel FPU ctx leaked")); 645 } 646 647 /* 648 * Ensure that we ignore any DTrace-induced faults. This function cannot 649 * be instrumented, so it cannot generate such faults itself. 650 */ 651 void 652 trap_check(struct trapframe *frame) 653 { 654 655 #ifdef KDTRACE_HOOKS 656 if (dtrace_trap_func != NULL && 657 (*dtrace_trap_func)(frame, frame->tf_trapno) != 0) 658 return; 659 #endif 660 trap(frame); 661 } 662 663 static bool 664 trap_is_smap(struct trapframe *frame) 665 { 666 667 /* 668 * A page fault on a userspace address is classified as 669 * SMAP-induced if: 670 * - SMAP is supported; 671 * - kernel mode accessed present data page; 672 * - rflags.AC was cleared. 673 * Kernel must never access user space with rflags.AC cleared 674 * if SMAP is enabled. 675 */ 676 return ((cpu_stdext_feature & CPUID_STDEXT_SMAP) != 0 && 677 (frame->tf_err & (PGEX_P | PGEX_U | PGEX_I | PGEX_RSV)) == 678 PGEX_P && (frame->tf_rflags & PSL_AC) == 0); 679 } 680 681 static bool 682 trap_is_pti(struct trapframe *frame) 683 { 684 685 return (PCPU_GET(curpmap)->pm_ucr3 != PMAP_NO_CR3 && 686 pg_nx != 0 && (frame->tf_err & (PGEX_P | PGEX_W | 687 PGEX_U | PGEX_I)) == (PGEX_P | PGEX_U | PGEX_I) && 688 (curpcb->pcb_saved_ucr3 & ~CR3_PCID_MASK) == 689 (PCPU_GET(curpmap)->pm_cr3 & ~CR3_PCID_MASK)); 690 } 691 692 /* 693 * Handle all details of a page fault. 694 * Returns: 695 * -1 if this fault was fatal, typically from kernel mode 696 * (cannot happen, but we need to return something). 697 * 0 if this fault was handled by updating either the user or kernel 698 * page table, execution can continue. 699 * 1 if this fault was from usermode and it was not handled, a synchronous 700 * signal should be delivered to the thread. *signo returns the signal 701 * number, *ucode gives si_code. 702 */ 703 static int 704 trap_pfault(struct trapframe *frame, bool usermode, int *signo, int *ucode) 705 { 706 struct thread *td; 707 struct proc *p; 708 vm_map_t map; 709 vm_offset_t eva; 710 int rv; 711 vm_prot_t ftype; 712 713 MPASS(!usermode || (signo != NULL && ucode != NULL)); 714 715 td = curthread; 716 p = td->td_proc; 717 eva = frame->tf_addr; 718 719 if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) { 720 /* 721 * Due to both processor errata and lazy TLB invalidation when 722 * access restrictions are removed from virtual pages, memory 723 * accesses that are allowed by the physical mapping layer may 724 * nonetheless cause one spurious page fault per virtual page. 725 * When the thread is executing a "no faulting" section that 726 * is bracketed by vm_fault_{disable,enable}_pagefaults(), 727 * every page fault is treated as a spurious page fault, 728 * unless it accesses the same virtual address as the most 729 * recent page fault within the same "no faulting" section. 730 */ 731 if (td->td_md.md_spurflt_addr != eva || 732 (td->td_pflags & TDP_RESETSPUR) != 0) { 733 /* 734 * Do nothing to the TLB. A stale TLB entry is 735 * flushed automatically by a page fault. 736 */ 737 td->td_md.md_spurflt_addr = eva; 738 td->td_pflags &= ~TDP_RESETSPUR; 739 return (0); 740 } 741 } else { 742 /* 743 * If we get a page fault while in a critical section, then 744 * it is most likely a fatal kernel page fault. The kernel 745 * is already going to panic trying to get a sleep lock to 746 * do the VM lookup, so just consider it a fatal trap so the 747 * kernel can print out a useful trap message and even get 748 * to the debugger. 749 * 750 * If we get a page fault while holding a non-sleepable 751 * lock, then it is most likely a fatal kernel page fault. 752 * If WITNESS is enabled, then it's going to whine about 753 * bogus LORs with various VM locks, so just skip to the 754 * fatal trap handling directly. 755 */ 756 if (td->td_critnest != 0 || 757 WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL, 758 "Kernel page fault") != 0) { 759 trap_fatal(frame, eva); 760 return (-1); 761 } 762 } 763 if (eva >= VM_MIN_KERNEL_ADDRESS) { 764 /* 765 * Don't allow user-mode faults in kernel address space. 766 */ 767 if (usermode) { 768 *signo = SIGSEGV; 769 *ucode = SEGV_MAPERR; 770 return (1); 771 } 772 773 map = kernel_map; 774 } else { 775 map = &p->p_vmspace->vm_map; 776 777 /* 778 * When accessing a usermode address, kernel must be 779 * ready to accept the page fault, and provide a 780 * handling routine. Since accessing the address 781 * without the handler is a bug, do not try to handle 782 * it normally, and panic immediately. 783 * 784 * If SMAP is enabled, filter SMAP faults also, 785 * because illegal access might occur to the mapped 786 * user address, causing infinite loop. 787 */ 788 if (!usermode && (td->td_intr_nesting_level != 0 || 789 trap_is_smap(frame) || curpcb->pcb_onfault == NULL)) { 790 trap_fatal(frame, eva); 791 return (-1); 792 } 793 } 794 795 /* 796 * If the trap was caused by errant bits in the PTE then panic. 797 */ 798 if (frame->tf_err & PGEX_RSV) { 799 trap_fatal(frame, eva); 800 return (-1); 801 } 802 803 /* 804 * User-mode protection key violation (PKU). May happen 805 * either from usermode or from kernel if copyin accessed 806 * key-protected mapping. 807 */ 808 if ((frame->tf_err & PGEX_PK) != 0) { 809 if (eva > VM_MAXUSER_ADDRESS) { 810 trap_fatal(frame, eva); 811 return (-1); 812 } 813 if (usermode) { 814 *signo = SIGSEGV; 815 *ucode = SEGV_PKUERR; 816 return (1); 817 } 818 goto after_vmfault; 819 } 820 821 /* 822 * If nx protection of the usermode portion of kernel page 823 * tables caused trap, panic. 824 */ 825 if (usermode && trap_is_pti(frame)) 826 panic("PTI: pid %d comm %s tf_err %#lx", p->p_pid, 827 p->p_comm, frame->tf_err); 828 829 /* 830 * PGEX_I is defined only if the execute disable bit capability is 831 * supported and enabled. 832 */ 833 if (frame->tf_err & PGEX_W) 834 ftype = VM_PROT_WRITE; 835 else if ((frame->tf_err & PGEX_I) && pg_nx != 0) 836 ftype = VM_PROT_EXECUTE; 837 else 838 ftype = VM_PROT_READ; 839 840 /* Fault in the page. */ 841 rv = vm_fault_trap(map, eva, ftype, VM_FAULT_NORMAL, signo, ucode); 842 if (rv == KERN_SUCCESS) { 843 #ifdef HWPMC_HOOKS 844 if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) { 845 PMC_SOFT_CALL_TF( , , page_fault, all, frame); 846 if (ftype == VM_PROT_READ) 847 PMC_SOFT_CALL_TF( , , page_fault, read, 848 frame); 849 else 850 PMC_SOFT_CALL_TF( , , page_fault, write, 851 frame); 852 } 853 #endif 854 return (0); 855 } 856 857 if (usermode) 858 return (1); 859 after_vmfault: 860 if (td->td_intr_nesting_level == 0 && 861 curpcb->pcb_onfault != NULL) { 862 if ((td->td_pflags & TDP_EFIRT) != 0) { 863 trap_diag(frame, eva); 864 printf("EFI RT page fault\n"); 865 } 866 frame->tf_rip = (long)curpcb->pcb_onfault; 867 return (0); 868 } 869 trap_fatal(frame, eva); 870 return (-1); 871 } 872 873 static void 874 trap_diag(struct trapframe *frame, vm_offset_t eva) 875 { 876 int code, ss; 877 u_int type; 878 struct soft_segment_descriptor softseg; 879 struct user_segment_descriptor *gdt; 880 881 code = frame->tf_err; 882 type = frame->tf_trapno; 883 gdt = *PCPU_PTR(gdt); 884 sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg); 885 886 printf("\n\nFatal trap %d: %s while in %s mode\n", type, 887 type < nitems(trap_msg) ? trap_msg[type] : UNKNOWN, 888 TRAPF_USERMODE(frame) ? "user" : "kernel"); 889 #ifdef SMP 890 /* two separate prints in case of a trap on an unmapped page */ 891 printf("cpuid = %d; ", PCPU_GET(cpuid)); 892 printf("apic id = %02x\n", PCPU_GET(apic_id)); 893 #endif 894 if (type == T_PAGEFLT) { 895 printf("fault virtual address = 0x%lx\n", eva); 896 printf("fault code = %s %s %s%s%s, %s\n", 897 code & PGEX_U ? "user" : "supervisor", 898 code & PGEX_W ? "write" : "read", 899 code & PGEX_I ? "instruction" : "data", 900 code & PGEX_PK ? " prot key" : "", 901 code & PGEX_SGX ? " SGX" : "", 902 code & PGEX_RSV ? "reserved bits in PTE" : 903 code & PGEX_P ? "protection violation" : "page not present"); 904 } 905 printf("instruction pointer = 0x%lx:0x%lx\n", 906 frame->tf_cs & 0xffff, frame->tf_rip); 907 ss = frame->tf_ss & 0xffff; 908 printf("stack pointer = 0x%x:0x%lx\n", ss, frame->tf_rsp); 909 printf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp); 910 printf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n", 911 softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type); 912 printf(" = DPL %d, pres %d, long %d, def32 %d, gran %d\n", 913 softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long, softseg.ssd_def32, 914 softseg.ssd_gran); 915 printf("processor eflags = "); 916 if (frame->tf_rflags & PSL_T) 917 printf("trace trap, "); 918 if (frame->tf_rflags & PSL_I) 919 printf("interrupt enabled, "); 920 if (frame->tf_rflags & PSL_NT) 921 printf("nested task, "); 922 if (frame->tf_rflags & PSL_RF) 923 printf("resume, "); 924 printf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12); 925 printf("current process = %d (%s)\n", 926 curproc->p_pid, curthread->td_name); 927 928 printf("rdi: %016lx rsi: %016lx rdx: %016lx\n", frame->tf_rdi, 929 frame->tf_rsi, frame->tf_rdx); 930 printf("rcx: %016lx r8: %016lx r9: %016lx\n", frame->tf_rcx, 931 frame->tf_r8, frame->tf_r9); 932 printf("rax: %016lx rbx: %016lx rbp: %016lx\n", frame->tf_rax, 933 frame->tf_rbx, frame->tf_rbp); 934 printf("r10: %016lx r11: %016lx r12: %016lx\n", frame->tf_r10, 935 frame->tf_r11, frame->tf_r12); 936 printf("r13: %016lx r14: %016lx r15: %016lx\n", frame->tf_r13, 937 frame->tf_r14, frame->tf_r15); 938 939 printf("trap number = %d\n", type); 940 } 941 942 static void 943 trap_fatal(struct trapframe *frame, vm_offset_t eva) 944 { 945 u_int type; 946 947 type = frame->tf_trapno; 948 trap_diag(frame, eva); 949 #ifdef KDB 950 if (debugger_on_trap) { 951 bool handled; 952 953 kdb_why = KDB_WHY_TRAP; 954 handled = kdb_trap(type, 0, frame); 955 kdb_why = KDB_WHY_UNSET; 956 if (handled) 957 return; 958 } 959 #endif 960 panic("%s", traptype_to_msg(type)); 961 } 962 963 #ifdef KDTRACE_HOOKS 964 /* 965 * Invoke a userspace DTrace hook. The hook pointer is cleared when no 966 * userspace probes are enabled, so we must synchronize with DTrace to ensure 967 * that a trapping thread is able to call the hook before it is cleared. 968 */ 969 static bool 970 trap_user_dtrace(struct trapframe *frame, int (**hookp)(struct trapframe *)) 971 { 972 int (*hook)(struct trapframe *); 973 974 hook = atomic_load_ptr(hookp); 975 enable_intr(); 976 if (hook != NULL) 977 return ((hook)(frame) == 0); 978 return (false); 979 } 980 #endif 981 982 /* 983 * Double fault handler. Called when a fault occurs while writing 984 * a frame for a trap/exception onto the stack. This usually occurs 985 * when the stack overflows (such is the case with infinite recursion, 986 * for example). 987 */ 988 void 989 dblfault_handler(struct trapframe *frame) 990 { 991 kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED); 992 #ifdef KDTRACE_HOOKS 993 if (dtrace_doubletrap_func != NULL) 994 (*dtrace_doubletrap_func)(); 995 #endif 996 printf("\nFatal double fault\n" 997 "rip %#lx rsp %#lx rbp %#lx\n" 998 "rax %#lx rdx %#lx rbx %#lx\n" 999 "rcx %#lx rsi %#lx rdi %#lx\n" 1000 "r8 %#lx r9 %#lx r10 %#lx\n" 1001 "r11 %#lx r12 %#lx r13 %#lx\n" 1002 "r14 %#lx r15 %#lx rflags %#lx\n" 1003 "cs %#lx ss %#lx ds %#hx es %#hx fs %#hx gs %#hx\n" 1004 "fsbase %#lx gsbase %#lx kgsbase %#lx\n", 1005 frame->tf_rip, frame->tf_rsp, frame->tf_rbp, 1006 frame->tf_rax, frame->tf_rdx, frame->tf_rbx, 1007 frame->tf_rcx, frame->tf_rdi, frame->tf_rsi, 1008 frame->tf_r8, frame->tf_r9, frame->tf_r10, 1009 frame->tf_r11, frame->tf_r12, frame->tf_r13, 1010 frame->tf_r14, frame->tf_r15, frame->tf_rflags, 1011 frame->tf_cs, frame->tf_ss, frame->tf_ds, frame->tf_es, 1012 frame->tf_fs, frame->tf_gs, 1013 rdmsr(MSR_FSBASE), rdmsr(MSR_GSBASE), rdmsr(MSR_KGSBASE)); 1014 #ifdef SMP 1015 /* two separate prints in case of a trap on an unmapped page */ 1016 printf("cpuid = %d; ", PCPU_GET(cpuid)); 1017 printf("apic id = %02x\n", PCPU_GET(apic_id)); 1018 #endif 1019 panic("double fault"); 1020 } 1021 1022 static int __noinline 1023 cpu_fetch_syscall_args_fallback(struct thread *td, struct syscall_args *sa) 1024 { 1025 struct proc *p; 1026 struct trapframe *frame; 1027 syscallarg_t *argp; 1028 caddr_t params; 1029 int reg, regcnt, error; 1030 1031 p = td->td_proc; 1032 frame = td->td_frame; 1033 reg = 0; 1034 regcnt = NARGREGS; 1035 1036 if (sa->code == SYS_syscall || sa->code == SYS___syscall) { 1037 sa->code = frame->tf_rdi; 1038 reg++; 1039 regcnt--; 1040 } 1041 1042 if (sa->code >= p->p_sysent->sv_size) 1043 sa->callp = &nosys_sysent; 1044 else 1045 sa->callp = &p->p_sysent->sv_table[sa->code]; 1046 1047 KASSERT(sa->callp->sy_narg <= nitems(sa->args), 1048 ("Too many syscall arguments!")); 1049 argp = &frame->tf_rdi; 1050 argp += reg; 1051 memcpy(sa->args, argp, sizeof(sa->args[0]) * NARGREGS); 1052 if (sa->callp->sy_narg > regcnt) { 1053 params = (caddr_t)frame->tf_rsp + sizeof(register_t); 1054 error = copyin(params, &sa->args[regcnt], 1055 (sa->callp->sy_narg - regcnt) * sizeof(sa->args[0])); 1056 if (__predict_false(error != 0)) 1057 return (error); 1058 } 1059 1060 td->td_retval[0] = 0; 1061 td->td_retval[1] = frame->tf_rdx; 1062 1063 return (0); 1064 } 1065 1066 int 1067 cpu_fetch_syscall_args(struct thread *td) 1068 { 1069 struct proc *p; 1070 struct trapframe *frame; 1071 struct syscall_args *sa; 1072 1073 p = td->td_proc; 1074 frame = td->td_frame; 1075 sa = &td->td_sa; 1076 1077 sa->code = frame->tf_rax; 1078 sa->original_code = sa->code; 1079 1080 if (__predict_false(sa->code == SYS_syscall || 1081 sa->code == SYS___syscall || 1082 sa->code >= p->p_sysent->sv_size)) 1083 return (cpu_fetch_syscall_args_fallback(td, sa)); 1084 1085 sa->callp = &p->p_sysent->sv_table[sa->code]; 1086 KASSERT(sa->callp->sy_narg <= nitems(sa->args), 1087 ("Too many syscall arguments!")); 1088 1089 if (__predict_false(sa->callp->sy_narg > NARGREGS)) 1090 return (cpu_fetch_syscall_args_fallback(td, sa)); 1091 1092 memcpy(sa->args, &frame->tf_rdi, sizeof(sa->args[0]) * NARGREGS); 1093 1094 td->td_retval[0] = 0; 1095 td->td_retval[1] = frame->tf_rdx; 1096 1097 return (0); 1098 } 1099 1100 #include "../../kern/subr_syscall.c" 1101 1102 static void (*syscall_ret_l1d_flush)(void); 1103 int syscall_ret_l1d_flush_mode; 1104 1105 static void 1106 flush_l1d_hw(void) 1107 { 1108 1109 wrmsr(MSR_IA32_FLUSH_CMD, IA32_FLUSH_CMD_L1D); 1110 } 1111 1112 static void __noinline 1113 amd64_syscall_ret_flush_l1d_check(int error) 1114 { 1115 void (*p)(void); 1116 1117 if (error != EEXIST && error != EAGAIN && error != EXDEV && 1118 error != ENOENT && error != ENOTCONN && error != EINPROGRESS) { 1119 p = atomic_load_ptr(&syscall_ret_l1d_flush); 1120 if (p != NULL) 1121 p(); 1122 } 1123 } 1124 1125 static void __inline 1126 amd64_syscall_ret_flush_l1d_check_inline(int error) 1127 { 1128 1129 if (__predict_false(error != 0)) 1130 amd64_syscall_ret_flush_l1d_check(error); 1131 } 1132 1133 void 1134 amd64_syscall_ret_flush_l1d(int error) 1135 { 1136 1137 amd64_syscall_ret_flush_l1d_check_inline(error); 1138 } 1139 1140 void 1141 amd64_syscall_ret_flush_l1d_recalc(void) 1142 { 1143 bool l1d_hw; 1144 1145 l1d_hw = (cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) != 0; 1146 again: 1147 switch (syscall_ret_l1d_flush_mode) { 1148 case 0: 1149 syscall_ret_l1d_flush = NULL; 1150 break; 1151 case 1: 1152 syscall_ret_l1d_flush = l1d_hw ? flush_l1d_hw : 1153 flush_l1d_sw_abi; 1154 break; 1155 case 2: 1156 syscall_ret_l1d_flush = l1d_hw ? flush_l1d_hw : NULL; 1157 break; 1158 case 3: 1159 syscall_ret_l1d_flush = flush_l1d_sw_abi; 1160 break; 1161 default: 1162 syscall_ret_l1d_flush_mode = 1; 1163 goto again; 1164 } 1165 } 1166 1167 static int 1168 machdep_syscall_ret_flush_l1d(SYSCTL_HANDLER_ARGS) 1169 { 1170 int error, val; 1171 1172 val = syscall_ret_l1d_flush_mode; 1173 error = sysctl_handle_int(oidp, &val, 0, req); 1174 if (error != 0 || req->newptr == NULL) 1175 return (error); 1176 syscall_ret_l1d_flush_mode = val; 1177 amd64_syscall_ret_flush_l1d_recalc(); 1178 return (0); 1179 } 1180 SYSCTL_PROC(_machdep, OID_AUTO, syscall_ret_flush_l1d, CTLTYPE_INT | 1181 CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, 1182 machdep_syscall_ret_flush_l1d, "I", 1183 "Flush L1D on syscall return with error (0 - off, 1 - on, " 1184 "2 - use hw only, 3 - use sw only)"); 1185 1186 /* 1187 * System call handler for native binaries. The trap frame is already 1188 * set up by the assembler trampoline and a pointer to it is saved in 1189 * td_frame. 1190 */ 1191 void 1192 amd64_syscall(struct thread *td, int traced) 1193 { 1194 ksiginfo_t ksi; 1195 1196 kmsan_mark(td->td_frame, sizeof(*td->td_frame), KMSAN_STATE_INITED); 1197 1198 KASSERT(TRAPF_USERMODE(td->td_frame), 1199 ("%s: not from user mode", __func__)); 1200 1201 syscallenter(td); 1202 1203 /* 1204 * Traced syscall. 1205 */ 1206 if (__predict_false(traced)) { 1207 td->td_frame->tf_rflags &= ~PSL_T; 1208 ksiginfo_init_trap(&ksi); 1209 ksi.ksi_signo = SIGTRAP; 1210 ksi.ksi_code = TRAP_TRACE; 1211 ksi.ksi_addr = (void *)td->td_frame->tf_rip; 1212 trapsignal(td, &ksi); 1213 } 1214 1215 KASSERT(PCB_USER_FPU(td->td_pcb), 1216 ("System call %s returning with kernel FPU ctx leaked", 1217 syscallname(td->td_proc, td->td_sa.code))); 1218 KASSERT(td->td_pcb->pcb_save == get_pcb_user_save_td(td), 1219 ("System call %s returning with mangled pcb_save", 1220 syscallname(td->td_proc, td->td_sa.code))); 1221 KASSERT(pmap_not_in_di(), 1222 ("System call %s returning with leaked invl_gen %lu", 1223 syscallname(td->td_proc, td->td_sa.code), 1224 td->td_md.md_invl_gen.gen)); 1225 1226 syscallret(td); 1227 1228 /* 1229 * If the user-supplied value of %rip is not a canonical 1230 * address, then some CPUs will trigger a ring 0 #GP during 1231 * the sysret instruction. However, the fault handler would 1232 * execute in ring 0 with the user's %gs and %rsp which would 1233 * not be safe. Instead, use the full return path which 1234 * catches the problem safely. 1235 */ 1236 if (__predict_false(td->td_frame->tf_rip >= (la57 ? 1237 VM_MAXUSER_ADDRESS_LA57 : VM_MAXUSER_ADDRESS_LA48))) 1238 set_pcb_flags(td->td_pcb, PCB_FULL_IRET); 1239 1240 amd64_syscall_ret_flush_l1d_check_inline(td->td_errno); 1241 } 1242