/*-
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_cpu.h"
#include "opt_isa.h"
#include "opt_npx.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <sys/rman.h>
#ifdef NPX_DEBUG
#include <sys/syslog.h>
#endif
#include <sys/signalvar.h>
#include <vm/uma.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/resource.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/ucontext.h>
#include <x86/ifunc.h>

#include <machine/intr_machdep.h>

#ifdef DEV_ISA
#include <isa/isavar.h>
#endif

/*
 * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
 */

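/*
 * The wrappers below issue raw FPU instructions via inline asm.  Note
 * the "=am" constraint on fnstsw: the instruction can store the status
 * word either in %ax or in memory, so the compiler is free to pick
 * whichever operand form is cheaper at each call site.
 */
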
#define	fldcw(cw)		__asm __volatile("fldcw %0" : : "m" (cw))
#define	fnclex()		__asm __volatile("fnclex")
#define	fninit()		__asm __volatile("fninit")
#define	fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
#define	fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define	fnstsw(addr)		__asm __volatile("fnstsw %0" : "=am" (*(addr)))
#define	fp_divide_by_0()	__asm __volatile( \
				    "fldz; fld1; fdiv %st,%st(1); fnop")
#define	frstor(addr)		__asm __volatile("frstor %0" : : "m" (*(addr)))
#define	fxrstor(addr)		__asm __volatile("fxrstor %0" : : "m" (*(addr)))
#define	fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
#define	ldmxcsr(csr)		__asm __volatile("ldmxcsr %0" : : "m" (csr))
#define	stmxcsr(addr)		__asm __volatile("stmxcsr %0" : : "m" (*(addr)))

static __inline void
xrstor(char *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));
}

static __inline void
xsave(char *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :
	    "memory");
}

static __inline void
xsaveopt(char *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xsaveopt %0" : "=m" (*addr) : "a" (low), "d" (hi) :
	    "memory");
}

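/*
 * The XSAVE-family instructions take their requested-feature bitmap in
 * %edx:%eax, which is why the 64-bit mask is split in the wrappers
 * above.  For example, a mask of 0x3 (XFEATURE_ENABLED_X87 |
 * XFEATURE_ENABLED_SSE) loads %eax = 3 and %edx = 0 and saves only the
 * legacy x87 and SSE components.
 */
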
#define	GET_FPU_CW(thread) \
	(cpu_fxsr ? \
		(thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_cw : \
		(thread)->td_pcb->pcb_save->sv_87.sv_env.en_cw)
#define	GET_FPU_SW(thread) \
	(cpu_fxsr ? \
		(thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_sw : \
		(thread)->td_pcb->pcb_save->sv_87.sv_env.en_sw)
#define	SET_FPU_CW(savefpu, value) do { \
	if (cpu_fxsr) \
		(savefpu)->sv_xmm.sv_env.en_cw = (value); \
	else \
		(savefpu)->sv_87.sv_env.en_cw = (value); \
} while (0)

CTASSERT(sizeof(union savefpu) == 512);
CTASSERT(sizeof(struct xstate_hdr) == 64);
CTASSERT(sizeof(struct savefpu_ymm) == 832);

/*
 * This requirement is to make it easier for asm code to calculate
 * offset of the fpu save area from the pcb address.  FPU save area
 * must be 64-byte aligned.
 */
CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);

/*
 * Ensure the copy of XCR0 saved in a core is contained in the padding
 * area.
 */
CTASSERT(X86_XSTATE_XCR0_OFFSET >= offsetof(struct savexmm, sv_pad) &&
    X86_XSTATE_XCR0_OFFSET + sizeof(uint64_t) <= sizeof(struct savexmm));

static void	fpu_clean_state(void);

static void	fpurstor(union savefpu *);

int hw_float;

SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &hw_float, 0, "Floating point instructions executed in hardware");

int lazy_fpu_switch = 0;
SYSCTL_INT(_hw, OID_AUTO, lazy_fpu_switch, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
    &lazy_fpu_switch, 0,
    "Lazily load FPU context after context switch");

u_int cpu_fxsr;		/* SSE enabled */
int use_xsave;
uint64_t xsave_mask;
static	uma_zone_t fpu_save_area_zone;
static	union savefpu *npx_initialstate;

static struct xsave_area_elm_descr {
	u_int	offset;
	u_int	size;
} *xsave_area_desc;

static volatile u_int npx_traps_while_probing;

alias_for_inthand_t probetrap;
__asm("								\n\
	.text							\n\
	.p2align 2,0x90						\n\
	.type	" __XSTRING(CNAME(probetrap)) ",@function	\n\
" __XSTRING(CNAME(probetrap)) ":				\n\
	ss							\n\
	incl	" __XSTRING(CNAME(npx_traps_while_probing)) "	\n\
	fnclex							\n\
	iret							\n\
");

/*
 * Determine if an FPU is present and how to use it.
 */
static int
npx_probe(void)
{
	struct gate_descriptor save_idt_npxtrap;
	u_short control, status;

	/*
	 * Modern CPUs all have an FPU that uses the INT16 interface
	 * and provide a simple way to verify that, so handle the
	 * common case right away.
	 */
	if (cpu_feature & CPUID_FPU) {
		hw_float = 1;
		return (1);
	}

	save_idt_npxtrap = idt[IDT_MF];
	setidt(IDT_MF, probetrap, SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	/*
	 * Don't trap while we're probing.
	 */
	fpu_enable();

	/*
	 * Finish resetting the coprocessor, if any.  If there is an error
	 * pending, then we may get a bogus IRQ13, but npx_intr() will handle
	 * it OK.  Bogus halts have never been observed, but we enabled
	 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
	 */
	fninit();

	/*
	 * Don't use fwait here because it might hang.
	 * Don't use fnop here because it usually hangs if there is no FPU.
	 */
	DELAY(1000);		/* wait for any IRQ13 */
#ifdef DIAGNOSTIC
	if (npx_traps_while_probing != 0)
		printf("fninit caused %u bogus npx trap(s)\n",
		    npx_traps_while_probing);
#endif
	/*
	 * Check for a status of mostly zero.
	 */
	status = 0x5a5a;
	fnstsw(&status);
	if ((status & 0xb8ff) == 0) {
		/*
		 * Good, now check for a proper control word.
		 */
		control = 0x5a5a;
		fnstcw(&control);
		if ((control & 0x1f3f) == 0x033f) {
			/*
			 * We have an npx, now divide by 0 to see if exception
			 * 16 works.
			 */
			control &= ~(1 << 2);	/* enable divide by 0 trap */
			fldcw(control);
			npx_traps_while_probing = 0;
			fp_divide_by_0();
			if (npx_traps_while_probing != 0) {
				/*
				 * Good, exception 16 works.
				 */
				hw_float = 1;
				goto cleanup;
			}
			printf(
	"FPU does not use exception 16 for error reporting\n");
			goto cleanup;
		}
	}

	/*
	 * Probe failed.  Floating point simply won't work.
	 * Notify user and disable FPU/MMX/SSE instruction execution.
	 */
	printf("WARNING: no FPU!\n");
	__asm __volatile("smsw %%ax; orb %0,%%al; lmsw %%ax" : :
	    "n" (CR0_EM | CR0_MP) : "ax");

cleanup:
	idt[IDT_MF] = save_idt_npxtrap;
	return (hw_float);
}

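/*
 * A quick sanity check of the probe constants: on a working FPU,
 * fninit loads a control word of 0x037f, and 0x037f & 0x1f3f == 0x033f,
 * so the test above accepts it while rejecting floating-bus patterns
 * such as 0xffff (0xffff & 0x1f3f == 0x1f3f) or all-zero reads.
 */
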
static void
fpusave_xsaveopt(union savefpu *addr)
{

	xsaveopt((char *)addr, xsave_mask);
}

static void
fpusave_xsave(union savefpu *addr)
{

	xsave((char *)addr, xsave_mask);
}

static void
fpusave_fxsave(union savefpu *addr)
{

	fxsave((char *)addr);
}

static void
fpusave_fnsave(union savefpu *addr)
{

	fnsave((char *)addr);
}

DEFINE_IFUNC(, void, fpusave, (union savefpu *))
{
	u_int cp[4];

	if (use_xsave) {
		cpuid_count(0xd, 0x1, cp);
		return ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0 ?
		    fpusave_xsaveopt : fpusave_xsave);
	}
	if (cpu_fxsr)
		return (fpusave_fxsave);
	return (fpusave_fnsave);
}

/*
 * Enable XSAVE if supported and allowed by user.
 * Calculate the xsave_mask.
 */
static void
npxinit_bsp1(void)
{
	u_int cp[4];
	uint64_t xsave_mask_user;

	TUNABLE_INT_FETCH("hw.lazy_fpu_switch", &lazy_fpu_switch);
	if (!use_xsave)
		return;
	cpuid_count(0xd, 0x0, cp);
	xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
	if ((cp[0] & xsave_mask) != xsave_mask)
		panic("CPU0 does not support X87 or SSE: %x", cp[0]);
	xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
	xsave_mask_user = xsave_mask;
	TUNABLE_QUAD_FETCH("hw.xsave_mask", &xsave_mask_user);
	xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
	xsave_mask &= xsave_mask_user;
	if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512)
		xsave_mask &= ~XFEATURE_AVX512;
	if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
		xsave_mask &= ~XFEATURE_MPX;
}

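/*
 * The hw.xsave_mask loader tunable consumed above can only narrow the
 * CPU-reported mask; x87 and SSE are always forced back on.  As an
 * illustration (assuming the conventional XFEATURE bit assignments,
 * x87 = 0x1, SSE = 0x2, AVX = 0x4), setting
 *
 *	hw.xsave_mask=0x7
 *
 * in loader.conf(5) restricts the kernel to x87/SSE/AVX state even on
 * AVX-512 hardware, while hw.xsave_mask=0x1 still results in a mask of
 * at least 0x3 because SSE is ORed back in.
 */
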
/*
 * Calculate the fpu save area size.
 */
static void
npxinit_bsp2(void)
{
	u_int cp[4];

	if (use_xsave) {
		cpuid_count(0xd, 0x0, cp);
		cpu_max_ext_state_size = cp[1];

		/*
		 * Reload the cpu_feature2, since we enabled OSXSAVE.
		 */
		do_cpuid(1, cp);
		cpu_feature2 = cp[2];
	} else
		cpu_max_ext_state_size = sizeof(union savefpu);
}

/*
 * Initialize floating point unit.
 */
void
npxinit(bool bsp)
{
	static union savefpu dummy;
	register_t saveintr;
	u_int mxcsr;
	u_short control;

	if (bsp) {
		if (!npx_probe())
			return;
		npxinit_bsp1();
	}

	if (use_xsave) {
		load_cr4(rcr4() | CR4_XSAVE);
		load_xcr(XCR0, xsave_mask);
	}

	/*
	 * XCR0 shall be set up before CPU can report the save area size.
	 */
	if (bsp)
		npxinit_bsp2();

	/*
	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
	 * fnsave to throw away any junk in the fpu.  fpusave() initializes
	 * the fpu.
	 *
	 * It is too early for critical_enter() to work on AP.
	 */
	saveintr = intr_disable();
	fpu_enable();
	if (cpu_fxsr)
		fninit();
	else
		fnsave(&dummy);
	control = __INITIAL_NPXCW__;
	fldcw(control);
	if (cpu_fxsr) {
		mxcsr = __INITIAL_MXCSR__;
		ldmxcsr(mxcsr);
	}
	fpu_disable();
	intr_restore(saveintr);
}

/*
 * On the boot CPU we generate a clean state that is used to
 * initialize the floating point unit when it is first used by a
 * process.
 */
static void
npxinitstate(void *arg __unused)
{
	uint64_t *xstate_bv;
	register_t saveintr;
	int cp[4], i, max_ext_n;

	if (!hw_float)
		return;

	/* Do potentially blocking operations before disabling interrupts. */
	fpu_save_area_zone = uma_zcreate("FPU_save_area",
	    cpu_max_ext_state_size, NULL, NULL, NULL, NULL,
	    XSAVE_AREA_ALIGN - 1, 0);
	npx_initialstate = uma_zalloc(fpu_save_area_zone, M_WAITOK | M_ZERO);
	if (use_xsave) {
		if (xsave_mask >> 32 != 0)
			max_ext_n = fls(xsave_mask >> 32) + 32;
		else
			max_ext_n = fls(xsave_mask);
		xsave_area_desc = malloc(max_ext_n * sizeof(struct
		    xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO);
	}

	saveintr = intr_disable();
	fpu_enable();

	if (cpu_fxsr)
		fpusave_fxsave(npx_initialstate);
	else
		fpusave_fnsave(npx_initialstate);
	if (cpu_fxsr) {
		if (npx_initialstate->sv_xmm.sv_env.en_mxcsr_mask)
			cpu_mxcsr_mask =
			    npx_initialstate->sv_xmm.sv_env.en_mxcsr_mask;
		else
			cpu_mxcsr_mask = 0xFFBF;

		/*
		 * The fninit instruction does not modify XMM
		 * registers or x87 registers (MM/ST).  The fpusave
		 * call above therefore dumped whatever garbage the
		 * registers held after reset into the initial state.
		 * Clear the XMM and x87 register file images to make
		 * the startup program state and signal handler
		 * XMM/x87 register content predictable.
		 */
		bzero(npx_initialstate->sv_xmm.sv_fp,
		    sizeof(npx_initialstate->sv_xmm.sv_fp));
		bzero(npx_initialstate->sv_xmm.sv_xmm,
		    sizeof(npx_initialstate->sv_xmm.sv_xmm));

	} else
		bzero(npx_initialstate->sv_87.sv_ac,
		    sizeof(npx_initialstate->sv_87.sv_ac));

	/*
	 * Create a table describing the layout of the CPU Extended
	 * Save Area.  See Intel SDM rev. 075 Vol. 1 13.4.1 "Legacy
	 * Region of an XSAVE Area" for the source of offsets/sizes.
	 * Note that 32bit XSAVE does not use %xmm8-%xmm15, see
	 * 10.5.1.2 and 13.5.2 "SSE State".
	 */
	if (use_xsave) {
		xstate_bv = (uint64_t *)((char *)(npx_initialstate + 1) +
		    offsetof(struct xstate_hdr, xstate_bv));
		*xstate_bv = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;

		/* x87 state */
		xsave_area_desc[0].offset = 0;
		xsave_area_desc[0].size = 160;
		/* XMM */
		xsave_area_desc[1].offset = 160;
		xsave_area_desc[1].size = 288 - 160;

		for (i = 2; i < max_ext_n; i++) {
			cpuid_count(0xd, i, cp);
			xsave_area_desc[i].offset = cp[1];
			xsave_area_desc[i].size = cp[0];
		}
	}

	fpu_disable();
	intr_restore(saveintr);
}
SYSINIT(npxinitstate, SI_SUB_CPU, SI_ORDER_ANY, npxinitstate, NULL);

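/*
 * For reference, on typical AVX-capable hardware CPUID leaf 0xd,
 * sub-leaf 2 reports the AVX (%ymm high half) component at offset 576
 * with size 256, so xsave_area_desc[2] would hold { 576, 256 } there;
 * the authoritative values are always the ones CPUID returned above.
 */
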
/*
 * Free coprocessor (if we have it).
 */
void
npxexit(struct thread *td)
{

	critical_enter();
	if (curthread == PCPU_GET(fpcurthread)) {
		fpu_enable();
		fpusave(curpcb->pcb_save);
		fpu_disable();
		PCPU_SET(fpcurthread, NULL);
	}
	critical_exit();
#ifdef NPX_DEBUG
	if (hw_float) {
		u_int masked_exceptions;

		masked_exceptions = GET_FPU_CW(td) & GET_FPU_SW(td) & 0x7f;
		/*
		 * Log exceptions that would have trapped with the old
		 * control word (overflow, divide by 0, and invalid operand).
		 */
		if (masked_exceptions & 0x0d)
			log(LOG_ERR,
	"pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
			    td->td_proc->p_pid, td->td_proc->p_comm,
			    masked_exceptions);
	}
#endif
}

int
npxformat(void)
{

	if (!hw_float)
		return (_MC_FPFMT_NODEV);
	if (cpu_fxsr)
		return (_MC_FPFMT_XMM);
	return (_MC_FPFMT_387);
}

/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice but to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 127 entries.  Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 6 (the
 * stack fault bit, 0x40) if it is set, which cannot be masked but
 * must be preserved.  4) Use the remaining bits to point into the
 * trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 * 1  Invalid operation (FP_X_INV)
 * 1a   Stack underflow
 * 1b   Stack overflow
 * 1c   Operand of unsupported format
 * 1d   SNaN operand.
 * 2  QNaN operand (not an exception, irrelevant here)
 * 3  Any other invalid-operation not mentioned above or zero divide
 *      (FP_X_INV, FP_X_DZ)
 * 4  Denormal operand (FP_X_DNML)
 * 5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 * 6  Inexact result (FP_X_IMP)
 */

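/*
 * Worked example of the trapcode selection done in npxtrap_x87()
 * below: suppose the user unmasked only invalid-operation (control
 * word bit 0 clear) and a stack fault occurred, so the status word
 * holds INV | SF = 0x41.  Then (~control & 0x3f) keeps just bit 0,
 * ORing in 0x40 reinserts the stack fault bit, and 0x41 & 0x41 == 0x41
 * indexes the FPE_FLTSUB entry ("INV | STK") in the table.
 */
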
static char fpetable[128] = {
	0,
	FPE_FLTINV,	/*  1 - INV */
	FPE_FLTUND,	/*  2 - DNML */
	FPE_FLTINV,	/*  3 - INV | DNML */
	FPE_FLTDIV,	/*  4 - DZ */
	FPE_FLTINV,	/*  5 - INV | DZ */
	FPE_FLTDIV,	/*  6 - DNML | DZ */
	FPE_FLTINV,	/*  7 - INV | DNML | DZ */
	FPE_FLTOVF,	/*  8 - OFL */
	FPE_FLTINV,	/*  9 - INV | OFL */
	FPE_FLTUND,	/*  A - DNML | OFL */
	FPE_FLTINV,	/*  B - INV | DNML | OFL */
	FPE_FLTDIV,	/*  C - DZ | OFL */
	FPE_FLTINV,	/*  D - INV | DZ | OFL */
	FPE_FLTDIV,	/*  E - DNML | DZ | OFL */
	FPE_FLTINV,	/*  F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};

/*
 * Read the FP status and control words, then generate si_code value
 * for SIGFPE.  The error code chosen will be one of the
 * FPE_... macros.  It will be sent as the second argument to old
 * BSD-style signal handlers and as "siginfo_t->si_code" (second
 * argument) to SA_SIGINFO signal handlers.
 *
 * Some time ago, we cleared the x87 exceptions with FNCLEX there.
 * Clearing exceptions was necessary mainly to avoid IRQ13 bugs.  The
 * usermode code which understands the FPU hardware enough to enable
 * the exceptions can also handle clearing the exception state in the
 * handler.  The only consequence of not clearing the exception is the
 * rethrow of the SIGFPE on return from the signal handler and
 * reexecution of the corresponding instruction.
 *
 * For XMM traps, the exceptions were never cleared.
 */

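/*
 * npxtrap_sse() below applies the same table to MXCSR, where the
 * exception flags live in bits 0-5 and their mask bits in bits 7-12.
 * Worked example: an unmasked SSE divide-by-zero leaves flag bit 2 set
 * with mask bit 9 clear, so (~mxcsr >> 7) has bit 2 set and the index
 * (mxcsr & (~mxcsr >> 7)) & 0x3f == 0x04 selects FPE_FLTDIV.
 */
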
int
npxtrap_x87(void)
{
	u_short control, status;

	if (!hw_float) {
		printf(
	"npxtrap_x87: fpcurthread = %p, curthread = %p, hw_float = %d\n",
		    PCPU_GET(fpcurthread), curthread, hw_float);
		panic("npxtrap from nowhere");
	}
	critical_enter();

	/*
	 * Interrupt handling (for another interrupt) may have pushed the
	 * state to memory.  Fetch the relevant parts of the state from
	 * wherever they are.
	 */
	if (PCPU_GET(fpcurthread) != curthread) {
		control = GET_FPU_CW(curthread);
		status = GET_FPU_SW(curthread);
	} else {
		fnstcw(&control);
		fnstsw(&status);
	}
	critical_exit();
	return (fpetable[status & ((~control & 0x3f) | 0x40)]);
}

int
npxtrap_sse(void)
{
	u_int mxcsr;

	if (!hw_float) {
		printf(
	"npxtrap_sse: fpcurthread = %p, curthread = %p, hw_float = %d\n",
		    PCPU_GET(fpcurthread), curthread, hw_float);
		panic("npxtrap from nowhere");
	}
	critical_enter();
	if (PCPU_GET(fpcurthread) != curthread)
		mxcsr = curthread->td_pcb->pcb_save->sv_xmm.sv_env.en_mxcsr;
	else
		stmxcsr(&mxcsr);
	critical_exit();
	return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]);
}

static void
restore_npx_curthread(struct thread *td, struct pcb *pcb)
{

	/*
	 * Record new context early in case frstor causes a trap.
	 */
	PCPU_SET(fpcurthread, td);

	fpu_enable();
	if (cpu_fxsr)
		fpu_clean_state();

	if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
		/*
		 * This is the first time this thread has used the FPU or
		 * the PCB doesn't contain a clean FPU state.  Explicitly
		 * load an initial state.
		 *
		 * We prefer to restore the state from the actual save
		 * area in PCB instead of directly loading from
		 * npx_initialstate, to ignite the XSAVEOPT
		 * tracking engine.
		 */
		bcopy(npx_initialstate, pcb->pcb_save, cpu_max_ext_state_size);
		fpurstor(pcb->pcb_save);
		if (pcb->pcb_initial_npxcw != __INITIAL_NPXCW__)
			fldcw(pcb->pcb_initial_npxcw);
		pcb->pcb_flags |= PCB_NPXINITDONE;
		if (PCB_USER_FPU(pcb))
			pcb->pcb_flags |= PCB_NPXUSERINITDONE;
	} else {
		fpurstor(pcb->pcb_save);
	}
}

/*
 * Implement device not available (DNA) exception
 *
 * It would be better to switch FP context here (if curthread != fpcurthread)
 * and not necessarily for every context switch, but it is too hard to
 * access foreign pcb's.
 */
int
npxdna(void)
{
	struct thread *td;

	if (!hw_float)
		return (0);
	td = curthread;
	critical_enter();

	KASSERT((curpcb->pcb_flags & PCB_NPXNOSAVE) == 0,
	    ("npxdna while in fpu_kern_enter(FPU_KERN_NOCTX)"));
	if (__predict_false(PCPU_GET(fpcurthread) == td)) {
		/*
		 * Some virtual machines seem to set %cr0.TS at
		 * arbitrary moments.  Silently clear the TS bit
		 * regardless of the eager/lazy FPU context switch
		 * mode.
		 */
		fpu_enable();
	} else {
		if (__predict_false(PCPU_GET(fpcurthread) != NULL)) {
			printf(
		    "npxdna: fpcurthread = %p (%d), curthread = %p (%d)\n",
			    PCPU_GET(fpcurthread),
			    PCPU_GET(fpcurthread)->td_proc->p_pid,
			    td, td->td_proc->p_pid);
			panic("npxdna");
		}
		restore_npx_curthread(td, td->td_pcb);
	}
	critical_exit();
	return (1);
}

/*
 * Wrapper for fpusave() called from context switch routines.
 *
 * npxsave() must be called with interrupts disabled, so that it clears
 * fpcurthread atomically with saving the state.  We require callers to do the
 * disabling, since most callers need to disable interrupts anyway to call
 * npxsave() atomically with checking fpcurthread.
 */
void
npxsave(union savefpu *addr)
{

	fpu_enable();
	fpusave(addr);
}

void npxswitch(struct thread *td, struct pcb *pcb);
void
npxswitch(struct thread *td, struct pcb *pcb)
{

	if (lazy_fpu_switch || (td->td_pflags & TDP_KTHREAD) != 0 ||
	    !PCB_USER_FPU(pcb)) {
		fpu_disable();
		PCPU_SET(fpcurthread, NULL);
	} else if (PCPU_GET(fpcurthread) != td) {
		restore_npx_curthread(td, pcb);
	}
}

/*
 * Unconditionally save the current co-processor state across suspend and
 * resume.
 */
void
npxsuspend(union savefpu *addr)
{
	register_t cr0;

	if (!hw_float)
		return;
	if (PCPU_GET(fpcurthread) == NULL) {
		bcopy(npx_initialstate, addr, cpu_max_ext_state_size);
		return;
	}
	cr0 = rcr0();
	fpu_enable();
	fpusave(addr);
	load_cr0(cr0);
}

void
npxresume(union savefpu *addr)
{
	register_t cr0;

	if (!hw_float)
		return;

	cr0 = rcr0();
	npxinit(false);
	fpu_enable();
	fpurstor(addr);
	load_cr0(cr0);
}

void
npxdrop(void)
{
	struct thread *td;

	/*
	 * Discard pending exceptions in the !cpu_fxsr case so that unmasked
	 * ones don't cause a panic on the next frstor.
	 */
	if (!cpu_fxsr)
		fnclex();

	td = PCPU_GET(fpcurthread);
	KASSERT(td == curthread, ("npxdrop: fpcurthread != curthread"));
	CRITICAL_ASSERT(td);
	PCPU_SET(fpcurthread, NULL);
	td->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
	fpu_disable();
}

/*
 * Get the user state of the FPU into pcb->pcb_user_save without
 * dropping ownership (if possible).  It returns the FPU ownership
 * status.
 */
int
npxgetregs(struct thread *td)
{
	struct pcb *pcb;
	uint64_t *xstate_bv, bit;
	char *sa;
	union savefpu *s;
	uint32_t mxcsr, mxcsr_mask;
	int max_ext_n, i;
	int owned;
	bool do_mxcsr;

	if (!hw_float)
		return (_MC_FPOWNED_NONE);

	pcb = td->td_pcb;
	critical_enter();
	if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
		bcopy(npx_initialstate, get_pcb_user_save_pcb(pcb),
		    cpu_max_ext_state_size);
		SET_FPU_CW(get_pcb_user_save_pcb(pcb), pcb->pcb_initial_npxcw);
		npxuserinited(td);
		critical_exit();
		return (_MC_FPOWNED_PCB);
	}
	if (td == PCPU_GET(fpcurthread)) {
		fpusave(get_pcb_user_save_pcb(pcb));
		if (!cpu_fxsr)
			/*
			 * fnsave initializes the FPU and destroys whatever
			 * context it contains.  Make sure the FPU owner
			 * starts with a clean state next time.
			 */
			npxdrop();
		owned = _MC_FPOWNED_FPU;
	} else {
		owned = _MC_FPOWNED_PCB;
	}
	if (use_xsave) {
		/*
		 * Handle partially saved state.
		 */
		sa = (char *)get_pcb_user_save_pcb(pcb);
		xstate_bv = (uint64_t *)(sa + sizeof(union savefpu) +
		    offsetof(struct xstate_hdr, xstate_bv));
		if (xsave_mask >> 32 != 0)
			max_ext_n = fls(xsave_mask >> 32) + 32;
		else
			max_ext_n = fls(xsave_mask);
		for (i = 0; i < max_ext_n; i++) {
			bit = 1ULL << i;
			if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0)
				continue;
			do_mxcsr = false;
			if (i == 0 && (*xstate_bv & (XFEATURE_ENABLED_SSE |
			    XFEATURE_ENABLED_AVX)) != 0) {
				/*
				 * x87 area was not saved by XSAVEOPT,
				 * but one of XMM or AVX was.  Then we need
				 * to preserve MXCSR from being overwritten
				 * with the default value.
				 */
				s = (union savefpu *)sa;
				mxcsr = s->sv_xmm.sv_env.en_mxcsr;
				mxcsr_mask = s->sv_xmm.sv_env.en_mxcsr_mask;
				do_mxcsr = true;
			}
			bcopy((char *)npx_initialstate +
			    xsave_area_desc[i].offset,
			    sa + xsave_area_desc[i].offset,
			    xsave_area_desc[i].size);
			if (do_mxcsr) {
				s->sv_xmm.sv_env.en_mxcsr = mxcsr;
				s->sv_xmm.sv_env.en_mxcsr_mask = mxcsr_mask;
			}
			*xstate_bv |= bit;
		}
	}
	critical_exit();
	return (owned);
}

void
npxuserinited(struct thread *td)
{
	struct pcb *pcb;

	CRITICAL_ASSERT(td);
	pcb = td->td_pcb;
	if (PCB_USER_FPU(pcb))
		pcb->pcb_flags |= PCB_NPXINITDONE;
	pcb->pcb_flags |= PCB_NPXUSERINITDONE;
}

int
npxsetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
{
	struct xstate_hdr *hdr, *ehdr;
	size_t len, max_len;
	uint64_t bv;

	/* XXXKIB should we clear all extended state in xstate_bv instead ? */
	if (xfpustate == NULL)
		return (0);
	if (!use_xsave)
		return (EOPNOTSUPP);

	len = xfpustate_size;
	if (len < sizeof(struct xstate_hdr))
		return (EINVAL);
	max_len = cpu_max_ext_state_size - sizeof(union savefpu);
	if (len > max_len)
		return (EINVAL);

	ehdr = (struct xstate_hdr *)xfpustate;
	bv = ehdr->xstate_bv;

	/*
	 * Avoid #gp.
	 */
	if (bv & ~xsave_mask)
		return (EINVAL);

	hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);

	hdr->xstate_bv = bv;
	bcopy(xfpustate + sizeof(struct xstate_hdr),
	    (char *)(hdr + 1), len - sizeof(struct xstate_hdr));

	return (0);
}

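/*
 * Example of the xstate_bv check above: with a kernel xsave_mask of
 * 0x7 (x87 | SSE | AVX), a user context carrying xstate_bv = 0x27 is
 * rejected with EINVAL, since the stray 0x20 bit would otherwise make
 * a later xrstor of that area raise #GP.
 */
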
int
npxsetregs(struct thread *td, union savefpu *addr, char *xfpustate,
	size_t xfpustate_size)
{
	struct pcb *pcb;
	int error;

	if (!hw_float)
		return (ENXIO);

	if (cpu_fxsr)
		addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
	pcb = td->td_pcb;
	error = 0;
	critical_enter();
	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
		error = npxsetxstate(td, xfpustate, xfpustate_size);
		if (error == 0) {
			if (!cpu_fxsr)
				fnclex();	/* As in npxdrop(). */
			bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
			fpurstor(get_pcb_user_save_td(td));
			pcb->pcb_flags |= PCB_NPXUSERINITDONE | PCB_NPXINITDONE;
		}
	} else {
		error = npxsetxstate(td, xfpustate, xfpustate_size);
		if (error == 0) {
			bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
			npxuserinited(td);
		}
	}
	critical_exit();
	return (error);
}

static void
npx_fill_fpregs_xmm1(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	struct env87 *penv_87;
	struct envxmm *penv_xmm;
	struct fpacc87 *fx_reg;
	int i, st;
	uint64_t mantissa;
	uint16_t tw, exp;
	uint8_t ab_tw;

	penv_87 = &sv_87->sv_env;
	penv_xmm = &sv_xmm->sv_env;

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/*
	 * FPU registers and tags.
	 * For ST(i), i = fpu_reg - top; we start with fpu_reg=7.
	 */
	st = 7 - ((penv_xmm->en_sw >> 11) & 7);
	ab_tw = penv_xmm->en_tw;
	tw = 0;
	for (i = 0x80; i != 0; i >>= 1) {
		sv_87->sv_ac[st] = sv_xmm->sv_fp[st].fp_acc;
		tw <<= 2;
		if (ab_tw & i) {
			/* Non-empty - we need to check ST(i) */
			fx_reg = &sv_xmm->sv_fp[st].fp_acc;
			/* The first 64 bits contain the mantissa. */
			mantissa = *((uint64_t *)fx_reg->fp_bytes);
			/*
			 * The final 16 bits contain the sign bit and the
			 * exponent.  Mask the sign bit since it is of no
			 * consequence to these tests.
			 */
			exp = *((uint16_t *)&fx_reg->fp_bytes[8]) & 0x7fff;
			if (exp == 0) {
				if (mantissa == 0)
					tw |= 1;	/* Zero */
				else
					tw |= 2;	/* Denormal */
			} else if (exp == 0x7fff)
				tw |= 2;	/* Infinity or NaN */
		} else
			tw |= 3;	/* Empty */
		st = (st - 1) & 7;
	}
	penv_87->en_tw = tw;
}

void
npx_fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{

	bzero(sv_87, sizeof(*sv_87));
	npx_fill_fpregs_xmm1(sv_xmm, sv_87);
}

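/*
 * Worked example of the tag reconstruction above: a register holding
 * 1.0 has biased exponent 0x3fff and a non-zero mantissa, so neither
 * special case fires and its tag stays 00 (valid); +0.0 has a zero
 * exponent and zero mantissa, giving tag 01 (zero); an empty slot gets
 * tag 11 straight from the abridged tag word.  The reverse mapping in
 * npx_set_fpregs_xmm() below only distinguishes empty (11) from
 * non-empty.
 */
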
void
npx_set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	struct env87 *penv_87;
	struct envxmm *penv_xmm;
	int i;

	penv_87 = &sv_87->sv_env;
	penv_xmm = &sv_xmm->sv_env;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/*
	 * FPU registers and tags.
	 * Abridged / Full translation (values in binary), see FXSAVE spec.
	 * 0		11
	 * 1		00, 01, 10
	 */
	penv_xmm->en_tw = 0;
	for (i = 0; i < 8; ++i) {
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
		if ((penv_87->en_tw & (3 << i * 2)) != (3 << i * 2))
			penv_xmm->en_tw |= 1 << i;
	}
}

void
npx_get_fsave(void *addr)
{
	struct thread *td;
	union savefpu *sv;

	td = curthread;
	npxgetregs(td);
	sv = get_pcb_user_save_td(td);
	if (cpu_fxsr)
		npx_fill_fpregs_xmm1(&sv->sv_xmm, addr);
	else
		bcopy(sv, addr, sizeof(struct env87) +
		    sizeof(struct fpacc87[8]));
}

int
npx_set_fsave(void *addr)
{
	union savefpu sv;
	int error;

	bzero(&sv, sizeof(sv));
	if (cpu_fxsr)
		npx_set_fpregs_xmm(addr, &sv.sv_xmm);
	else
		bcopy(addr, &sv, sizeof(struct env87) +
		    sizeof(struct fpacc87[8]));
	error = npxsetregs(curthread, &sv, NULL, 0);
	return (error);
}

/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
static void
fpu_clean_state(void)
{
	static float dummy_variable = 0.0;
	u_short status;

	/*
	 * Clear the ES bit in the x87 status word if it is currently
	 * set, in order to avoid causing a fault in the upcoming load.
	 */
	fnstsw(&status);
	if (status & 0x80)
		fnclex();

	/*
	 * Load the dummy variable into the x87 stack.  This mangles
	 * the x87 stack, but we don't care since we're about to call
	 * fxrstor() anyway.
	 */
	__asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
}

static void
fpurstor(union savefpu *addr)
{

	if (use_xsave)
		xrstor((char *)addr, xsave_mask);
	else if (cpu_fxsr)
		fxrstor(addr);
	else
		frstor(addr);
}

#ifdef DEV_ISA
/*
 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
 */
static struct isa_pnp_id npxisa_ids[] = {
	{ 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
	{ 0 }
};

static int
npxisa_probe(device_t dev)
{
	int result;

	if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev,
	    npxisa_ids)) <= 0) {
		device_quiet(dev);
	}
	return (result);
}

static int
npxisa_attach(device_t dev)
{

	return (0);
}

static device_method_t npxisa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npxisa_probe),
	DEVMETHOD(device_attach,	npxisa_attach),
	{ 0, 0 }
};

static driver_t npxisa_driver = {
	"npxisa",
	npxisa_methods,
	1,			/* no softc */
};

DRIVER_MODULE(npxisa, isa, npxisa_driver, 0, 0);
DRIVER_MODULE(npxisa, acpi, npxisa_driver, 0, 0);
ISA_PNP_INFO(npxisa_ids);
#endif /* DEV_ISA */

static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
    "Kernel contexts for FPU state");

#define	FPU_KERN_CTX_NPXINITDONE 0x01
#define	FPU_KERN_CTX_DUMMY	0x02
#define	FPU_KERN_CTX_INUSE	0x04

struct fpu_kern_ctx {
	union savefpu *prev;
	uint32_t flags;
	char hwstate1[];
};

struct fpu_kern_ctx *
fpu_kern_alloc_ctx(u_int flags)
{
	struct fpu_kern_ctx *res;
	size_t sz;

	sz = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN +
	    cpu_max_ext_state_size;
	res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
	    M_NOWAIT : M_WAITOK) | M_ZERO);
	return (res);
}

void
fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
{

	KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx"));
	/* XXXKIB clear the memory ? */
	free(ctx, M_FPUKERN_CTX);
}

static union savefpu *
fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)
{
	vm_offset_t p;

	p = (vm_offset_t)&ctx->hwstate1;
	p = roundup2(p, XSAVE_AREA_ALIGN);
	return ((union savefpu *)p);
}

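/*
 * Sketch of typical use of the context API (illustrative only):
 *
 *	struct fpu_kern_ctx *ctx;
 *
 *	ctx = fpu_kern_alloc_ctx(0);
 *	fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
 *	(kernel code may now use x87/SSE/AVX safely)
 *	fpu_kern_leave(curthread, ctx);
 *	fpu_kern_free_ctx(ctx);
 *
 * A kernel thread that registered with fpu_kern_thread() may instead
 * pass FPU_KERN_KTHR and take the cheap dummy-context path in
 * fpu_kern_enter() below.
 */
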
void
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
	    ("ctx is required when !FPU_KERN_NOCTX"));
	KASSERT(ctx == NULL || (ctx->flags & FPU_KERN_CTX_INUSE) == 0,
	    ("using inuse ctx"));
	KASSERT((pcb->pcb_flags & PCB_NPXNOSAVE) == 0,
	    ("recursive fpu_kern_enter while in PCB_NPXNOSAVE state"));

	if ((flags & FPU_KERN_NOCTX) != 0) {
		critical_enter();
		fpu_enable();
		if (curthread == PCPU_GET(fpcurthread)) {
			fpusave(curpcb->pcb_save);
			PCPU_SET(fpcurthread, NULL);
		} else {
			KASSERT(PCPU_GET(fpcurthread) == NULL,
			    ("invalid fpcurthread"));
		}

		/*
		 * This breaks XSAVEOPT tracker, but
		 * PCB_NPXNOSAVE state is supposed to never need to
		 * save FPU context at all.
		 */
		fpurstor(npx_initialstate);
		pcb->pcb_flags |= PCB_KERNNPX | PCB_NPXNOSAVE | PCB_NPXINITDONE;
		return;
	}
	if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
		ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;
		return;
	}
	pcb = td->td_pcb;
	critical_enter();
	KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
	    get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
	ctx->flags = FPU_KERN_CTX_INUSE;
	if ((pcb->pcb_flags & PCB_NPXINITDONE) != 0)
		ctx->flags |= FPU_KERN_CTX_NPXINITDONE;
	npxexit(td);
	ctx->prev = pcb->pcb_save;
	pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
	pcb->pcb_flags |= PCB_KERNNPX;
	pcb->pcb_flags &= ~PCB_NPXINITDONE;
	critical_exit();
}

int
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
{
	struct pcb *pcb;

	pcb = td->td_pcb;

	if ((pcb->pcb_flags & PCB_NPXNOSAVE) != 0) {
		KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
		KASSERT(PCPU_GET(fpcurthread) == NULL,
		    ("non-NULL fpcurthread for PCB_NPXNOSAVE"));
		CRITICAL_ASSERT(td);

		pcb->pcb_flags &= ~(PCB_NPXNOSAVE | PCB_NPXINITDONE);
		fpu_disable();
	} else {
		KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
		    ("leaving not inuse ctx"));
		ctx->flags &= ~FPU_KERN_CTX_INUSE;

		if (is_fpu_kern_thread(0) &&
		    (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
			return (0);
		KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0,
		    ("dummy ctx"));
		critical_enter();
		if (curthread == PCPU_GET(fpcurthread))
			npxdrop();
		pcb->pcb_save = ctx->prev;
	}

	if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
		if ((pcb->pcb_flags & PCB_NPXUSERINITDONE) != 0) {
			pcb->pcb_flags |= PCB_NPXINITDONE;
			if ((pcb->pcb_flags & PCB_KERNNPX_THR) == 0)
				pcb->pcb_flags &= ~PCB_KERNNPX;
		} else if ((pcb->pcb_flags & PCB_KERNNPX_THR) == 0)
			pcb->pcb_flags &= ~(PCB_NPXINITDONE | PCB_KERNNPX);
	} else {
		if ((ctx->flags & FPU_KERN_CTX_NPXINITDONE) != 0)
			pcb->pcb_flags |= PCB_NPXINITDONE;
		else
			pcb->pcb_flags &= ~PCB_NPXINITDONE;
		KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
	}
	critical_exit();
	return (0);
}

int
fpu_kern_thread(u_int flags)
{

	KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
	    ("Only kthread may use fpu_kern_thread"));
	KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb),
	    ("mangled pcb_save"));
	KASSERT(PCB_USER_FPU(curpcb), ("recursive call"));

	curpcb->pcb_flags |= PCB_KERNNPX | PCB_KERNNPX_THR;
	return (0);
}

int
is_fpu_kern_thread(u_int flags)
{

	if ((curthread->td_pflags & TDP_KTHREAD) == 0)
		return (0);
	return ((curpcb->pcb_flags & PCB_KERNNPX_THR) != 0);
}

/*
 * FPU save area alloc/free/init utility routines
 */
union savefpu *
fpu_save_area_alloc(void)
{

	return (uma_zalloc(fpu_save_area_zone, M_WAITOK));
}

void
fpu_save_area_free(union savefpu *fsa)
{

	uma_zfree(fpu_save_area_zone, fsa);
}

void
fpu_save_area_reset(union savefpu *fsa)
{

	bcopy(npx_initialstate, fsa, cpu_max_ext_state_size);
}