/*	$NetBSD: cpufunc.h,v 1.41 2020/06/15 09:09:23 msaitoh Exp $	*/

/*
 * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _X86_CPUFUNC_H_
#define	_X86_CPUFUNC_H_

/*
 * Functions to provide access to x86-specific instructions.
 */

#include <sys/cdefs.h>
#include <sys/types.h>

#include <machine/segments.h>
#include <machine/specialreg.h>

#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#endif

static inline void
x86_pause(void)
{
	__asm volatile ("pause");
}

void	x86_lfence(void);
void	x86_sfence(void);
void	x86_mfence(void);
void	x86_flush(void);
void	x86_hlt(void);
void	x86_stihlt(void);
void	tlbflush(void);
void	tlbflushg(void);
void	invlpg(vaddr_t);
void	wbinvd(void);
void	breakpoint(void);

#define INVPCID_ADDRESS		0
#define INVPCID_CONTEXT		1
#define INVPCID_ALL		2
#define INVPCID_ALL_NONGLOBAL	3

static inline void
invpcid(register_t op, uint64_t pcid, vaddr_t va)
{
	struct {
		uint64_t pcid;
		uint64_t addr;
	} desc = {
		.pcid = pcid,
		.addr = va
	};

	__asm volatile (
		"invpcid %[desc],%[op]"
		:
		: [desc] "m" (desc), [op] "r" (op)
		: "memory"
	);
}

extern uint64_t (*rdtsc)(void);

#define _SERIALIZE_lfence	__asm volatile ("lfence")
#define _SERIALIZE_mfence	__asm volatile ("mfence")
#define _SERIALIZE_cpuid	__asm volatile ("xor %%eax, %%eax;cpuid" ::: \
	    "eax", "ebx", "ecx", "edx");

#define RDTSCFUNC(fence)			\
static inline uint64_t				\
rdtsc_##fence(void)				\
{						\
	uint32_t low, high;			\
						\
	_SERIALIZE_##fence;			\
	__asm volatile (			\
		"rdtsc"				\
		: "=a" (low), "=d" (high)	\
		:				\
	);					\
						\
	return (low | ((uint64_t)high << 32));	\
}

RDTSCFUNC(lfence)
RDTSCFUNC(mfence)
RDTSCFUNC(cpuid)

#undef _SERIALIZE_lfence
#undef _SERIALIZE_mfence
#undef _SERIALIZE_cpuid
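
/*
 * Illustrative sketch, not part of this header: the rdtsc function pointer
 * declared above is assumed to be pointed at one of the serialized
 * rdtsc_{lfence,mfence,cpuid} variants generated here during boot,
 * depending on which serializing instruction the CPU prefers.  A caller
 * that wants a serialized cycle count would then simply do:
 *
 *	uint64_t t0, cycles;
 *
 *	t0 = rdtsc();
 *	// ... code being measured ...
 *	cycles = rdtsc() - t0;
 */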

#ifndef XENPV
struct x86_hotpatch_source {
	uint8_t *saddr;
	uint8_t *eaddr;
};

struct x86_hotpatch_descriptor {
	uint8_t name;
	uint8_t nsrc;
	const struct x86_hotpatch_source *srcs[];
};

void	x86_hotpatch(uint8_t, uint8_t);
void	x86_patch(bool);
#endif

void	x86_monitor(const void *, uint32_t, uint32_t);
void	x86_mwait(uint32_t, uint32_t);

static inline void
x86_cpuid2(uint32_t eax, uint32_t ecx, uint32_t *regs)
{
	uint32_t ebx, edx;

	__asm volatile (
		"cpuid"
		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		: "a" (eax), "c" (ecx)
	);

	regs[0] = eax;
	regs[1] = ebx;
	regs[2] = ecx;
	regs[3] = edx;
}
#define x86_cpuid(a,b)	x86_cpuid2((a), 0, (b))

/* -------------------------------------------------------------------------- */

void	lidt(struct region_descriptor *);
void	lldt(u_short);
void	ltr(u_short);

static inline uint16_t
x86_getss(void)
{
	uint16_t val;

	__asm volatile (
		"mov %%ss,%[val]"
		: [val] "=r" (val)
		:
	);
	return val;
}

static inline void
setds(uint16_t val)
{
	__asm volatile (
		"mov %[val],%%ds"
		:
		: [val] "r" (val)
	);
}

static inline void
setes(uint16_t val)
{
	__asm volatile (
		"mov %[val],%%es"
		:
		: [val] "r" (val)
	);
}

static inline void
setfs(uint16_t val)
{
	__asm volatile (
		"mov %[val],%%fs"
		:
		: [val] "r" (val)
	);
}

void	setusergs(int);

/* -------------------------------------------------------------------------- */

#define FUNC_CR(crnum)					\
static inline void lcr##crnum(register_t val)		\
{							\
	__asm volatile (				\
		"mov %[val],%%cr" #crnum		\
		:					\
		: [val] "r" (val)			\
		: "memory"				\
	);						\
}							\
static inline register_t rcr##crnum(void)		\
{							\
	register_t val;					\
	__asm volatile (				\
		"mov %%cr" #crnum ",%[val]"		\
		: [val] "=r" (val)			\
		:					\
	);						\
	return val;					\
}

#define PROTO_CR(crnum)					\
void lcr##crnum(register_t);				\
register_t rcr##crnum(void);

#ifndef XENPV
FUNC_CR(0)
FUNC_CR(2)
FUNC_CR(3)
#else
PROTO_CR(0)
PROTO_CR(2)
PROTO_CR(3)
#endif

FUNC_CR(4)
FUNC_CR(8)
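
/*
 * Illustrative sketch, not part of this header: the generated accessors are
 * typically used to read-modify-write a control register.  For example,
 * enabling the FXSAVE/SSE support bits in %cr4 (CR4_OSFXSR and
 * CR4_OSXMMEXCPT come from <machine/specialreg.h>) could look like:
 *
 *	lcr4(rcr4() | CR4_OSFXSR | CR4_OSXMMEXCPT);
 */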

/* -------------------------------------------------------------------------- */

#define FUNC_DR(drnum)					\
static inline void ldr##drnum(register_t val)		\
{							\
	__asm volatile (				\
		"mov %[val],%%dr" #drnum		\
		:					\
		: [val] "r" (val)			\
	);						\
}							\
static inline register_t rdr##drnum(void)		\
{							\
	register_t val;					\
	__asm volatile (				\
		"mov %%dr" #drnum ",%[val]"		\
		: [val] "=r" (val)			\
		:					\
	);						\
	return val;					\
}

#define PROTO_DR(drnum)					\
register_t rdr##drnum(void);				\
void ldr##drnum(register_t);

#ifndef XENPV
FUNC_DR(0)
FUNC_DR(1)
FUNC_DR(2)
FUNC_DR(3)
FUNC_DR(6)
FUNC_DR(7)
#else
PROTO_DR(0)
PROTO_DR(1)
PROTO_DR(2)
PROTO_DR(3)
PROTO_DR(6)
PROTO_DR(7)
#endif

/* -------------------------------------------------------------------------- */

union savefpu;

static inline void
fninit(void)
{
	__asm volatile ("fninit" ::: "memory");
}

static inline void
fnclex(void)
{
	__asm volatile ("fnclex");
}

static inline void
fnstcw(uint16_t *val)
{
	__asm volatile (
		"fnstcw %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
fnstsw(uint16_t *val)
{
	__asm volatile (
		"fnstsw %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
clts(void)
{
	__asm volatile ("clts" ::: "memory");
}

void	stts(void);

static inline void
x86_stmxcsr(uint32_t *val)
{
	__asm volatile (
		"stmxcsr %[val]"
		: [val] "=m" (*val)
		:
	);
}

static inline void
x86_ldmxcsr(uint32_t *val)
{
	__asm volatile (
		"ldmxcsr %[val]"
		:
		: [val] "m" (*val)
	);
}

void	fldummy(void);

static inline uint64_t
rdxcr(uint32_t xcr)
{
	uint32_t low, high;

	__asm volatile (
		"xgetbv"
		: "=a" (low), "=d" (high)
		: "c" (xcr)
	);

	return (low | ((uint64_t)high << 32));
}

static inline void
wrxcr(uint32_t xcr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"xsetbv"
		:
		: "a" (low), "d" (high), "c" (xcr)
	);
}

static inline void
fnsave(void *addr)
{
	uint8_t *area = addr;

	__asm volatile (
		"fnsave %[area]"
		: [area] "=m" (*area)
		:
		: "memory"
	);
}

static inline void
frstor(const void *addr)
{
	const uint8_t *area = addr;

	__asm volatile (
		"frstor %[area]"
		:
		: [area] "m" (*area)
		: "memory"
	);
}

static inline void
fxsave(void *addr)
{
	uint8_t *area = addr;

	__asm volatile (
		"fxsave %[area]"
		: [area] "=m" (*area)
		:
		: "memory"
	);
}

static inline void
fxrstor(const void *addr)
{
	const uint8_t *area = addr;

	__asm volatile (
		"fxrstor %[area]"
		:
		: [area] "m" (*area)
		: "memory"
	);
}

static inline void
xsave(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsave %[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xsaveopt(void *addr, uint64_t mask)
{
	uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xsaveopt %[area]"
		: [area] "=m" (*area)
		: "a" (low), "d" (high)
		: "memory"
	);
}

static inline void
xrstor(const void *addr, uint64_t mask)
{
	const uint8_t *area = addr;
	uint32_t low, high;

	low = mask;
	high = mask >> 32;
	__asm volatile (
		"xrstor %[area]"
		:
		: [area] "m" (*area), "a" (low), "d" (high)
		: "memory"
	);
}

/* -------------------------------------------------------------------------- */

#ifdef XENPV
void	x86_disable_intr(void);
void	x86_enable_intr(void);
#else
static inline void
x86_disable_intr(void)
{
	__asm volatile ("cli" ::: "memory");
}

static inline void
x86_enable_intr(void)
{
	__asm volatile ("sti" ::: "memory");
}
#endif /* XENPV */

/* Use read_psl, write_psl when saving and restoring interrupt state. */
u_long	x86_read_psl(void);
void	x86_write_psl(u_long);
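
/*
 * Illustrative sketch, not part of this header: the usual pattern for a
 * caller that must block interrupts around a short critical section and
 * then restore whatever interrupt state was in effect before (the critical
 * section itself is hypothetical):
 *
 *	u_long psl = x86_read_psl();
 *	x86_disable_intr();
 *	// ... critical section ...
 *	x86_write_psl(psl);
 */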

/* Use read_flags, write_flags to adjust other members of %eflags. */
u_long	x86_read_flags(void);
void	x86_write_flags(u_long);

void	x86_reset(void);

/* -------------------------------------------------------------------------- */

/*
 * Some of the undocumented AMD64 MSRs need a 'passcode' to access.
 * See LinuxBIOSv2: src/cpu/amd/model_fxx/model_fxx_init.c
 */
#define	OPTERON_MSR_PASSCODE	0x9c5a203aU

static inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr)
	);

	return (low | ((uint64_t)high << 32));
}

static inline uint64_t
rdmsr_locked(u_int msr)
{
	uint32_t low, high, pass = OPTERON_MSR_PASSCODE;

	__asm volatile (
		"rdmsr"
		: "=a" (low), "=d" (high)
		: "c" (msr), "D" (pass)
	);

	return (low | ((uint64_t)high << 32));
}

int	rdmsr_safe(u_int, uint64_t *);

static inline void
wrmsr(u_int msr, uint64_t val)
{
	uint32_t low, high;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr)
		: "memory"
	);
}

static inline void
wrmsr_locked(u_int msr, uint64_t val)
{
	uint32_t low, high, pass = OPTERON_MSR_PASSCODE;

	low = val;
	high = val >> 32;
	__asm volatile (
		"wrmsr"
		:
		: "a" (low), "d" (high), "c" (msr), "D" (pass)
		: "memory"
	);
}

#endif /* _KERNEL */

#endif /* !_X86_CPUFUNC_H_ */