1 /* $NetBSD: identcpu.c,v 1.133 2025/01/17 10:38:48 riastradh Exp $ */ 2 3 /*- 4 * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Frank van der Linden, and by Jason R. Thorpe. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.133 2025/01/17 10:38:48 riastradh Exp $"); 34 35 #include "opt_xen.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/device.h> 40 #include <sys/cpu.h> 41 42 #include <crypto/aes/aes_impl.h> 43 #include <crypto/aes/arch/x86/aes_ni.h> 44 #include <crypto/aes/arch/x86/aes_sse2.h> 45 #include <crypto/aes/arch/x86/aes_ssse3.h> 46 #include <crypto/aes/arch/x86/aes_via.h> 47 #include <crypto/chacha/chacha_impl.h> 48 #include <crypto/chacha/arch/x86/chacha_sse2.h> 49 50 #include <uvm/uvm_extern.h> 51 52 #include <machine/specialreg.h> 53 #include <machine/pio.h> 54 #include <machine/cpu.h> 55 56 #include <x86/cputypes.h> 57 #include <x86/cacheinfo.h> 58 #include <x86/cpuvar.h> 59 #include <x86/fpu.h> 60 61 #include <dev/vmt/vmtreg.h> /* for vmt_hvcall() */ 62 #include <dev/vmt/vmtvar.h> /* for vmt_hvcall() */ 63 64 #ifndef XENPV 65 #include "hyperv.h" 66 #if NHYPERV > 0 67 #include <x86/x86/hypervvar.h> 68 #endif 69 #endif 70 71 static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO; 72 73 static const struct x86_cache_info amd_cpuid_l2l3cache_assoc_info[] = 74 AMD_L2L3CACHE_INFO; 75 76 int cpu_vendor; 77 char cpu_brand_string[49]; 78 79 int x86_fpu_save __read_mostly; 80 unsigned int x86_fpu_save_size __read_mostly = sizeof(struct save87); 81 uint64_t x86_xsave_features __read_mostly = 0; 82 size_t x86_xsave_offsets[XSAVE_MAX_COMPONENT+1] __read_mostly; 83 size_t x86_xsave_sizes[XSAVE_MAX_COMPONENT+1] __read_mostly; 84 85 /* 86 * Note: these are just the ones that may not have a cpuid instruction. 87 * We deal with the rest in a different way. 88 */ 89 const int i386_nocpuid_cpus[] = { 90 CPUVENDOR_INTEL, CPUCLASS_386, /* CPU_386SX */ 91 CPUVENDOR_INTEL, CPUCLASS_386, /* CPU_386 */ 92 CPUVENDOR_INTEL, CPUCLASS_486, /* CPU_486SX */ 93 CPUVENDOR_INTEL, CPUCLASS_486, /* CPU_486 */ 94 CPUVENDOR_CYRIX, CPUCLASS_486, /* CPU_486DLC */ 95 CPUVENDOR_CYRIX, CPUCLASS_486, /* CPU_6x86 */ 96 CPUVENDOR_NEXGEN, CPUCLASS_386, /* CPU_NX586 */ 97 }; 98 99 static const char cpu_vendor_names[][10] = { 100 "Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta", 101 "Vortex86" 102 }; 103 104 static void 105 cpu_probe_intel_cache(struct cpu_info *ci) 106 { 107 const struct x86_cache_info *cai; 108 u_int descs[4]; 109 int iterations, i, j; 110 uint8_t desc; 111 112 if (cpuid_level >= 2) { 113 /* Parse the cache info from `cpuid leaf 2', if we have it. */ 114 x86_cpuid(2, descs); 115 iterations = descs[0] & 0xff; 116 while (iterations-- > 0) { 117 for (i = 0; i < 4; i++) { 118 if (descs[i] & 0x80000000) 119 continue; 120 for (j = 0; j < 4; j++) { 121 if (i == 0 && j == 0) 122 continue; 123 desc = (descs[i] >> (j * 8)) & 0xff; 124 if (desc == 0) 125 continue; 126 cai = cpu_cacheinfo_lookup( 127 intel_cpuid_cache_info, desc); 128 if (cai != NULL) { 129 ci->ci_cinfo[cai->cai_index] = 130 *cai; 131 } 132 } 133 } 134 } 135 } 136 137 if (cpuid_level < 4) 138 return; 139 140 /* Parse the cache info from `cpuid leaf 4', if we have it. */ 141 cpu_dcp_cacheinfo(ci, 4); 142 } 143 144 static void 145 cpu_probe_intel_errata(struct cpu_info *ci) 146 { 147 u_int family, model; 148 149 family = CPUID_TO_FAMILY(ci->ci_signature); 150 model = CPUID_TO_MODEL(ci->ci_signature); 151 152 /* 153 * For details, refer to the Intel Pentium and Celeron Processor 154 * N- and J- Series Specification Update (Document number: 334820-010), 155 * August 2022, Revision 010. See page 28, Section 5.30: "APL30 A Store 156 * Instruction May Not Wake Up MWAIT." 157 * https://cdrdv2-public.intel.com/334820/334820-APL_Spec_Update_rev010.pdf 158 * https://web.archive.org/web/20250114072355/https://cdrdv2-public.intel.com/334820/334820-APL_Spec_Update_rev010.pdf 159 * 160 * Disable MWAIT/MONITOR on Apollo Lake CPUs to address the 161 * APL30 erratum. When using the MONITOR/MWAIT instruction 162 * pair, stores to the armed address range may fail to trigger 163 * MWAIT to resume execution. When these instructions are used 164 * to hatch secondary CPUs, this erratum causes SMP boot 165 * failures. 166 */ 167 if (family == 0x6 && model == 0x5C) { 168 wrmsr(MSR_MISC_ENABLE, 169 rdmsr(MSR_MISC_ENABLE) & ~IA32_MISC_MWAIT_EN); 170 171 cpu_feature[1] &= ~CPUID2_MONITOR; 172 ci->ci_feat_val[1] &= ~CPUID2_MONITOR; 173 } 174 } 175 176 static void 177 cpu_probe_intel(struct cpu_info *ci) 178 { 179 180 if (cpu_vendor != CPUVENDOR_INTEL) 181 return; 182 183 cpu_probe_intel_cache(ci); 184 cpu_probe_intel_errata(ci); 185 } 186 187 static void 188 cpu_probe_amd_cache(struct cpu_info *ci) 189 { 190 const struct x86_cache_info *cp; 191 struct x86_cache_info *cai; 192 int family, model; 193 u_int descs[4]; 194 u_int lfunc; 195 196 family = CPUID_TO_FAMILY(ci->ci_signature); 197 model = CPUID_TO_MODEL(ci->ci_signature); 198 199 /* K5 model 0 has none of this info. */ 200 if (family == 5 && model == 0) 201 return; 202 203 /* Determine the largest extended function value. */ 204 x86_cpuid(0x80000000, descs); 205 lfunc = descs[0]; 206 207 if (lfunc < 0x80000005) 208 return; 209 210 /* Determine L1 cache/TLB info. */ 211 x86_cpuid(0x80000005, descs); 212 213 /* K6-III and higher have large page TLBs. */ 214 if ((family == 5 && model >= 9) || family >= 6) { 215 cai = &ci->ci_cinfo[CAI_ITLB2]; 216 cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]); 217 cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]); 218 cai->cai_linesize = (4 * 1024 * 1024); 219 220 cai = &ci->ci_cinfo[CAI_DTLB2]; 221 cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]); 222 cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]); 223 cai->cai_linesize = (4 * 1024 * 1024); 224 } 225 226 cai = &ci->ci_cinfo[CAI_ITLB]; 227 cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]); 228 cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]); 229 cai->cai_linesize = (4 * 1024); 230 231 cai = &ci->ci_cinfo[CAI_DTLB]; 232 cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]); 233 cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]); 234 cai->cai_linesize = (4 * 1024); 235 236 cai = &ci->ci_cinfo[CAI_DCACHE]; 237 cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]); 238 cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]); 239 cai->cai_linesize = AMD_L1_ECX_DC_LS(descs[2]); 240 241 cai = &ci->ci_cinfo[CAI_ICACHE]; 242 cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]); 243 cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]); 244 cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]); 245 246 if (lfunc < 0x80000006) 247 return; 248 249 /* Determine L2 cache/TLB info. */ 250 x86_cpuid(0x80000006, descs); 251 252 cai = &ci->ci_cinfo[CAI_L2CACHE]; 253 cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]); 254 cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]); 255 cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]); 256 257 cp = cpu_cacheinfo_lookup(amd_cpuid_l2l3cache_assoc_info, 258 cai->cai_associativity); 259 if (cp != NULL) 260 cai->cai_associativity = cp->cai_associativity; 261 else 262 cai->cai_associativity = 0; /* XXX Unknown/reserved */ 263 264 if (family < 0xf) 265 return; 266 267 /* Determine L3 cache info on AMD Family 10h and newer processors */ 268 cai = &ci->ci_cinfo[CAI_L3CACHE]; 269 cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]); 270 cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]); 271 cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]); 272 273 cp = cpu_cacheinfo_lookup(amd_cpuid_l2l3cache_assoc_info, 274 cai->cai_associativity); 275 if (cp != NULL) 276 cai->cai_associativity = cp->cai_associativity; 277 else 278 cai->cai_associativity = 0; /* XXX Unknown reserved */ 279 280 if (lfunc < 0x80000019) 281 return; 282 283 /* Determine 1GB TLB info. */ 284 x86_cpuid(0x80000019, descs); 285 286 cai = &ci->ci_cinfo[CAI_L1_1GBDTLB]; 287 cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]); 288 cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]); 289 cai->cai_linesize = (1 * 1024); 290 291 cai = &ci->ci_cinfo[CAI_L1_1GBITLB]; 292 cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]); 293 cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]); 294 cai->cai_linesize = (1 * 1024); 295 296 cai = &ci->ci_cinfo[CAI_L2_1GBDTLB]; 297 cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]); 298 cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]); 299 cai->cai_linesize = (1 * 1024); 300 301 cai = &ci->ci_cinfo[CAI_L2_1GBITLB]; 302 cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]); 303 cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]); 304 cai->cai_linesize = (1 * 1024); 305 306 if (lfunc < 0x8000001d) 307 return; 308 309 if (ci->ci_feat_val[3] & CPUID_TOPOEXT) 310 cpu_dcp_cacheinfo(ci, 0x8000001d); 311 } 312 313 static void 314 cpu_probe_amd_errata(struct cpu_info *ci) 315 { 316 u_int model; 317 uint64_t val; 318 int flag; 319 320 model = CPUID_TO_MODEL(ci->ci_signature); 321 322 switch (CPUID_TO_FAMILY(ci->ci_signature)) { 323 case 0x05: /* K5 */ 324 if (model == 0) { 325 /* 326 * According to the AMD Processor Recognition App Note, 327 * the AMD-K5 Model 0 uses the wrong bit to indicate 328 * support for global PTEs, instead using bit 9 (APIC) 329 * rather than bit 13 (i.e. "0x200" vs. 0x2000"). 330 */ 331 flag = ci->ci_feat_val[0]; 332 if ((flag & CPUID_APIC) != 0) 333 flag = (flag & ~CPUID_APIC) | CPUID_PGE; 334 ci->ci_feat_val[0] = flag; 335 } 336 break; 337 338 case 0x10: /* Family 10h */ 339 /* 340 * On Family 10h, certain BIOSes do not enable WC+ support. 341 * This causes WC+ to become CD, and degrades guest 342 * performance at the NPT level. 343 * 344 * Explicitly enable WC+ if we're not a guest. 345 */ 346 if (!ISSET(ci->ci_feat_val[1], CPUID2_RAZ)) { 347 val = rdmsr(MSR_BU_CFG2); 348 val &= ~BU_CFG2_CWPLUS_DIS; 349 wrmsr(MSR_BU_CFG2, val); 350 } 351 break; 352 353 case 0x17: 354 /* 355 * "Revision Guide for AMD Family 17h Models 00h-0Fh 356 * Processors" revision 1.12: 357 * 358 * 1057 MWAIT or MWAITX Instructions May Fail to Correctly 359 * Exit From the Monitor Event Pending State 360 * 361 * 1109 MWAIT Instruction May Hang a Thread 362 */ 363 if (model == 0x01) { 364 cpu_feature[1] &= ~CPUID2_MONITOR; 365 ci->ci_feat_val[1] &= ~CPUID2_MONITOR; 366 } 367 break; 368 } 369 } 370 371 static void 372 cpu_probe_amd(struct cpu_info *ci) 373 { 374 375 if (cpu_vendor != CPUVENDOR_AMD) 376 return; 377 378 cpu_probe_amd_cache(ci); 379 cpu_probe_amd_errata(ci); 380 } 381 382 static inline uint8_t 383 cyrix_read_reg(uint8_t reg) 384 { 385 386 outb(0x22, reg); 387 return inb(0x23); 388 } 389 390 static inline void 391 cyrix_write_reg(uint8_t reg, uint8_t data) 392 { 393 394 outb(0x22, reg); 395 outb(0x23, data); 396 } 397 398 static void 399 cpu_probe_cyrix_cmn(struct cpu_info *ci) 400 { 401 /* 402 * i8254 latch check routine: 403 * National Geode (formerly Cyrix MediaGX) has a serious bug in 404 * its built-in i8254-compatible clock module (cs5510 cs5520). 405 * Set the variable 'clock_broken_latch' to indicate it. 406 * 407 * This bug is not present in the cs5530, and the flag 408 * is disabled again in sys/arch/i386/pci/pcib.c if this later 409 * model device is detected. Ideally, this work-around should not 410 * even be in here, it should be in there. XXX 411 */ 412 uint8_t c3; 413 #ifndef XENPV 414 extern int clock_broken_latch; 415 416 switch (ci->ci_signature) { 417 case 0x440: /* Cyrix MediaGX */ 418 case 0x540: /* GXm */ 419 clock_broken_latch = 1; 420 break; 421 } 422 #endif 423 424 /* set up various cyrix registers */ 425 /* 426 * Enable suspend on halt (powersave mode). 427 * When powersave mode is enabled, the TSC stops counting 428 * while the CPU is halted in idle() waiting for an interrupt. 429 * This means we can't use the TSC for interval time in 430 * microtime(9), and thus it is disabled here. 431 * 432 * It still makes a perfectly good cycle counter 433 * for program profiling, so long as you remember you're 434 * counting cycles, and not time. Further, if you don't 435 * mind not using powersave mode, the TSC works just fine, 436 * so this should really be optional. XXX 437 */ 438 cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08); 439 440 /* 441 * Do not disable the TSC on the Geode GX, it's reported to 442 * work fine. 443 */ 444 if (ci->ci_signature != 0x552) 445 ci->ci_feat_val[0] &= ~CPUID_TSC; 446 447 /* enable access to ccr4/ccr5 */ 448 c3 = cyrix_read_reg(0xC3); 449 cyrix_write_reg(0xC3, c3 | 0x10); 450 /* cyrix's workaround for the "coma bug" */ 451 cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8); 452 cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f); 453 cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xffu); 454 cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87); 455 /* disable access to ccr4/ccr5 */ 456 cyrix_write_reg(0xC3, c3); 457 } 458 459 static void 460 cpu_probe_cyrix(struct cpu_info *ci) 461 { 462 463 if (cpu_vendor != CPUVENDOR_CYRIX || 464 CPUID_TO_FAMILY(ci->ci_signature) < 4 || 465 CPUID_TO_FAMILY(ci->ci_signature) > 6) 466 return; 467 468 cpu_probe_cyrix_cmn(ci); 469 } 470 471 static void 472 cpu_probe_winchip(struct cpu_info *ci) 473 { 474 475 if (cpu_vendor != CPUVENDOR_IDT || 476 CPUID_TO_FAMILY(ci->ci_signature) != 5) 477 return; 478 479 /* WinChip C6 */ 480 if (CPUID_TO_MODEL(ci->ci_signature) == 4) 481 ci->ci_feat_val[0] &= ~CPUID_TSC; 482 } 483 484 static void 485 cpu_probe_c3(struct cpu_info *ci) 486 { 487 u_int family, model, stepping, descs[4], lfunc, msr; 488 struct x86_cache_info *cai; 489 490 if (cpu_vendor != CPUVENDOR_IDT || 491 CPUID_TO_FAMILY(ci->ci_signature) < 6) 492 return; 493 494 family = CPUID_TO_FAMILY(ci->ci_signature); 495 model = CPUID_TO_MODEL(ci->ci_signature); 496 stepping = CPUID_TO_STEPPING(ci->ci_signature); 497 498 if (family == 6) { 499 /* 500 * VIA Eden ESP. 501 * 502 * Quoting from page 3-4 of: "VIA Eden ESP Processor Datasheet" 503 * http://www.via.com.tw/download/mainboards/6/14/Eden20v115.pdf 504 * 505 * 1. The CMPXCHG8B instruction is provided and always enabled, 506 * however, it appears disabled in the corresponding CPUID 507 * function bit 0 to avoid a bug in an early version of 508 * Windows NT. However, this default can be changed via a 509 * bit in the FCR MSR. 510 */ 511 ci->ci_feat_val[0] |= CPUID_CX8; 512 wrmsr(MSR_VIA_FCR, rdmsr(MSR_VIA_FCR) | VIA_FCR_CX8_REPORT); 513 514 /* 515 * For reference on VIA Alternate Instructions, see the VIA C3 516 * Processor Alternate Instruction Set Application Note, 2002. 517 * http://www.bitsavers.org/components/viaTechnologies/C3-ais-appnote.pdf 518 * 519 * Disable unsafe ALTINST mode for VIA C3 processors, if necessary. 520 * 521 * This is done for the security reasons, as some CPUs were 522 * found with ALTINST enabled by default. This functionality 523 * has ability to bypass many x86 architecture memory 524 * protections and privilege checks, exposing a possibility 525 * for backdoors and should not be enabled unintentionally. 526 */ 527 if (model > 0x5 && model < 0xA) { 528 int disable_ais = 0; 529 x86_cpuid(0xc0000000, descs); 530 lfunc = descs[0]; 531 /* Check AIS flags first if supported ("Nehemiah"). */ 532 if (lfunc >= 0xc0000001) { 533 x86_cpuid(0xc0000001, descs); 534 lfunc = descs[3]; 535 if ((lfunc & CPUID_VIA_HAS_AIS) 536 && (lfunc & CPUID_VIA_DO_AIS)) { 537 disable_ais = 1; 538 } 539 } else /* Explicitly disable AIS for pre-CX5L CPUs. */ 540 disable_ais = 1; 541 542 if (disable_ais) { 543 msr = rdmsr(MSR_VIA_FCR); 544 wrmsr(MSR_VIA_FCR, msr & ~VIA_FCR_ALTINST_ENABLE); 545 } 546 } 547 } 548 549 if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) { 550 /* VIA Nehemiah or later. */ 551 x86_cpuid(0xc0000000, descs); 552 lfunc = descs[0]; 553 if (lfunc >= 0xc0000001) { /* has ACE, RNG */ 554 int rng_enable = 0, ace_enable = 0; 555 x86_cpuid(0xc0000001, descs); 556 lfunc = descs[3]; 557 ci->ci_feat_val[4] = lfunc; 558 /* Check for and enable RNG */ 559 if (lfunc & CPUID_VIA_HAS_RNG) { 560 if (!(lfunc & CPUID_VIA_DO_RNG)) { 561 rng_enable++; 562 ci->ci_feat_val[4] |= CPUID_VIA_DO_RNG; 563 } 564 } 565 /* Check for and enable ACE (AES-CBC) */ 566 if (lfunc & CPUID_VIA_HAS_ACE) { 567 if (!(lfunc & CPUID_VIA_DO_ACE)) { 568 ace_enable++; 569 ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE; 570 } 571 } 572 /* Check for and enable SHA */ 573 if (lfunc & CPUID_VIA_HAS_PHE) { 574 if (!(lfunc & CPUID_VIA_DO_PHE)) { 575 ace_enable++; 576 ci->ci_feat_val[4] |= CPUID_VIA_DO_PHE; 577 } 578 } 579 /* Check for and enable ACE2 (AES-CTR) */ 580 if (lfunc & CPUID_VIA_HAS_ACE2) { 581 if (!(lfunc & CPUID_VIA_DO_ACE2)) { 582 ace_enable++; 583 ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE2; 584 } 585 } 586 /* Check for and enable PMM (modmult engine) */ 587 if (lfunc & CPUID_VIA_HAS_PMM) { 588 if (!(lfunc & CPUID_VIA_DO_PMM)) { 589 ace_enable++; 590 ci->ci_feat_val[4] |= CPUID_VIA_DO_PMM; 591 } 592 } 593 594 /* 595 * Actually do the enables. It's a little gross, 596 * but per the PadLock programming guide, "Enabling 597 * PadLock", condition 3, we must enable SSE too or 598 * else the first use of RNG or ACE instructions 599 * will generate a trap. 600 * 601 * We must do this early because of kernel RNG 602 * initialization but it is safe without the full 603 * FPU-detect as all these CPUs have SSE. 604 */ 605 lcr4(rcr4() | CR4_OSFXSR); 606 607 if (rng_enable) { 608 msr = rdmsr(MSR_VIA_RNG); 609 msr |= MSR_VIA_RNG_ENABLE; 610 /* C7 stepping 8 and subsequent CPUs have dual RNG */ 611 if (model > 0xA || (model == 0xA && stepping > 0x7)) { 612 msr |= MSR_VIA_RNG_2NOISE; 613 } 614 wrmsr(MSR_VIA_RNG, msr); 615 } 616 617 if (ace_enable) { 618 msr = rdmsr(MSR_VIA_FCR); 619 wrmsr(MSR_VIA_FCR, msr | VIA_FCR_ACE_ENABLE); 620 } 621 } 622 } 623 624 /* Determine the largest extended function value. */ 625 x86_cpuid(0x80000000, descs); 626 lfunc = descs[0]; 627 628 /* 629 * Determine L1 cache/TLB info. 630 */ 631 if (lfunc < 0x80000005) { 632 /* No L1 cache info available. */ 633 return; 634 } 635 636 x86_cpuid(0x80000005, descs); 637 638 cai = &ci->ci_cinfo[CAI_ITLB]; 639 cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]); 640 cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]); 641 cai->cai_linesize = (4 * 1024); 642 643 cai = &ci->ci_cinfo[CAI_DTLB]; 644 cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]); 645 cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]); 646 cai->cai_linesize = (4 * 1024); 647 648 cai = &ci->ci_cinfo[CAI_DCACHE]; 649 cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]); 650 cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]); 651 cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]); 652 if (family == 6 && model == 9 && stepping == 8) { 653 /* Erratum: stepping 8 reports 4 when it should be 2 */ 654 cai->cai_associativity = 2; 655 } 656 657 cai = &ci->ci_cinfo[CAI_ICACHE]; 658 cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]); 659 cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]); 660 cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]); 661 if (family == 6 && model == 9 && stepping == 8) { 662 /* Erratum: stepping 8 reports 4 when it should be 2 */ 663 cai->cai_associativity = 2; 664 } 665 666 /* 667 * Determine L2 cache/TLB info. 668 */ 669 if (lfunc < 0x80000006) { 670 /* No L2 cache info available. */ 671 return; 672 } 673 674 x86_cpuid(0x80000006, descs); 675 676 cai = &ci->ci_cinfo[CAI_L2CACHE]; 677 if (family > 6 || model >= 9) { 678 cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]); 679 cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]); 680 cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]); 681 } else { 682 cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]); 683 cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]); 684 cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]); 685 } 686 } 687 688 static void 689 cpu_probe_geode(struct cpu_info *ci) 690 { 691 692 if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 || 693 CPUID_TO_FAMILY(ci->ci_signature) != 5) 694 return; 695 696 cpu_probe_cyrix_cmn(ci); 697 cpu_probe_amd_cache(ci); 698 } 699 700 static void 701 cpu_probe_vortex86(struct cpu_info *ci) 702 { 703 #define PCI_MODE1_ADDRESS_REG 0x0cf8 704 #define PCI_MODE1_DATA_REG 0x0cfc 705 #define PCI_MODE1_ENABLE 0x80000000UL 706 707 uint32_t reg, idx; 708 709 if (cpu_vendor != CPUVENDOR_VORTEX86) 710 return; 711 /* 712 * CPU model available from "Customer ID register" in 713 * North Bridge Function 0 PCI space 714 * we can't use pci_conf_read() because the PCI subsystem is not 715 * not initialised early enough 716 */ 717 718 outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE | 0x90); 719 reg = inl(PCI_MODE1_DATA_REG); 720 721 if ((reg & 0xf0ffffff) != 0x30504d44) { 722 idx = 0; 723 } else { 724 idx = (reg >> 24) & 0xf; 725 } 726 727 static const char *cpu_vortex86_flavor[] = { 728 "??", "SX", "DX", "MX", "DX2", "MX+", "DX3", "EX", "EX2", 729 }; 730 idx = idx < __arraycount(cpu_vortex86_flavor) ? idx : 0; 731 snprintf(cpu_brand_string, sizeof(cpu_brand_string), "Vortex86%s", 732 cpu_vortex86_flavor[idx]); 733 734 #undef PCI_MODE1_ENABLE 735 #undef PCI_MODE1_ADDRESS_REG 736 #undef PCI_MODE1_DATA_REG 737 } 738 739 static void 740 cpu_probe_fpu_old(struct cpu_info *ci) 741 { 742 #if defined(__i386__) && !defined(XENPV) 743 744 clts(); 745 fninit(); 746 747 /* Check for 'FDIV' bug on the original Pentium */ 748 if (npx586bug1(4195835, 3145727) != 0) 749 /* NB 120+MHz cpus are not affected */ 750 i386_fpu_fdivbug = 1; 751 752 stts(); 753 #endif 754 } 755 756 static void 757 cpu_probe_fpu(struct cpu_info *ci) 758 { 759 u_int descs[4]; 760 int i; 761 762 x86_fpu_save = FPU_SAVE_FSAVE; 763 764 #ifdef i386 765 /* If we have FXSAVE/FXRESTOR, use them. */ 766 if ((ci->ci_feat_val[0] & CPUID_FXSR) == 0) { 767 i386_use_fxsave = 0; 768 cpu_probe_fpu_old(ci); 769 return; 770 } 771 772 i386_use_fxsave = 1; 773 /* 774 * If we have SSE/SSE2, enable XMM exceptions, and 775 * notify userland. 776 */ 777 if (ci->ci_feat_val[0] & CPUID_SSE) 778 i386_has_sse = 1; 779 if (ci->ci_feat_val[0] & CPUID_SSE2) 780 i386_has_sse2 = 1; 781 #else 782 /* 783 * For amd64 i386_use_fxsave, i386_has_sse and i386_has_sse2 are 784 * #defined to 1, because fxsave/sse/sse2 are always present. 785 */ 786 #endif 787 788 x86_fpu_save = FPU_SAVE_FXSAVE; 789 x86_fpu_save_size = sizeof(struct fxsave); 790 791 /* See if XSAVE is supported */ 792 if ((ci->ci_feat_val[1] & CPUID2_XSAVE) == 0) 793 return; 794 795 #ifdef XENPV 796 /* 797 * Xen kernel can disable XSAVE via "no-xsave" option, in that case 798 * the XSAVE/XRSTOR instructions become privileged and trigger 799 * supervisor trap. OSXSAVE flag seems to be reliably set according 800 * to whether XSAVE is actually available. 801 */ 802 if ((ci->ci_feat_val[1] & CPUID2_OSXSAVE) == 0) 803 return; 804 #endif 805 806 x86_fpu_save = FPU_SAVE_XSAVE; 807 808 x86_cpuid2(0xd, 1, descs); 809 if (descs[0] & CPUID_PES1_XSAVEOPT) 810 x86_fpu_save = FPU_SAVE_XSAVEOPT; 811 812 /* Get features and maximum size of the save area */ 813 x86_cpuid(0xd, descs); 814 if (descs[2] > sizeof(struct fxsave)) 815 x86_fpu_save_size = descs[2]; 816 817 x86_xsave_features = (uint64_t)descs[3] << 32 | descs[0]; 818 819 /* Get component offsets and sizes for the save area */ 820 for (i = XSAVE_YMM_Hi128; i < __arraycount(x86_xsave_offsets); i++) { 821 if (x86_xsave_features & __BIT(i)) { 822 x86_cpuid2(0xd, i, descs); 823 x86_xsave_offsets[i] = descs[1]; 824 x86_xsave_sizes[i] = descs[0]; 825 } 826 } 827 } 828 829 void 830 cpu_probe(struct cpu_info *ci) 831 { 832 u_int descs[4]; 833 int i; 834 uint32_t miscbytes; 835 uint32_t brand[12]; 836 837 if (ci == &cpu_info_primary) { 838 cpu_vendor = i386_nocpuid_cpus[cputype << 1]; 839 cpu_class = i386_nocpuid_cpus[(cputype << 1) + 1]; 840 } 841 842 if (cpuid_level < 0) { 843 /* cpuid instruction not supported */ 844 cpu_probe_fpu_old(ci); 845 return; 846 } 847 848 for (i = 0; i < __arraycount(ci->ci_feat_val); i++) { 849 ci->ci_feat_val[i] = 0; 850 } 851 852 x86_cpuid(0, descs); 853 cpuid_level = descs[0]; 854 ci->ci_max_cpuid = descs[0]; 855 856 ci->ci_vendor[0] = descs[1]; 857 ci->ci_vendor[2] = descs[2]; 858 ci->ci_vendor[1] = descs[3]; 859 ci->ci_vendor[3] = 0; 860 861 if (ci == &cpu_info_primary) { 862 if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0) 863 cpu_vendor = CPUVENDOR_INTEL; 864 else if (memcmp(ci->ci_vendor, "AuthenticAMD", 12) == 0) 865 cpu_vendor = CPUVENDOR_AMD; 866 else if (memcmp(ci->ci_vendor, "CyrixInstead", 12) == 0) 867 cpu_vendor = CPUVENDOR_CYRIX; 868 else if (memcmp(ci->ci_vendor, "Geode by NSC", 12) == 0) 869 cpu_vendor = CPUVENDOR_CYRIX; 870 else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0) 871 cpu_vendor = CPUVENDOR_IDT; 872 else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0) 873 cpu_vendor = CPUVENDOR_TRANSMETA; 874 else if (memcmp(ci->ci_vendor, "Vortex86 SoC", 12) == 0) 875 cpu_vendor = CPUVENDOR_VORTEX86; 876 else 877 cpu_vendor = CPUVENDOR_UNKNOWN; 878 } 879 880 if (cpuid_level >= 1) { 881 x86_cpuid(1, descs); 882 ci->ci_signature = descs[0]; 883 miscbytes = descs[1]; 884 ci->ci_feat_val[1] = descs[2]; 885 ci->ci_feat_val[0] = descs[3]; 886 887 if (ci == &cpu_info_primary) { 888 /* Determine family + class. */ 889 cpu_class = CPUID_TO_FAMILY(ci->ci_signature) 890 + (CPUCLASS_386 - 3); 891 if (cpu_class > CPUCLASS_686) 892 cpu_class = CPUCLASS_686; 893 } 894 895 /* CLFLUSH line size is next 8 bits */ 896 if (ci->ci_feat_val[0] & CPUID_CLFSH) 897 ci->ci_cflush_lsize 898 = __SHIFTOUT(miscbytes, CPUID_CLFLUSH_SIZE) << 3; 899 ci->ci_initapicid = __SHIFTOUT(miscbytes, CPUID_LOCAL_APIC_ID); 900 } 901 902 /* 903 * Get the basic information from the extended cpuid leafs. 904 * These were first implemented by amd, but most of the values 905 * match with those generated by modern intel cpus. 906 */ 907 x86_cpuid(0x80000000, descs); 908 if (descs[0] >= 0x80000000) 909 ci->ci_max_ext_cpuid = descs[0]; 910 else 911 ci->ci_max_ext_cpuid = 0; 912 913 if (ci->ci_max_ext_cpuid >= 0x80000001) { 914 /* Determine the extended feature flags. */ 915 x86_cpuid(0x80000001, descs); 916 ci->ci_feat_val[3] = descs[2]; /* %ecx */ 917 ci->ci_feat_val[2] = descs[3]; /* %edx */ 918 } 919 920 if (ci->ci_max_ext_cpuid >= 0x80000004) { 921 x86_cpuid(0x80000002, brand); 922 x86_cpuid(0x80000003, brand + 4); 923 x86_cpuid(0x80000004, brand + 8); 924 /* Skip leading spaces on brand */ 925 for (i = 0; i < 48; i++) { 926 if (((char *) brand)[i] != ' ') 927 break; 928 } 929 memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i); 930 } 931 932 /* 933 * Get the structured extended features. 934 */ 935 if (cpuid_level >= 7) { 936 x86_cpuid(7, descs); 937 ci->ci_feat_val[5] = descs[1]; /* %ebx */ 938 ci->ci_feat_val[6] = descs[2]; /* %ecx */ 939 ci->ci_feat_val[7] = descs[3]; /* %edx */ 940 } 941 942 cpu_probe_intel(ci); 943 cpu_probe_amd(ci); 944 cpu_probe_cyrix(ci); 945 cpu_probe_winchip(ci); 946 cpu_probe_c3(ci); 947 cpu_probe_geode(ci); 948 cpu_probe_vortex86(ci); 949 950 if (ci == &cpu_info_primary) { 951 cpu_probe_fpu(ci); 952 } 953 954 #ifndef XENPV 955 x86_cpu_topology(ci); 956 #endif 957 958 if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feat_val[0] & CPUID_TM) && 959 (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) { 960 /* Enable thermal monitor 1. */ 961 wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1<<3)); 962 } 963 964 ci->ci_feat_val[0] &= ~CPUID_FEAT_BLACKLIST; 965 if (ci == &cpu_info_primary) { 966 /* If first. Boot Processor is the cpu_feature reference. */ 967 for (i = 0; i < __arraycount(cpu_feature); i++) { 968 cpu_feature[i] = ci->ci_feat_val[i]; 969 } 970 identify_hypervisor(); 971 #ifndef XENPV 972 /* Early patch of text segment. */ 973 x86_patch(true); 974 #endif 975 976 /* AES */ 977 #ifdef __x86_64__ /* not yet implemented on i386 */ 978 if (cpu_feature[1] & CPUID2_AESNI) 979 aes_md_init(&aes_ni_impl); 980 else 981 #endif 982 if (cpu_feature[4] & CPUID_VIA_HAS_ACE) 983 aes_md_init(&aes_via_impl); 984 else if (i386_has_sse && i386_has_sse2 && 985 (cpu_feature[1] & CPUID2_SSE3) && 986 (cpu_feature[1] & CPUID2_SSSE3)) 987 aes_md_init(&aes_ssse3_impl); 988 else if (i386_has_sse && i386_has_sse2) 989 aes_md_init(&aes_sse2_impl); 990 991 /* ChaCha */ 992 if (i386_has_sse && i386_has_sse2) 993 chacha_md_init(&chacha_sse2_impl); 994 } else { 995 /* 996 * If not first. Warn about cpu_feature mismatch for 997 * secondary CPUs. 998 */ 999 for (i = 0; i < __arraycount(cpu_feature); i++) { 1000 if (cpu_feature[i] != ci->ci_feat_val[i]) 1001 aprint_error_dev(ci->ci_dev, 1002 "feature mismatch: cpu_feature[%d] is " 1003 "%#x, but CPU reported %#x\n", 1004 i, cpu_feature[i], ci->ci_feat_val[i]); 1005 } 1006 } 1007 } 1008 1009 /* Write what we know about the cpu to the console... */ 1010 void 1011 cpu_identify(struct cpu_info *ci) 1012 { 1013 1014 cpu_setmodel("%s %d86-class", 1015 cpu_vendor_names[cpu_vendor], cpu_class + 3); 1016 if (cpu_brand_string[0] != '\0') { 1017 aprint_normal_dev(ci->ci_dev, "%s", cpu_brand_string); 1018 } else { 1019 aprint_normal_dev(ci->ci_dev, "%s", cpu_getmodel()); 1020 if (ci->ci_data.cpu_cc_freq != 0) 1021 aprint_normal(", %dMHz", 1022 (int)(ci->ci_data.cpu_cc_freq / 1000000)); 1023 } 1024 if (ci->ci_signature != 0) 1025 aprint_normal(", id 0x%x", ci->ci_signature); 1026 aprint_normal("\n"); 1027 aprint_normal_dev(ci->ci_dev, "node %u, package %u, core %u, smt %u\n", 1028 ci->ci_numa_id, ci->ci_package_id, ci->ci_core_id, ci->ci_smt_id); 1029 if (cpu_brand_string[0] == '\0') { 1030 strlcpy(cpu_brand_string, cpu_getmodel(), 1031 sizeof(cpu_brand_string)); 1032 } 1033 if (cpu_class == CPUCLASS_386) { 1034 panic("NetBSD requires an 80486DX or later processor"); 1035 } 1036 if (cputype == CPU_486DLC) { 1037 aprint_error("WARNING: BUGGY CYRIX CACHE\n"); 1038 } 1039 1040 #if !defined(XENPV) || defined(DOM0OPS) /* on Xen PV rdmsr is for Dom0 only */ 1041 if (cpu_vendor == CPUVENDOR_AMD /* check enablement of an */ 1042 && device_unit(ci->ci_dev) == 0 /* AMD feature only once */ 1043 && ((cpu_feature[3] & CPUID_SVM) == CPUID_SVM)) { 1044 uint64_t val; 1045 1046 val = rdmsr(MSR_VMCR); 1047 if (((val & VMCR_SVMED) == VMCR_SVMED) 1048 && ((val & VMCR_LOCK) == VMCR_LOCK)) { 1049 aprint_normal_dev(ci->ci_dev, 1050 "SVM disabled by the BIOS\n"); 1051 } 1052 } 1053 #endif 1054 1055 #ifdef i386 1056 if (i386_fpu_fdivbug == 1) 1057 aprint_normal_dev(ci->ci_dev, 1058 "WARNING: Pentium FDIV bug detected!\n"); 1059 1060 if (cpu_vendor == CPUVENDOR_TRANSMETA) { 1061 u_int descs[4]; 1062 x86_cpuid(0x80860000, descs); 1063 if (descs[0] >= 0x80860007) 1064 /* Create longrun sysctls */ 1065 tmx86_init_longrun(); 1066 } 1067 #endif /* i386 */ 1068 1069 } 1070 1071 /* 1072 * Hypervisor 1073 */ 1074 vm_guest_t vm_guest = VM_GUEST_NO; 1075 1076 struct vm_name_guest { 1077 const char *name; 1078 vm_guest_t guest; 1079 }; 1080 1081 static const struct vm_name_guest vm_bios_vendors[] = { 1082 { "QEMU", VM_GUEST_VM }, /* QEMU */ 1083 { "Plex86", VM_GUEST_VM }, /* Plex86 */ 1084 { "Bochs", VM_GUEST_VM }, /* Bochs */ 1085 { "Xen", VM_GUEST_VM }, /* Xen */ 1086 { "BHYVE", VM_GUEST_VM }, /* bhyve */ 1087 { "Seabios", VM_GUEST_VM }, /* KVM */ 1088 { "innotek GmbH", VM_GUEST_VIRTUALBOX }, /* Oracle VirtualBox */ 1089 { "Generic PVH", VM_GUEST_GENPVH}, /* Generic PVH */ 1090 }; 1091 1092 static const struct vm_name_guest vm_system_products[] = { 1093 { "VMware Virtual Platform", VM_GUEST_VM }, /* VMWare VM */ 1094 { "Virtual Machine", VM_GUEST_VM }, /* Microsoft VirtualPC */ 1095 { "VirtualBox", VM_GUEST_VIRTUALBOX }, /* Sun xVM VirtualBox */ 1096 { "Parallels Virtual Platform", VM_GUEST_VM }, /* Parallels VM */ 1097 { "KVM", VM_GUEST_VM }, /* KVM */ 1098 }; 1099 1100 void 1101 identify_hypervisor(void) 1102 { 1103 u_int regs[6]; 1104 char hv_vendor[12]; 1105 const char *p; 1106 int i; 1107 1108 switch (vm_guest) { 1109 case VM_GUEST_XENPV: 1110 case VM_GUEST_XENPVH: 1111 case VM_GUEST_GENPVH: 1112 /* guest type already known, no bios info */ 1113 return; 1114 default: 1115 break; 1116 } 1117 1118 /* 1119 * [RFC] CPUID usage for interaction between Hypervisors and Linux. 1120 * http://lkml.org/lkml/2008/10/1/246 1121 * 1122 * KB1009458: Mechanisms to determine if software is running in 1123 * a VMware virtual machine 1124 * http://kb.vmware.com/kb/1009458 1125 */ 1126 if (ISSET(cpu_feature[1], CPUID2_RAZ)) { 1127 vm_guest = VM_GUEST_VM; 1128 x86_cpuid(0x40000000, regs); 1129 if (regs[0] >= 0x40000000) { 1130 memcpy(&hv_vendor[0], ®s[1], sizeof(*regs)); 1131 memcpy(&hv_vendor[4], ®s[2], sizeof(*regs)); 1132 memcpy(&hv_vendor[8], ®s[3], sizeof(*regs)); 1133 if (memcmp(hv_vendor, "VMwareVMware", 12) == 0) 1134 vm_guest = VM_GUEST_VMWARE; 1135 else if (memcmp(hv_vendor, "Microsoft Hv", 12) == 0) { 1136 vm_guest = VM_GUEST_HV; 1137 #if NHYPERV > 0 1138 hyperv_early_init(); 1139 #endif 1140 } else if (memcmp(hv_vendor, "KVMKVMKVM\0\0\0", 12) == 0) 1141 vm_guest = VM_GUEST_KVM; 1142 else if (memcmp(hv_vendor, "XenVMMXenVMM", 12) == 0) 1143 vm_guest = VM_GUEST_XENHVM; 1144 /* FreeBSD bhyve: "bhyve bhyve " */ 1145 /* OpenBSD vmm: "OpenBSDVMM58" */ 1146 /* NetBSD nvmm: "___ NVMM ___" */ 1147 } 1148 // VirtualBox returns KVM, so keep going. 1149 if (vm_guest != VM_GUEST_KVM) 1150 return; 1151 } 1152 1153 /* 1154 * Examine SMBIOS strings for older hypervisors. 1155 */ 1156 p = pmf_get_platform("system-serial"); 1157 if (p != NULL) { 1158 if (strncmp(p, "VMware-", 7) == 0 || strncmp(p, "VMW", 3) == 0) { 1159 vmt_hvcall(VM_CMD_GET_VERSION, regs); 1160 if (regs[1] == VM_MAGIC) { 1161 vm_guest = VM_GUEST_VMWARE; 1162 return; 1163 } 1164 } 1165 } 1166 p = pmf_get_platform("bios-vendor"); 1167 if (p != NULL) { 1168 for (i = 0; i < __arraycount(vm_bios_vendors); i++) { 1169 if (strcmp(p, vm_bios_vendors[i].name) == 0) { 1170 vm_guest = vm_bios_vendors[i].guest; 1171 return; 1172 } 1173 } 1174 } 1175 p = pmf_get_platform("system-product"); 1176 if (p != NULL) { 1177 for (i = 0; i < __arraycount(vm_system_products); i++) { 1178 if (strcmp(p, vm_system_products[i].name) == 0) { 1179 vm_guest = vm_system_products[i].guest; 1180 return; 1181 } 1182 } 1183 } 1184 } 1185