/*	$NetBSD: identcpu.c,v 1.133 2025/01/17 10:38:48 riastradh Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.133 2025/01/17 10:38:48 riastradh Exp $");

#include "opt_xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/cpu.h>

#include <crypto/aes/aes_impl.h>
#include <crypto/aes/arch/x86/aes_ni.h>
#include <crypto/aes/arch/x86/aes_sse2.h>
#include <crypto/aes/arch/x86/aes_ssse3.h>
#include <crypto/aes/arch/x86/aes_via.h>
#include <crypto/chacha/chacha_impl.h>
#include <crypto/chacha/arch/x86/chacha_sse2.h>

#include <uvm/uvm_extern.h>

#include <machine/specialreg.h>
#include <machine/pio.h>
#include <machine/cpu.h>

#include <x86/cputypes.h>
#include <x86/cacheinfo.h>
#include <x86/cpuvar.h>
#include <x86/fpu.h>

#include <dev/vmt/vmtreg.h>	/* for vmt_hvcall() */
#include <dev/vmt/vmtvar.h>	/* for vmt_hvcall() */

#ifndef XENPV
#include "hyperv.h"
#if NHYPERV > 0
#include <x86/x86/hypervvar.h>
#endif
#endif

static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;

static const struct x86_cache_info amd_cpuid_l2l3cache_assoc_info[] =
	AMD_L2L3CACHE_INFO;

int cpu_vendor;
char cpu_brand_string[49];

int x86_fpu_save __read_mostly;
unsigned int x86_fpu_save_size __read_mostly = sizeof(struct save87);
uint64_t x86_xsave_features __read_mostly = 0;
size_t x86_xsave_offsets[XSAVE_MAX_COMPONENT+1] __read_mostly;
size_t x86_xsave_sizes[XSAVE_MAX_COMPONENT+1] __read_mostly;

/*
 * Note: these are just the ones that may not have a cpuid instruction.
 * We deal with the rest in a different way.
 */
const int i386_nocpuid_cpus[] = {
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386SX */
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386   */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486SX */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486   */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_486DLC */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_6x86 */
	CPUVENDOR_NEXGEN, CPUCLASS_386,	/* CPU_NX586 */
};

static const char cpu_vendor_names[][10] = {
	"Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta",
	"Vortex86"
};

static void
cpu_probe_intel_cache(struct cpu_info *ci)
{
	const struct x86_cache_info *cai;
	u_int descs[4];
	int iterations, i, j;
	uint8_t desc;

	if (cpuid_level >= 2) {
		/* Parse the cache info from `cpuid leaf 2', if we have it. */
		x86_cpuid(2, descs);
		iterations = descs[0] & 0xff;
		while (iterations-- > 0) {
			for (i = 0; i < 4; i++) {
				if (descs[i] & 0x80000000)
					continue;
				for (j = 0; j < 4; j++) {
					if (i == 0 && j == 0)
						continue;
					desc = (descs[i] >> (j * 8)) & 0xff;
					if (desc == 0)
						continue;
					cai = cpu_cacheinfo_lookup(
					    intel_cpuid_cache_info, desc);
					if (cai != NULL) {
						ci->ci_cinfo[cai->cai_index] =
						    *cai;
					}
				}
			}
		}
	}

	if (cpuid_level < 4)
		return;

	/* Parse the cache info from `cpuid leaf 4', if we have it. */
	cpu_dcp_cacheinfo(ci, 4);
}
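
/*
 * Background for the leaf 2 loop in cpu_probe_intel_cache() above: the
 * low byte of EAX gives the number of times CPUID leaf 2 must be
 * executed, bit 31 of a register flags that register's contents as
 * invalid, and byte 0 of EAX (the iteration count itself) is skipped.
 * Every other nonzero byte is a one-byte cache/TLB descriptor that is
 * looked up in intel_cpuid_cache_info.
 */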

static void
cpu_probe_intel_errata(struct cpu_info *ci)
{
	u_int family, model;

	family = CPUID_TO_FAMILY(ci->ci_signature);
	model = CPUID_TO_MODEL(ci->ci_signature);

	/*
	 * For details, refer to the Intel Pentium and Celeron Processor
	 * N- and J- Series Specification Update (Document number: 334820-010),
	 * August 2022, Revision 010. See page 28, Section 5.30: "APL30 A Store
	 * Instruction May Not Wake Up MWAIT."
	 * https://cdrdv2-public.intel.com/334820/334820-APL_Spec_Update_rev010.pdf
	 * https://web.archive.org/web/20250114072355/https://cdrdv2-public.intel.com/334820/334820-APL_Spec_Update_rev010.pdf
	 *
	 * Disable MWAIT/MONITOR on Apollo Lake CPUs to address the
	 * APL30 erratum.  When using the MONITOR/MWAIT instruction
	 * pair, stores to the armed address range may fail to trigger
	 * MWAIT to resume execution.  When these instructions are used
	 * to hatch secondary CPUs, this erratum causes SMP boot
	 * failures.
	 */
	if (family == 0x6 && model == 0x5C) {
		wrmsr(MSR_MISC_ENABLE,
		    rdmsr(MSR_MISC_ENABLE) & ~IA32_MISC_MWAIT_EN);

		cpu_feature[1] &= ~CPUID2_MONITOR;
		ci->ci_feat_val[1] &= ~CPUID2_MONITOR;
	}
}

static void
cpu_probe_intel(struct cpu_info *ci)
{

	if (cpu_vendor != CPUVENDOR_INTEL)
		return;

	cpu_probe_intel_cache(ci);
	cpu_probe_intel_errata(ci);
}

static void
cpu_probe_amd_cache(struct cpu_info *ci)
{
	const struct x86_cache_info *cp;
	struct x86_cache_info *cai;
	int family, model;
	u_int descs[4];
	u_int lfunc;

	family = CPUID_TO_FAMILY(ci->ci_signature);
	model = CPUID_TO_MODEL(ci->ci_signature);

	/* K5 model 0 has none of this info. */
	if (family == 5 && model == 0)
		return;

	/* Determine the largest extended function value. */
	x86_cpuid(0x80000000, descs);
	lfunc = descs[0];

	if (lfunc < 0x80000005)
		return;

	/* Determine L1 cache/TLB info. */
	x86_cpuid(0x80000005, descs);

	/* K6-III and higher have large page TLBs. */
	if ((family == 5 && model >= 9) || family >= 6) {
		cai = &ci->ci_cinfo[CAI_ITLB2];
		cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
		cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
		cai->cai_linesize = (4 * 1024 * 1024);

		cai = &ci->ci_cinfo[CAI_DTLB2];
		cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
		cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
		cai->cai_linesize = (4 * 1024 * 1024);
	}

	cai = &ci->ci_cinfo[CAI_ITLB];
	cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DTLB];
	cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DCACHE];
	cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
	cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
	cai->cai_linesize = AMD_L1_ECX_DC_LS(descs[2]);

	cai = &ci->ci_cinfo[CAI_ICACHE];
	cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
	cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);

	if (lfunc < 0x80000006)
		return;

	/* Determine L2 cache/TLB info. */
	x86_cpuid(0x80000006, descs);

	cai = &ci->ci_cinfo[CAI_L2CACHE];
	cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
	cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
	cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);

	cp = cpu_cacheinfo_lookup(amd_cpuid_l2l3cache_assoc_info,
	    cai->cai_associativity);
	if (cp != NULL)
		cai->cai_associativity = cp->cai_associativity;
	else
		cai->cai_associativity = 0;	/* XXX Unknown/reserved */
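
	/*
	 * For reference, AMD documents CPUID Fn8000_0006 ECX as: bits
	 * 31:16 = L2 size in Kbytes, 15:12 = associativity code, 11:8 =
	 * lines per tag, 7:0 = line size in bytes.  A hypothetical value
	 * of ECX == 0x02004140 would therefore decode to a 512 KB,
	 * 64-byte-line L2 cache with associativity code 0x4, which the
	 * amd_cpuid_l2l3cache_assoc_info lookup above translates to
	 * 4-way.
	 */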

	if (family < 0xf)
		return;

	/* Determine L3 cache info on AMD Family 10h and newer processors */
	cai = &ci->ci_cinfo[CAI_L3CACHE];
	cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
	cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
	cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);

	cp = cpu_cacheinfo_lookup(amd_cpuid_l2l3cache_assoc_info,
	    cai->cai_associativity);
	if (cp != NULL)
		cai->cai_associativity = cp->cai_associativity;
	else
		cai->cai_associativity = 0;	/* XXX Unknown/reserved */

	if (lfunc < 0x80000019)
		return;

	/* Determine 1GB TLB info. */
	x86_cpuid(0x80000019, descs);

	cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
	cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
	cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
	cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
	cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
	cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]);
	cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]);
	cai->cai_linesize = (1 * 1024);

	if (lfunc < 0x8000001d)
		return;

	if (ci->ci_feat_val[3] & CPUID_TOPOEXT)
		cpu_dcp_cacheinfo(ci, 0x8000001d);
}

static void
cpu_probe_amd_errata(struct cpu_info *ci)
{
	u_int model;
	uint64_t val;
	int flag;

	model = CPUID_TO_MODEL(ci->ci_signature);

	switch (CPUID_TO_FAMILY(ci->ci_signature)) {
	case 0x05: /* K5 */
		if (model == 0) {
			/*
			 * According to the AMD Processor Recognition App Note,
			 * the AMD-K5 Model 0 uses the wrong bit to indicate
			 * support for global PTEs, instead using bit 9 (APIC)
			 * rather than bit 13 (i.e. "0x200" vs. "0x2000").
			 */
			flag = ci->ci_feat_val[0];
			if ((flag & CPUID_APIC) != 0)
				flag = (flag & ~CPUID_APIC) | CPUID_PGE;
			ci->ci_feat_val[0] = flag;
		}
		break;

	case 0x10: /* Family 10h */
		/*
		 * On Family 10h, certain BIOSes do not enable WC+ support.
		 * This causes WC+ to become CD, and degrades guest
		 * performance at the NPT level.
		 *
		 * Explicitly enable WC+ if we're not a guest.
		 */
		if (!ISSET(ci->ci_feat_val[1], CPUID2_RAZ)) {
			val = rdmsr(MSR_BU_CFG2);
			val &= ~BU_CFG2_CWPLUS_DIS;
			wrmsr(MSR_BU_CFG2, val);
		}
		break;

	case 0x17:
		/*
		 * "Revision Guide for AMD Family 17h Models 00h-0Fh
		 * Processors" revision 1.12:
		 *
		 * 1057 MWAIT or MWAITX Instructions May Fail to Correctly
		 * Exit From the Monitor Event Pending State
		 *
		 * 1109 MWAIT Instruction May Hang a Thread
		 */
		if (model == 0x01) {
			cpu_feature[1] &= ~CPUID2_MONITOR;
			ci->ci_feat_val[1] &= ~CPUID2_MONITOR;
		}
		break;
	}
}

static void
cpu_probe_amd(struct cpu_info *ci)
{

	if (cpu_vendor != CPUVENDOR_AMD)
		return;

	cpu_probe_amd_cache(ci);
	cpu_probe_amd_errata(ci);
}

static inline uint8_t
cyrix_read_reg(uint8_t reg)
{

	outb(0x22, reg);
	return inb(0x23);
}

static inline void
cyrix_write_reg(uint8_t reg, uint8_t data)
{

	outb(0x22, reg);
	outb(0x23, data);
}

static void
cpu_probe_cyrix_cmn(struct cpu_info *ci)
{
	/*
	 * i8254 latch check routine:
	 *     National Geode (formerly Cyrix MediaGX) has a serious bug in
	 *     its built-in i8254-compatible clock module (cs5510 cs5520).
	 *     Set the variable 'clock_broken_latch' to indicate it.
	 *
	 *     This bug is not present in the cs5530, and the flag
	 *     is disabled again in sys/arch/i386/pci/pcib.c if this later
	 *     model device is detected. Ideally, this work-around should not
	 *     even be in here, it should be in there. XXX
	 */
	uint8_t c3;
#ifndef XENPV
	extern int clock_broken_latch;

	switch (ci->ci_signature) {
	case 0x440:     /* Cyrix MediaGX */
	case 0x540:     /* GXm */
		clock_broken_latch = 1;
		break;
	}
#endif

	/* set up various cyrix registers */
	/*
	 * Enable suspend on halt (powersave mode).
	 * When powersave mode is enabled, the TSC stops counting
	 * while the CPU is halted in idle() waiting for an interrupt.
	 * This means we can't use the TSC for interval time in
	 * microtime(9), and thus it is disabled here.
	 *
	 * It still makes a perfectly good cycle counter
	 * for program profiling, so long as you remember you're
	 * counting cycles, and not time. Further, if you don't
	 * mind not using powersave mode, the TSC works just fine,
	 * so this should really be optional.  XXX
	 */
	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);

	/*
	 * Do not disable the TSC on the Geode GX, it's reported to
	 * work fine.
	 */
	if (ci->ci_signature != 0x552)
		ci->ci_feat_val[0] &= ~CPUID_TSC;

	/* enable access to ccr4/ccr5 */
	c3 = cyrix_read_reg(0xC3);
	cyrix_write_reg(0xC3, c3 | 0x10);
	/* cyrix's workaround for the "coma bug" */
	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xffu);
	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
	/* disable access to ccr4/ccr5 */
	cyrix_write_reg(0xC3, c3);
}

static void
cpu_probe_cyrix(struct cpu_info *ci)
{

	if (cpu_vendor != CPUVENDOR_CYRIX ||
	    CPUID_TO_FAMILY(ci->ci_signature) < 4 ||
	    CPUID_TO_FAMILY(ci->ci_signature) > 6)
		return;

	cpu_probe_cyrix_cmn(ci);
}

static void
cpu_probe_winchip(struct cpu_info *ci)
{

	if (cpu_vendor != CPUVENDOR_IDT ||
	    CPUID_TO_FAMILY(ci->ci_signature) != 5)
		return;

	/* WinChip C6 */
	if (CPUID_TO_MODEL(ci->ci_signature) == 4)
		ci->ci_feat_val[0] &= ~CPUID_TSC;
}

static void
cpu_probe_c3(struct cpu_info *ci)
{
	u_int family, model, stepping, descs[4], lfunc, msr;
	struct x86_cache_info *cai;

	if (cpu_vendor != CPUVENDOR_IDT ||
	    CPUID_TO_FAMILY(ci->ci_signature) < 6)
		return;

	family = CPUID_TO_FAMILY(ci->ci_signature);
	model = CPUID_TO_MODEL(ci->ci_signature);
	stepping = CPUID_TO_STEPPING(ci->ci_signature);

	if (family == 6) {
		/*
		 * VIA Eden ESP.
		 *
		 * Quoting from page 3-4 of: "VIA Eden ESP Processor Datasheet"
		 * http://www.via.com.tw/download/mainboards/6/14/Eden20v115.pdf
		 *
		 * 1. The CMPXCHG8B instruction is provided and always enabled,
		 *    however, it appears disabled in the corresponding CPUID
		 *    function bit 0 to avoid a bug in an early version of
		 *    Windows NT. However, this default can be changed via a
		 *    bit in the FCR MSR.
		 */
		ci->ci_feat_val[0] |= CPUID_CX8;
		wrmsr(MSR_VIA_FCR, rdmsr(MSR_VIA_FCR) | VIA_FCR_CX8_REPORT);

		/*
		 * For reference on VIA Alternate Instructions, see the VIA C3
		 * Processor Alternate Instruction Set Application Note, 2002.
		 * http://www.bitsavers.org/components/viaTechnologies/C3-ais-appnote.pdf
		 *
		 * Disable unsafe ALTINST mode for VIA C3 processors, if necessary.
		 *
		 * This is done for security reasons, as some CPUs were
		 * found with ALTINST enabled by default.  This functionality
		 * has the ability to bypass many x86 architecture memory
		 * protections and privilege checks, exposing a possibility
		 * for backdoors, and should not be enabled unintentionally.
		 */
		if (model > 0x5 && model < 0xA) {
			int disable_ais = 0;
			x86_cpuid(0xc0000000, descs);
			lfunc = descs[0];
			/* Check AIS flags first if supported ("Nehemiah"). */
			if (lfunc >= 0xc0000001) {
				x86_cpuid(0xc0000001, descs);
				lfunc = descs[3];
				if ((lfunc & CPUID_VIA_HAS_AIS)
				    && (lfunc & CPUID_VIA_DO_AIS)) {
					disable_ais = 1;
				}
			} else	/* Explicitly disable AIS for pre-CX5L CPUs. */
				disable_ais = 1;

			if (disable_ais) {
				msr = rdmsr(MSR_VIA_FCR);
				wrmsr(MSR_VIA_FCR, msr & ~VIA_FCR_ALTINST_ENABLE);
			}
		}
	}

	if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) {
		/* VIA Nehemiah or later. */
		x86_cpuid(0xc0000000, descs);
		lfunc = descs[0];
		if (lfunc >= 0xc0000001) {	/* has ACE, RNG */
			int rng_enable = 0, ace_enable = 0;
			x86_cpuid(0xc0000001, descs);
			lfunc = descs[3];
			ci->ci_feat_val[4] = lfunc;
			/* Check for and enable RNG */
			if (lfunc & CPUID_VIA_HAS_RNG) {
				if (!(lfunc & CPUID_VIA_DO_RNG)) {
					rng_enable++;
					ci->ci_feat_val[4] |= CPUID_VIA_DO_RNG;
				}
			}
			/* Check for and enable ACE (AES-CBC) */
			if (lfunc & CPUID_VIA_HAS_ACE) {
				if (!(lfunc & CPUID_VIA_DO_ACE)) {
					ace_enable++;
					ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE;
				}
			}
			/* Check for and enable SHA */
			if (lfunc & CPUID_VIA_HAS_PHE) {
				if (!(lfunc & CPUID_VIA_DO_PHE)) {
					ace_enable++;
					ci->ci_feat_val[4] |= CPUID_VIA_DO_PHE;
				}
			}
			/* Check for and enable ACE2 (AES-CTR) */
			if (lfunc & CPUID_VIA_HAS_ACE2) {
				if (!(lfunc & CPUID_VIA_DO_ACE2)) {
					ace_enable++;
					ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE2;
				}
			}
			/* Check for and enable PMM (modmult engine) */
			if (lfunc & CPUID_VIA_HAS_PMM) {
				if (!(lfunc & CPUID_VIA_DO_PMM)) {
					ace_enable++;
					ci->ci_feat_val[4] |= CPUID_VIA_DO_PMM;
				}
			}

			/*
			 * Actually do the enables.  It's a little gross,
			 * but per the PadLock programming guide, "Enabling
			 * PadLock", condition 3, we must enable SSE too or
			 * else the first use of RNG or ACE instructions
			 * will generate a trap.
			 *
			 * We must do this early because of kernel RNG
			 * initialization but it is safe without the full
			 * FPU-detect as all these CPUs have SSE.
			 */
			lcr4(rcr4() | CR4_OSFXSR);

			if (rng_enable) {
				msr = rdmsr(MSR_VIA_RNG);
				msr |= MSR_VIA_RNG_ENABLE;
				/* C7 stepping 8 and subsequent CPUs have dual RNG */
				if (model > 0xA || (model == 0xA && stepping > 0x7)) {
					msr |= MSR_VIA_RNG_2NOISE;
				}
				wrmsr(MSR_VIA_RNG, msr);
			}

			if (ace_enable) {
				msr = rdmsr(MSR_VIA_FCR);
				wrmsr(MSR_VIA_FCR, msr | VIA_FCR_ACE_ENABLE);
			}
		}
	}

	/* Determine the largest extended function value. */
	x86_cpuid(0x80000000, descs);
	lfunc = descs[0];

	/*
	 * Determine L1 cache/TLB info.
	 */
	if (lfunc < 0x80000005) {
		/* No L1 cache info available. */
		return;
	}

	x86_cpuid(0x80000005, descs);

	cai = &ci->ci_cinfo[CAI_ITLB];
	cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
	cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DTLB];
	cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DCACHE];
	cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
	cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]);
	if (family == 6 && model == 9 && stepping == 8) {
		/* Erratum: stepping 8 reports 4 when it should be 2 */
		cai->cai_associativity = 2;
	}

	cai = &ci->ci_cinfo[CAI_ICACHE];
	cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
	cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
	if (family == 6 && model == 9 && stepping == 8) {
		/* Erratum: stepping 8 reports 4 when it should be 2 */
		cai->cai_associativity = 2;
	}

	/*
	 * Determine L2 cache/TLB info.
	 */
	if (lfunc < 0x80000006) {
		/* No L2 cache info available. */
		return;
	}

	x86_cpuid(0x80000006, descs);

	cai = &ci->ci_cinfo[CAI_L2CACHE];
	if (family > 6 || model >= 9) {
		cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
		cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
		cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
	} else {
		cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
		cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
		cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
	}
}

static void
cpu_probe_geode(struct cpu_info *ci)
{

	if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
	    CPUID_TO_FAMILY(ci->ci_signature) != 5)
		return;

	cpu_probe_cyrix_cmn(ci);
	cpu_probe_amd_cache(ci);
}

static void
cpu_probe_vortex86(struct cpu_info *ci)
{
#define PCI_MODE1_ADDRESS_REG	0x0cf8
#define PCI_MODE1_DATA_REG	0x0cfc
#define PCI_MODE1_ENABLE	0x80000000UL

	uint32_t reg, idx;

	if (cpu_vendor != CPUVENDOR_VORTEX86)
		return;
	/*
	 * The CPU model is available from the "Customer ID register" in
	 * North Bridge Function 0 PCI space.  We can't use pci_conf_read()
	 * because the PCI subsystem is not initialised early enough.
	 */

	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE | 0x90);
	reg = inl(PCI_MODE1_DATA_REG);

	if ((reg & 0xf0ffffff) != 0x30504d44) {
		idx = 0;
	} else {
		idx = (reg >> 24) & 0xf;
	}

	static const char *cpu_vortex86_flavor[] = {
	    "??", "SX", "DX", "MX", "DX2", "MX+", "DX3", "EX", "EX2",
	};
	idx = idx < __arraycount(cpu_vortex86_flavor) ? idx : 0;
	snprintf(cpu_brand_string, sizeof(cpu_brand_string), "Vortex86%s",
	    cpu_vortex86_flavor[idx]);

#undef PCI_MODE1_ENABLE
#undef PCI_MODE1_ADDRESS_REG
#undef PCI_MODE1_DATA_REG
}
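
/*
 * The npx586bug1() check in cpu_probe_fpu_old() below exercises the
 * classic FDIV-bug operands 4195835 and 3145727: a correct FPU gives
 * 4195835 / 3145727 = 1.333820..., while an affected original Pentium
 * returns roughly 1.333739..., so the residue
 * 4195835 - (4195835 / 3145727) * 3145727 comes out nonzero
 * (historically 256 on affected parts) instead of 0, which is what
 * sets i386_fpu_fdivbug.
 */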

static void
cpu_probe_fpu_old(struct cpu_info *ci)
{
#if defined(__i386__) && !defined(XENPV)

	clts();
	fninit();

	/* Check for 'FDIV' bug on the original Pentium */
	if (npx586bug1(4195835, 3145727) != 0)
		/* NB 120+MHz cpus are not affected */
		i386_fpu_fdivbug = 1;

	stts();
#endif
}

static void
cpu_probe_fpu(struct cpu_info *ci)
{
	u_int descs[4];
	int i;

	x86_fpu_save = FPU_SAVE_FSAVE;

#ifdef i386
	/* If we have FXSAVE/FXRESTOR, use them. */
	if ((ci->ci_feat_val[0] & CPUID_FXSR) == 0) {
		i386_use_fxsave = 0;
		cpu_probe_fpu_old(ci);
		return;
	}

	i386_use_fxsave = 1;
	/*
	 * If we have SSE/SSE2, enable XMM exceptions, and
	 * notify userland.
	 */
	if (ci->ci_feat_val[0] & CPUID_SSE)
		i386_has_sse = 1;
	if (ci->ci_feat_val[0] & CPUID_SSE2)
		i386_has_sse2 = 1;
#else
	/*
	 * For amd64 i386_use_fxsave, i386_has_sse and i386_has_sse2 are
	 * #defined to 1, because fxsave/sse/sse2 are always present.
	 */
#endif

	x86_fpu_save = FPU_SAVE_FXSAVE;
	x86_fpu_save_size = sizeof(struct fxsave);

	/* See if XSAVE is supported */
	if ((ci->ci_feat_val[1] & CPUID2_XSAVE) == 0)
		return;

#ifdef XENPV
	/*
	 * The Xen kernel can disable XSAVE via the "no-xsave" option; in
	 * that case the XSAVE/XRSTOR instructions become privileged and
	 * trigger a supervisor trap.  The OSXSAVE flag seems to be
	 * reliably set according to whether XSAVE is actually available.
	 */
	if ((ci->ci_feat_val[1] & CPUID2_OSXSAVE) == 0)
		return;
#endif

	x86_fpu_save = FPU_SAVE_XSAVE;

	x86_cpuid2(0xd, 1, descs);
	if (descs[0] & CPUID_PES1_XSAVEOPT)
		x86_fpu_save = FPU_SAVE_XSAVEOPT;

	/* Get features and maximum size of the save area */
	x86_cpuid(0xd, descs);
	if (descs[2] > sizeof(struct fxsave))
		x86_fpu_save_size = descs[2];

	x86_xsave_features = (uint64_t)descs[3] << 32 | descs[0];

	/* Get component offsets and sizes for the save area */
	for (i = XSAVE_YMM_Hi128; i < __arraycount(x86_xsave_offsets); i++) {
		if (x86_xsave_features & __BIT(i)) {
			x86_cpuid2(0xd, i, descs);
			x86_xsave_offsets[i] = descs[1];
			x86_xsave_sizes[i] = descs[0];
		}
	}
}
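
#if 0
/*
 * Illustrative sketch only, not compiled as part of this file: how the
 * component offsets and sizes recorded above could be used.  For the
 * standard (non-compacted) XSAVE format, CPUID 0xd sub-leaf i returns
 * in EBX the offset of component i from the start of the XSAVE area
 * and in EAX its size, so e.g. the upper halves of the YMM registers
 * could be located in a save area with:
 *
 *	example_xsave_component(area, XSAVE_YMM_Hi128);
 */
static void *
example_xsave_component(void *xsave_area, int component)
{

	/* Component not enabled/recorded: nothing to find. */
	if ((x86_xsave_features & __BIT(component)) == 0)
		return NULL;
	return (uint8_t *)xsave_area + x86_xsave_offsets[component];
}
#endif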

void
cpu_probe(struct cpu_info *ci)
{
	u_int descs[4];
	int i;
	uint32_t miscbytes;
	uint32_t brand[12];

	if (ci == &cpu_info_primary) {
		cpu_vendor = i386_nocpuid_cpus[cputype << 1];
		cpu_class = i386_nocpuid_cpus[(cputype << 1) + 1];
	}

	if (cpuid_level < 0) {
		/* cpuid instruction not supported */
		cpu_probe_fpu_old(ci);
		return;
	}

	for (i = 0; i < __arraycount(ci->ci_feat_val); i++) {
		ci->ci_feat_val[i] = 0;
	}

	x86_cpuid(0, descs);
	cpuid_level = descs[0];
	ci->ci_max_cpuid = descs[0];

	ci->ci_vendor[0] = descs[1];
	ci->ci_vendor[2] = descs[2];
	ci->ci_vendor[1] = descs[3];
	ci->ci_vendor[3] = 0;
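
	/*
	 * CPUID leaf 0 returns the 12-character vendor string in the
	 * register order EBX, EDX, ECX ("Genu", "ineI", "ntel" on Intel
	 * parts), which is why ci_vendor[1] is loaded from descs[3]
	 * (EDX) and ci_vendor[2] from descs[2] (ECX) above.
	 */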

	if (ci == &cpu_info_primary) {
		if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
			cpu_vendor = CPUVENDOR_INTEL;
		else if (memcmp(ci->ci_vendor, "AuthenticAMD", 12) == 0)
			cpu_vendor = CPUVENDOR_AMD;
		else if (memcmp(ci->ci_vendor, "CyrixInstead", 12) == 0)
			cpu_vendor = CPUVENDOR_CYRIX;
		else if (memcmp(ci->ci_vendor, "Geode by NSC", 12) == 0)
			cpu_vendor = CPUVENDOR_CYRIX;
		else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
			cpu_vendor = CPUVENDOR_IDT;
		else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
			cpu_vendor = CPUVENDOR_TRANSMETA;
		else if (memcmp(ci->ci_vendor, "Vortex86 SoC", 12) == 0)
			cpu_vendor = CPUVENDOR_VORTEX86;
		else
			cpu_vendor = CPUVENDOR_UNKNOWN;
	}

	if (cpuid_level >= 1) {
		x86_cpuid(1, descs);
		ci->ci_signature = descs[0];
		miscbytes = descs[1];
		ci->ci_feat_val[1] = descs[2];
		ci->ci_feat_val[0] = descs[3];

		if (ci == &cpu_info_primary) {
			/* Determine family + class. */
			cpu_class = CPUID_TO_FAMILY(ci->ci_signature)
			    + (CPUCLASS_386 - 3);
			if (cpu_class > CPUCLASS_686)
				cpu_class = CPUCLASS_686;
		}

		/* CLFLUSH line size is next 8 bits */
		if (ci->ci_feat_val[0] & CPUID_CLFSH)
			ci->ci_cflush_lsize
			    = __SHIFTOUT(miscbytes, CPUID_CLFLUSH_SIZE) << 3;
		ci->ci_initapicid = __SHIFTOUT(miscbytes, CPUID_LOCAL_APIC_ID);
	}

	/*
	 * Get the basic information from the extended cpuid leafs.
	 * These were first implemented by amd, but most of the values
	 * match with those generated by modern intel cpus.
	 */
	x86_cpuid(0x80000000, descs);
	if (descs[0] >= 0x80000000)
		ci->ci_max_ext_cpuid = descs[0];
	else
		ci->ci_max_ext_cpuid = 0;

	if (ci->ci_max_ext_cpuid >= 0x80000001) {
		/* Determine the extended feature flags. */
		x86_cpuid(0x80000001, descs);
		ci->ci_feat_val[3] = descs[2]; /* %ecx */
		ci->ci_feat_val[2] = descs[3]; /* %edx */
	}

	if (ci->ci_max_ext_cpuid >= 0x80000004) {
		x86_cpuid(0x80000002, brand);
		x86_cpuid(0x80000003, brand + 4);
		x86_cpuid(0x80000004, brand + 8);
		/* Skip leading spaces on brand */
		for (i = 0; i < 48; i++) {
			if (((char *) brand)[i] != ' ')
				break;
		}
		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
	}

	/*
	 * Get the structured extended features.
	 */
	if (cpuid_level >= 7) {
		x86_cpuid(7, descs);
		ci->ci_feat_val[5] = descs[1]; /* %ebx */
		ci->ci_feat_val[6] = descs[2]; /* %ecx */
		ci->ci_feat_val[7] = descs[3]; /* %edx */
	}

	cpu_probe_intel(ci);
	cpu_probe_amd(ci);
	cpu_probe_cyrix(ci);
	cpu_probe_winchip(ci);
	cpu_probe_c3(ci);
	cpu_probe_geode(ci);
	cpu_probe_vortex86(ci);

	if (ci == &cpu_info_primary) {
		cpu_probe_fpu(ci);
	}

#ifndef XENPV
	x86_cpu_topology(ci);
#endif

	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feat_val[0] & CPUID_TM) &&
	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
		/* Enable thermal monitor 1. */
		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1<<3));
	}

	ci->ci_feat_val[0] &= ~CPUID_FEAT_BLACKLIST;
	if (ci == &cpu_info_primary) {
		/* If first: the Boot Processor is the cpu_feature reference. */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			cpu_feature[i] = ci->ci_feat_val[i];
		}
		identify_hypervisor();
#ifndef XENPV
		/* Early patch of text segment. */
		x86_patch(true);
#endif

		/* AES */
#ifdef __x86_64__	/* not yet implemented on i386 */
		if (cpu_feature[1] & CPUID2_AESNI)
			aes_md_init(&aes_ni_impl);
		else
#endif
		if (cpu_feature[4] & CPUID_VIA_HAS_ACE)
			aes_md_init(&aes_via_impl);
		else if (i386_has_sse && i386_has_sse2 &&
		    (cpu_feature[1] & CPUID2_SSE3) &&
		    (cpu_feature[1] & CPUID2_SSSE3))
			aes_md_init(&aes_ssse3_impl);
		else if (i386_has_sse && i386_has_sse2)
			aes_md_init(&aes_sse2_impl);

		/* ChaCha */
		if (i386_has_sse && i386_has_sse2)
			chacha_md_init(&chacha_sse2_impl);
	} else {
		/*
		 * If not first: warn about any cpu_feature mismatch on
		 * secondary CPUs.
		 */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			if (cpu_feature[i] != ci->ci_feat_val[i])
				aprint_error_dev(ci->ci_dev,
				    "feature mismatch: cpu_feature[%d] is "
				    "%#x, but CPU reported %#x\n",
				    i, cpu_feature[i], ci->ci_feat_val[i]);
		}
	}
}

/* Write what we know about the cpu to the console... */
void
cpu_identify(struct cpu_info *ci)
{

	cpu_setmodel("%s %d86-class",
	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
	if (cpu_brand_string[0] != '\0') {
		aprint_normal_dev(ci->ci_dev, "%s", cpu_brand_string);
	} else {
		aprint_normal_dev(ci->ci_dev, "%s", cpu_getmodel());
		if (ci->ci_data.cpu_cc_freq != 0)
			aprint_normal(", %dMHz",
			    (int)(ci->ci_data.cpu_cc_freq / 1000000));
	}
	if (ci->ci_signature != 0)
		aprint_normal(", id 0x%x", ci->ci_signature);
	aprint_normal("\n");
	aprint_normal_dev(ci->ci_dev, "node %u, package %u, core %u, smt %u\n",
	    ci->ci_numa_id, ci->ci_package_id, ci->ci_core_id, ci->ci_smt_id);
	if (cpu_brand_string[0] == '\0') {
		strlcpy(cpu_brand_string, cpu_getmodel(),
		    sizeof(cpu_brand_string));
	}
	if (cpu_class == CPUCLASS_386) {
		panic("NetBSD requires an 80486DX or later processor");
	}
	if (cputype == CPU_486DLC) {
		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
	}

#if !defined(XENPV) || defined(DOM0OPS)	/* on Xen PV rdmsr is for Dom0 only */
	if (cpu_vendor == CPUVENDOR_AMD		/* check enablement of an */
	    && device_unit(ci->ci_dev) == 0	/* AMD feature only once */
	    && ((cpu_feature[3] & CPUID_SVM) == CPUID_SVM)) {
		uint64_t val;

		val = rdmsr(MSR_VMCR);
		if (((val & VMCR_SVMED) == VMCR_SVMED)
		    && ((val & VMCR_LOCK) == VMCR_LOCK)) {
			aprint_normal_dev(ci->ci_dev,
			    "SVM disabled by the BIOS\n");
		}
	}
#endif

#ifdef i386
	if (i386_fpu_fdivbug == 1)
		aprint_normal_dev(ci->ci_dev,
		    "WARNING: Pentium FDIV bug detected!\n");

	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
		u_int descs[4];
		x86_cpuid(0x80860000, descs);
		if (descs[0] >= 0x80860007)
			/* Create longrun sysctls */
			tmx86_init_longrun();
	}
#endif	/* i386 */

}

/*
 * Hypervisor
 */
vm_guest_t vm_guest = VM_GUEST_NO;

struct vm_name_guest {
	const char *name;
	vm_guest_t guest;
};

static const struct vm_name_guest vm_bios_vendors[] = {
	{ "QEMU", VM_GUEST_VM },			/* QEMU */
	{ "Plex86", VM_GUEST_VM },			/* Plex86 */
	{ "Bochs", VM_GUEST_VM },			/* Bochs */
	{ "Xen", VM_GUEST_VM },				/* Xen */
	{ "BHYVE", VM_GUEST_VM },			/* bhyve */
	{ "Seabios", VM_GUEST_VM },			/* KVM */
	{ "innotek GmbH", VM_GUEST_VIRTUALBOX },	/* Oracle VirtualBox */
	{ "Generic PVH", VM_GUEST_GENPVH },		/* Generic PVH */
};

static const struct vm_name_guest vm_system_products[] = {
	{ "VMware Virtual Platform", VM_GUEST_VM },	/* VMWare VM */
	{ "Virtual Machine", VM_GUEST_VM },		/* Microsoft VirtualPC */
	{ "VirtualBox", VM_GUEST_VIRTUALBOX },		/* Sun xVM VirtualBox */
	{ "Parallels Virtual Platform", VM_GUEST_VM },	/* Parallels VM */
	{ "KVM", VM_GUEST_VM },				/* KVM */
};

void
identify_hypervisor(void)
{
	u_int regs[6];
	char hv_vendor[12];
	const char *p;
	int i;

	switch (vm_guest) {
	case VM_GUEST_XENPV:
	case VM_GUEST_XENPVH:
	case VM_GUEST_GENPVH:
		/* guest type already known, no bios info */
		return;
	default:
		break;
	}

	/*
	 * [RFC] CPUID usage for interaction between Hypervisors and Linux.
	 * http://lkml.org/lkml/2008/10/1/246
	 *
	 * KB1009458: Mechanisms to determine if software is running in
	 * a VMware virtual machine
	 * http://kb.vmware.com/kb/1009458
	 */
	if (ISSET(cpu_feature[1], CPUID2_RAZ)) {
		vm_guest = VM_GUEST_VM;
		x86_cpuid(0x40000000, regs);
		if (regs[0] >= 0x40000000) {
			memcpy(&hv_vendor[0], &regs[1], sizeof(*regs));
			memcpy(&hv_vendor[4], &regs[2], sizeof(*regs));
			memcpy(&hv_vendor[8], &regs[3], sizeof(*regs));
			if (memcmp(hv_vendor, "VMwareVMware", 12) == 0)
				vm_guest = VM_GUEST_VMWARE;
			else if (memcmp(hv_vendor, "Microsoft Hv", 12) == 0) {
				vm_guest = VM_GUEST_HV;
#if NHYPERV > 0
				hyperv_early_init();
#endif
			} else if (memcmp(hv_vendor, "KVMKVMKVM\0\0\0", 12) == 0)
				vm_guest = VM_GUEST_KVM;
			else if (memcmp(hv_vendor, "XenVMMXenVMM", 12) == 0)
				vm_guest = VM_GUEST_XENHVM;
			/* FreeBSD bhyve: "bhyve bhyve " */
			/* OpenBSD vmm:   "OpenBSDVMM58" */
			/* NetBSD nvmm:   "___ NVMM ___" */
		}
		// VirtualBox returns KVM, so keep going.
		if (vm_guest != VM_GUEST_KVM)
			return;
	}

	/*
	 * Examine SMBIOS strings for older hypervisors.
	 */
	p = pmf_get_platform("system-serial");
	if (p != NULL) {
		if (strncmp(p, "VMware-", 7) == 0 || strncmp(p, "VMW", 3) == 0) {
			vmt_hvcall(VM_CMD_GET_VERSION, regs);
			if (regs[1] == VM_MAGIC) {
				vm_guest = VM_GUEST_VMWARE;
				return;
			}
		}
	}
	p = pmf_get_platform("bios-vendor");
	if (p != NULL) {
		for (i = 0; i < __arraycount(vm_bios_vendors); i++) {
			if (strcmp(p, vm_bios_vendors[i].name) == 0) {
				vm_guest = vm_bios_vendors[i].guest;
				return;
			}
		}
	}
	p = pmf_get_platform("system-product");
	if (p != NULL) {
		for (i = 0; i < __arraycount(vm_system_products); i++) {
			if (strcmp(p, vm_system_products[i].name) == 0) {
				vm_guest = vm_system_products[i].guest;
				return;
			}
		}
	}
}