xref: /netbsd-src/sys/arch/x86/x86/identcpu.c (revision 53b02e147d4ed531c0d2a5ca9b3e8026ba3e99b5)
1 /*	$NetBSD: identcpu.c,v 1.123 2021/10/07 13:04:18 msaitoh Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Frank van der Linden,  and by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.123 2021/10/07 13:04:18 msaitoh Exp $");
34 
35 #include "opt_xen.h"
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/device.h>
40 #include <sys/cpu.h>
41 
42 #include <crypto/aes/aes_impl.h>
43 #include <crypto/aes/arch/x86/aes_ni.h>
44 #include <crypto/aes/arch/x86/aes_sse2.h>
45 #include <crypto/aes/arch/x86/aes_ssse3.h>
46 #include <crypto/aes/arch/x86/aes_via.h>
47 #include <crypto/chacha/chacha_impl.h>
48 #include <crypto/chacha/arch/x86/chacha_sse2.h>
49 
50 #include <uvm/uvm_extern.h>
51 
52 #include <machine/specialreg.h>
53 #include <machine/pio.h>
54 #include <machine/cpu.h>
55 
56 #include <x86/cputypes.h>
57 #include <x86/cacheinfo.h>
58 #include <x86/cpuvar.h>
59 #include <x86/fpu.h>
60 
61 #include <dev/vmt/vmtreg.h>	/* for vmt_hvcall() */
62 #include <dev/vmt/vmtvar.h>	/* for vmt_hvcall() */
63 
64 #ifndef XENPV
65 #include "hyperv.h"
66 #if NHYPERV > 0
67 #include <x86/x86/hypervvar.h>
68 #endif
69 #endif
70 
/* Table mapping Intel cpuid leaf-2 descriptor bytes to cache geometry. */
static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;

/* Decode table for AMD's encoded L2/L3 associativity field. */
static const struct x86_cache_info amd_cpuid_l2l3cache_assoc_info[] =
	AMD_L2L3CACHE_INFO;

int cpu_vendor;			/* CPUVENDOR_* of the boot processor */
char cpu_brand_string[49];	/* 48-char cpuid brand string + NUL */

/* FPU save mechanism selected at boot; see cpu_probe_fpu(). */
int x86_fpu_save __read_mostly;
unsigned int x86_fpu_save_size __read_mostly = sizeof(struct save87);
uint64_t x86_xsave_features __read_mostly = 0;
size_t x86_xsave_offsets[XSAVE_MAX_COMPONENT+1] __read_mostly;
size_t x86_xsave_sizes[XSAVE_MAX_COMPONENT+1] __read_mostly;

/*
 * Note: these are just the ones that may not have a cpuid instruction.
 * We deal with the rest in a different way.
 * Indexed by cputype: pairs of (vendor, class).
 */
const int i386_nocpuid_cpus[] = {
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386SX */
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386   */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486SX */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486   */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_486DLC */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_6x86 */
	CPUVENDOR_NEXGEN, CPUCLASS_386,	/* CPU_NX586 */
};

/* Printable names indexed by CPUVENDOR_*. */
static const char cpu_vendor_names[][10] = {
	"Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta",
	"Vortex86"
};
103 
/*
 * Fill in ci->ci_cinfo from Intel's cpuid cache information leaves.
 * Leaf 2 returns packed one-byte cache descriptors; leaf 4 (the
 * "deterministic cache parameters" leaf) is decoded by
 * cpu_dcp_cacheinfo().
 */
static void
cpu_probe_intel_cache(struct cpu_info *ci)
{
	const struct x86_cache_info *cai;
	u_int descs[4];
	int iterations, i, j;
	uint8_t desc;

	if (cpuid_level >= 2) {
		/* Parse the cache info from `cpuid leaf 2', if we have it. */
		x86_cpuid(2, descs);
		/* The low byte of %eax is the number of times to repeat. */
		iterations = descs[0] & 0xff;
		while (iterations-- > 0) {
			for (i = 0; i < 4; i++) {
				/* Bit 31 set: register holds no descriptors. */
				if (descs[i] & 0x80000000)
					continue;
				for (j = 0; j < 4; j++) {
					/* Byte 0 of %eax is the count, not a descriptor. */
					if (i == 0 && j == 0)
						continue;
					desc = (descs[i] >> (j * 8)) & 0xff;
					/* Descriptor 0 means "no information". */
					if (desc == 0)
						continue;
					cai = cpu_cacheinfo_lookup(
					    intel_cpuid_cache_info, desc);
					if (cai != NULL) {
						ci->ci_cinfo[cai->cai_index] =
						    *cai;
					}
				}
			}
		}
	}

	if (cpuid_level < 4)
		return;

	/* Parse the cache info from `cpuid leaf 4', if we have it. */
	cpu_dcp_cacheinfo(ci, 4);
}
143 
/*
 * Apply Intel-specific errata workarounds.  Currently this only handles
 * Apollo Lake (family 6, model 0x5C, stepping 9), where MWAIT is disabled
 * in MSR_MISC_ENABLE and the MONITOR/MWAIT feature bit is hidden from the
 * rest of the kernel and userland.
 */
static void
cpu_probe_intel_errata(struct cpu_info *ci)
{
	u_int family, model, stepping;

	family = CPUID_TO_FAMILY(ci->ci_signature);
	model = CPUID_TO_MODEL(ci->ci_signature);
	stepping = CPUID_TO_STEPPING(ci->ci_signature);

	if (family == 0x6 && model == 0x5C && stepping == 0x9) { /* Apollo Lake */
		/* Turn off the MWAIT enable bit in the misc-enable MSR. */
		wrmsr(MSR_MISC_ENABLE,
		    rdmsr(MSR_MISC_ENABLE) & ~IA32_MISC_MWAIT_EN);

		/* Hide MONITOR/MWAIT both globally and per-CPU. */
		cpu_feature[1] &= ~CPUID2_MONITOR;
		ci->ci_feat_val[1] &= ~CPUID2_MONITOR;
	}
}
161 
162 static void
163 cpu_probe_intel(struct cpu_info *ci)
164 {
165 
166 	if (cpu_vendor != CPUVENDOR_INTEL)
167 		return;
168 
169 	cpu_probe_intel_cache(ci);
170 	cpu_probe_intel_errata(ci);
171 }
172 
/*
 * Populate ci->ci_cinfo from AMD's extended cpuid leaves:
 *   0x80000005 - L1 cache and TLB geometry
 *   0x80000006 - L2/L3 cache and TLB geometry
 *   0x80000019 - 1GB-page TLB geometry
 *   0x8000001d - deterministic cache parameters (with TOPOEXT)
 * The encoded L2/L3 associativity values are translated via
 * amd_cpuid_l2l3cache_assoc_info.
 */
static void
cpu_probe_amd_cache(struct cpu_info *ci)
{
	const struct x86_cache_info *cp;
	struct x86_cache_info *cai;
	int family, model;
	u_int descs[4];
	u_int lfunc;

	family = CPUID_TO_FAMILY(ci->ci_signature);
	model = CPUID_TO_MODEL(ci->ci_signature);

	/* K5 model 0 has none of this info. */
	if (family == 5 && model == 0)
		return;

	/* Determine the largest extended function value. */
	x86_cpuid(0x80000000, descs);
	lfunc = descs[0];

	if (lfunc < 0x80000005)
		return;

	/* Determine L1 cache/TLB info. */
	x86_cpuid(0x80000005, descs);

	/* K6-III and higher have large page TLBs. */
	if ((family == 5 && model >= 9) || family >= 6) {
		/* %eax describes the 2M/4M-page TLBs. */
		cai = &ci->ci_cinfo[CAI_ITLB2];
		cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
		cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
		cai->cai_linesize = (4 * 1024 * 1024);

		cai = &ci->ci_cinfo[CAI_DTLB2];
		cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
		cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
		cai->cai_linesize = (4 * 1024 * 1024);
	}

	/* %ebx describes the 4K-page TLBs. */
	cai = &ci->ci_cinfo[CAI_ITLB];
	cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DTLB];
	cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	/* %ecx: L1 data cache, %edx: L1 instruction cache. */
	cai = &ci->ci_cinfo[CAI_DCACHE];
	cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
	cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
	cai->cai_linesize = AMD_L1_ECX_DC_LS(descs[2]);

	cai = &ci->ci_cinfo[CAI_ICACHE];
	cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
	cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);

	if (lfunc < 0x80000006)
		return;

	/* Determine L2 cache/TLB info. */
	x86_cpuid(0x80000006, descs);

	cai = &ci->ci_cinfo[CAI_L2CACHE];
	cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
	cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
	cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);

	/* The associativity field is an encoding, not a literal value. */
	cp = cpu_cacheinfo_lookup(amd_cpuid_l2l3cache_assoc_info,
	    cai->cai_associativity);
	if (cp != NULL)
		cai->cai_associativity = cp->cai_associativity;
	else
		cai->cai_associativity = 0;	/* XXX Unknown/reserved */

	/*
	 * NOTE(review): the guard below admits family 0xf as well, while the
	 * comment says "10h and newer" -- confirm which is intended.
	 */
	if (family < 0xf)
		return;

	/* Determine L3 cache info on AMD Family 10h and newer processors */
	cai = &ci->ci_cinfo[CAI_L3CACHE];
	cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
	cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
	cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);

	cp = cpu_cacheinfo_lookup(amd_cpuid_l2l3cache_assoc_info,
	    cai->cai_associativity);
	if (cp != NULL)
		cai->cai_associativity = cp->cai_associativity;
	else
		cai->cai_associativity = 0;	/* XXX Unknown reserved */

	if (lfunc < 0x80000019)
		return;

	/* Determine 1GB TLB info. */
	x86_cpuid(0x80000019, descs);

	cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
	cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
	cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
	cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
	cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
	cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]);
	cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]);
	cai->cai_linesize = (1 * 1024);

	if (lfunc < 0x8000001d)
		return;

	/* Deterministic cache parameters require the TOPOEXT feature. */
	if (ci->ci_feat_val[3] & CPUID_TOPOEXT)
		cpu_dcp_cacheinfo(ci, 0x8000001d);
}
298 
/*
 * Apply AMD-specific errata workarounds, keyed on CPU family:
 *  - K5 model 0: feature bit for global PTEs is misreported.
 *  - Family 10h: enable WC+ if the BIOS left it disabled (bare metal only).
 *  - Family 17h model 01h: MWAIT is unreliable; hide MONITOR/MWAIT.
 */
static void
cpu_probe_amd_errata(struct cpu_info *ci)
{
	u_int model;
	uint64_t val;
	int flag;

	model = CPUID_TO_MODEL(ci->ci_signature);

	switch (CPUID_TO_FAMILY(ci->ci_signature)) {
	case 0x05: /* K5 */
		if (model == 0) {
			/*
			 * According to the AMD Processor Recognition App Note,
			 * the AMD-K5 Model 0 uses the wrong bit to indicate
			 * support for global PTEs, instead using bit 9 (APIC)
			 * rather than bit 13 (i.e. "0x200" vs. 0x2000").
			 */
			flag = ci->ci_feat_val[0];
			if ((flag & CPUID_APIC) != 0)
				flag = (flag & ~CPUID_APIC) | CPUID_PGE;
			ci->ci_feat_val[0] = flag;
		}
		break;

	case 0x10: /* Family 10h */
		/*
		 * On Family 10h, certain BIOSes do not enable WC+ support.
		 * This causes WC+ to become CD, and degrades guest
		 * performance at the NPT level.
		 *
		 * Explicitly enable WC+ if we're not a guest.
		 * (CPUID2_RAZ is the hypervisor-present bit.)
		 */
		if (!ISSET(ci->ci_feat_val[1], CPUID2_RAZ)) {
			val = rdmsr(MSR_BU_CFG2);
			val &= ~BU_CFG2_CWPLUS_DIS;
			wrmsr(MSR_BU_CFG2, val);
		}
		break;

	case 0x17:
		/*
		 * "Revision Guide for AMD Family 17h Models 00h-0Fh
		 * Processors" revision 1.12:
		 *
		 * 1057 MWAIT or MWAITX Instructions May Fail to Correctly
		 * Exit From the Monitor Event Pending State
		 *
		 * 1109 MWAIT Instruction May Hang a Thread
		 */
		if (model == 0x01) {
			/* Hide MONITOR/MWAIT globally and per-CPU. */
			cpu_feature[1] &= ~CPUID2_MONITOR;
			ci->ci_feat_val[1] &= ~CPUID2_MONITOR;
		}
		break;
	}
}
356 
357 static void
358 cpu_probe_amd(struct cpu_info *ci)
359 {
360 
361 	if (cpu_vendor != CPUVENDOR_AMD)
362 		return;
363 
364 	cpu_probe_amd_cache(ci);
365 	cpu_probe_amd_errata(ci);
366 }
367 
/*
 * Read a Cyrix configuration register: select it via I/O port 0x22,
 * then read the value from port 0x23.
 */
static inline uint8_t
cyrix_read_reg(uint8_t reg)
{

	outb(0x22, reg);
	return inb(0x23);
}
375 
/*
 * Write a Cyrix configuration register: select it via I/O port 0x22,
 * then write the value to port 0x23.
 */
static inline void
cyrix_write_reg(uint8_t reg, uint8_t data)
{

	outb(0x22, reg);
	outb(0x23, data);
}
383 
/*
 * Common setup for Cyrix-derived CPUs (also used for the NSC Geode):
 * flag the broken i8254 latch where applicable, enable suspend-on-halt,
 * hide the (then unusable) TSC, and apply the "coma bug" workaround.
 */
static void
cpu_probe_cyrix_cmn(struct cpu_info *ci)
{
	/*
	 * i8254 latch check routine:
	 *     National Geode (formerly Cyrix MediaGX) has a serious bug in
	 *     its built-in i8254-compatible clock module (cs5510 cs5520).
	 *     Set the variable 'clock_broken_latch' to indicate it.
	 *
	 * This bug is not present in the cs5530, and the flag
	 * is disabled again in sys/arch/i386/pci/pcib.c if this later
	 * model device is detected. Ideally, this work-around should not
	 * even be in here, it should be in there. XXX
	 */
	uint8_t c3;
#ifndef XENPV
	extern int clock_broken_latch;

	switch (ci->ci_signature) {
	case 0x440:     /* Cyrix MediaGX */
	case 0x540:     /* GXm */
		clock_broken_latch = 1;
		break;
	}
#endif

	/* set up various cyrix registers */
	/*
	 * Enable suspend on halt (powersave mode).
	 * When powersave mode is enabled, the TSC stops counting
	 * while the CPU is halted in idle() waiting for an interrupt.
	 * This means we can't use the TSC for interval time in
	 * microtime(9), and thus it is disabled here.
	 *
	 * It still makes a perfectly good cycle counter
	 * for program profiling, so long as you remember you're
	 * counting cycles, and not time. Further, if you don't
	 * mind not using powersave mode, the TSC works just fine,
	 * so this should really be optional. XXX
	 */
	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);

	/*
	 * Do not disable the TSC on the Geode GX, it's reported to
	 * work fine.
	 */
	if (ci->ci_signature != 0x552)
		ci->ci_feat_val[0] &= ~CPUID_TSC;

	/* enable access to ccr4/ccr5 */
	c3 = cyrix_read_reg(0xC3);
	cyrix_write_reg(0xC3, c3 | 0x10);
	/* cyrix's workaround  for the "coma bug" */
	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xffu);
	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
	/* disable access to ccr4/ccr5: restore the saved CCR3 value */
	cyrix_write_reg(0xC3, c3);
}
444 
445 static void
446 cpu_probe_cyrix(struct cpu_info *ci)
447 {
448 
449 	if (cpu_vendor != CPUVENDOR_CYRIX ||
450 	    CPUID_TO_FAMILY(ci->ci_signature) < 4 ||
451 	    CPUID_TO_FAMILY(ci->ci_signature) > 6)
452 		return;
453 
454 	cpu_probe_cyrix_cmn(ci);
455 }
456 
457 static void
458 cpu_probe_winchip(struct cpu_info *ci)
459 {
460 
461 	if (cpu_vendor != CPUVENDOR_IDT ||
462 	    CPUID_TO_FAMILY(ci->ci_signature) != 5)
463 		return;
464 
465 	/* WinChip C6 */
466 	if (CPUID_TO_MODEL(ci->ci_signature) == 4)
467 		ci->ci_feat_val[0] &= ~CPUID_TSC;
468 }
469 
470 static void
471 cpu_probe_c3(struct cpu_info *ci)
472 {
473 	u_int family, model, stepping, descs[4], lfunc, msr;
474 	struct x86_cache_info *cai;
475 
476 	if (cpu_vendor != CPUVENDOR_IDT ||
477 	    CPUID_TO_FAMILY(ci->ci_signature) < 6)
478 		return;
479 
480 	family = CPUID_TO_FAMILY(ci->ci_signature);
481 	model = CPUID_TO_MODEL(ci->ci_signature);
482 	stepping = CPUID_TO_STEPPING(ci->ci_signature);
483 
484 	/* Determine the largest extended function value. */
485 	x86_cpuid(0x80000000, descs);
486 	lfunc = descs[0];
487 
488 	if (family == 6) {
489 		/*
490 		 * VIA Eden ESP.
491 		 *
492 		 * Quoting from page 3-4 of: "VIA Eden ESP Processor Datasheet"
493 		 * http://www.via.com.tw/download/mainboards/6/14/Eden20v115.pdf
494 		 *
495 		 * 1. The CMPXCHG8B instruction is provided and always enabled,
496 		 *    however, it appears disabled in the corresponding CPUID
497 		 *    function bit 0 to avoid a bug in an early version of
498 		 *    Windows NT. However, this default can be changed via a
499 		 *    bit in the FCR MSR.
500 		 */
501 		ci->ci_feat_val[0] |= CPUID_CX8;
502 		wrmsr(MSR_VIA_FCR, rdmsr(MSR_VIA_FCR) | VIA_ACE_ECX8);
503 	}
504 
505 	if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) {
506 		/* VIA Nehemiah or Esther. */
507 		x86_cpuid(0xc0000000, descs);
508 		lfunc = descs[0];
509 		if (lfunc >= 0xc0000001) {	/* has ACE, RNG */
510 		    int rng_enable = 0, ace_enable = 0;
511 		    x86_cpuid(0xc0000001, descs);
512 		    lfunc = descs[3];
513 		    ci->ci_feat_val[4] = lfunc;
514 		    /* Check for and enable RNG */
515 		    if (lfunc & CPUID_VIA_HAS_RNG) {
516 			if (!(lfunc & CPUID_VIA_DO_RNG)) {
517 			    rng_enable++;
518 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_RNG;
519 			}
520 		    }
521 		    /* Check for and enable ACE (AES-CBC) */
522 		    if (lfunc & CPUID_VIA_HAS_ACE) {
523 			if (!(lfunc & CPUID_VIA_DO_ACE)) {
524 			    ace_enable++;
525 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE;
526 			}
527 		    }
528 		    /* Check for and enable SHA */
529 		    if (lfunc & CPUID_VIA_HAS_PHE) {
530 			if (!(lfunc & CPUID_VIA_DO_PHE)) {
531 			    ace_enable++;
532 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PHE;
533 			}
534 		    }
535 		    /* Check for and enable ACE2 (AES-CTR) */
536 		    if (lfunc & CPUID_VIA_HAS_ACE2) {
537 			if (!(lfunc & CPUID_VIA_DO_ACE2)) {
538 			    ace_enable++;
539 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE2;
540 			}
541 		    }
542 		    /* Check for and enable PMM (modmult engine) */
543 		    if (lfunc & CPUID_VIA_HAS_PMM) {
544 			if (!(lfunc & CPUID_VIA_DO_PMM)) {
545 			    ace_enable++;
546 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PMM;
547 			}
548 		    }
549 
550 		    /*
551 		     * Actually do the enables.  It's a little gross,
552 		     * but per the PadLock programming guide, "Enabling
553 		     * PadLock", condition 3, we must enable SSE too or
554 		     * else the first use of RNG or ACE instructions
555 		     * will generate a trap.
556 		     *
557 		     * We must do this early because of kernel RNG
558 		     * initialization but it is safe without the full
559 		     * FPU-detect as all these CPUs have SSE.
560 		     */
561 		    lcr4(rcr4() | CR4_OSFXSR);
562 
563 		    if (rng_enable) {
564 			msr = rdmsr(MSR_VIA_RNG);
565 			msr |= MSR_VIA_RNG_ENABLE;
566 			/* C7 stepping 8 and subsequent CPUs have dual RNG */
567 			if (model > 0xA || (model == 0xA && stepping > 0x7)) {
568 				msr |= MSR_VIA_RNG_2NOISE;
569 			}
570 			wrmsr(MSR_VIA_RNG, msr);
571 		    }
572 
573 		    if (ace_enable) {
574 			msr = rdmsr(MSR_VIA_ACE);
575 			wrmsr(MSR_VIA_ACE, msr | VIA_ACE_ENABLE);
576 		    }
577 		}
578 	}
579 
580 	/* Explicitly disable unsafe ALTINST mode. */
581 	if (ci->ci_feat_val[4] & CPUID_VIA_DO_ACE) {
582 		msr = rdmsr(MSR_VIA_ACE);
583 		wrmsr(MSR_VIA_ACE, msr & ~VIA_ACE_ALTINST);
584 	}
585 
586 	/*
587 	 * Determine L1 cache/TLB info.
588 	 */
589 	if (lfunc < 0x80000005) {
590 		/* No L1 cache info available. */
591 		return;
592 	}
593 
594 	x86_cpuid(0x80000005, descs);
595 
596 	cai = &ci->ci_cinfo[CAI_ITLB];
597 	cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
598 	cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
599 	cai->cai_linesize = (4 * 1024);
600 
601 	cai = &ci->ci_cinfo[CAI_DTLB];
602 	cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
603 	cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
604 	cai->cai_linesize = (4 * 1024);
605 
606 	cai = &ci->ci_cinfo[CAI_DCACHE];
607 	cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
608 	cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
609 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]);
610 	if (family == 6 && model == 9 && stepping == 8) {
611 		/* Erratum: stepping 8 reports 4 when it should be 2 */
612 		cai->cai_associativity = 2;
613 	}
614 
615 	cai = &ci->ci_cinfo[CAI_ICACHE];
616 	cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
617 	cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
618 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
619 	if (family == 6 && model == 9 && stepping == 8) {
620 		/* Erratum: stepping 8 reports 4 when it should be 2 */
621 		cai->cai_associativity = 2;
622 	}
623 
624 	/*
625 	 * Determine L2 cache/TLB info.
626 	 */
627 	if (lfunc < 0x80000006) {
628 		/* No L2 cache info available. */
629 		return;
630 	}
631 
632 	x86_cpuid(0x80000006, descs);
633 
634 	cai = &ci->ci_cinfo[CAI_L2CACHE];
635 	if (family > 6 || model >= 9) {
636 		cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
637 		cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
638 		cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
639 	} else {
640 		cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
641 		cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
642 		cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
643 	}
644 }
645 
646 static void
647 cpu_probe_geode(struct cpu_info *ci)
648 {
649 
650 	if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
651 	    CPUID_TO_FAMILY(ci->ci_signature) != 5)
652 		return;
653 
654 	cpu_probe_cyrix_cmn(ci);
655 	cpu_probe_amd_cache(ci);
656 }
657 
/*
 * Identify the specific DM&P Vortex86 model by reading the north bridge's
 * "Customer ID" register through raw PCI mode-1 config cycles, and build
 * cpu_brand_string from it.
 */
static void
cpu_probe_vortex86(struct cpu_info *ci)
{
#define PCI_MODE1_ADDRESS_REG	0x0cf8
#define PCI_MODE1_DATA_REG	0x0cfc
#define PCI_MODE1_ENABLE	0x80000000UL

	uint32_t reg, idx;

	if (cpu_vendor != CPUVENDOR_VORTEX86)
		return;
	/*
	 * CPU model available from "Customer ID register" in
	 * North Bridge Function 0 PCI space
	 * we can't use pci_conf_read() because the PCI subsystem is not
	 * not initialised early enough
	 */

	/* Bus 0, device 0, function 0, register 0x90. */
	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE | 0x90);
	reg = inl(PCI_MODE1_DATA_REG);

	/* Low 3 bytes spell "DMP"; the high nibble encodes the model. */
	if ((reg & 0xf0ffffff) != 0x30504d44) {
		idx = 0;	/* unrecognized: fall back to "??" */
	} else {
		idx = (reg >> 24) & 0xf;
	}

	static const char *cpu_vortex86_flavor[] = {
	    "??", "SX", "DX", "MX", "DX2", "MX+", "DX3", "EX", "EX2",
	};
	/* Clamp out-of-range model codes to the "??" entry. */
	idx = idx < __arraycount(cpu_vortex86_flavor) ? idx : 0;
	snprintf(cpu_brand_string, sizeof(cpu_brand_string), "Vortex86%s",
	    cpu_vortex86_flavor[idx]);

#undef PCI_MODE1_ENABLE
#undef PCI_MODE1_ADDRESS_REG
#undef PCI_MODE1_DATA_REG
}
696 
/*
 * Legacy FPU probe for CPUs without FXSAVE (i386 only): initialize the
 * x87 unit and check for the original Pentium FDIV bug via a division
 * with a known-bad operand pair.
 */
static void
cpu_probe_fpu_old(struct cpu_info *ci)
{
#if defined(__i386__) && !defined(XENPV)

	clts();		/* allow FPU access */
	fninit();

	/* Check for 'FDIV' bug on the original Pentium */
	if (npx586bug1(4195835, 3145727) != 0)
		/* NB 120+MHz cpus are not affected */
		i386_fpu_fdivbug = 1;

	stts();		/* restore trap-on-FPU-use */
#endif
}
713 
/*
 * Select the FPU state save mechanism (FSAVE -> FXSAVE -> XSAVE ->
 * XSAVEOPT, best available) and record the XSAVE feature mask plus the
 * per-component offsets and sizes of the save area.
 */
static void
cpu_probe_fpu(struct cpu_info *ci)
{
	u_int descs[4];
	int i;

	x86_fpu_save = FPU_SAVE_FSAVE;

#ifdef i386
	/* If we have FXSAVE/FXRESTOR, use them. */
	if ((ci->ci_feat_val[0] & CPUID_FXSR) == 0) {
		i386_use_fxsave = 0;
		/* Fall back to the legacy FSAVE-only probe. */
		cpu_probe_fpu_old(ci);
		return;
	}

	i386_use_fxsave = 1;
	/*
	 * If we have SSE/SSE2, enable XMM exceptions, and
	 * notify userland.
	 */
	if (ci->ci_feat_val[0] & CPUID_SSE)
		i386_has_sse = 1;
	if (ci->ci_feat_val[0] & CPUID_SSE2)
		i386_has_sse2 = 1;
#else
	/*
	 * For amd64 i386_use_fxsave, i386_has_sse and i386_has_sse2 are
	 * #defined to 1, because fxsave/sse/sse2 are always present.
	 */
#endif

	x86_fpu_save = FPU_SAVE_FXSAVE;
	x86_fpu_save_size = sizeof(struct fxsave);

	/* See if XSAVE is supported */
	if ((ci->ci_feat_val[1] & CPUID2_XSAVE) == 0)
		return;

#ifdef XENPV
	/*
	 * Xen kernel can disable XSAVE via "no-xsave" option, in that case
	 * the XSAVE/XRSTOR instructions become privileged and trigger
	 * supervisor trap. OSXSAVE flag seems to be reliably set according
	 * to whether XSAVE is actually available.
	 */
	if ((ci->ci_feat_val[1] & CPUID2_OSXSAVE) == 0)
		return;
#endif

	x86_fpu_save = FPU_SAVE_XSAVE;

	/* Leaf 0xd sub-leaf 1: XSAVEOPT availability. */
	x86_cpuid2(0xd, 1, descs);
	if (descs[0] & CPUID_PES1_XSAVEOPT)
		x86_fpu_save = FPU_SAVE_XSAVEOPT;

	/* Get features and maximum size of the save area */
	x86_cpuid(0xd, descs);
	if (descs[2] > sizeof(struct fxsave))
		x86_fpu_save_size = descs[2];

	/* Feature mask is split across %edx:%eax. */
	x86_xsave_features = (uint64_t)descs[3] << 32 | descs[0];

	/* Get component offsets and sizes for the save area */
	for (i = XSAVE_YMM_Hi128; i < __arraycount(x86_xsave_offsets); i++) {
		if (x86_xsave_features & __BIT(i)) {
			x86_cpuid2(0xd, i, descs);
			x86_xsave_offsets[i] = descs[1];
			x86_xsave_sizes[i] = descs[0];
		}
	}
}
786 
/*
 * Main CPU identification entry point, run on every CPU.  Reads the
 * vendor string, signature and feature words via cpuid, dispatches to
 * the vendor-specific probe routines, and — on the boot processor —
 * establishes cpu_vendor/cpu_class/cpu_feature[] and selects the
 * AES/ChaCha implementations.  Secondary CPUs are checked for feature
 * mismatches against the boot processor.
 */
void
cpu_probe(struct cpu_info *ci)
{
	u_int descs[4];
	int i;
	uint32_t miscbytes;
	uint32_t brand[12];

	if (ci == &cpu_info_primary) {
		/* Seed vendor/class from the pre-cpuid table; cpuid may refine. */
		cpu_vendor = i386_nocpuid_cpus[cputype << 1];
		cpu_class = i386_nocpuid_cpus[(cputype << 1) + 1];
	}

	if (cpuid_level < 0) {
		/* cpuid instruction not supported */
		cpu_probe_fpu_old(ci);
		return;
	}

	for (i = 0; i < __arraycount(ci->ci_feat_val); i++) {
		ci->ci_feat_val[i] = 0;
	}

	x86_cpuid(0, descs);
	cpuid_level = descs[0];
	ci->ci_max_cpuid = descs[0];

	/* Vendor string is returned in %ebx, %edx, %ecx order. */
	ci->ci_vendor[0] = descs[1];
	ci->ci_vendor[2] = descs[2];
	ci->ci_vendor[1] = descs[3];
	ci->ci_vendor[3] = 0;

	if (ci == &cpu_info_primary) {
		if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
			cpu_vendor = CPUVENDOR_INTEL;
		else if (memcmp(ci->ci_vendor, "AuthenticAMD", 12) == 0)
			cpu_vendor = CPUVENDOR_AMD;
		else if (memcmp(ci->ci_vendor, "CyrixInstead", 12) == 0)
			cpu_vendor = CPUVENDOR_CYRIX;
		else if (memcmp(ci->ci_vendor, "Geode by NSC", 12) == 0)
			cpu_vendor = CPUVENDOR_CYRIX;
		else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
			cpu_vendor = CPUVENDOR_IDT;
		else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
			cpu_vendor = CPUVENDOR_TRANSMETA;
		else if (memcmp(ci->ci_vendor, "Vortex86 SoC", 12) == 0)
			cpu_vendor = CPUVENDOR_VORTEX86;
		else
			cpu_vendor = CPUVENDOR_UNKNOWN;
	}

	if (cpuid_level >= 1) {
		x86_cpuid(1, descs);
		ci->ci_signature = descs[0];
		miscbytes = descs[1];
		ci->ci_feat_val[1] = descs[2];	/* %ecx features */
		ci->ci_feat_val[0] = descs[3];	/* %edx features */

		if (ci == &cpu_info_primary) {
			/* Determine family + class. */
			cpu_class = CPUID_TO_FAMILY(ci->ci_signature)
			    + (CPUCLASS_386 - 3);
			if (cpu_class > CPUCLASS_686)
				cpu_class = CPUCLASS_686;
		}

		/* CLFLUSH line size is next 8 bits */
		if (ci->ci_feat_val[0] & CPUID_CLFSH)
			ci->ci_cflush_lsize
			    = __SHIFTOUT(miscbytes, CPUID_CLFLUSH_SIZE) << 3;
		ci->ci_initapicid = __SHIFTOUT(miscbytes, CPUID_LOCAL_APIC_ID);
	}

	/*
	 * Get the basic information from the extended cpuid leafs.
	 * These were first implemented by amd, but most of the values
	 * match with those generated by modern intel cpus.
	 */
	x86_cpuid(0x80000000, descs);
	if (descs[0] >= 0x80000000)
		ci->ci_max_ext_cpuid = descs[0];
	else
		ci->ci_max_ext_cpuid = 0;

	if (ci->ci_max_ext_cpuid >= 0x80000001) {
		/* Determine the extended feature flags. */
		x86_cpuid(0x80000001, descs);
		ci->ci_feat_val[3] = descs[2]; /* %ecx */
		ci->ci_feat_val[2] = descs[3]; /* %edx */
	}

	if (ci->ci_max_ext_cpuid >= 0x80000004) {
		/* Brand string spans leaves 0x80000002..4. */
		x86_cpuid(0x80000002, brand);
		x86_cpuid(0x80000003, brand + 4);
		x86_cpuid(0x80000004, brand + 8);
		/* Skip leading spaces on brand */
		for (i = 0; i < 48; i++) {
			if (((char *) brand)[i] != ' ')
				break;
		}
		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
	}

	/*
	 * Get the structured extended features.
	 */
	if (cpuid_level >= 7) {
		x86_cpuid(7, descs);
		ci->ci_feat_val[5] = descs[1]; /* %ebx */
		ci->ci_feat_val[6] = descs[2]; /* %ecx */
		ci->ci_feat_val[7] = descs[3]; /* %edx */
	}

	/* Vendor-specific probes; each one no-ops on other vendors. */
	cpu_probe_intel(ci);
	cpu_probe_amd(ci);
	cpu_probe_cyrix(ci);
	cpu_probe_winchip(ci);
	cpu_probe_c3(ci);
	cpu_probe_geode(ci);
	cpu_probe_vortex86(ci);

	if (ci == &cpu_info_primary) {
		cpu_probe_fpu(ci);
	}

#ifndef XENPV
	x86_cpu_topology(ci);
#endif

	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feat_val[0] & CPUID_TM) &&
	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
		/* Enable thermal monitor 1. */
		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1<<3));
	}

	ci->ci_feat_val[0] &= ~CPUID_FEAT_BLACKLIST;
	if (ci == &cpu_info_primary) {
		/* If first. Boot Processor is the cpu_feature reference. */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			cpu_feature[i] = ci->ci_feat_val[i];
		}
		identify_hypervisor();
#ifndef XENPV
		/* Early patch of text segment. */
		x86_patch(true);
#endif

		/* AES: pick the best available implementation. */
#ifdef __x86_64__	/* not yet implemented on i386 */
		if (cpu_feature[1] & CPUID2_AESNI)
			aes_md_init(&aes_ni_impl);
		else
#endif
		if (cpu_feature[4] & CPUID_VIA_HAS_ACE)
			aes_md_init(&aes_via_impl);
		else if (i386_has_sse && i386_has_sse2 &&
		    (cpu_feature[1] & CPUID2_SSE3) &&
		    (cpu_feature[1] & CPUID2_SSSE3))
			aes_md_init(&aes_ssse3_impl);
		else if (i386_has_sse && i386_has_sse2)
			aes_md_init(&aes_sse2_impl);

		/* ChaCha */
		if (i386_has_sse && i386_has_sse2)
			chacha_md_init(&chacha_sse2_impl);
	} else {
		/*
		 * If not first. Warn about cpu_feature mismatch for
		 * secondary CPUs.
		 */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			if (cpu_feature[i] != ci->ci_feat_val[i])
				aprint_error_dev(ci->ci_dev,
				    "feature mismatch: cpu_feature[%d] is "
				    "%#x, but CPU reported %#x\n",
				    i, cpu_feature[i], ci->ci_feat_val[i]);
		}
	}
}
966 
967 /* Write what we know about the cpu to the console... */
/* Write what we know about the cpu to the console... */
void
cpu_identify(struct cpu_info *ci)
{

	cpu_setmodel("%s %d86-class",
	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
	if (cpu_brand_string[0] != '\0') {
		aprint_normal_dev(ci->ci_dev, "%s", cpu_brand_string);
	} else {
		/* No brand string: print the synthesized model and speed. */
		aprint_normal_dev(ci->ci_dev, "%s", cpu_getmodel());
		if (ci->ci_data.cpu_cc_freq != 0)
			aprint_normal(", %dMHz",
			    (int)(ci->ci_data.cpu_cc_freq / 1000000));
	}
	if (ci->ci_signature != 0)
		aprint_normal(", id 0x%x", ci->ci_signature);
	aprint_normal("\n");
	aprint_normal_dev(ci->ci_dev, "node %u, package %u, core %u, smt %u\n",
	    ci->ci_numa_id, ci->ci_package_id, ci->ci_core_id, ci->ci_smt_id);
	if (cpu_brand_string[0] == '\0') {
		/* Keep cpu_brand_string populated for later consumers. */
		strlcpy(cpu_brand_string, cpu_getmodel(),
		    sizeof(cpu_brand_string));
	}
	if (cpu_class == CPUCLASS_386) {
		panic("NetBSD requires an 80486DX or later processor");
	}
	if (cputype == CPU_486DLC) {
		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
	}

#if !defined(XENPV) || defined(DOM0OPS)       /* on Xen PV rdmsr is for Dom0 only */
	if (cpu_vendor == CPUVENDOR_AMD     /* check enablement of an */
	    && device_unit(ci->ci_dev) == 0 /* AMD feature only once */
	    && ((cpu_feature[3] & CPUID_SVM) == CPUID_SVM)) {
		uint64_t val;

		/* Report if the BIOS disabled and locked out AMD SVM. */
		val = rdmsr(MSR_VMCR);
		if (((val & VMCR_SVMED) == VMCR_SVMED)
		    && ((val & VMCR_LOCK) == VMCR_LOCK)) {
			aprint_normal_dev(ci->ci_dev,
				"SVM disabled by the BIOS\n");
		}
	}
#endif

#ifdef i386
	if (i386_fpu_fdivbug == 1)
		aprint_normal_dev(ci->ci_dev,
		    "WARNING: Pentium FDIV bug detected!\n");

	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
		u_int descs[4];
		x86_cpuid(0x80860000, descs);
		if (descs[0] >= 0x80860007)
			/* Create longrun sysctls */
			tmx86_init_longrun();
	}
#endif	/* i386 */

}
1028 
1029 /*
1030  * Hypervisor
1031  */
/* What kind of guest we are running as, if any; set by identify_hypervisor(). */
vm_guest_t vm_guest = VM_GUEST_NO;

/* Maps an SMBIOS string to the guest type it implies. */
struct vm_name_guest {
	const char *name;
	vm_guest_t guest;
};

/* Matched against the SMBIOS "bios-vendor" string. */
static const struct vm_name_guest vm_bios_vendors[] = {
	{ "QEMU", VM_GUEST_VM },			/* QEMU */
	{ "Plex86", VM_GUEST_VM },			/* Plex86 */
	{ "Bochs", VM_GUEST_VM },			/* Bochs */
	{ "Xen", VM_GUEST_VM },				/* Xen */
	{ "BHYVE", VM_GUEST_VM },			/* bhyve */
	{ "Seabios", VM_GUEST_VM },			/* KVM */
	{ "innotek GmbH", VM_GUEST_VIRTUALBOX },	/* Oracle VirtualBox */
};

/* Matched against the SMBIOS "system-product" string. */
static const struct vm_name_guest vm_system_products[] = {
	{ "VMware Virtual Platform", VM_GUEST_VM },	/* VMWare VM */
	{ "Virtual Machine", VM_GUEST_VM },		/* Microsoft VirtualPC */
	{ "VirtualBox", VM_GUEST_VIRTUALBOX },		/* Sun xVM VirtualBox */
	{ "Parallels Virtual Platform", VM_GUEST_VM },	/* Parallels VM */
	{ "KVM", VM_GUEST_VM },				/* KVM */
};
1056 
/*
 * Determine whether we are running under a hypervisor and which one,
 * setting the global vm_guest.  First consults the hypervisor cpuid
 * leaf 0x40000000 (gated on the CPUID2_RAZ "hypervisor present" bit),
 * then falls back to SMBIOS strings for older hypervisors.
 */
void
identify_hypervisor(void)
{
	u_int regs[6];
	char hv_vendor[12];
	const char *p;
	int i;

	switch (vm_guest) {
	case VM_GUEST_XENPV:
	case VM_GUEST_XENPVH:
		/* guest type already known, no bios info */
		return;
	default:
		break;
	}

	/*
	 * [RFC] CPUID usage for interaction between Hypervisors and Linux.
	 * http://lkml.org/lkml/2008/10/1/246
	 *
	 * KB1009458: Mechanisms to determine if software is running in
	 * a VMware virtual machine
	 * http://kb.vmware.com/kb/1009458
	 */
	if (ISSET(cpu_feature[1], CPUID2_RAZ)) {
		vm_guest = VM_GUEST_VM;
		x86_cpuid(0x40000000, regs);
		if (regs[0] >= 0x40000000) {
			/* Hypervisor vendor signature in %ebx, %ecx, %edx. */
			memcpy(&hv_vendor[0], &regs[1], sizeof(*regs));
			memcpy(&hv_vendor[4], &regs[2], sizeof(*regs));
			memcpy(&hv_vendor[8], &regs[3], sizeof(*regs));
			if (memcmp(hv_vendor, "VMwareVMware", 12) == 0)
				vm_guest = VM_GUEST_VMWARE;
			else if (memcmp(hv_vendor, "Microsoft Hv", 12) == 0) {
				vm_guest = VM_GUEST_HV;
#if NHYPERV > 0
				hyperv_early_init();
#endif
			} else if (memcmp(hv_vendor, "KVMKVMKVM\0\0\0", 12) == 0)
				vm_guest = VM_GUEST_KVM;
			else if (memcmp(hv_vendor, "XenVMMXenVMM", 12) == 0)
				vm_guest = VM_GUEST_XENHVM;
			/* FreeBSD bhyve: "bhyve bhyve " */
			/* OpenBSD vmm:   "OpenBSDVMM58" */
			/* NetBSD nvmm:   "___ NVMM ___" */
		}
		// VirtualBox returns KVM, so keep going.
		if (vm_guest != VM_GUEST_KVM)
			return;
	}

	/*
	 * Examine SMBIOS strings for older hypervisors.
	 */
	p = pmf_get_platform("system-serial");
	if (p != NULL) {
		if (strncmp(p, "VMware-", 7) == 0 || strncmp(p, "VMW", 3) == 0) {
			/* Confirm via the VMware backdoor hypercall. */
			vmt_hvcall(VM_CMD_GET_VERSION, regs);
			if (regs[1] == VM_MAGIC) {
				vm_guest = VM_GUEST_VMWARE;
				return;
			}
		}
	}
	p = pmf_get_platform("bios-vendor");
	if (p != NULL) {
		for (i = 0; i < __arraycount(vm_bios_vendors); i++) {
			if (strcmp(p, vm_bios_vendors[i].name) == 0) {
				vm_guest = vm_bios_vendors[i].guest;
				return;
			}
		}
	}
	p = pmf_get_platform("system-product");
	if (p != NULL) {
		for (i = 0; i < __arraycount(vm_system_products); i++) {
			if (strcmp(p, vm_system_products[i].name) == 0) {
				vm_guest = vm_system_products[i].guest;
				return;
			}
		}
	}
}
1141