/*	$NetBSD: identcpu.c,v 1.98 2019/10/29 12:39:46 maxv Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden,  and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.98 2019/10/29 12:39:46 maxv Exp $");

#include "opt_xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <machine/specialreg.h>
#include <machine/pio.h>
#include <machine/cpu.h>

#include <x86/cputypes.h>
#include <x86/cacheinfo.h>
#include <x86/cpuvar.h>
#include <x86/fpu.h>

#include <x86/x86/vmtreg.h>	/* for vmt_hvcall() */
#include <x86/x86/vmtvar.h>	/* for vmt_hvcall() */

#ifndef XEN
#include "hyperv.h"
#if NHYPERV > 0
#include <x86/x86/hypervvar.h>
#endif
#endif

static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;

static const struct x86_cache_info amd_cpuid_l2l3cache_assoc_info[] =
	AMD_L2L3CACHE_INFO;

int cpu_vendor;
char cpu_brand_string[49];

int x86_fpu_save __read_mostly;
unsigned int x86_fpu_save_size __read_mostly = sizeof(struct save87);
uint64_t x86_xsave_features __read_mostly = 0;
size_t x86_xsave_offsets[XSAVE_MAX_COMPONENT+1] __read_mostly;
size_t x86_xsave_sizes[XSAVE_MAX_COMPONENT+1] __read_mostly;

/*
 * Note: these are just the ones that may not have a cpuid instruction.
 * We deal with the rest in a different way.
 */
const int i386_nocpuid_cpus[] = {
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386SX */
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386   */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486SX */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486   */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_486DLC */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_6x86 */
	CPUVENDOR_NEXGEN, CPUCLASS_386,	/* CPU_NX586 */
};

static const char cpu_vendor_names[][10] = {
	"Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta",
	"Vortex86"
};

static const struct x86_cache_info *
cache_info_lookup(const struct x86_cache_info *cai, uint8_t desc)
{
	int i;

	for (i = 0; cai[i].cai_desc != 0; i++) {
		if (cai[i].cai_desc == desc)
			return (&cai[i]);
	}

	return (NULL);
}

/*
 * Get cache info from one of the following:
 *	Intel Deterministic Cache Parameter Leaf (0x04)
 *	AMD Cache Topology Information Leaf (0x8000001d)
 */
static void
cpu_dcp_cacheinfo(struct cpu_info *ci, uint32_t leaf)
{
	u_int descs[4];
	int type, level, ways, partitions, linesize, sets, totalsize;
	int caitype = -1;
	int i;

	for (i = 0; ; i++) {
		x86_cpuid2(leaf, i, descs);
		type = __SHIFTOUT(descs[0], CPUID_DCP_CACHETYPE);
		if (type == CPUID_DCP_CACHETYPE_N)
			break;
		level = __SHIFTOUT(descs[0], CPUID_DCP_CACHELEVEL);
		switch (level) {
		case 1:
			if (type == CPUID_DCP_CACHETYPE_I)
				caitype = CAI_ICACHE;
			else if (type == CPUID_DCP_CACHETYPE_D)
				caitype = CAI_DCACHE;
			else
				caitype = -1;
			break;
		case 2:
			if (type == CPUID_DCP_CACHETYPE_U)
				caitype = CAI_L2CACHE;
			else
				caitype = -1;
			break;
		case 3:
			if (type == CPUID_DCP_CACHETYPE_U)
				caitype = CAI_L3CACHE;
			else
				caitype = -1;
			break;
		default:
			caitype = -1;
			break;
		}
		if (caitype == -1)
			continue;

		ways = __SHIFTOUT(descs[1], CPUID_DCP_WAYS) + 1;
		partitions = __SHIFTOUT(descs[1], CPUID_DCP_PARTITIONS) + 1;
		linesize = __SHIFTOUT(descs[1], CPUID_DCP_LINESIZE) + 1;
		sets = descs[2] + 1;
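		/*
		 * Total size = ways * partitions * linesize * sets.
		 * For example (illustrative numbers only), an 8-way cache
		 * with 1 partition, 64-byte lines and 64 sets works out
		 * to 8 * 1 * 64 * 64 = 32768 bytes (32KB).
		 */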
		totalsize = ways * partitions * linesize * sets;
		ci->ci_cinfo[caitype].cai_totalsize = totalsize;
		ci->ci_cinfo[caitype].cai_associativity = ways;
		ci->ci_cinfo[caitype].cai_linesize = linesize;
	}
}

static void
cpu_probe_intel_cache(struct cpu_info *ci)
{
	const struct x86_cache_info *cai;
	u_int descs[4];
	int iterations, i, j;
	uint8_t desc;

	if (cpuid_level >= 2) {
		/* Parse the cache info from `cpuid leaf 2', if we have it. */
		x86_cpuid(2, descs);
		iterations = descs[0] & 0xff;
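		/*
		 * Leaf 2 packs up to four one-byte cache/TLB descriptors
		 * into each of %eax-%edx.  A register with bit 31 set
		 * carries no valid descriptors, and the low byte of %eax
		 * is the iteration count rather than a descriptor, which
		 * is why the i == 0 && j == 0 byte is skipped below.
		 */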
		while (iterations-- > 0) {
			for (i = 0; i < 4; i++) {
				if (descs[i] & 0x80000000)
					continue;
				for (j = 0; j < 4; j++) {
					if (i == 0 && j == 0)
						continue;
					desc = (descs[i] >> (j * 8)) & 0xff;
					if (desc == 0)
						continue;
					cai = cache_info_lookup(
					    intel_cpuid_cache_info, desc);
					if (cai != NULL) {
						ci->ci_cinfo[cai->cai_index] =
						    *cai;
					}
				}
			}
		}
	}

	if (cpuid_level < 4)
		return;

	/* Parse the cache info from `cpuid leaf 4', if we have it. */
	cpu_dcp_cacheinfo(ci, 4);
}

static void
cpu_probe_intel_errata(struct cpu_info *ci)
{
	u_int family, model, stepping;

	family = CPUID_TO_FAMILY(ci->ci_signature);
	model = CPUID_TO_MODEL(ci->ci_signature);
	stepping = CPUID_TO_STEPPING(ci->ci_signature);

	if (family == 0x6 && model == 0x5C && stepping == 0x9) { /* Apollo Lake */
		wrmsr(MSR_MISC_ENABLE,
		    rdmsr(MSR_MISC_ENABLE) & ~IA32_MISC_MWAIT_EN);

		cpu_feature[1] &= ~CPUID2_MONITOR;
		ci->ci_feat_val[1] &= ~CPUID2_MONITOR;
	}
}

static void
cpu_probe_intel(struct cpu_info *ci)
{

	if (cpu_vendor != CPUVENDOR_INTEL)
		return;

	cpu_probe_intel_cache(ci);
	cpu_probe_intel_errata(ci);
}

static void
cpu_probe_amd_cache(struct cpu_info *ci)
{
	const struct x86_cache_info *cp;
	struct x86_cache_info *cai;
	int family, model;
	u_int descs[4];
	u_int lfunc;

	family = CPUID_TO_FAMILY(ci->ci_signature);
	model = CPUID_TO_MODEL(ci->ci_signature);

	/* K5 model 0 has none of this info. */
	if (family == 5 && model == 0)
		return;

	/* Determine the largest extended function value. */
	x86_cpuid(0x80000000, descs);
	lfunc = descs[0];

	if (lfunc < 0x80000005)
		return;

	/* Determine L1 cache/TLB info. */
	x86_cpuid(0x80000005, descs);

	/* K6-III and higher have large page TLBs. */
	if ((family == 5 && model >= 9) || family >= 6) {
		cai = &ci->ci_cinfo[CAI_ITLB2];
		cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
		cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
		cai->cai_linesize = (4 * 1024 * 1024);

		cai = &ci->ci_cinfo[CAI_DTLB2];
		cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
		cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
		cai->cai_linesize = (4 * 1024 * 1024);
	}

	cai = &ci->ci_cinfo[CAI_ITLB];
	cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DTLB];
	cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DCACHE];
	cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
	cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
	cai->cai_linesize = AMD_L1_ECX_DC_LS(descs[2]);

	cai = &ci->ci_cinfo[CAI_ICACHE];
	cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
	cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);

	if (lfunc < 0x80000006)
		return;

	/* Determine L2 cache/TLB info. */
	x86_cpuid(0x80000006, descs);

	cai = &ci->ci_cinfo[CAI_L2CACHE];
	cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
	cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
	cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);

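	/*
	 * The L2/L3 associativity field is an encoded value, not a raw
	 * way count, so map it through the lookup table (in the AMD
	 * encoding, e.g., 0x4 means 4-way, 0x6 means 8-way and 0xf
	 * means fully associative).
	 */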
	cp = cache_info_lookup(amd_cpuid_l2l3cache_assoc_info,
	    cai->cai_associativity);
	if (cp != NULL)
		cai->cai_associativity = cp->cai_associativity;
	else
		cai->cai_associativity = 0;	/* XXX Unknown/reserved */

	if (family < 0xf)
		return;

	/* Determine L3 cache info on AMD Family 10h and newer processors */
	cai = &ci->ci_cinfo[CAI_L3CACHE];
	cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
	cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
	cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);

	cp = cache_info_lookup(amd_cpuid_l2l3cache_assoc_info,
	    cai->cai_associativity);
	if (cp != NULL)
		cai->cai_associativity = cp->cai_associativity;
	else
		cai->cai_associativity = 0;	/* XXX Unknown/reserved */

	if (lfunc < 0x80000019)
		return;

	/* Determine 1GB TLB info. */
	x86_cpuid(0x80000019, descs);

	cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
	cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
	cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
	cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
	cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
	cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]);
	cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]);
	cai->cai_linesize = (1 * 1024);

	if (lfunc < 0x8000001d)
		return;

	if (ci->ci_feat_val[3] & CPUID_TOPOEXT)
		cpu_dcp_cacheinfo(ci, 0x8000001d);
}

static void
cpu_probe_amd(struct cpu_info *ci)
{
	uint64_t val;
	int flag;

	if (cpu_vendor != CPUVENDOR_AMD)
		return;
	if (CPUID_TO_FAMILY(ci->ci_signature) < 5)
		return;

	switch (CPUID_TO_FAMILY(ci->ci_signature)) {
	case 0x05: /* K5 */
		if (CPUID_TO_MODEL(ci->ci_signature) == 0) {
			/*
			 * According to the AMD Processor Recognition App Note,
			 * the AMD-K5 Model 0 uses the wrong bit to indicate
			 * support for global PTEs, instead using bit 9 (APIC)
			 * rather than bit 13 (i.e. "0x200" vs. "0x2000").
			 */
			flag = ci->ci_feat_val[0];
			if ((flag & CPUID_APIC) != 0)
				flag = (flag & ~CPUID_APIC) | CPUID_PGE;
			ci->ci_feat_val[0] = flag;
		}
		break;

	case 0x10: /* Family 10h */
		/*
		 * On Family 10h, certain BIOSes do not enable WC+ support.
		 * This causes WC+ to become CD, and degrades guest
		 * performance at the NPT level.
		 *
		 * Explicitly enable WC+ if we're not a guest.
		 */
		if (!ISSET(ci->ci_feat_val[1], CPUID2_RAZ)) {
			val = rdmsr(MSR_BU_CFG2);
			val &= ~BU_CFG2_CWPLUS_DIS;
			wrmsr(MSR_BU_CFG2, val);
		}
		break;
	}

	cpu_probe_amd_cache(ci);
}

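/*
 * Cyrix configuration registers live behind an index/data port pair:
 * write the register index to port 0x22, then read or write the value
 * through port 0x23.
 */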
static inline uint8_t
cyrix_read_reg(uint8_t reg)
{

	outb(0x22, reg);
	return inb(0x23);
}

static inline void
cyrix_write_reg(uint8_t reg, uint8_t data)
{

	outb(0x22, reg);
	outb(0x23, data);
}

static void
cpu_probe_cyrix_cmn(struct cpu_info *ci)
{
	/*
	 * i8254 latch check routine:
	 *     National Geode (formerly Cyrix MediaGX) has a serious bug in
	 *     its built-in i8254-compatible clock module (cs5510, cs5520).
	 *     Set the variable 'clock_broken_latch' to indicate it.
	 *
	 * This bug is not present in the cs5530, and the flag
	 * is disabled again in sys/arch/i386/pci/pcib.c if this later
	 * model device is detected. Ideally, this work-around should not
	 * even be in here, it should be in there. XXX
	 */
	uint8_t c3;
#ifndef XEN
	extern int clock_broken_latch;

	switch (ci->ci_signature) {
	case 0x440:     /* Cyrix MediaGX */
	case 0x540:     /* GXm */
		clock_broken_latch = 1;
		break;
	}
#endif

	/* set up various cyrix registers */
	/*
	 * Enable suspend on halt (powersave mode).
	 * When powersave mode is enabled, the TSC stops counting
	 * while the CPU is halted in idle() waiting for an interrupt.
	 * This means we can't use the TSC for interval time in
	 * microtime(9), and thus it is disabled here.
	 *
	 * It still makes a perfectly good cycle counter
	 * for program profiling, so long as you remember you're
	 * counting cycles, and not time. Further, if you don't
	 * mind not using powersave mode, the TSC works just fine,
	 * so this should really be optional. XXX
	 */
	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);

	/*
	 * Do not disable the TSC on the Geode GX, it's reported to
	 * work fine.
	 */
	if (ci->ci_signature != 0x552)
		ci->ci_feat_val[0] &= ~CPUID_TSC;

	/* enable access to ccr4/ccr5 */
	c3 = cyrix_read_reg(0xC3);
	cyrix_write_reg(0xC3, c3 | 0x10);
	/* Cyrix's workaround for the "coma bug" */
	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
	/* disable access to ccr4/ccr5 */
	cyrix_write_reg(0xC3, c3);
}

static void
cpu_probe_cyrix(struct cpu_info *ci)
{

	if (cpu_vendor != CPUVENDOR_CYRIX ||
	    CPUID_TO_FAMILY(ci->ci_signature) < 4 ||
	    CPUID_TO_FAMILY(ci->ci_signature) > 6)
		return;

	cpu_probe_cyrix_cmn(ci);
}

static void
cpu_probe_winchip(struct cpu_info *ci)
{

	if (cpu_vendor != CPUVENDOR_IDT ||
	    CPUID_TO_FAMILY(ci->ci_signature) != 5)
		return;

	/* WinChip C6 */
	if (CPUID_TO_MODEL(ci->ci_signature) == 4)
		ci->ci_feat_val[0] &= ~CPUID_TSC;
}

static void
cpu_probe_c3(struct cpu_info *ci)
{
	u_int family, model, stepping, descs[4], lfunc, msr;
	struct x86_cache_info *cai;

	if (cpu_vendor != CPUVENDOR_IDT ||
	    CPUID_TO_FAMILY(ci->ci_signature) < 6)
		return;

	family = CPUID_TO_FAMILY(ci->ci_signature);
	model = CPUID_TO_MODEL(ci->ci_signature);
	stepping = CPUID_TO_STEPPING(ci->ci_signature);

	/* Determine the largest extended function value. */
	x86_cpuid(0x80000000, descs);
	lfunc = descs[0];

	if (family == 6) {
		/*
		 * VIA Eden ESP.
		 *
		 * Quoting from page 3-4 of: "VIA Eden ESP Processor Datasheet"
		 * http://www.via.com.tw/download/mainboards/6/14/Eden20v115.pdf
		 *
		 * 1. The CMPXCHG8B instruction is provided and always enabled,
		 *    however, it appears disabled in the corresponding CPUID
		 *    function bit 0 to avoid a bug in an early version of
		 *    Windows NT. However, this default can be changed via a
		 *    bit in the FCR MSR.
		 */
		ci->ci_feat_val[0] |= CPUID_CX8;
		wrmsr(MSR_VIA_FCR, rdmsr(MSR_VIA_FCR) | VIA_ACE_ECX8);
	}

	if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) {
		/* VIA Nehemiah or Esther. */
		x86_cpuid(0xc0000000, descs);
		lfunc = descs[0];
		if (lfunc >= 0xc0000001) {	/* has ACE, RNG */
		    int rng_enable = 0, ace_enable = 0;
		    x86_cpuid(0xc0000001, descs);
		    lfunc = descs[3];
		    ci->ci_feat_val[4] = lfunc;
		    /* Check for and enable RNG */
		    if (lfunc & CPUID_VIA_HAS_RNG) {
			if (!(lfunc & CPUID_VIA_DO_RNG)) {
			    rng_enable++;
			    ci->ci_feat_val[4] |= CPUID_VIA_DO_RNG;
			}
		    }
		    /* Check for and enable ACE (AES-CBC) */
		    if (lfunc & CPUID_VIA_HAS_ACE) {
			if (!(lfunc & CPUID_VIA_DO_ACE)) {
			    ace_enable++;
			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE;
			}
		    }
		    /* Check for and enable SHA */
		    if (lfunc & CPUID_VIA_HAS_PHE) {
			if (!(lfunc & CPUID_VIA_DO_PHE)) {
			    ace_enable++;
			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PHE;
			}
		    }
		    /* Check for and enable ACE2 (AES-CTR) */
		    if (lfunc & CPUID_VIA_HAS_ACE2) {
			if (!(lfunc & CPUID_VIA_DO_ACE2)) {
			    ace_enable++;
			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE2;
			}
		    }
		    /* Check for and enable PMM (modmult engine) */
		    if (lfunc & CPUID_VIA_HAS_PMM) {
			if (!(lfunc & CPUID_VIA_DO_PMM)) {
			    ace_enable++;
			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PMM;
			}
		    }

		    /*
		     * Actually do the enables.  It's a little gross,
		     * but per the PadLock programming guide, "Enabling
		     * PadLock", condition 3, we must enable SSE too or
		     * else the first use of RNG or ACE instructions
		     * will generate a trap.
		     *
		     * We must do this early because of kernel RNG
		     * initialization but it is safe without the full
		     * FPU-detect as all these CPUs have SSE.
		     */
		    lcr4(rcr4() | CR4_OSFXSR);

		    if (rng_enable) {
			msr = rdmsr(MSR_VIA_RNG);
			msr |= MSR_VIA_RNG_ENABLE;
			/* C7 stepping 8 and subsequent CPUs have dual RNG */
			if (model > 0xA || (model == 0xA && stepping > 0x7)) {
				msr |= MSR_VIA_RNG_2NOISE;
			}
			wrmsr(MSR_VIA_RNG, msr);
		    }

		    if (ace_enable) {
			msr = rdmsr(MSR_VIA_ACE);
			wrmsr(MSR_VIA_ACE, msr | VIA_ACE_ENABLE);
		    }
		}
	}

	/* Explicitly disable unsafe ALTINST mode. */
	if (ci->ci_feat_val[4] & CPUID_VIA_DO_ACE) {
		msr = rdmsr(MSR_VIA_ACE);
		wrmsr(MSR_VIA_ACE, msr & ~VIA_ACE_ALTINST);
	}

	/*
	 * Determine L1 cache/TLB info.
	 */
	if (lfunc < 0x80000005) {
		/* No L1 cache info available. */
		return;
	}

	x86_cpuid(0x80000005, descs);

	cai = &ci->ci_cinfo[CAI_ITLB];
	cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
	cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DTLB];
	cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DCACHE];
	cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
	cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]);
	if (family == 6 && model == 9 && stepping == 8) {
		/* Erratum: stepping 8 reports 4 when it should be 2 */
		cai->cai_associativity = 2;
	}

	cai = &ci->ci_cinfo[CAI_ICACHE];
	cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
	cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
	if (family == 6 && model == 9 && stepping == 8) {
		/* Erratum: stepping 8 reports 4 when it should be 2 */
		cai->cai_associativity = 2;
	}

	/*
	 * Determine L2 cache/TLB info.
	 */
	if (lfunc < 0x80000006) {
		/* No L2 cache info available. */
		return;
	}

	x86_cpuid(0x80000006, descs);

	cai = &ci->ci_cinfo[CAI_L2CACHE];
	if (family > 6 || model >= 9) {
		cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
		cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
		cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
	} else {
		cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
		cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
		cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
	}
}

static void
cpu_probe_geode(struct cpu_info *ci)
{

	if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
	    CPUID_TO_FAMILY(ci->ci_signature) != 5)
		return;

	cpu_probe_cyrix_cmn(ci);
	cpu_probe_amd_cache(ci);
}

static void
cpu_probe_vortex86(struct cpu_info *ci)
{
#define PCI_MODE1_ADDRESS_REG	0x0cf8
#define PCI_MODE1_DATA_REG	0x0cfc
#define PCI_MODE1_ENABLE	0x80000000UL

	uint32_t reg;

	if (cpu_vendor != CPUVENDOR_VORTEX86)
		return;
	/*
	 * The CPU model is available from the "Customer ID register" in
	 * North Bridge Function 0 PCI space.  We can't use pci_conf_read()
	 * because the PCI subsystem is not initialised early enough.
	 */
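	/*
	 * PCI configuration mechanism 1 recap (for reference): the dword
	 * written to 0xcf8 is (enable << 31) | (bus << 16) | (dev << 11) |
	 * (func << 8) | (reg & ~3), so PCI_MODE1_ENABLE | 0x90 selects
	 * register 0x90 of bus 0, device 0, function 0.
	 */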

	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE | 0x90);
	reg = inl(PCI_MODE1_DATA_REG);

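	/*
	 * The register is expected to read "DMPn": 0x44 'D', 0x4d 'M',
	 * 0x50 'P' in the low three bytes, with n = '0' + model on top.
	 * The 0xf8ffffff mask ignores the model bits (26-24), which
	 * (reg >> 24) & 7 then extracts.
	 */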
	if ((reg & 0xf8ffffff) != 0x30504d44) {
		reg = 0;
	} else {
		reg = (reg >> 24) & 7;
	}

	static const char *cpu_vortex86_flavor[] = {
	    "??", "SX", "DX", "MX", "DX2", "MX+", "DX3", "EX",
	};
	snprintf(cpu_brand_string, sizeof(cpu_brand_string), "Vortex86%s",
	    cpu_vortex86_flavor[reg]);

#undef PCI_MODE1_ENABLE
#undef PCI_MODE1_ADDRESS_REG
#undef PCI_MODE1_DATA_REG
}

static void
cpu_probe_fpu_old(struct cpu_info *ci)
{
#if defined(__i386__) && !defined(XENPV)

	clts();
	fninit();

	/* Check for 'FDIV' bug on the original Pentium */
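	/*
	 * 4195835/3145727 is the classic test pair: a flawed FDIV gets
	 * the quotient wrong from about the fifth significant digit on,
	 * so an x - (x / y) * y style check, as npx586bug1() is expected
	 * to perform, returns nonzero on affected parts.
	 */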
	if (npx586bug1(4195835, 3145727) != 0)
		/* NB 120+MHz cpus are not affected */
		i386_fpu_fdivbug = 1;

	stts();
#endif
}

static void
cpu_probe_fpu(struct cpu_info *ci)
{
	u_int descs[4];
	int i;

	x86_fpu_save = FPU_SAVE_FSAVE;

#ifdef i386
	/* If we have FXSAVE/FXRSTOR, use them. */
	if ((ci->ci_feat_val[0] & CPUID_FXSR) == 0) {
		i386_use_fxsave = 0;
		cpu_probe_fpu_old(ci);
		return;
	}

	i386_use_fxsave = 1;
	/*
	 * If we have SSE/SSE2, enable XMM exceptions, and
	 * notify userland.
	 */
	if (ci->ci_feat_val[0] & CPUID_SSE)
		i386_has_sse = 1;
	if (ci->ci_feat_val[0] & CPUID_SSE2)
		i386_has_sse2 = 1;
#else
	/*
	 * On amd64, i386_use_fxsave, i386_has_sse and i386_has_sse2 are
	 * #defined to 1, because FXSAVE/SSE/SSE2 are always present.
	 */
#endif

	x86_fpu_save = FPU_SAVE_FXSAVE;
	x86_fpu_save_size = sizeof(struct fxsave);

	/* See if XSAVE is supported */
	if ((ci->ci_feat_val[1] & CPUID2_XSAVE) == 0)
		return;

#ifdef XENPV
	/*
	 * Xen kernel can disable XSAVE via "no-xsave" option, in that case
	 * the XSAVE/XRSTOR instructions become privileged and trigger
	 * supervisor trap. OSXSAVE flag seems to be reliably set according
	 * to whether XSAVE is actually available.
	 */
	if ((ci->ci_feat_val[1] & CPUID2_OSXSAVE) == 0)
		return;
#endif

	x86_fpu_save = FPU_SAVE_XSAVE;

	x86_cpuid2(0xd, 1, descs);
	if (descs[0] & CPUID_PES1_XSAVEOPT)
		x86_fpu_save = FPU_SAVE_XSAVEOPT;

	/* Get features and maximum size of the save area */
	x86_cpuid(0xd, descs);
	if (descs[2] > sizeof(struct fxsave))
		x86_fpu_save_size = descs[2];

	x86_xsave_features = (uint64_t)descs[3] << 32 | descs[0];

	/* Get component offsets and sizes for the save area */
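	/*
	 * CPUID 0xd sub-leaf n (n >= 2) describes XSAVE component n:
	 * %eax holds the component's size and %ebx its offset from the
	 * start of the non-compacted XSAVE area.
	 */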
	for (i = XSAVE_YMM_Hi128; i < __arraycount(x86_xsave_offsets); i++) {
		if (x86_xsave_features & __BIT(i)) {
			x86_cpuid2(0xd, i, descs);
			x86_xsave_offsets[i] = descs[1];
			x86_xsave_sizes[i] = descs[0];
		}
	}
}

void
cpu_probe(struct cpu_info *ci)
{
	u_int descs[4];
	int i;
	uint32_t miscbytes;
	uint32_t brand[12];

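	/*
	 * i386_nocpuid_cpus[] holds a (vendor, class) pair for each
	 * pre-cpuid CPU type, which is why cputype is scaled by two
	 * below.
	 */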
	if (ci == &cpu_info_primary) {
		cpu_vendor = i386_nocpuid_cpus[cputype << 1];
		cpu_class = i386_nocpuid_cpus[(cputype << 1) + 1];
	}

	if (cpuid_level < 0) {
		/* cpuid instruction not supported */
		cpu_probe_fpu_old(ci);
		return;
	}

	for (i = 0; i < __arraycount(ci->ci_feat_val); i++) {
		ci->ci_feat_val[i] = 0;
	}

	x86_cpuid(0, descs);
	cpuid_level = descs[0];
	ci->ci_max_cpuid = descs[0];

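	/*
	 * CPUID leaf 0 returns the vendor string in %ebx, %edx, %ecx
	 * order, hence the 0/2/1 element order below ("Genu" + "ineI" +
	 * "ntel" for "GenuineIntel").
	 */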
	ci->ci_vendor[0] = descs[1];
	ci->ci_vendor[2] = descs[2];
	ci->ci_vendor[1] = descs[3];
	ci->ci_vendor[3] = 0;

	if (ci == &cpu_info_primary) {
		if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
			cpu_vendor = CPUVENDOR_INTEL;
		else if (memcmp(ci->ci_vendor, "AuthenticAMD", 12) == 0)
			cpu_vendor = CPUVENDOR_AMD;
		else if (memcmp(ci->ci_vendor, "CyrixInstead", 12) == 0)
			cpu_vendor = CPUVENDOR_CYRIX;
		else if (memcmp(ci->ci_vendor, "Geode by NSC", 12) == 0)
			cpu_vendor = CPUVENDOR_CYRIX;
		else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
			cpu_vendor = CPUVENDOR_IDT;
		else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
			cpu_vendor = CPUVENDOR_TRANSMETA;
		else if (memcmp(ci->ci_vendor, "Vortex86 SoC", 12) == 0)
			cpu_vendor = CPUVENDOR_VORTEX86;
		else
			cpu_vendor = CPUVENDOR_UNKNOWN;
	}

	if (cpuid_level >= 1) {
		x86_cpuid(1, descs);
		ci->ci_signature = descs[0];
		miscbytes = descs[1];
		ci->ci_feat_val[1] = descs[2];
		ci->ci_feat_val[0] = descs[3];

		if (ci == &cpu_info_primary) {
			/* Determine family + class. */
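			/*
			 * CPUID family 3 maps to CPUCLASS_386, family 4
			 * to CPUCLASS_486, and so on; anything newer is
			 * clamped to CPUCLASS_686.
			 */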
			cpu_class = CPUID_TO_FAMILY(ci->ci_signature)
			    + (CPUCLASS_386 - 3);
			if (cpu_class > CPUCLASS_686)
				cpu_class = CPUCLASS_686;
		}

		/* CLFLUSH line size is next 8 bits */
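		/* (CPUID reports it in 8-byte units, hence the << 3.) */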
		if (ci->ci_feat_val[0] & CPUID_CFLUSH)
			ci->ci_cflush_lsize
			    = __SHIFTOUT(miscbytes, CPUID_CLFLUSH_SIZE) << 3;
		ci->ci_initapicid = __SHIFTOUT(miscbytes, CPUID_LOCAL_APIC_ID);
	}

	/*
	 * Get the basic information from the extended cpuid leaves.
	 * These were first implemented by AMD, but most of the values
	 * match those reported by modern Intel CPUs.
	 */
	x86_cpuid(0x80000000, descs);
	if (descs[0] >= 0x80000000)
		ci->ci_max_ext_cpuid = descs[0];
	else
		ci->ci_max_ext_cpuid = 0;

	if (ci->ci_max_ext_cpuid >= 0x80000001) {
		/* Determine the extended feature flags. */
		x86_cpuid(0x80000001, descs);
		ci->ci_feat_val[3] = descs[2]; /* %ecx */
		ci->ci_feat_val[2] = descs[3]; /* %edx */
	}

	if (ci->ci_max_ext_cpuid >= 0x80000004) {
		x86_cpuid(0x80000002, brand);
		x86_cpuid(0x80000003, brand + 4);
		x86_cpuid(0x80000004, brand + 8);
		/* Skip leading spaces on brand */
		for (i = 0; i < 48; i++) {
			if (((char *) brand)[i] != ' ')
				break;
		}
		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
	}

	/*
	 * Get the structured extended features.
	 */
	if (cpuid_level >= 7) {
		x86_cpuid(7, descs);
		ci->ci_feat_val[5] = descs[1]; /* %ebx */
		ci->ci_feat_val[6] = descs[2]; /* %ecx */
		ci->ci_feat_val[7] = descs[3]; /* %edx */
	}

	cpu_probe_intel(ci);
	cpu_probe_amd(ci);
	cpu_probe_cyrix(ci);
	cpu_probe_winchip(ci);
	cpu_probe_c3(ci);
	cpu_probe_geode(ci);
	cpu_probe_vortex86(ci);

	if (ci == &cpu_info_primary) {
		cpu_probe_fpu(ci);
	}

	x86_cpu_topology(ci);

	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feat_val[0] & CPUID_TM) &&
	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
		/* Enable thermal monitor 1. */
		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1<<3));
	}

	ci->ci_feat_val[0] &= ~CPUID_FEAT_BLACKLIST;
	if (ci == &cpu_info_primary) {
		/* If first: the boot processor is the cpu_feature reference. */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			cpu_feature[i] = ci->ci_feat_val[i];
		}
		identify_hypervisor();
#ifndef XEN
		/* Early patch of text segment. */
		x86_patch(true);
#endif
	} else {
		/*
		 * If not first: warn about any cpu_feature mismatch
		 * between this secondary CPU and the boot processor.
		 */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			if (cpu_feature[i] != ci->ci_feat_val[i])
				aprint_error_dev(ci->ci_dev,
				    "feature mismatch: cpu_feature[%d] is "
				    "%#x, but CPU reported %#x\n",
				    i, cpu_feature[i], ci->ci_feat_val[i]);
		}
	}
}

/* Write what we know about the cpu to the console... */
void
cpu_identify(struct cpu_info *ci)
{

	cpu_setmodel("%s %d86-class",
	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
	if (cpu_brand_string[0] != '\0') {
		aprint_normal_dev(ci->ci_dev, "%s", cpu_brand_string);
	} else {
		aprint_normal_dev(ci->ci_dev, "%s", cpu_getmodel());
		if (ci->ci_data.cpu_cc_freq != 0)
			aprint_normal(", %dMHz",
			    (int)(ci->ci_data.cpu_cc_freq / 1000000));
	}
	if (ci->ci_signature != 0)
		aprint_normal(", id 0x%x", ci->ci_signature);
	aprint_normal("\n");
	aprint_normal_dev(ci->ci_dev, "package %lu, core %lu, smt %lu\n",
	    ci->ci_package_id, ci->ci_core_id, ci->ci_smt_id);
	if (cpu_brand_string[0] == '\0') {
		strlcpy(cpu_brand_string, cpu_getmodel(),
		    sizeof(cpu_brand_string));
	}
	if (cpu_class == CPUCLASS_386) {
		panic("NetBSD requires an 80486DX or later processor");
	}
	if (cputype == CPU_486DLC) {
		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
	}

#if !defined(XENPV) || defined(DOM0OPS)       /* on Xen PV rdmsr is for Dom0 only */
	if (cpu_vendor == CPUVENDOR_AMD     /* check enablement of an */
	    && device_unit(ci->ci_dev) == 0 /* AMD feature only once */
	    && ((cpu_feature[3] & CPUID_SVM) == CPUID_SVM)) {
		uint64_t val;

		val = rdmsr(MSR_VMCR);
		if (((val & VMCR_SVMED) == VMCR_SVMED)
		    && ((val & VMCR_LOCK) == VMCR_LOCK)) {
			aprint_normal_dev(ci->ci_dev,
				"SVM disabled by the BIOS\n");
		}
	}
#endif

#ifdef i386
	if (i386_fpu_fdivbug == 1)
		aprint_normal_dev(ci->ci_dev,
		    "WARNING: Pentium FDIV bug detected!\n");

	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
		u_int descs[4];
		x86_cpuid(0x80860000, descs);
		if (descs[0] >= 0x80860007)
			/* Create longrun sysctls */
			tmx86_init_longrun();
	}
#endif	/* i386 */

}

/*
 * Hypervisor
 */
vm_guest_t vm_guest = VM_GUEST_NO;

static const char * const vm_bios_vendors[] = {
	"QEMU",				/* QEMU */
	"Plex86",			/* Plex86 */
	"Bochs",			/* Bochs */
	"Xen",				/* Xen */
	"BHYVE",			/* bhyve */
	"Seabios",			/* KVM */
};

static const char * const vm_system_products[] = {
	"VMware Virtual Platform",	/* VMWare VM */
	"Virtual Machine",		/* Microsoft VirtualPC */
	"VirtualBox",			/* Sun xVM VirtualBox */
	"Parallels Virtual Platform",	/* Parallels VM */
	"KVM",				/* KVM */
};

void
identify_hypervisor(void)
{
	u_int regs[6];
	char hv_vendor[12];
	const char *p;
	int i;

	if (vm_guest != VM_GUEST_NO)
		return;

	/*
	 * [RFC] CPUID usage for interaction between Hypervisors and Linux.
	 * http://lkml.org/lkml/2008/10/1/246
	 *
	 * KB1009458: Mechanisms to determine if software is running in
	 * a VMware virtual machine
	 * http://kb.vmware.com/kb/1009458
	 */
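	/*
	 * CPUID2_RAZ is the hypervisor-present bit (bit 31 of leaf 1's
	 * %ecx, reserved-as-zero on bare metal).  When set, leaf
	 * 0x40000000 returns the maximum hypervisor leaf in %eax and a
	 * 12-byte vendor signature in %ebx/%ecx/%edx.
	 */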
	if (ISSET(cpu_feature[1], CPUID2_RAZ)) {
		vm_guest = VM_GUEST_VM;
		x86_cpuid(0x40000000, regs);
		if (regs[0] >= 0x40000000) {
			memcpy(&hv_vendor[0], &regs[1], sizeof(*regs));
			memcpy(&hv_vendor[4], &regs[2], sizeof(*regs));
			memcpy(&hv_vendor[8], &regs[3], sizeof(*regs));
			if (memcmp(hv_vendor, "VMwareVMware", 12) == 0)
				vm_guest = VM_GUEST_VMWARE;
			else if (memcmp(hv_vendor, "Microsoft Hv", 12) == 0) {
				vm_guest = VM_GUEST_HV;
#if NHYPERV > 0
				hyperv_early_init();
#endif
			} else if (memcmp(hv_vendor, "KVMKVMKVM\0\0\0", 12) == 0)
				vm_guest = VM_GUEST_KVM;
			else if (memcmp(hv_vendor, "XenVMMXenVMM", 12) == 0)
				vm_guest = VM_GUEST_XEN;
			/* FreeBSD bhyve: "bhyve bhyve " */
			/* OpenBSD vmm:   "OpenBSDVMM58" */
			/* NetBSD nvmm:   "___ NVMM ___" */
		}
		return;
	}

	/*
	 * Examine SMBIOS strings for older hypervisors.
	 */
	p = pmf_get_platform("system-serial");
	if (p != NULL) {
		if (strncmp(p, "VMware-", 7) == 0 || strncmp(p, "VMW", 3) == 0) {
			vmt_hvcall(VM_CMD_GET_VERSION, regs);
			if (regs[1] == VM_MAGIC) {
				vm_guest = VM_GUEST_VMWARE;
				return;
			}
		}
	}
	p = pmf_get_platform("bios-vendor");
	if (p != NULL) {
		for (i = 0; i < __arraycount(vm_bios_vendors); i++) {
			if (strcmp(p, vm_bios_vendors[i]) == 0) {
				vm_guest = VM_GUEST_VM;
				return;
			}
		}
	}
	p = pmf_get_platform("system-product");
	if (p != NULL) {
		for (i = 0; i < __arraycount(vm_system_products); i++) {
			if (strcmp(p, vm_system_products[i]) == 0) {
				vm_guest = VM_GUEST_VM;
				return;
			}
		}
	}
}
1147