/*	$NetBSD: identcpu.c,v 1.39 2013/12/23 11:40:57 msaitoh Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden,  and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.39 2013/12/23 11:40:57 msaitoh Exp $");

#include "opt_xen.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <machine/specialreg.h>
#include <machine/pio.h>
#include <machine/cpu.h>

#include <x86/cputypes.h>
#include <x86/cacheinfo.h>
#include <x86/cpuvar.h>
#include <x86/cpu_msr.h>

static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;

static const struct x86_cache_info amd_cpuid_l2cache_assoc_info[] =
	AMD_L2CACHE_INFO;

static const struct x86_cache_info amd_cpuid_l3cache_assoc_info[] =
	AMD_L3CACHE_INFO;

int cpu_vendor;
char cpu_brand_string[49];

/*
 * Info for CTL_HW
 */
char	cpu_model[120];

/*
 * Note: these are just the ones that may not have a cpuid instruction.
 * We deal with the rest in a different way.
 */
const int i386_nocpuid_cpus[] = {
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386SX */
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386   */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486SX */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486   */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_486DLC */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_6x86 */
	CPUVENDOR_NEXGEN, CPUCLASS_386,	/* CPU_NX586 */
};

static const char cpu_vendor_names[][10] = {
	"Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta",
	"Vortex86"
};

static const struct x86_cache_info *
cache_info_lookup(const struct x86_cache_info *cai, uint8_t desc)
{
	int i;

	for (i = 0; cai[i].cai_desc != 0; i++) {
		if (cai[i].cai_desc == desc)
			return (&cai[i]);
	}

	return (NULL);
}

static void
cpu_probe_intel_cache(struct cpu_info *ci)
{
	const struct x86_cache_info *cai;
	u_int descs[4];
	int iterations, i, j;
	uint8_t desc;

	if (cpuid_level >= 2) {
		/* Parse the cache info from `cpuid leaf 2', if we have it. */
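		/*
		 * Leaf 2 packs one-byte cache/TLB descriptors four to a
		 * register.  The low byte of %eax gives the number of
		 * times the leaf must be executed, a register with bit
		 * 31 set holds no valid descriptors, and 0x00 is the
		 * null descriptor.
		 */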
		x86_cpuid(2, descs);
		iterations = descs[0] & 0xff;
		while (iterations-- > 0) {
			for (i = 0; i < 4; i++) {
				if (descs[i] & 0x80000000)
					continue;
				for (j = 0; j < 4; j++) {
					if (i == 0 && j == 0)
						continue;
					desc = (descs[i] >> (j * 8)) & 0xff;
					if (desc == 0)
						continue;
					cai = cache_info_lookup(
					    intel_cpuid_cache_info, desc);
					if (cai != NULL) {
						ci->ci_cinfo[cai->cai_index] =
						    *cai;
					}
				}
			}
		}
	}

	if (cpuid_level >= 4) {
		int type, level;
		int ways, partitions, linesize, sets;
		int caitype = -1;
		int totalsize;

		/* Parse the cache info from `cpuid leaf 4', if we have it. */
		for (i = 0; ; i++) {
			x86_cpuid2(4, i, descs);
			type = __SHIFTOUT(descs[0], CPUID_DCP_CACHETYPE);
			if (type == CPUID_DCP_CACHETYPE_N)
				break;
			level = __SHIFTOUT(descs[0], CPUID_DCP_CACHELEVEL);
			switch (level) {
			case 1:
				if (type == CPUID_DCP_CACHETYPE_I)
					caitype = CAI_ICACHE;
				else if (type == CPUID_DCP_CACHETYPE_D)
					caitype = CAI_DCACHE;
				else
					caitype = -1;
				break;
			case 2:
				if (type == CPUID_DCP_CACHETYPE_U)
					caitype = CAI_L2CACHE;
				else
					caitype = -1;
				break;
			case 3:
				if (type == CPUID_DCP_CACHETYPE_U)
					caitype = CAI_L3CACHE;
				else
					caitype = -1;
				break;
			default:
				caitype = -1;
				break;
			}
			if (caitype == -1)
				continue;

			ways = __SHIFTOUT(descs[1], CPUID_DCP_WAYS) + 1;
			partitions = __SHIFTOUT(descs[1], CPUID_DCP_PARTITIONS)
			    + 1;
			linesize = __SHIFTOUT(descs[1], CPUID_DCP_LINESIZE)
			    + 1;
			sets = descs[2] + 1;
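			/*
			 * All four fields are reported biased by one,
			 * hence the "+ 1"s above.  For example, 8 ways,
			 * 1 partition, 64-byte lines and 64 sets give
			 * 8 * 1 * 64 * 64 = 32768 bytes, a 32KB cache.
			 */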
			totalsize = ways * partitions * linesize * sets;
			ci->ci_cinfo[caitype].cai_totalsize = totalsize;
			ci->ci_cinfo[caitype].cai_associativity = ways;
			ci->ci_cinfo[caitype].cai_linesize = linesize;
		}
	}
}

static void
cpu_probe_intel(struct cpu_info *ci)
{

	if (cpu_vendor != CPUVENDOR_INTEL)
		return;

	cpu_probe_intel_cache(ci);
}

static void
cpu_probe_amd_cache(struct cpu_info *ci)
{
	const struct x86_cache_info *cp;
	struct x86_cache_info *cai;
	int family, model;
	u_int descs[4];
	u_int lfunc;

	family = CPUID_TO_FAMILY(ci->ci_signature);
	model = CPUID_TO_MODEL(ci->ci_signature);

	/*
	 * K5 model 0 has none of this info.
	 */
	if (family == 5 && model == 0)
		return;

	/*
	 * Determine the largest extended function value.
	 */
	x86_cpuid(0x80000000, descs);
	lfunc = descs[0];

	/*
	 * Determine L1 cache/TLB info.
	 */
	if (lfunc < 0x80000005) {
		/* No L1 cache info available. */
		return;
	}

	x86_cpuid(0x80000005, descs);

	/*
	 * K6-III and higher have large page TLBs.
	 */
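	/*
	 * For TLBs, cai_totalsize holds the entry count and
	 * cai_linesize the page size each entry maps: 4MB for these
	 * large-page TLBs, 4KB for the regular ones below.
	 */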
	if ((family == 5 && model >= 9) || family >= 6) {
		cai = &ci->ci_cinfo[CAI_ITLB2];
		cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
		cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
		cai->cai_linesize = (4 * 1024 * 1024);

		cai = &ci->ci_cinfo[CAI_DTLB2];
		cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
		cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
		cai->cai_linesize = (4 * 1024 * 1024);
	}

	cai = &ci->ci_cinfo[CAI_ITLB];
	cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DTLB];
	cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DCACHE];
	cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
	cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
	cai->cai_linesize = AMD_L1_ECX_DC_LS(descs[2]);

	cai = &ci->ci_cinfo[CAI_ICACHE];
	cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
	cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);

	/*
	 * Determine L2 cache/TLB info.
	 */
	if (lfunc < 0x80000006) {
		/* No L2 cache info available. */
		return;
	}

	x86_cpuid(0x80000006, descs);

	cai = &ci->ci_cinfo[CAI_L2CACHE];
	cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
	cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
	cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);

	cp = cache_info_lookup(amd_cpuid_l2cache_assoc_info,
	    cai->cai_associativity);
	if (cp != NULL)
		cai->cai_associativity = cp->cai_associativity;
	else
		cai->cai_associativity = 0;	/* XXX Unknown/reserved */

	if (family < 0xf) {
		/* No L3 cache info available. */
		return;
	}

	cai = &ci->ci_cinfo[CAI_L3CACHE];
	cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
	cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
	cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);

	cp = cache_info_lookup(amd_cpuid_l3cache_assoc_info,
	    cai->cai_associativity);
	if (cp != NULL)
		cai->cai_associativity = cp->cai_associativity;
	else
		cai->cai_associativity = 0;	/* XXX Unknown/reserved */

	if (lfunc < 0x80000019) {
		/* No 1GB page TLB info available. */
		return;
	}

	x86_cpuid(0x80000019, descs);

	cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
	cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
	cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
	cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
	cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
	cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]);
	cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]);
	cai->cai_linesize = (1 * 1024);
}

static void
cpu_probe_k5(struct cpu_info *ci)
{
	int flag;

	if (cpu_vendor != CPUVENDOR_AMD ||
	    CPUID_TO_FAMILY(ci->ci_signature) != 5)
		return;

	if (CPUID_TO_MODEL(ci->ci_signature) == 0) {
		/*
		 * According to the AMD Processor Recognition App Note,
		 * the AMD-K5 Model 0 uses the wrong bit to indicate
		 * support for global PTEs, instead using bit 9 (APIC)
		 * rather than bit 13 (i.e. 0x200 vs. 0x2000).  Oops!
		 */
		flag = ci->ci_feat_val[0];
		if ((flag & CPUID_APIC) != 0)
			flag = (flag & ~CPUID_APIC) | CPUID_PGE;
		ci->ci_feat_val[0] = flag;
	}

	cpu_probe_amd_cache(ci);
}

static void
cpu_probe_k678(struct cpu_info *ci)
{
	uint32_t descs[4];

	if (cpu_vendor != CPUVENDOR_AMD ||
	    CPUID_TO_FAMILY(ci->ci_signature) < 6)
		return;

	/* Determine the extended feature flags. */
	x86_cpuid(0x80000000, descs);
	if (descs[0] >= 0x80000001) {
		x86_cpuid(0x80000001, descs);
		ci->ci_feat_val[3] = descs[2]; /* %ecx */
		ci->ci_feat_val[2] = descs[3]; /* %edx */
	}

	cpu_probe_amd_cache(ci);
}

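/*
 * The Cyrix configuration registers are accessed through an index/data
 * pair at I/O ports 0x22 (index) and 0x23 (data).
 */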
static inline uint8_t
cyrix_read_reg(uint8_t reg)
{

	outb(0x22, reg);
	return inb(0x23);
}

static inline void
cyrix_write_reg(uint8_t reg, uint8_t data)
{

	outb(0x22, reg);
	outb(0x23, data);
}

static void
cpu_probe_cyrix_cmn(struct cpu_info *ci)
{
	/*
	 * i8254 latch check routine:
	 *     National Geode (formerly Cyrix MediaGX) has a serious bug in
	 *     its built-in i8254-compatible clock module (CS5510, CS5520).
	 *     Set the variable 'clock_broken_latch' to indicate it.
	 *
	 * This bug is not present in the CS5530, and the flag
	 * is disabled again in sys/arch/i386/pci/pcib.c if this later
	 * model device is detected. Ideally, this work-around should not
	 * even be in here, it should be in there. XXX
	 */
	uint8_t c3;
#ifndef XEN
	extern int clock_broken_latch;

	switch (ci->ci_signature) {
	case 0x440:     /* Cyrix MediaGX */
	case 0x540:     /* GXm */
		clock_broken_latch = 1;
		break;
	}
#endif

	/* set up various Cyrix registers */
	/*
	 * Enable suspend on halt (powersave mode).
	 * When powersave mode is enabled, the TSC stops counting
	 * while the CPU is halted in idle() waiting for an interrupt.
	 * This means we can't use the TSC for interval time in
	 * microtime(9), and thus it is disabled here.
	 *
	 * It still makes a perfectly good cycle counter
	 * for program profiling, so long as you remember you're
	 * counting cycles, and not time. Further, if you don't
	 * mind not using powersave mode, the TSC works just fine,
	 * so this should really be optional. XXX
	 */
	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);

	/*
	 * Do not disable the TSC on the Geode GX, it's reported to
	 * work fine.
	 */
	if (ci->ci_signature != 0x552)
		ci->ci_feat_val[0] &= ~CPUID_TSC;

	/* enable access to ccr4/ccr5 */
	c3 = cyrix_read_reg(0xC3);
	cyrix_write_reg(0xC3, c3 | 0x10);
	/* Cyrix's workaround for the "coma bug" */
	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
	/* disable access to ccr4/ccr5 */
	cyrix_write_reg(0xC3, c3);
}

static void
cpu_probe_cyrix(struct cpu_info *ci)
{

	if (cpu_vendor != CPUVENDOR_CYRIX ||
	    CPUID_TO_FAMILY(ci->ci_signature) < 4 ||
	    CPUID_TO_FAMILY(ci->ci_signature) > 6)
		return;

	cpu_probe_cyrix_cmn(ci);
}

static void
cpu_probe_winchip(struct cpu_info *ci)
{

	if (cpu_vendor != CPUVENDOR_IDT)
		return;

	switch (CPUID_TO_FAMILY(ci->ci_signature)) {
	case 5:
		/* WinChip C6 */
		if (CPUID_TO_MODEL(ci->ci_signature) == 4)
			ci->ci_feat_val[0] &= ~CPUID_TSC;
		break;
	case 6:
		/*
		 * VIA Eden ESP
		 *
		 * Quoting from page 3-4 of: "VIA Eden ESP Processor Datasheet"
		 * http://www.via.com.tw/download/mainboards/6/14/Eden20v115.pdf
		 *
		 * 1. The CMPXCHG8B instruction is provided and always enabled,
		 *    however, it appears disabled in the corresponding CPUID
		 *    function bit 0 to avoid a bug in an early version of
		 *    Windows NT. However, this default can be changed via a
		 *    bit in the FCR MSR.
		 */
		ci->ci_feat_val[0] |= CPUID_CX8;
		wrmsr(MSR_VIA_FCR, rdmsr(MSR_VIA_FCR) | 0x00000001);
		break;
	}
}

static void
cpu_probe_c3(struct cpu_info *ci)
{
	u_int family, model, stepping, descs[4], lfunc, msr;
	struct x86_cache_info *cai;

	if (cpu_vendor != CPUVENDOR_IDT ||
	    CPUID_TO_FAMILY(ci->ci_signature) < 6)
		return;

	family = CPUID_TO_FAMILY(ci->ci_signature);
	model = CPUID_TO_MODEL(ci->ci_signature);
	stepping = CPUID_TO_STEPPING(ci->ci_signature);

	/* Determine the largest extended function value. */
	x86_cpuid(0x80000000, descs);
	lfunc = descs[0];

	/* Determine the extended feature flags. */
	if (lfunc >= 0x80000001) {
		x86_cpuid(0x80000001, descs);
		ci->ci_feat_val[2] = descs[3];
	}

	if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) {
		/* Nehemiah or Esther */
		x86_cpuid(0xc0000000, descs);
		lfunc = descs[0];
		if (lfunc >= 0xc0000001) {	/* has ACE, RNG */
		    int rng_enable = 0, ace_enable = 0;
		    x86_cpuid(0xc0000001, descs);
		    lfunc = descs[3];
		    ci->ci_feat_val[4] = lfunc;
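		    /*
		     * Each PadLock unit advertises itself with a HAS
		     * bit and reports whether it is enabled with a DO
		     * bit.  For any unit that is present but disabled,
		     * note it here and flip the matching MSR enable
		     * bit below.
		     */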
		    /* Check for and enable RNG */
		    if (lfunc & CPUID_VIA_HAS_RNG) {
			if (!(lfunc & CPUID_VIA_DO_RNG)) {
			    rng_enable++;
			    ci->ci_feat_val[4] |= CPUID_VIA_DO_RNG;
			}
		    }
		    /* Check for and enable ACE (AES-CBC) */
		    if (lfunc & CPUID_VIA_HAS_ACE) {
			if (!(lfunc & CPUID_VIA_DO_ACE)) {
			    ace_enable++;
			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE;
			}
		    }
		    /* Check for and enable SHA */
		    if (lfunc & CPUID_VIA_HAS_PHE) {
			if (!(lfunc & CPUID_VIA_DO_PHE)) {
			    ace_enable++;
			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PHE;
			}
		    }
		    /* Check for and enable ACE2 (AES-CTR) */
		    if (lfunc & CPUID_VIA_HAS_ACE2) {
			if (!(lfunc & CPUID_VIA_DO_ACE2)) {
			    ace_enable++;
			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE2;
			}
		    }
		    /* Check for and enable PMM (modmult engine) */
		    if (lfunc & CPUID_VIA_HAS_PMM) {
			if (!(lfunc & CPUID_VIA_DO_PMM)) {
			    ace_enable++;
			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PMM;
			}
		    }

		    /* Actually do the enables. */
		    if (rng_enable) {
			msr = rdmsr(MSR_VIA_RNG);
			wrmsr(MSR_VIA_RNG, msr | MSR_VIA_RNG_ENABLE);
		    }
		    if (ace_enable) {
			msr = rdmsr(MSR_VIA_ACE);
			wrmsr(MSR_VIA_ACE, msr | MSR_VIA_ACE_ENABLE);
		    }

		}
	}

	/*
	 * Determine L1 cache/TLB info.
	 */
	if (lfunc < 0x80000005) {
		/* No L1 cache info available. */
		return;
	}

	x86_cpuid(0x80000005, descs);

	cai = &ci->ci_cinfo[CAI_ITLB];
	cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
	cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DTLB];
	cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DCACHE];
	cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
	cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
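	/*
	 * The line size field sits in the same bits (7:0) of %ecx and
	 * %edx, which is presumably why the EDX_IC macro is reused for
	 * the D-cache line size here.
	 */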
	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]);
	if (family == 6 && model == 9 && stepping == 8) {
		/* Erratum: stepping 8 reports 4 when it should be 2 */
		cai->cai_associativity = 2;
	}

	cai = &ci->ci_cinfo[CAI_ICACHE];
	cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
	cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
	if (family == 6 && model == 9 && stepping == 8) {
		/* Erratum: stepping 8 reports 4 when it should be 2 */
		cai->cai_associativity = 2;
	}

	/*
	 * Determine L2 cache/TLB info.
	 */
	if (lfunc < 0x80000006) {
		/* No L2 cache info available. */
		return;
	}

	x86_cpuid(0x80000006, descs);

	cai = &ci->ci_cinfo[CAI_L2CACHE];
	if (family > 6 || model >= 9) {
		cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
		cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
		cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
	} else {
		cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
		cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
		cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
	}
}

static void
cpu_probe_geode(struct cpu_info *ci)
{

	if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
	    CPUID_TO_FAMILY(ci->ci_signature) != 5)
		return;

	cpu_probe_cyrix_cmn(ci);
	cpu_probe_amd_cache(ci);
}

static void
cpu_probe_vortex86(struct cpu_info *ci)
{
#define PCI_MODE1_ADDRESS_REG	0x0cf8
#define PCI_MODE1_DATA_REG	0x0cfc
#define PCI_MODE1_ENABLE	0x80000000UL

	uint32_t reg;

	if (cpu_vendor != CPUVENDOR_VORTEX86)
		return;
	/*
	 * The CPU model is available from the "Customer ID register" in
	 * North Bridge Function 0 PCI space.  We can't use pci_conf_read()
	 * because the PCI subsystem is not initialised early enough.
	 */

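	/* Read the config dword at bus 0, device 0, function 0, reg 0x90. */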
	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE | 0x90);
	reg = inl(PCI_MODE1_DATA_REG);

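	/* The customer ID reads as "DMP1"/"DMP2"/"DMP3" in little-endian. */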
	switch (reg) {
	case 0x31504d44:
		strcpy(cpu_brand_string, "Vortex86SX");
		break;
	case 0x32504d44:
		strcpy(cpu_brand_string, "Vortex86DX");
		break;
	case 0x33504d44:
		strcpy(cpu_brand_string, "Vortex86MX");
		break;
	default:
		strcpy(cpu_brand_string, "Unknown Vortex86");
		break;
	}

#undef PCI_MODE1_ENABLE
#undef PCI_MODE1_ADDRESS_REG
#undef PCI_MODE1_DATA_REG
}

void
cpu_probe(struct cpu_info *ci)
{
	u_int descs[4];
	int i;
	uint32_t miscbytes;
	uint32_t brand[12];

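	/* i386_nocpuid_cpus[] holds { vendor, class } pairs indexed by cputype. */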
	cpu_vendor = i386_nocpuid_cpus[cputype << 1];
	cpu_class = i386_nocpuid_cpus[(cputype << 1) + 1];

	if (cpuid_level < 0)
		return;

	for (i = 0; i < __arraycount(ci->ci_feat_val); i++) {
		ci->ci_feat_val[i] = 0;
	}

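	/* The 12-byte vendor string is returned in %ebx, %edx, %ecx order. */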
	x86_cpuid(0, descs);
	cpuid_level = descs[0];
	ci->ci_vendor[0] = descs[1];
	ci->ci_vendor[2] = descs[2];
	ci->ci_vendor[1] = descs[3];
	ci->ci_vendor[3] = 0;

	if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
		cpu_vendor = CPUVENDOR_INTEL;
	else if (memcmp(ci->ci_vendor, "AuthenticAMD", 12) == 0)
		cpu_vendor = CPUVENDOR_AMD;
	else if (memcmp(ci->ci_vendor, "CyrixInstead", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor, "Geode by NSC", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
		cpu_vendor = CPUVENDOR_IDT;
	else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
		cpu_vendor = CPUVENDOR_TRANSMETA;
	else if (memcmp(ci->ci_vendor, "Vortex86 SoC", 12) == 0)
		cpu_vendor = CPUVENDOR_VORTEX86;
	else
		cpu_vendor = CPUVENDOR_UNKNOWN;

	x86_cpuid(0x80000000, brand);
	if (brand[0] >= 0x80000004) {
		x86_cpuid(0x80000002, brand);
		x86_cpuid(0x80000003, brand + 4);
		x86_cpuid(0x80000004, brand + 8);
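		/* The brand string may be left-padded with spaces; skip them. */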
		for (i = 0; i < 48; i++) {
			if (((char *) brand)[i] != ' ')
				break;
		}
		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
	}

	if (cpuid_level >= 1) {
		x86_cpuid(1, descs);
		ci->ci_signature = descs[0];
		miscbytes = descs[1];
		ci->ci_feat_val[1] = descs[2];
		ci->ci_feat_val[0] = descs[3];

		/* Determine family + class. */
		cpu_class = CPUID_TO_FAMILY(ci->ci_signature)
		    + (CPUCLASS_386 - 3);
		if (cpu_class > CPUCLASS_686)
			cpu_class = CPUCLASS_686;

		/* CLFLUSH line size is next 8 bits */
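		/* (in units of 8 bytes, hence the "<< 3"; e.g. 8 => 64 bytes) */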
		if (ci->ci_feat_val[0] & CPUID_CFLUSH)
			ci->ci_cflush_lsize = ((miscbytes >> 8) & 0xff) << 3;
		ci->ci_initapicid = (miscbytes >> 24) & 0xff;
	}

	cpu_probe_intel(ci);
	cpu_probe_k5(ci);
	cpu_probe_k678(ci);
	cpu_probe_cyrix(ci);
	cpu_probe_winchip(ci);
	cpu_probe_c3(ci);
	cpu_probe_geode(ci);
	cpu_probe_vortex86(ci);

	x86_cpu_topology(ci);

	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feat_val[0] & CPUID_TM) &&
	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
		/* Enable thermal monitor 1. */
		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1 << 3));
	}

	ci->ci_feat_val[0] &= ~CPUID_FEAT_BLACKLIST;
	if (ci == &cpu_info_primary) {
		/* The boot processor is the reference for cpu_feature. */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			cpu_feature[i] = ci->ci_feat_val[i];
		}
#ifndef XEN
		/* Early patch of text segment. */
		x86_patch(true);
#endif
	} else {
		/*
		 * Not the boot processor.  Warn if a secondary CPU's
		 * features do not match those of the boot processor.
		 */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			if (cpu_feature[i] != ci->ci_feat_val[i])
				aprint_error_dev(ci->ci_dev,
				    "feature mismatch: cpu_feature[%d] is "
				    "%#x, but CPU reported %#x\n",
				    i, cpu_feature[i], ci->ci_feat_val[i]);
		}
	}
}

void
cpu_identify(struct cpu_info *ci)
{

	snprintf(cpu_model, sizeof(cpu_model), "%s %d86-class",
	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
	if (cpu_brand_string[0] != '\0') {
		aprint_normal(": %s", cpu_brand_string);
	} else {
		aprint_normal(": %s", cpu_model);
		if (ci->ci_data.cpu_cc_freq != 0)
			aprint_normal(", %dMHz",
			    (int)(ci->ci_data.cpu_cc_freq / 1000000));
	}
	if (ci->ci_signature != 0)
		aprint_normal(", id 0x%x", ci->ci_signature);
	aprint_normal("\n");

	if (cpu_brand_string[0] == '\0') {
		strlcpy(cpu_brand_string, cpu_model, sizeof(cpu_brand_string));
	}
	if (cpu_class == CPUCLASS_386) {
		panic("NetBSD requires an 80486DX or later processor");
	}
	if (cputype == CPU_486DLC) {
		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
	}

	if ((cpu_vendor == CPUVENDOR_AMD) /* check enablement of an */
	  && (device_unit(ci->ci_dev) == 0) /* AMD feature only once */
	  && ((cpu_feature[3] & CPUID_SVM) == CPUID_SVM)
#if defined(XEN) && !defined(DOM0OPS)
	  && (false)  /* on Xen, rdmsr is available to Dom0 only */
#endif
	  )
	{
		uint64_t val;

		val = rdmsr(MSR_VMCR);
		if (((val & VMCR_SVMED) == VMCR_SVMED)
		  && ((val & VMCR_LOCK) == VMCR_LOCK))
		{
			aprint_normal_dev(ci->ci_dev,
				"SVM disabled by the BIOS\n");
		}
	}

#ifdef i386 /* XXX for now */
	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
		u_int descs[4];
		x86_cpuid(0x80860000, descs);
		if (descs[0] >= 0x80860007)
			tmx86_init_longrun();
	}

	/* If we have FXSAVE/FXRSTOR, use them. */
	if (cpu_feature[0] & CPUID_FXSR) {
		i386_use_fxsave = 1;
		/*
		 * If we have SSE/SSE2, enable XMM exceptions, and
		 * notify userland.
		 */
		if (cpu_feature[0] & CPUID_SSE)
			i386_has_sse = 1;
		if (cpu_feature[0] & CPUID_SSE2)
			i386_has_sse2 = 1;
	} else
		i386_use_fxsave = 0;
#endif	/* i386 */
}