xref: /netbsd-src/sys/arch/x86/x86/identcpu.c (revision b5677b36047b601b9addaaa494a58ceae82c2a6c)
1 /*	$NetBSD: identcpu.c,v 1.15 2009/04/01 03:56:54 tls Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Frank van der Linden,  and by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*-
33  * Copyright (c)2008 YAMAMOTO Takashi,
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  *
45  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55  * SUCH DAMAGE.
56  */
57 
58 #include <sys/cdefs.h>
59 __KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.15 2009/04/01 03:56:54 tls Exp $");
60 
61 #include "opt_enhanced_speedstep.h"
62 #include "opt_intel_odcm.h"
63 #include "opt_intel_coretemp.h"
64 #include "opt_powernow_k8.h"
65 #include "opt_xen.h"
66 #ifdef i386	/* XXX */
67 #include "opt_powernow_k7.h"
68 #endif
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/device.h>
73 #include <sys/bitops.h>
74 
75 #include <uvm/uvm_extern.h>
76 
77 #include <machine/specialreg.h>
78 #include <machine/pio.h>
79 #include <machine/cpu.h>
80 
81 #include <x86/cputypes.h>
82 #include <x86/cacheinfo.h>
83 #include <x86/cpuvar.h>
84 #include <x86/cpu_msr.h>
85 #include <x86/powernow.h>
86 
/* Intel CPUID leaf-2 cache descriptor table, terminated by a 0 descriptor. */
static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;

/* AMD associativity-code decode tables for CPUID 0x80000006 (L2/L3). */
static const struct x86_cache_info amd_cpuid_l2cache_assoc_info[] =
	AMD_L2CACHE_INFO;

static const struct x86_cache_info amd_cpuid_l3cache_assoc_info[] =
	AMD_L3CACHE_INFO;

int cpu_vendor;			/* CPUVENDOR_* value of the boot CPU */
char cpu_brand_string[49];	/* 48-byte CPUID brand string + NUL */

/*
 * Info for CTL_HW
 */
char	cpu_model[120];

/*
 * Note: these are just the ones that may not have a cpuid instruction.
 * We deal with the rest in a different way.
 * Pairs of { vendor, class } indexed by (cpu << 1).
 */
const int i386_nocpuid_cpus[] = {
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386SX */
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386   */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486SX */
	CPUVENDOR_INTEL, CPUCLASS_486, 	/* CPU_486   */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_486DLC */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_6x86 */
	CPUVENDOR_NEXGEN, CPUCLASS_386,	/* CPU_NX586 */
};

/* Printable vendor names, indexed by CPUVENDOR_*. */
static const char cpu_vendor_names[][10] = {
	"Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta"
};
120 
121 static const struct x86_cache_info *
122 cache_info_lookup(const struct x86_cache_info *cai, uint8_t desc)
123 {
124 	int i;
125 
126 	for (i = 0; cai[i].cai_desc != 0; i++) {
127 		if (cai[i].cai_desc == desc)
128 			return (&cai[i]);
129 	}
130 
131 	return (NULL);
132 }
133 
/*
 * cpu_probe_p6:
 *
 *	Intel P6-family (family >= 6) probing: merge the extended
 *	feature flags and decode the package/core/SMT topology from
 *	the initial APIC ID, per Intel SDM (253668.pdf) 7.10.2.
 */
static void
cpu_probe_p6(struct cpu_info *ci)
{
	u_int lp_max = 1;	/* logical processors per package */
	u_int smt_max;		/* smt per core */
	u_int core_max = 1;	/* core per package */
	int smt_bits, core_bits;
	uint32_t descs[4];

	if (cpu_vendor != CPUVENDOR_INTEL ||
	    CPUID2FAMILY(ci->ci_signature) < 6)
		return;

	/* Determine the extended feature flags. */
	x86_cpuid(0x80000000, descs);
	if (descs[0] >= 0x80000001) {
		x86_cpuid(0x80000001, descs);
		ci->ci_feature3_flags |= descs[3]; /* %edx */
	}

	/* Determine topology. 253668.pdf 7.10.2. */
	ci->ci_packageid = ci->ci_initapicid;
	ci->ci_coreid = 0;
	ci->ci_smtid = 0;
	if ((ci->ci_feature_flags & CPUID_HTT) != 0) {
		/* Leaf 1, %ebx[23:16]: logical processors per package. */
		x86_cpuid(1, descs);
		lp_max = (descs[1] >> 16) & 0xff;
	}
	x86_cpuid(0, descs);
	if (descs[0] >= 4) {
		/* Leaf 4, %eax[31:26]: max core index per package. */
		x86_cpuid2(4, 0, descs);
		core_max = (descs[0] >> 26) + 1;
	}
	KASSERT(lp_max >= core_max);
	smt_max = lp_max / core_max;
	/*
	 * Width in bits of the SMT and core fields of the APIC ID.
	 * When a count is 1, ilog2(0) yields -1, so the width is 0.
	 */
	smt_bits = ilog2(smt_max - 1) + 1;
	core_bits = ilog2(core_max - 1) + 1;
	if (smt_bits + core_bits) {
		ci->ci_packageid = ci->ci_initapicid >> (smt_bits + core_bits);
	}
	if (core_bits) {
		u_int core_mask = __BITS(smt_bits, smt_bits + core_bits - 1);
		ci->ci_coreid = __SHIFTOUT(ci->ci_initapicid, core_mask);
	}
	if (smt_bits) {
		u_int smt_mask = __BITS(0, smt_bits - 1);
		ci->ci_smtid = __SHIFTOUT(ci->ci_initapicid, smt_mask);
	}
}
183 
184 static void
185 cpu_probe_amd_cache(struct cpu_info *ci)
186 {
187 	const struct x86_cache_info *cp;
188 	struct x86_cache_info *cai;
189 	int family, model;
190 	u_int descs[4];
191 	u_int lfunc;
192 
193 	family = CPUID2FAMILY(ci->ci_signature);
194 	model = CPUID2MODEL(ci->ci_signature);
195 
196 	/*
197 	 * K5 model 0 has none of this info.
198 	 */
199 	if (family == 5 && model == 0)
200 		return;
201 
202 	/*
203 	 * Get extended values for K8 and up.
204 	 */
205 	if (family == 0xf) {
206 		family += CPUID2EXTFAMILY(ci->ci_signature);
207 		model += CPUID2EXTMODEL(ci->ci_signature);
208 	}
209 
210 	/*
211 	 * Determine the largest extended function value.
212 	 */
213 	x86_cpuid(0x80000000, descs);
214 	lfunc = descs[0];
215 
216 	/*
217 	 * Determine L1 cache/TLB info.
218 	 */
219 	if (lfunc < 0x80000005) {
220 		/* No L1 cache info available. */
221 		return;
222 	}
223 
224 	x86_cpuid(0x80000005, descs);
225 
226 	/*
227 	 * K6-III and higher have large page TLBs.
228 	 */
229 	if ((family == 5 && model >= 9) || family >= 6) {
230 		cai = &ci->ci_cinfo[CAI_ITLB2];
231 		cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
232 		cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
233 		cai->cai_linesize = (4 * 1024 * 1024);
234 
235 		cai = &ci->ci_cinfo[CAI_DTLB2];
236 		cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
237 		cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
238 		cai->cai_linesize = (4 * 1024 * 1024);
239 	}
240 
241 	cai = &ci->ci_cinfo[CAI_ITLB];
242 	cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
243 	cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
244 	cai->cai_linesize = (4 * 1024);
245 
246 	cai = &ci->ci_cinfo[CAI_DTLB];
247 	cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
248 	cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
249 	cai->cai_linesize = (4 * 1024);
250 
251 	cai = &ci->ci_cinfo[CAI_DCACHE];
252 	cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
253 	cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
254 	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[2]);
255 
256 	cai = &ci->ci_cinfo[CAI_ICACHE];
257 	cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
258 	cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
259 	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);
260 
261 	/*
262 	 * Determine L2 cache/TLB info.
263 	 */
264 	if (lfunc < 0x80000006) {
265 		/* No L2 cache info available. */
266 		return;
267 	}
268 
269 	x86_cpuid(0x80000006, descs);
270 
271 	cai = &ci->ci_cinfo[CAI_L2CACHE];
272 	cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
273 	cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
274 	cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);
275 
276 	cp = cache_info_lookup(amd_cpuid_l2cache_assoc_info,
277 	    cai->cai_associativity);
278 	if (cp != NULL)
279 		cai->cai_associativity = cp->cai_associativity;
280 	else
281 		cai->cai_associativity = 0;	/* XXX Unknown/reserved */
282 
283 	if (family < 0xf) {
284 		/* No L3 cache info available. */
285 		return;
286 	}
287 
288 	cai = &ci->ci_cinfo[CAI_L3CACHE];
289 	cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
290 	cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
291 	cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);
292 
293 	cp = cache_info_lookup(amd_cpuid_l3cache_assoc_info,
294 	    cai->cai_associativity);
295 	if (cp != NULL)
296 		cai->cai_associativity = cp->cai_associativity;
297 	else
298 		cai->cai_associativity = 0;	/* XXX Unknown reserved */
299 
300 	if (lfunc < 0x80000019) {
301 		/* No 1GB Page TLB */
302 		return;
303 	}
304 
305 	x86_cpuid(0x80000019, descs);
306 
307 	cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
308 	cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]);
309 	cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]);
310 	cai->cai_linesize = (1 * 1024);
311 
312 	cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
313 	cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
314 	cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
315 	cai->cai_linesize = (1 * 1024);
316 
317 	cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
318 	cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
319 	cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
320 	cai->cai_linesize = (1 * 1024);
321 
322 	cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
323 	cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]);
324 	cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]);
325 	cai->cai_linesize = (1 * 1024);
326 }
327 
328 static void
329 cpu_probe_k5(struct cpu_info *ci)
330 {
331 	int flag;
332 
333 	if (cpu_vendor != CPUVENDOR_AMD ||
334 	    CPUID2FAMILY(ci->ci_signature) != 5)
335 		return;
336 
337 	if (CPUID2MODEL(ci->ci_signature) == 0) {
338 		/*
339 		 * According to the AMD Processor Recognition App Note,
340 		 * the AMD-K5 Model 0 uses the wrong bit to indicate
341 		 * support for global PTEs, instead using bit 9 (APIC)
342 		 * rather than bit 13 (i.e. "0x200" vs. 0x2000".  Oops!).
343 		 */
344 		flag = ci->ci_feature_flags;
345 		if ((flag & CPUID_APIC) != 0)
346 			flag = (flag & ~CPUID_APIC) | CPUID_PGE;
347 		ci->ci_feature_flags = flag;
348 	}
349 
350 	cpu_probe_amd_cache(ci);
351 }
352 
353 static void
354 cpu_probe_k678(struct cpu_info *ci)
355 {
356 	uint32_t descs[4];
357 
358 	if (cpu_vendor != CPUVENDOR_AMD ||
359 	    CPUID2FAMILY(ci->ci_signature) < 6)
360 		return;
361 
362 	/* Determine the extended feature flags. */
363 	x86_cpuid(0x80000000, descs);
364 	if (descs[0] >= 0x80000001) {
365 		x86_cpuid(0x80000001, descs);
366 		ci->ci_feature3_flags |= descs[3]; /* %edx */
367 		ci->ci_feature4_flags = descs[2];  /* %ecx */
368 	}
369 
370 	cpu_probe_amd_cache(ci);
371 }
372 
/*
 * Read a Cyrix configuration register: write the register index to
 * I/O port 0x22 and read the value back from port 0x23.
 */
static inline uint8_t
cyrix_read_reg(uint8_t reg)
{

	outb(0x22, reg);
	return inb(0x23);
}
380 
/*
 * Write a Cyrix configuration register: write the register index to
 * I/O port 0x22, then the new value to port 0x23.
 */
static inline void
cyrix_write_reg(uint8_t reg, uint8_t data)
{

	outb(0x22, reg);
	outb(0x23, data);
}
388 
/*
 * cpu_probe_cyrix_cmn:
 *
 *	Common Cyrix/Geode setup: flag the broken i8254 latch, enable
 *	suspend-on-halt (disabling the TSC where it stops in halt),
 *	and apply the "coma bug" register workaround.
 */
static void
cpu_probe_cyrix_cmn(struct cpu_info *ci)
{
	/*
	 * i8254 latch check routine:
	 *     National Geode (formerly Cyrix MediaGX) has a serious bug in
	 *     its built-in i8254-compatible clock module (cs5510 cs5520).
	 *     Set the variable 'clock_broken_latch' to indicate it.
	 *
	 * This bug is not present in the cs5530, and the flag
	 * is disabled again in sys/arch/i386/pci/pcib.c if this later
	 * model device is detected. Ideally, this work-around should not
	 * even be in here, it should be in there. XXX
	 */
	uint8_t c3;
#ifndef XEN
	extern int clock_broken_latch;

	switch (ci->ci_signature) {
	case 0x440:     /* Cyrix MediaGX */
	case 0x540:     /* GXm */
		clock_broken_latch = 1;
		break;
	}
#endif

	/* set up various cyrix registers */
	/*
	 * Enable suspend on halt (powersave mode).
	 * When powersave mode is enabled, the TSC stops counting
	 * while the CPU is halted in idle() waiting for an interrupt.
	 * This means we can't use the TSC for interval time in
	 * microtime(9), and thus it is disabled here.
	 *
	 * It still makes a perfectly good cycle counter
	 * for program profiling, so long as you remember you're
	 * counting cycles, and not time. Further, if you don't
	 * mind not using powersave mode, the TSC works just fine,
	 * so this should really be optional. XXX
	 */
	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);

	/*
	 * Do not disable the TSC on the Geode GX, it's reported to
	 * work fine.
	 */
	if (ci->ci_signature != 0x552)
		ci->ci_feature_flags &= ~CPUID_TSC;

	/* enable access to ccr4/ccr5 (MAPEN bit in CCR3) */
	c3 = cyrix_read_reg(0xC3);
	cyrix_write_reg(0xC3, c3 | 0x10);
	/* cyrix's workaround  for the "coma bug" */
	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
	/* disable access to ccr4/ccr5 (restore original CCR3) */
	cyrix_write_reg(0xC3, c3);
}
449 
450 static void
451 cpu_probe_cyrix(struct cpu_info *ci)
452 {
453 
454 	if (cpu_vendor != CPUVENDOR_CYRIX ||
455 	    CPUID2FAMILY(ci->ci_signature) < 4 ||
456 	    CPUID2FAMILY(ci->ci_signature) > 6)
457 		return;
458 
459 	cpu_probe_cyrix_cmn(ci);
460 }
461 
462 static void
463 cpu_probe_winchip(struct cpu_info *ci)
464 {
465 
466 	if (cpu_vendor != CPUVENDOR_IDT ||
467 	    CPUID2FAMILY(ci->ci_signature) != 5)
468 	    	return;
469 
470 	if (CPUID2MODEL(ci->ci_signature) == 4) {
471 		/* WinChip C6 */
472 		ci->ci_feature_flags &= ~CPUID_TSC;
473 	}
474 }
475 
476 static void
477 cpu_probe_c3(struct cpu_info *ci)
478 {
479 	u_int family, model, stepping, descs[4], lfunc, msr;
480 	struct x86_cache_info *cai;
481 
482 	if (cpu_vendor != CPUVENDOR_IDT ||
483 	    CPUID2FAMILY(ci->ci_signature) < 6)
484 	    	return;
485 
486 	family = CPUID2FAMILY(ci->ci_signature);
487 	model = CPUID2MODEL(ci->ci_signature);
488 	stepping = CPUID2STEPPING(ci->ci_signature);
489 
490 	/* Determine the largest extended function value. */
491 	x86_cpuid(0x80000000, descs);
492 	lfunc = descs[0];
493 
494 	/* Determine the extended feature flags. */
495 	if (lfunc >= 0x80000001) {
496 		x86_cpuid(0x80000001, descs);
497 		ci->ci_feature_flags |= descs[3];
498 	}
499 
500 	if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) {
501 		/* Nehemiah or Esther */
502 		x86_cpuid(0xc0000000, descs);
503 		lfunc = descs[0];
504 		if (lfunc >= 0xc0000001) {	/* has ACE, RNG */
505 		    int rng_enable = 0, ace_enable = 0;
506 		    x86_cpuid(0xc0000001, descs);
507 		    lfunc = descs[3];
508 		    ci->ci_padlock_flags = lfunc;
509 		    /* Check for and enable RNG */
510 		    if (lfunc & CPUID_VIA_HAS_RNG) {
511 		    	if (!(lfunc & CPUID_VIA_DO_RNG)) {
512 			    rng_enable++;
513 			    ci->ci_padlock_flags |= CPUID_VIA_HAS_RNG;
514 			}
515 		    }
516 		    /* Check for and enable ACE (AES-CBC) */
517 		    if (lfunc & CPUID_VIA_HAS_ACE) {
518 			if (!(lfunc & CPUID_VIA_DO_ACE)) {
519 			    ace_enable++;
520 			    ci->ci_padlock_flags |= CPUID_VIA_DO_ACE;
521 			}
522 		    }
523 		    /* Check for and enable SHA */
524 		    if (lfunc & CPUID_VIA_HAS_PHE) {
525 			if (!(lfunc & CPUID_VIA_DO_PHE)) {
526 			    ace_enable++;
527 			    ci->ci_padlock_flags |= CPUID_VIA_DO_PHE;
528 			}
529 		    }
530 		    /* Check for and enable ACE2 (AES-CTR) */
531 		    if (lfunc & CPUID_VIA_HAS_ACE2) {
532 			if (!(lfunc & CPUID_VIA_DO_ACE2)) {
533 			    ace_enable++;
534 			    ci->ci_padlock_flags |= CPUID_VIA_DO_ACE2;
535 			}
536 		    }
537 		    /* Check for and enable PMM (modmult engine) */
538 		    if (lfunc & CPUID_VIA_HAS_PMM) {
539 			if (!(lfunc & CPUID_VIA_DO_PMM)) {
540 			    ace_enable++;
541 			    ci->ci_padlock_flags |= CPUID_VIA_DO_PMM;
542 			}
543 		    }
544 
545 		    /* Actually do the enables. */
546 		    if (rng_enable) {
547 			msr = rdmsr(MSR_VIA_RNG);
548 			wrmsr(MSR_VIA_RNG, msr | MSR_VIA_RNG_ENABLE);
549 		    }
550 		    if (ace_enable) {
551 			msr = rdmsr(MSR_VIA_ACE);
552 			wrmsr(MSR_VIA_ACE, msr | MSR_VIA_ACE_ENABLE);
553 		    }
554 
555 		}
556 	}
557 
558 	/*
559 	 * Determine L1 cache/TLB info.
560 	 */
561 	if (lfunc < 0x80000005) {
562 		/* No L1 cache info available. */
563 		return;
564 	}
565 
566 	x86_cpuid(0x80000005, descs);
567 
568 	cai = &ci->ci_cinfo[CAI_ITLB];
569 	cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
570 	cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
571 	cai->cai_linesize = (4 * 1024);
572 
573 	cai = &ci->ci_cinfo[CAI_DTLB];
574 	cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
575 	cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
576 	cai->cai_linesize = (4 * 1024);
577 
578 	cai = &ci->ci_cinfo[CAI_DCACHE];
579 	cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
580 	cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
581 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]);
582 	if (family == 6 && model == 9 && stepping == 8) {
583 		/* Erratum: stepping 8 reports 4 when it should be 2 */
584 		cai->cai_associativity = 2;
585 	}
586 
587 	cai = &ci->ci_cinfo[CAI_ICACHE];
588 	cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
589 	cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
590 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
591 	if (family == 6 && model == 9 && stepping == 8) {
592 		/* Erratum: stepping 8 reports 4 when it should be 2 */
593 		cai->cai_associativity = 2;
594 	}
595 
596 	/*
597 	 * Determine L2 cache/TLB info.
598 	 */
599 	if (lfunc < 0x80000006) {
600 		/* No L2 cache info available. */
601 		return;
602 	}
603 
604 	x86_cpuid(0x80000006, descs);
605 
606 	cai = &ci->ci_cinfo[CAI_L2CACHE];
607 	if (family > 6 || model >= 9) {
608 		cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
609 		cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
610 		cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
611 	} else {
612 		cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
613 		cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
614 		cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
615 	}
616 }
617 
618 static void
619 cpu_probe_geode(struct cpu_info *ci)
620 {
621 
622 	if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
623 	    CPUID2FAMILY(ci->ci_signature) != 5)
624 	    	return;
625 
626 	cpu_probe_cyrix_cmn(ci);
627 	cpu_probe_amd_cache(ci);
628 }
629 
/*
 * cpu_probe:
 *
 *	Identify the CPU: vendor, brand string, signature, feature
 *	flags and class, then run the vendor-specific probes to fill
 *	in cache/TLB details.  Finally merge this CPU's feature flags
 *	into the system-wide cpu_feature/cpu_feature2 values (the
 *	intersection across all CPUs) and, on the first CPU, patch
 *	the kernel text.
 */
void
cpu_probe(struct cpu_info *ci)
{
	const struct x86_cache_info *cai;
	u_int descs[4];
	int iterations, i, j;
	uint8_t desc;
	uint32_t miscbytes;
	uint32_t brand[12];

	/* Defaults for pre-cpuid CPUs, indexed by the 'cpu' variable. */
	cpu_vendor = i386_nocpuid_cpus[cpu << 1];
	cpu_class = i386_nocpuid_cpus[(cpu << 1) + 1];

	if (cpuid_level < 0)
		return;

	/*
	 * Leaf 0: maximum basic leaf in %eax and the 12-byte vendor
	 * string delivered in %ebx, %edx, %ecx order — hence the
	 * 0, 2, 1 index ordering below.
	 */
	x86_cpuid(0, descs);
	cpuid_level = descs[0];
	ci->ci_vendor[0] = descs[1];
	ci->ci_vendor[2] = descs[2];
	ci->ci_vendor[1] = descs[3];
	ci->ci_vendor[3] = 0;

	if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
		cpu_vendor = CPUVENDOR_INTEL;
	else if (memcmp(ci->ci_vendor,  "AuthenticAMD", 12) == 0)
		cpu_vendor = CPUVENDOR_AMD;
	else if (memcmp(ci->ci_vendor,  "CyrixInstead", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor,  "Geode by NSC", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
		cpu_vendor = CPUVENDOR_IDT;
	else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
		cpu_vendor = CPUVENDOR_TRANSMETA;
	else
		cpu_vendor = CPUVENDOR_UNKNOWN;

	/*
	 * Fetch the 48-byte brand string from extended leaves
	 * 0x80000002..4, skipping any leading space padding.
	 */
	x86_cpuid(0x80000000, brand);
	if (brand[0] >= 0x80000004) {
		x86_cpuid(0x80000002, brand);
		x86_cpuid(0x80000003, brand + 4);
		x86_cpuid(0x80000004, brand + 8);
		for (i = 0; i < 48; i++) {
			if (((char *) brand)[i] != ' ')
				break;
		}
		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
	}

	if (cpuid_level >= 1) {
		/* Leaf 1: signature, misc bytes and feature flags. */
		x86_cpuid(1, descs);
		ci->ci_signature = descs[0];
		miscbytes = descs[1];
		ci->ci_feature2_flags = descs[2];
		ci->ci_feature_flags = descs[3];

		/* Determine family + class. */
		cpu_class = CPUID2FAMILY(ci->ci_signature) + (CPUCLASS_386 - 3);
		if (cpu_class > CPUCLASS_686)
			cpu_class = CPUCLASS_686;

		/* CLFLUSH line size is next 8 bits */
		if (ci->ci_feature_flags & CPUID_CFLUSH)
			ci->ci_cflush_lsize = ((miscbytes >> 8) & 0xff) << 3;
		ci->ci_initapicid = (miscbytes >> 24) & 0xff;
	}

	if (cpuid_level >= 2) {
		/*
		 * Parse the cache info from `cpuid' leaf 2, if we
		 * have it.  Each of the 4 registers packs 4 one-byte
		 * descriptors; bit 31 set means the register is
		 * invalid, and the low byte of %eax is the number of
		 * times the leaf must be queried.  Byte 0 of %eax is
		 * that iteration count, not a descriptor, so skip it.
		 */
		x86_cpuid(2, descs);
		iterations = descs[0] & 0xff;
		while (iterations-- > 0) {
			for (i = 0; i < 4; i++) {
				if (descs[i] & 0x80000000)
					continue;
				for (j = 0; j < 4; j++) {
					if (i == 0 && j == 0)
						continue;
					desc = (descs[i] >> (j * 8)) & 0xff;
					if (desc == 0)
						continue;
					cai = cache_info_lookup(
					    intel_cpuid_cache_info, desc);
					if (cai != NULL) {
						ci->ci_cinfo[cai->cai_index] =
						    *cai;
					}
				}
			}
		}
	}

	/* Vendor-specific probes; each checks the vendor itself. */
	cpu_probe_p6(ci);
	cpu_probe_k5(ci);
	cpu_probe_k678(ci);
	cpu_probe_cyrix(ci);
	cpu_probe_winchip(ci);
	cpu_probe_c3(ci);
	cpu_probe_geode(ci);

	/* Enable thermal monitor 1 (MISC_ENABLE bit 3) if supported. */
	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feature_flags & CPUID_TM) &&
	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
		/* Enable thermal monitor 1. */
		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1<<3));
	}

	if ((cpu_feature | cpu_feature2) == 0) {
		/* If first. */
		cpu_feature = ci->ci_feature_flags;
		cpu_feature2 = ci->ci_feature2_flags;
		/* Early patch of text segment. */
#ifndef XEN
		x86_patch(true);
#endif
	} else {
		/* If not first: keep only features common to all CPUs. */
		cpu_feature &= ci->ci_feature_flags;
		cpu_feature2 &= ci->ci_feature2_flags;
	}
}
751 
/*
 * cpu_identify:
 *
 *	Report the identified CPU on the console, sanity-check the
 *	CPU class, and kick off optional per-CPU subsystems (SVM
 *	check, LongRun, FXSAVE/SSE setup, EST, coretemp, PowerNow,
 *	on-demand clock modulation) depending on kernel options.
 */
void
cpu_identify(struct cpu_info *ci)
{

	snprintf(cpu_model, sizeof(cpu_model), "%s %d86-class",
	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
	aprint_normal(": %s", cpu_model);
	if (ci->ci_data.cpu_cc_freq != 0)
		aprint_normal(", %dMHz", (int)(ci->ci_data.cpu_cc_freq / 1000000));
	if (ci->ci_signature != 0)
		aprint_normal(", id 0x%x", ci->ci_signature);
	aprint_normal("\n");

	/* Fall back to the synthesized model when no brand string exists. */
	if (cpu_brand_string[0] == '\0') {
		strlcpy(cpu_brand_string, cpu_model, sizeof(cpu_brand_string));
	}
	if (cpu_class == CPUCLASS_386) {
		panic("NetBSD requires an 80486DX or later processor");
	}
	if (cpu == CPU_486DLC) {
		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
	}

	/* Warn (once, on cpu0) when the BIOS has disabled and locked SVM. */
	if ((cpu_vendor == CPUVENDOR_AMD) /* check enablement of an */
	  && (device_unit(ci->ci_dev) == 0) /* AMD feature only once */
	  && ((ci->ci_feature4_flags & CPUID_SVM) == CPUID_SVM)
#if defined(XEN) && !defined(DOM0OPS)
	  && (false)  /* on Xen rdmsr is for Dom0 only */
#endif
	  )
	{
		uint64_t val;

		val = rdmsr(MSR_VMCR);
		if (((val & VMCR_SVMED) == VMCR_SVMED)
		  && ((val & VMCR_LOCK) == VMCR_LOCK))
		{
			aprint_normal_dev(ci->ci_dev,
				"SVM disabled by the BIOS\n");
		}
	}

#ifdef i386 /* XXX for now */
	/* Transmeta LongRun support, when leaf 0x80860007 exists. */
	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
		u_int descs[4];
		x86_cpuid(0x80860000, descs);
		if (descs[0] >= 0x80860007)
			tmx86_init_longrun();
	}

	/* If we have FXSAVE/FXRESTOR, use them. */
	if (cpu_feature & CPUID_FXSR) {
		i386_use_fxsave = 1;
		/*
		 * If we have SSE/SSE2, enable XMM exceptions, and
		 * notify userland.
		 */
		if (cpu_feature & CPUID_SSE)
			i386_has_sse = 1;
		if (cpu_feature & CPUID_SSE2)
			i386_has_sse2 = 1;
	} else
		i386_use_fxsave = 0;
#endif	/* i386 */

#ifdef ENHANCED_SPEEDSTEP
	/* MISC_ENABLE bit 16: EST must be enabled before est_init(). */
	if (cpu_feature2 & CPUID2_EST) {
		if (rdmsr(MSR_MISC_ENABLE) & (1 << 16))
			est_init(cpu_vendor);
	}
#endif /* ENHANCED_SPEEDSTEP */

#ifdef INTEL_CORETEMP
	if (cpu_vendor == CPUVENDOR_INTEL && cpuid_level >= 0x06)
		coretemp_register(ci);
#endif

#if defined(POWERNOW_K7) || defined(POWERNOW_K8)
	/* AMD frequency scaling: K7 is family 6, K8 is family 15. */
	if (cpu_vendor == CPUVENDOR_AMD && powernow_probe(ci)) {
		switch (CPUID2FAMILY(ci->ci_signature)) {
#ifdef POWERNOW_K7
		case 6:
			k7_powernow_init();
			break;
#endif
#ifdef POWERNOW_K8
		case 15:
			k8_powernow_init();
			break;
#endif
		default:
			break;
		}
	}
#endif /* POWERNOW_K7 || POWERNOW_K8 */

#ifdef INTEL_ONDEMAND_CLOCKMOD
	if (cpuid_level >= 1) {
		clockmod_init();
	}
#endif
}
854