xref: /netbsd-src/sys/arch/x86/x86/identcpu.c (revision da9817918ec7e88db2912a2882967c7570a83f47)
1 /*	$NetBSD: identcpu.c,v 1.16 2009/04/30 00:07:23 rmind Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Frank van der Linden,  and by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.16 2009/04/30 00:07:23 rmind Exp $");
34 
35 #include "opt_enhanced_speedstep.h"
36 #include "opt_intel_odcm.h"
37 #include "opt_intel_coretemp.h"
38 #include "opt_powernow_k8.h"
39 #include "opt_xen.h"
40 #ifdef i386	/* XXX */
41 #include "opt_powernow_k7.h"
42 #endif
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/device.h>
47 
48 #include <uvm/uvm_extern.h>
49 
50 #include <machine/specialreg.h>
51 #include <machine/pio.h>
52 #include <machine/cpu.h>
53 
54 #include <x86/cputypes.h>
55 #include <x86/cacheinfo.h>
56 #include <x86/cpuvar.h>
57 #include <x86/cpu_msr.h>
58 #include <x86/powernow.h>
59 
/* Intel CPUID leaf-2 cache/TLB descriptor table (from <x86/cacheinfo.h>). */
static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;

/* AMD L2/L3 associativity-field decode tables, keyed by cai_desc. */
static const struct x86_cache_info amd_cpuid_l2cache_assoc_info[] =
	AMD_L2CACHE_INFO;

static const struct x86_cache_info amd_cpuid_l3cache_assoc_info[] =
	AMD_L3CACHE_INFO;

/* CPUVENDOR_* value detected in cpu_probe(); indexes cpu_vendor_names[]. */
int cpu_vendor;
/* CPUID brand string: 3 leaves x 16 bytes, plus a terminating NUL. */
char cpu_brand_string[49];

/*
 * Info for CTL_HW
 */
char	cpu_model[120];

/*
 * Note: these are just the ones that may not have a cpuid instruction.
 * We deal with the rest in a different way.
 * Pairs of { vendor, class }, indexed by the CPU_* constant (see
 * cpu_probe(), which reads entry [cpu << 1] and [(cpu << 1) + 1]).
 */
const int i386_nocpuid_cpus[] = {
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386SX */
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386   */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486SX */
	CPUVENDOR_INTEL, CPUCLASS_486, 	/* CPU_486   */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_486DLC */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_6x86 */
	CPUVENDOR_NEXGEN, CPUCLASS_386,	/* CPU_NX586 */
};

/* Printable vendor names, indexed by the CPUVENDOR_* constants. */
static const char cpu_vendor_names[][10] = {
	"Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta"
};
93 
94 static const struct x86_cache_info *
95 cache_info_lookup(const struct x86_cache_info *cai, uint8_t desc)
96 {
97 	int i;
98 
99 	for (i = 0; cai[i].cai_desc != 0; i++) {
100 		if (cai[i].cai_desc == desc)
101 			return (&cai[i]);
102 	}
103 
104 	return (NULL);
105 }
106 
107 
/*
 * Probe AMD L1/L2/L3 cache and TLB geometry via the extended CPUID
 * leaves (0x80000005, 0x80000006 and, for 1GB pages, 0x80000019) and
 * record it in ci->ci_cinfo[].  Returns early whenever the next leaf
 * is not implemented by this CPU.
 */
static void
cpu_probe_amd_cache(struct cpu_info *ci)
{
	const struct x86_cache_info *cp;
	struct x86_cache_info *cai;
	int family, model;
	u_int descs[4];	/* CPUID output: %eax, %ebx, %ecx, %edx */
	u_int lfunc;	/* largest supported extended CPUID leaf */

	family = CPUID2FAMILY(ci->ci_signature);
	model = CPUID2MODEL(ci->ci_signature);

	/*
	 * K5 model 0 has none of this info.
	 */
	if (family == 5 && model == 0)
		return;

	/*
	 * Get extended values for K8 and up.
	 */
	if (family == 0xf) {
		family += CPUID2EXTFAMILY(ci->ci_signature);
		model += CPUID2EXTMODEL(ci->ci_signature);
	}

	/*
	 * Determine the largest extended function value.
	 */
	x86_cpuid(0x80000000, descs);
	lfunc = descs[0];

	/*
	 * Determine L1 cache/TLB info.
	 */
	if (lfunc < 0x80000005) {
		/* No L1 cache info available. */
		return;
	}

	x86_cpuid(0x80000005, descs);

	/*
	 * K6-III and higher have large page TLBs.
	 * For TLB entries the cai_linesize field is reused to record
	 * the page size covered (4MB here, 4KB / 1GB below).
	 */
	if ((family == 5 && model >= 9) || family >= 6) {
		cai = &ci->ci_cinfo[CAI_ITLB2];
		cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
		cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
		cai->cai_linesize = (4 * 1024 * 1024);

		cai = &ci->ci_cinfo[CAI_DTLB2];
		cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
		cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
		cai->cai_linesize = (4 * 1024 * 1024);
	}

	/* 4KB-page instruction/data TLBs (leaf 0x80000005 %ebx). */
	cai = &ci->ci_cinfo[CAI_ITLB];
	cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DTLB];
	cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DCACHE];
	cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
	cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
	/*
	 * NOTE(review): the D-cache line size below is pulled out of
	 * %ecx with the I-cache macro AMD_L1_EDX_IC_LS; presumably
	 * both macros mask the same low byte so the value is right,
	 * but AMD_L1_ECX_DC_LS would be the consistent choice --
	 * confirm against <machine/specialreg.h>.
	 */
	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[2]);

	cai = &ci->ci_cinfo[CAI_ICACHE];
	cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
	cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);

	/*
	 * Determine L2 cache/TLB info.
	 */
	if (lfunc < 0x80000006) {
		/* No L2 cache info available. */
		return;
	}

	x86_cpuid(0x80000006, descs);

	cai = &ci->ci_cinfo[CAI_L2CACHE];
	cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
	cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
	cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);

	/* Decode the raw associativity field via the lookup table. */
	cp = cache_info_lookup(amd_cpuid_l2cache_assoc_info,
	    cai->cai_associativity);
	if (cp != NULL)
		cai->cai_associativity = cp->cai_associativity;
	else
		cai->cai_associativity = 0;	/* XXX Unknown/reserved */

	if (family < 0xf) {
		/* No L3 cache info available. */
		return;
	}

	cai = &ci->ci_cinfo[CAI_L3CACHE];
	cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
	cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
	cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);

	cp = cache_info_lookup(amd_cpuid_l3cache_assoc_info,
	    cai->cai_associativity);
	if (cp != NULL)
		cai->cai_associativity = cp->cai_associativity;
	else
		cai->cai_associativity = 0;	/* XXX Unknown reserved */

	if (lfunc < 0x80000019) {
		/* No 1GB Page TLB */
		return;
	}

	x86_cpuid(0x80000019, descs);

	/*
	 * NOTE(review): the register choices below look inconsistent
	 * with the macro names -- the L1 1GB DTLB reads descs[1] (%ebx)
	 * through an *_EAX_* macro, and the L2 1GB ITLB reads descs[0]
	 * (%eax) through an *_EBX_* macro.  Per the AMD CPUID spec,
	 * leaf 0x80000019 puts L1 1GB TLB info in %eax and L2 in %ebx;
	 * verify the intended registers against specialreg.h.
	 */
	cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
	cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
	cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
	cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
	cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
	cai->cai_linesize = (1 * 1024);

	cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
	cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]);
	cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]);
	cai->cai_linesize = (1 * 1024);
}
251 
252 static void
253 cpu_probe_k5(struct cpu_info *ci)
254 {
255 	int flag;
256 
257 	if (cpu_vendor != CPUVENDOR_AMD ||
258 	    CPUID2FAMILY(ci->ci_signature) != 5)
259 		return;
260 
261 	if (CPUID2MODEL(ci->ci_signature) == 0) {
262 		/*
263 		 * According to the AMD Processor Recognition App Note,
264 		 * the AMD-K5 Model 0 uses the wrong bit to indicate
265 		 * support for global PTEs, instead using bit 9 (APIC)
266 		 * rather than bit 13 (i.e. "0x200" vs. 0x2000".  Oops!).
267 		 */
268 		flag = ci->ci_feature_flags;
269 		if ((flag & CPUID_APIC) != 0)
270 			flag = (flag & ~CPUID_APIC) | CPUID_PGE;
271 		ci->ci_feature_flags = flag;
272 	}
273 
274 	cpu_probe_amd_cache(ci);
275 }
276 
277 static void
278 cpu_probe_k678(struct cpu_info *ci)
279 {
280 	uint32_t descs[4];
281 
282 	if (cpu_vendor != CPUVENDOR_AMD ||
283 	    CPUID2FAMILY(ci->ci_signature) < 6)
284 		return;
285 
286 	/* Determine the extended feature flags. */
287 	x86_cpuid(0x80000000, descs);
288 	if (descs[0] >= 0x80000001) {
289 		x86_cpuid(0x80000001, descs);
290 		ci->ci_feature3_flags |= descs[3]; /* %edx */
291 		ci->ci_feature4_flags = descs[2];  /* %ecx */
292 	}
293 
294 	cpu_probe_amd_cache(ci);
295 }
296 
/*
 * Read a Cyrix configuration register: select the register index
 * through I/O port 0x22, then read its value from port 0x23.
 */
static inline uint8_t
cyrix_read_reg(uint8_t reg)
{

	outb(0x22, reg);
	return inb(0x23);
}
304 
/*
 * Write a Cyrix configuration register: select the register index
 * through I/O port 0x22, then write the value to port 0x23.
 */
static inline void
cyrix_write_reg(uint8_t reg, uint8_t data)
{

	outb(0x22, reg);
	outb(0x23, data);
}
312 
/*
 * Common Cyrix/National Geode setup: flag the broken i8254 latch on
 * MediaGX/GXm, enable suspend-on-halt power saving (which stops the
 * TSC), and apply the register workaround for the "coma bug".
 * The write ordering matters: CCR3 (0xC3) must gate access to the
 * 0x31..0x3c range and is restored at the end.
 */
static void
cpu_probe_cyrix_cmn(struct cpu_info *ci)
{
	/*
	 * i8254 latch check routine:
	 *     National Geode (formerly Cyrix MediaGX) has a serious bug in
	 *     its built-in i8254-compatible clock module (cs5510 cs5520).
	 *     Set the variable 'clock_broken_latch' to indicate it.
	 *
	 * This bug is not present in the cs5530, and the flag
	 * is disabled again in sys/arch/i386/pci/pcib.c if this later
	 * model device is detected. Ideally, this work-around should not
	 * even be in here, it should be in there. XXX
	 */
	uint8_t c3;	/* saved CCR3 value, restored before returning */
#ifndef XEN
	extern int clock_broken_latch;

	switch (ci->ci_signature) {
	case 0x440:     /* Cyrix MediaGX */
	case 0x540:     /* GXm */
		clock_broken_latch = 1;
		break;
	}
#endif

	/* set up various cyrix registers */
	/*
	 * Enable suspend on halt (powersave mode).
	 * When powersave mode is enabled, the TSC stops counting
	 * while the CPU is halted in idle() waiting for an interrupt.
	 * This means we can't use the TSC for interval time in
	 * microtime(9), and thus it is disabled here.
	 *
	 * It still makes a perfectly good cycle counter
	 * for program profiling, so long as you remember you're
	 * counting cycles, and not time. Further, if you don't
	 * mind not using powersave mode, the TSC works just fine,
	 * so this should really be optional. XXX
	 */
	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);

	/*
	 * Do not disable the TSC on the Geode GX, it's reported to
	 * work fine.
	 */
	if (ci->ci_signature != 0x552)
		ci->ci_feature_flags &= ~CPUID_TSC;

	/* enable access to ccr4/ccr5 */
	c3 = cyrix_read_reg(0xC3);
	cyrix_write_reg(0xC3, c3 | 0x10);
	/* cyrix's workaround  for the "coma bug" */
	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);	/* clears 0x33 */
	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
	/* disable access to ccr4/ccr5 */
	cyrix_write_reg(0xC3, c3);
}
373 
374 static void
375 cpu_probe_cyrix(struct cpu_info *ci)
376 {
377 
378 	if (cpu_vendor != CPUVENDOR_CYRIX ||
379 	    CPUID2FAMILY(ci->ci_signature) < 4 ||
380 	    CPUID2FAMILY(ci->ci_signature) > 6)
381 		return;
382 
383 	cpu_probe_cyrix_cmn(ci);
384 }
385 
386 static void
387 cpu_probe_winchip(struct cpu_info *ci)
388 {
389 
390 	if (cpu_vendor != CPUVENDOR_IDT ||
391 	    CPUID2FAMILY(ci->ci_signature) != 5)
392 	    	return;
393 
394 	if (CPUID2MODEL(ci->ci_signature) == 4) {
395 		/* WinChip C6 */
396 		ci->ci_feature_flags &= ~CPUID_TSC;
397 	}
398 }
399 
400 static void
401 cpu_probe_c3(struct cpu_info *ci)
402 {
403 	u_int family, model, stepping, descs[4], lfunc, msr;
404 	struct x86_cache_info *cai;
405 
406 	if (cpu_vendor != CPUVENDOR_IDT ||
407 	    CPUID2FAMILY(ci->ci_signature) < 6)
408 	    	return;
409 
410 	family = CPUID2FAMILY(ci->ci_signature);
411 	model = CPUID2MODEL(ci->ci_signature);
412 	stepping = CPUID2STEPPING(ci->ci_signature);
413 
414 	/* Determine the largest extended function value. */
415 	x86_cpuid(0x80000000, descs);
416 	lfunc = descs[0];
417 
418 	/* Determine the extended feature flags. */
419 	if (lfunc >= 0x80000001) {
420 		x86_cpuid(0x80000001, descs);
421 		ci->ci_feature_flags |= descs[3];
422 	}
423 
424 	if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) {
425 		/* Nehemiah or Esther */
426 		x86_cpuid(0xc0000000, descs);
427 		lfunc = descs[0];
428 		if (lfunc >= 0xc0000001) {	/* has ACE, RNG */
429 		    int rng_enable = 0, ace_enable = 0;
430 		    x86_cpuid(0xc0000001, descs);
431 		    lfunc = descs[3];
432 		    ci->ci_padlock_flags = lfunc;
433 		    /* Check for and enable RNG */
434 		    if (lfunc & CPUID_VIA_HAS_RNG) {
435 		    	if (!(lfunc & CPUID_VIA_DO_RNG)) {
436 			    rng_enable++;
437 			    ci->ci_padlock_flags |= CPUID_VIA_HAS_RNG;
438 			}
439 		    }
440 		    /* Check for and enable ACE (AES-CBC) */
441 		    if (lfunc & CPUID_VIA_HAS_ACE) {
442 			if (!(lfunc & CPUID_VIA_DO_ACE)) {
443 			    ace_enable++;
444 			    ci->ci_padlock_flags |= CPUID_VIA_DO_ACE;
445 			}
446 		    }
447 		    /* Check for and enable SHA */
448 		    if (lfunc & CPUID_VIA_HAS_PHE) {
449 			if (!(lfunc & CPUID_VIA_DO_PHE)) {
450 			    ace_enable++;
451 			    ci->ci_padlock_flags |= CPUID_VIA_DO_PHE;
452 			}
453 		    }
454 		    /* Check for and enable ACE2 (AES-CTR) */
455 		    if (lfunc & CPUID_VIA_HAS_ACE2) {
456 			if (!(lfunc & CPUID_VIA_DO_ACE2)) {
457 			    ace_enable++;
458 			    ci->ci_padlock_flags |= CPUID_VIA_DO_ACE2;
459 			}
460 		    }
461 		    /* Check for and enable PMM (modmult engine) */
462 		    if (lfunc & CPUID_VIA_HAS_PMM) {
463 			if (!(lfunc & CPUID_VIA_DO_PMM)) {
464 			    ace_enable++;
465 			    ci->ci_padlock_flags |= CPUID_VIA_DO_PMM;
466 			}
467 		    }
468 
469 		    /* Actually do the enables. */
470 		    if (rng_enable) {
471 			msr = rdmsr(MSR_VIA_RNG);
472 			wrmsr(MSR_VIA_RNG, msr | MSR_VIA_RNG_ENABLE);
473 		    }
474 		    if (ace_enable) {
475 			msr = rdmsr(MSR_VIA_ACE);
476 			wrmsr(MSR_VIA_ACE, msr | MSR_VIA_ACE_ENABLE);
477 		    }
478 
479 		}
480 	}
481 
482 	/*
483 	 * Determine L1 cache/TLB info.
484 	 */
485 	if (lfunc < 0x80000005) {
486 		/* No L1 cache info available. */
487 		return;
488 	}
489 
490 	x86_cpuid(0x80000005, descs);
491 
492 	cai = &ci->ci_cinfo[CAI_ITLB];
493 	cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
494 	cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
495 	cai->cai_linesize = (4 * 1024);
496 
497 	cai = &ci->ci_cinfo[CAI_DTLB];
498 	cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
499 	cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
500 	cai->cai_linesize = (4 * 1024);
501 
502 	cai = &ci->ci_cinfo[CAI_DCACHE];
503 	cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
504 	cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
505 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]);
506 	if (family == 6 && model == 9 && stepping == 8) {
507 		/* Erratum: stepping 8 reports 4 when it should be 2 */
508 		cai->cai_associativity = 2;
509 	}
510 
511 	cai = &ci->ci_cinfo[CAI_ICACHE];
512 	cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
513 	cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
514 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
515 	if (family == 6 && model == 9 && stepping == 8) {
516 		/* Erratum: stepping 8 reports 4 when it should be 2 */
517 		cai->cai_associativity = 2;
518 	}
519 
520 	/*
521 	 * Determine L2 cache/TLB info.
522 	 */
523 	if (lfunc < 0x80000006) {
524 		/* No L2 cache info available. */
525 		return;
526 	}
527 
528 	x86_cpuid(0x80000006, descs);
529 
530 	cai = &ci->ci_cinfo[CAI_L2CACHE];
531 	if (family > 6 || model >= 9) {
532 		cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
533 		cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
534 		cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
535 	} else {
536 		cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
537 		cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
538 		cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
539 	}
540 }
541 
542 static void
543 cpu_probe_geode(struct cpu_info *ci)
544 {
545 
546 	if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
547 	    CPUID2FAMILY(ci->ci_signature) != 5)
548 	    	return;
549 
550 	cpu_probe_cyrix_cmn(ci);
551 	cpu_probe_amd_cache(ci);
552 }
553 
/*
 * Identify the CPU via CPUID: vendor, brand string, signature,
 * feature flags and cache topology, followed by the vendor-specific
 * probe routines.  Finally fold this CPU's feature flags into the
 * system-wide cpu_feature/cpu_feature2 masks (first CPU sets them,
 * later CPUs intersect).
 */
void
cpu_probe(struct cpu_info *ci)
{
	const struct x86_cache_info *cai;
	u_int descs[4];
	int iterations, i, j;
	uint8_t desc;
	uint32_t miscbytes;
	uint32_t brand[12];

	/* Table defaults for pre-CPUID CPUs, indexed by CPU_*. */
	cpu_vendor = i386_nocpuid_cpus[cpu << 1];
	cpu_class = i386_nocpuid_cpus[(cpu << 1) + 1];

	if (cpuid_level < 0)
		return;

	/*
	 * Leaf 0: max basic leaf in %eax, vendor string in
	 * %ebx:%edx:%ecx.  The 2/1 index swap below restores the
	 * natural byte order of the 12-character string.
	 */
	x86_cpuid(0, descs);
	cpuid_level = descs[0];
	ci->ci_vendor[0] = descs[1];
	ci->ci_vendor[2] = descs[2];
	ci->ci_vendor[1] = descs[3];
	ci->ci_vendor[3] = 0;

	if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
		cpu_vendor = CPUVENDOR_INTEL;
	else if (memcmp(ci->ci_vendor,  "AuthenticAMD", 12) == 0)
		cpu_vendor = CPUVENDOR_AMD;
	else if (memcmp(ci->ci_vendor,  "CyrixInstead", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor,  "Geode by NSC", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
		cpu_vendor = CPUVENDOR_IDT;
	else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
		cpu_vendor = CPUVENDOR_TRANSMETA;
	else
		cpu_vendor = CPUVENDOR_UNKNOWN;

	/*
	 * Brand string from extended leaves 0x80000002..4 (3 x 16
	 * bytes).  Leading spaces are skipped; cpu_brand_string is
	 * 49 bytes of zeroed BSS, so the <=48-byte copy stays
	 * NUL-terminated.
	 */
	x86_cpuid(0x80000000, brand);
	if (brand[0] >= 0x80000004) {
		x86_cpuid(0x80000002, brand);
		x86_cpuid(0x80000003, brand + 4);
		x86_cpuid(0x80000004, brand + 8);
		for (i = 0; i < 48; i++) {
			if (((char *) brand)[i] != ' ')
				break;
		}
		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
	}

	if (cpuid_level >= 1) {
		/* Leaf 1: signature, misc bytes and feature flags. */
		x86_cpuid(1, descs);
		ci->ci_signature = descs[0];
		miscbytes = descs[1];
		ci->ci_feature2_flags = descs[2];
		ci->ci_feature_flags = descs[3];

		/* Determine family + class. */
		cpu_class = CPUID2FAMILY(ci->ci_signature) + (CPUCLASS_386 - 3);
		if (cpu_class > CPUCLASS_686)
			cpu_class = CPUCLASS_686;

		/* CLFLUSH line size is next 8 bits */
		if (ci->ci_feature_flags & CPUID_CFLUSH)
			ci->ci_cflush_lsize = ((miscbytes >> 8) & 0xff) << 3;
		ci->ci_initapicid = (miscbytes >> 24) & 0xff;
	}

	if (cpuid_level >= 2) {
		/* Parse the cache info from `cpuid', if we have it. */
		x86_cpuid(2, descs);
		iterations = descs[0] & 0xff;	/* %al = repeat count */
		while (iterations-- > 0) {
			for (i = 0; i < 4; i++) {
				/* Bit 31 set: register has no info. */
				if (descs[i] & 0x80000000)
					continue;
				for (j = 0; j < 4; j++) {
					/* %al is the count, not a descriptor. */
					if (i == 0 && j == 0)
						continue;
					desc = (descs[i] >> (j * 8)) & 0xff;
					if (desc == 0)
						continue;
					cai = cache_info_lookup(
					    intel_cpuid_cache_info, desc);
					if (cai != NULL) {
						ci->ci_cinfo[cai->cai_index] =
						    *cai;
					}
				}
			}
		}
	}

	/* Vendor-specific fixups and cache probes. */
	cpu_probe_k5(ci);
	cpu_probe_k678(ci);
	cpu_probe_cyrix(ci);
	cpu_probe_winchip(ci);
	cpu_probe_c3(ci);
	cpu_probe_geode(ci);

	/* Sic: "toplogy" is the spelling of the cpuvar.h interface. */
	x86_cpu_toplogy(ci);

	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feature_flags & CPUID_TM) &&
	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
		/* Enable thermal monitor 1. */
		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1<<3));
	}

	if ((cpu_feature | cpu_feature2) == 0) {
		/* If first. */
		cpu_feature = ci->ci_feature_flags;
		cpu_feature2 = ci->ci_feature2_flags;
		/* Early patch of text segment. */
#ifndef XEN
		x86_patch(true);
#endif
	} else {
		/* If not first: keep only the common feature subset. */
		cpu_feature &= ci->ci_feature_flags;
		cpu_feature2 &= ci->ci_feature2_flags;
	}
}
676 
/*
 * Attach-time identification: print the model/frequency/signature
 * line, sanity-check the CPU class, report SVM being locked off by
 * the BIOS, and start the optional frequency/thermal subdrivers
 * selected by kernel options.
 */
void
cpu_identify(struct cpu_info *ci)
{

	snprintf(cpu_model, sizeof(cpu_model), "%s %d86-class",
	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
	aprint_normal(": %s", cpu_model);
	if (ci->ci_data.cpu_cc_freq != 0)
		aprint_normal(", %dMHz", (int)(ci->ci_data.cpu_cc_freq / 1000000));
	if (ci->ci_signature != 0)
		aprint_normal(", id 0x%x", ci->ci_signature);
	aprint_normal("\n");

	/* Fall back to the synthetic model if CPUID gave no brand. */
	if (cpu_brand_string[0] == '\0') {
		strlcpy(cpu_brand_string, cpu_model, sizeof(cpu_brand_string));
	}
	if (cpu_class == CPUCLASS_386) {
		panic("NetBSD requires an 80486DX or later processor");
	}
	if (cpu == CPU_486DLC) {
		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
	}

	if ((cpu_vendor == CPUVENDOR_AMD) /* check enablement of an */
	  && (device_unit(ci->ci_dev) == 0) /* AMD feature only once */
	  && ((ci->ci_feature4_flags & CPUID_SVM) == CPUID_SVM)
#if defined(XEN) && !defined(DOM0OPS)
	  && (false)  /* on Xen rdmsr is for Dom0 only */
#endif
	  )
	{
		uint64_t val;

		/* SVM present but disabled and locked by firmware. */
		val = rdmsr(MSR_VMCR);
		if (((val & VMCR_SVMED) == VMCR_SVMED)
		  && ((val & VMCR_LOCK) == VMCR_LOCK))
		{
			aprint_normal_dev(ci->ci_dev,
				"SVM disabled by the BIOS\n");
		}
	}

#ifdef i386 /* XXX for now */
	/* Transmeta LongRun, if leaf 0x80860007 is implemented. */
	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
		u_int descs[4];
		x86_cpuid(0x80860000, descs);
		if (descs[0] >= 0x80860007)
			tmx86_init_longrun();
	}

	/* If we have FXSAVE/FXRESTOR, use them. */
	if (cpu_feature & CPUID_FXSR) {
		i386_use_fxsave = 1;
		/*
		 * If we have SSE/SSE2, enable XMM exceptions, and
		 * notify userland.
		 */
		if (cpu_feature & CPUID_SSE)
			i386_has_sse = 1;
		if (cpu_feature & CPUID_SSE2)
			i386_has_sse2 = 1;
	} else
		i386_use_fxsave = 0;
#endif	/* i386 */

#ifdef ENHANCED_SPEEDSTEP
	/* Attach EST only when MISC_ENABLE says it is engaged. */
	if (cpu_feature2 & CPUID2_EST) {
		if (rdmsr(MSR_MISC_ENABLE) & (1 << 16))
			est_init(cpu_vendor);
	}
#endif /* ENHANCED_SPEEDSTEP */

#ifdef INTEL_CORETEMP
	if (cpu_vendor == CPUVENDOR_INTEL && cpuid_level >= 0x06)
		coretemp_register(ci);
#endif

#if defined(POWERNOW_K7) || defined(POWERNOW_K8)
	/* PowerNow! frequency scaling: K7 is family 6, K8 is 15. */
	if (cpu_vendor == CPUVENDOR_AMD && powernow_probe(ci)) {
		switch (CPUID2FAMILY(ci->ci_signature)) {
#ifdef POWERNOW_K7
		case 6:
			k7_powernow_init();
			break;
#endif
#ifdef POWERNOW_K8
		case 15:
			k8_powernow_init();
			break;
#endif
		default:
			break;
		}
	}
#endif /* POWERNOW_K7 || POWERNOW_K8 */

#ifdef INTEL_ONDEMAND_CLOCKMOD
	if (cpuid_level >= 1) {
		clockmod_init();
	}
#endif
}
779