xref: /netbsd-src/sys/arch/x86/x86/identcpu.c (revision daf6c4152fcddc27c445489775ed1f66ab4ea9a9)
1 /*	$NetBSD: identcpu.c,v 1.22 2011/01/27 18:44:40 bouyer Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Frank van der Linden,  and by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.22 2011/01/27 18:44:40 bouyer Exp $");
34 
35 #include "opt_enhanced_speedstep.h"
36 #include "opt_intel_odcm.h"
37 #include "opt_intel_coretemp.h"
38 #include "opt_via_c7temp.h"
39 #include "opt_powernow_k8.h"
40 #include "opt_xen.h"
41 #ifdef i386	/* XXX */
42 #include "opt_powernow_k7.h"
43 #endif
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/device.h>
48 
49 #include <uvm/uvm_extern.h>
50 
51 #include <machine/specialreg.h>
52 #include <machine/pio.h>
53 #include <machine/cpu.h>
54 
55 #include <x86/cputypes.h>
56 #include <x86/cacheinfo.h>
57 #include <x86/cpuvar.h>
58 #include <x86/cpu_msr.h>
59 #include <x86/powernow.h>
60 
/* Cache descriptor tables, indexed by cache_info_lookup() below. */
static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;

/* AMD cpuid 0x80000006 encodes L2/L3 associativity as a small code; these
 * tables map the code to the real associativity value. */
static const struct x86_cache_info amd_cpuid_l2cache_assoc_info[] =
	AMD_L2CACHE_INFO;

static const struct x86_cache_info amd_cpuid_l3cache_assoc_info[] =
	AMD_L3CACHE_INFO;

/* CPUVENDOR_* id detected in cpu_probe(); indexes cpu_vendor_names[]. */
int cpu_vendor;
/* Brand string from cpuid 0x80000002..4 (48 chars + NUL). */
char cpu_brand_string[49];

/*
 * Info for CTL_HW
 */
char	cpu_model[120];

/*
 * Note: these are just the ones that may not have a cpuid instruction.
 * We deal with the rest in a different way.
 * Pairs of (vendor, class), indexed by the boot-time "cpu" variable:
 * i386_nocpuid_cpus[cpu << 1] is the vendor, [(cpu << 1) + 1] the class.
 */
const int i386_nocpuid_cpus[] = {
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386SX */
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386   */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486SX */
	CPUVENDOR_INTEL, CPUCLASS_486, 	/* CPU_486   */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_486DLC */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_6x86 */
	CPUVENDOR_NEXGEN, CPUCLASS_386,	/* CPU_NX586 */
};

/* Printable names, indexed by CPUVENDOR_* (see cpu_identify()). */
static const char cpu_vendor_names[][10] = {
	"Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta",
	"Vortex86"
};
95 
96 static const struct x86_cache_info *
97 cache_info_lookup(const struct x86_cache_info *cai, uint8_t desc)
98 {
99 	int i;
100 
101 	for (i = 0; cai[i].cai_desc != 0; i++) {
102 		if (cai[i].cai_desc == desc)
103 			return (&cai[i]);
104 	}
105 
106 	return (NULL);
107 }
108 
109 
110 static void
111 cpu_probe_amd_cache(struct cpu_info *ci)
112 {
113 	const struct x86_cache_info *cp;
114 	struct x86_cache_info *cai;
115 	int family, model;
116 	u_int descs[4];
117 	u_int lfunc;
118 
119 	family = CPUID2FAMILY(ci->ci_signature);
120 	model = CPUID2MODEL(ci->ci_signature);
121 
122 	/*
123 	 * K5 model 0 has none of this info.
124 	 */
125 	if (family == 5 && model == 0)
126 		return;
127 
128 	/*
129 	 * Get extended values for K8 and up.
130 	 */
131 	if (family == 0xf) {
132 		family += CPUID2EXTFAMILY(ci->ci_signature);
133 		model += CPUID2EXTMODEL(ci->ci_signature);
134 	}
135 
136 	/*
137 	 * Determine the largest extended function value.
138 	 */
139 	x86_cpuid(0x80000000, descs);
140 	lfunc = descs[0];
141 
142 	/*
143 	 * Determine L1 cache/TLB info.
144 	 */
145 	if (lfunc < 0x80000005) {
146 		/* No L1 cache info available. */
147 		return;
148 	}
149 
150 	x86_cpuid(0x80000005, descs);
151 
152 	/*
153 	 * K6-III and higher have large page TLBs.
154 	 */
155 	if ((family == 5 && model >= 9) || family >= 6) {
156 		cai = &ci->ci_cinfo[CAI_ITLB2];
157 		cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
158 		cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
159 		cai->cai_linesize = (4 * 1024 * 1024);
160 
161 		cai = &ci->ci_cinfo[CAI_DTLB2];
162 		cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
163 		cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
164 		cai->cai_linesize = (4 * 1024 * 1024);
165 	}
166 
167 	cai = &ci->ci_cinfo[CAI_ITLB];
168 	cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
169 	cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
170 	cai->cai_linesize = (4 * 1024);
171 
172 	cai = &ci->ci_cinfo[CAI_DTLB];
173 	cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
174 	cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
175 	cai->cai_linesize = (4 * 1024);
176 
177 	cai = &ci->ci_cinfo[CAI_DCACHE];
178 	cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
179 	cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
180 	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[2]);
181 
182 	cai = &ci->ci_cinfo[CAI_ICACHE];
183 	cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
184 	cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
185 	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);
186 
187 	/*
188 	 * Determine L2 cache/TLB info.
189 	 */
190 	if (lfunc < 0x80000006) {
191 		/* No L2 cache info available. */
192 		return;
193 	}
194 
195 	x86_cpuid(0x80000006, descs);
196 
197 	cai = &ci->ci_cinfo[CAI_L2CACHE];
198 	cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
199 	cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
200 	cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);
201 
202 	cp = cache_info_lookup(amd_cpuid_l2cache_assoc_info,
203 	    cai->cai_associativity);
204 	if (cp != NULL)
205 		cai->cai_associativity = cp->cai_associativity;
206 	else
207 		cai->cai_associativity = 0;	/* XXX Unknown/reserved */
208 
209 	if (family < 0xf) {
210 		/* No L3 cache info available. */
211 		return;
212 	}
213 
214 	cai = &ci->ci_cinfo[CAI_L3CACHE];
215 	cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
216 	cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
217 	cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);
218 
219 	cp = cache_info_lookup(amd_cpuid_l3cache_assoc_info,
220 	    cai->cai_associativity);
221 	if (cp != NULL)
222 		cai->cai_associativity = cp->cai_associativity;
223 	else
224 		cai->cai_associativity = 0;	/* XXX Unknown reserved */
225 
226 	if (lfunc < 0x80000019) {
227 		/* No 1GB Page TLB */
228 		return;
229 	}
230 
231 	x86_cpuid(0x80000019, descs);
232 
233 	cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
234 	cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]);
235 	cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]);
236 	cai->cai_linesize = (1 * 1024);
237 
238 	cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
239 	cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
240 	cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
241 	cai->cai_linesize = (1 * 1024);
242 
243 	cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
244 	cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
245 	cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
246 	cai->cai_linesize = (1 * 1024);
247 
248 	cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
249 	cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]);
250 	cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]);
251 	cai->cai_linesize = (1 * 1024);
252 }
253 
254 static void
255 cpu_probe_k5(struct cpu_info *ci)
256 {
257 	int flag;
258 
259 	if (cpu_vendor != CPUVENDOR_AMD ||
260 	    CPUID2FAMILY(ci->ci_signature) != 5)
261 		return;
262 
263 	if (CPUID2MODEL(ci->ci_signature) == 0) {
264 		/*
265 		 * According to the AMD Processor Recognition App Note,
266 		 * the AMD-K5 Model 0 uses the wrong bit to indicate
267 		 * support for global PTEs, instead using bit 9 (APIC)
268 		 * rather than bit 13 (i.e. "0x200" vs. 0x2000".  Oops!).
269 		 */
270 		flag = ci->ci_feat_val[0];
271 		if ((flag & CPUID_APIC) != 0)
272 			flag = (flag & ~CPUID_APIC) | CPUID_PGE;
273 		ci->ci_feat_val[0] = flag;
274 	}
275 
276 	cpu_probe_amd_cache(ci);
277 }
278 
279 static void
280 cpu_probe_k678(struct cpu_info *ci)
281 {
282 	uint32_t descs[4];
283 
284 	if (cpu_vendor != CPUVENDOR_AMD ||
285 	    CPUID2FAMILY(ci->ci_signature) < 6)
286 		return;
287 
288 	/* Determine the extended feature flags. */
289 	x86_cpuid(0x80000000, descs);
290 	if (descs[0] >= 0x80000001) {
291 		x86_cpuid(0x80000001, descs);
292 		ci->ci_feat_val[3] = descs[2]; /* %ecx */
293 		ci->ci_feat_val[2] = descs[3]; /* %edx */
294 	}
295 
296 	cpu_probe_amd_cache(ci);
297 }
298 
/*
 * Read a Cyrix configuration register: select it via port 0x22,
 * then read its value from port 0x23.
 */
static inline uint8_t
cyrix_read_reg(uint8_t reg)
{

	outb(0x22, reg);
	return inb(0x23);
}
306 
/*
 * Write a Cyrix configuration register: select it via port 0x22,
 * then write the value to port 0x23.
 */
static inline void
cyrix_write_reg(uint8_t reg, uint8_t data)
{

	outb(0x22, reg);
	outb(0x23, data);
}
314 
/*
 * Common Cyrix/NSC setup shared by cpu_probe_cyrix() and
 * cpu_probe_geode(): flags the broken i8254 latch, enables
 * suspend-on-halt, and applies the "coma bug" workaround.
 */
static void
cpu_probe_cyrix_cmn(struct cpu_info *ci)
{
	/*
	 * i8254 latch check routine:
	 *     National Geode (formerly Cyrix MediaGX) has a serious bug in
	 *     its built-in i8254-compatible clock module (cs5510 cs5520).
	 *     Set the variable 'clock_broken_latch' to indicate it.
	 *
	 * This bug is not present in the cs5530, and the flag
	 * is disabled again in sys/arch/i386/pci/pcib.c if this later
	 * model device is detected. Ideally, this work-around should not
	 * even be in here, it should be in there. XXX
	 */
	uint8_t c3;
#ifndef XEN
	extern int clock_broken_latch;

	switch (ci->ci_signature) {
	case 0x440:     /* Cyrix MediaGX */
	case 0x540:     /* GXm */
		clock_broken_latch = 1;
		break;
	}
#endif

	/* set up various cyrix registers */
	/*
	 * Enable suspend on halt (powersave mode).
	 * When powersave mode is enabled, the TSC stops counting
	 * while the CPU is halted in idle() waiting for an interrupt.
	 * This means we can't use the TSC for interval time in
	 * microtime(9), and thus it is disabled here.
	 *
	 * It still makes a perfectly good cycle counter
	 * for program profiling, so long as you remember you're
	 * counting cycles, and not time. Further, if you don't
	 * mind not using powersave mode, the TSC works just fine,
	 * so this should really be optional. XXX
	 */
	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);

	/*
	 * Do not disable the TSC on the Geode GX, it's reported to
	 * work fine.
	 */
	if (ci->ci_signature != 0x552)
		ci->ci_feat_val[0] &= ~CPUID_TSC;

	/* enable access to ccr4/ccr5 */
	c3 = cyrix_read_reg(0xC3);
	cyrix_write_reg(0xC3, c3 | 0x10);
	/* cyrix's workaround  for the "coma bug" */
	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
	/* disable access to ccr4/ccr5: restore the saved CCR3 value */
	cyrix_write_reg(0xC3, c3);
}
375 
376 static void
377 cpu_probe_cyrix(struct cpu_info *ci)
378 {
379 
380 	if (cpu_vendor != CPUVENDOR_CYRIX ||
381 	    CPUID2FAMILY(ci->ci_signature) < 4 ||
382 	    CPUID2FAMILY(ci->ci_signature) > 6)
383 		return;
384 
385 	cpu_probe_cyrix_cmn(ci);
386 }
387 
388 static void
389 cpu_probe_winchip(struct cpu_info *ci)
390 {
391 
392 	if (cpu_vendor != CPUVENDOR_IDT ||
393 	    CPUID2FAMILY(ci->ci_signature) != 5)
394 	    	return;
395 
396 	if (CPUID2MODEL(ci->ci_signature) == 4) {
397 		/* WinChip C6 */
398 		ci->ci_feat_val[0] &= ~CPUID_TSC;
399 	}
400 }
401 
402 static void
403 cpu_probe_c3(struct cpu_info *ci)
404 {
405 	u_int family, model, stepping, descs[4], lfunc, msr;
406 	struct x86_cache_info *cai;
407 
408 	if (cpu_vendor != CPUVENDOR_IDT ||
409 	    CPUID2FAMILY(ci->ci_signature) < 6)
410 	    	return;
411 
412 	family = CPUID2FAMILY(ci->ci_signature);
413 	model = CPUID2MODEL(ci->ci_signature);
414 	stepping = CPUID2STEPPING(ci->ci_signature);
415 
416 	/* Determine the largest extended function value. */
417 	x86_cpuid(0x80000000, descs);
418 	lfunc = descs[0];
419 
420 	/* Determine the extended feature flags. */
421 	if (lfunc >= 0x80000001) {
422 		x86_cpuid(0x80000001, descs);
423 		ci->ci_feat_val[2] = descs[3];
424 	}
425 
426 	if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) {
427 		/* Nehemiah or Esther */
428 		x86_cpuid(0xc0000000, descs);
429 		lfunc = descs[0];
430 		if (lfunc >= 0xc0000001) {	/* has ACE, RNG */
431 		    int rng_enable = 0, ace_enable = 0;
432 		    x86_cpuid(0xc0000001, descs);
433 		    lfunc = descs[3];
434 		    ci->ci_feat_val[4] = lfunc;
435 		    /* Check for and enable RNG */
436 		    if (lfunc & CPUID_VIA_HAS_RNG) {
437 		    	if (!(lfunc & CPUID_VIA_DO_RNG)) {
438 			    rng_enable++;
439 			    ci->ci_feat_val[4] |= CPUID_VIA_HAS_RNG;
440 			}
441 		    }
442 		    /* Check for and enable ACE (AES-CBC) */
443 		    if (lfunc & CPUID_VIA_HAS_ACE) {
444 			if (!(lfunc & CPUID_VIA_DO_ACE)) {
445 			    ace_enable++;
446 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE;
447 			}
448 		    }
449 		    /* Check for and enable SHA */
450 		    if (lfunc & CPUID_VIA_HAS_PHE) {
451 			if (!(lfunc & CPUID_VIA_DO_PHE)) {
452 			    ace_enable++;
453 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PHE;
454 			}
455 		    }
456 		    /* Check for and enable ACE2 (AES-CTR) */
457 		    if (lfunc & CPUID_VIA_HAS_ACE2) {
458 			if (!(lfunc & CPUID_VIA_DO_ACE2)) {
459 			    ace_enable++;
460 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE2;
461 			}
462 		    }
463 		    /* Check for and enable PMM (modmult engine) */
464 		    if (lfunc & CPUID_VIA_HAS_PMM) {
465 			if (!(lfunc & CPUID_VIA_DO_PMM)) {
466 			    ace_enable++;
467 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PMM;
468 			}
469 		    }
470 
471 		    /* Actually do the enables. */
472 		    if (rng_enable) {
473 			msr = rdmsr(MSR_VIA_RNG);
474 			wrmsr(MSR_VIA_RNG, msr | MSR_VIA_RNG_ENABLE);
475 		    }
476 		    if (ace_enable) {
477 			msr = rdmsr(MSR_VIA_ACE);
478 			wrmsr(MSR_VIA_ACE, msr | MSR_VIA_ACE_ENABLE);
479 		    }
480 
481 		}
482 	}
483 
484 	/*
485 	 * Determine L1 cache/TLB info.
486 	 */
487 	if (lfunc < 0x80000005) {
488 		/* No L1 cache info available. */
489 		return;
490 	}
491 
492 	x86_cpuid(0x80000005, descs);
493 
494 	cai = &ci->ci_cinfo[CAI_ITLB];
495 	cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
496 	cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
497 	cai->cai_linesize = (4 * 1024);
498 
499 	cai = &ci->ci_cinfo[CAI_DTLB];
500 	cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
501 	cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
502 	cai->cai_linesize = (4 * 1024);
503 
504 	cai = &ci->ci_cinfo[CAI_DCACHE];
505 	cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
506 	cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
507 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]);
508 	if (family == 6 && model == 9 && stepping == 8) {
509 		/* Erratum: stepping 8 reports 4 when it should be 2 */
510 		cai->cai_associativity = 2;
511 	}
512 
513 	cai = &ci->ci_cinfo[CAI_ICACHE];
514 	cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
515 	cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
516 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
517 	if (family == 6 && model == 9 && stepping == 8) {
518 		/* Erratum: stepping 8 reports 4 when it should be 2 */
519 		cai->cai_associativity = 2;
520 	}
521 
522 	/*
523 	 * Determine L2 cache/TLB info.
524 	 */
525 	if (lfunc < 0x80000006) {
526 		/* No L2 cache info available. */
527 		return;
528 	}
529 
530 	x86_cpuid(0x80000006, descs);
531 
532 	cai = &ci->ci_cinfo[CAI_L2CACHE];
533 	if (family > 6 || model >= 9) {
534 		cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
535 		cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
536 		cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
537 	} else {
538 		cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
539 		cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
540 		cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
541 	}
542 }
543 
544 static void
545 cpu_probe_geode(struct cpu_info *ci)
546 {
547 
548 	if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
549 	    CPUID2FAMILY(ci->ci_signature) != 5)
550 	    	return;
551 
552 	cpu_probe_cyrix_cmn(ci);
553 	cpu_probe_amd_cache(ci);
554 }
555 
556 static void
557 cpu_probe_vortex86(struct cpu_info *ci)
558 {
559 #define PCI_MODE1_ADDRESS_REG	0x0cf8
560 #define PCI_MODE1_DATA_REG	0x0cfc
561 #define PCI_MODE1_ENABLE	0x80000000UL
562 
563 	uint32_t reg;
564 
565 	if (cpu_vendor != CPUVENDOR_VORTEX86)
566 		return;
567 	/*
568 	 * CPU model available from "Customer ID register" in
569 	 * North Bridge Function 0 PCI space
570 	 * we can't use pci_conf_read() because the PCI subsystem is not
571 	 * not initialised early enough
572 	 */
573 
574 	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE | 0x90);
575 	reg = inl(PCI_MODE1_DATA_REG);
576 
577 	switch(reg) {
578 	case 0x31504d44:
579 		strcpy(cpu_brand_string, "Vortex86SX");
580 		break;
581 	case 0x32504d44:
582 		strcpy(cpu_brand_string, "Vortex86DX");
583 		break;
584 	case 0x33504d44:
585 		strcpy(cpu_brand_string, "Vortex86MX");
586 		break;
587 	default:
588 		strcpy(cpu_brand_string, "Unknown Vortex86");
589 		break;
590 	}
591 
592 #undef PCI_MODE1_ENABLE
593 #undef PCI_MODE1_ADDRESS_REG
594 #undef PCI_MODE1_DATA_REG
595 }
596 
/*
 * Identify the CPU: determine vendor, class, feature flags, brand
 * string and cache info, then run the vendor-specific probes.
 * Called once per CPU; the boot processor additionally establishes
 * the global cpu_feature[] reference values.
 */
void
cpu_probe(struct cpu_info *ci)
{
	const struct x86_cache_info *cai;
	u_int descs[4];
	int iterations, i, j;
	uint8_t desc;
	uint32_t miscbytes;
	uint32_t brand[12];

	/* Defaults for pre-cpuid CPUs, from the boot-time "cpu" type. */
	cpu_vendor = i386_nocpuid_cpus[cpu << 1];
	cpu_class = i386_nocpuid_cpus[(cpu << 1) + 1];

	/* cpuid_level < 0 means no cpuid instruction; nothing more to do. */
	if (cpuid_level < 0)
		return;

	for (i = 0; i < __arraycount(ci->ci_feat_val); i++) {
		ci->ci_feat_val[i] = 0;
	}

	/*
	 * cpuid function 0: max basic function in %eax, vendor string
	 * in %ebx:%edx:%ecx (note the register order).
	 */
	x86_cpuid(0, descs);
	cpuid_level = descs[0];
	ci->ci_vendor[0] = descs[1];
	ci->ci_vendor[2] = descs[2];
	ci->ci_vendor[1] = descs[3];
	ci->ci_vendor[3] = 0;

	if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
		cpu_vendor = CPUVENDOR_INTEL;
	else if (memcmp(ci->ci_vendor,  "AuthenticAMD", 12) == 0)
		cpu_vendor = CPUVENDOR_AMD;
	else if (memcmp(ci->ci_vendor,  "CyrixInstead", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor,  "Geode by NSC", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
		cpu_vendor = CPUVENDOR_IDT;
	else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
		cpu_vendor = CPUVENDOR_TRANSMETA;
	else if (memcmp(ci->ci_vendor, "Vortex86 SoC", 12) == 0)
		cpu_vendor = CPUVENDOR_VORTEX86;
	else
		cpu_vendor = CPUVENDOR_UNKNOWN;

	/*
	 * Brand string from cpuid 0x80000002..4, skipping any leading
	 * spaces (some CPUs right-justify the string with padding).
	 */
	x86_cpuid(0x80000000, brand);
	if (brand[0] >= 0x80000004) {
		x86_cpuid(0x80000002, brand);
		x86_cpuid(0x80000003, brand + 4);
		x86_cpuid(0x80000004, brand + 8);
		for (i = 0; i < 48; i++) {
			if (((char *) brand)[i] != ' ')
				break;
		}
		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
	}

	if (cpuid_level >= 1) {
		/* cpuid 1: signature, misc info, feature flags. */
		x86_cpuid(1, descs);
		ci->ci_signature = descs[0];
		miscbytes = descs[1];
		ci->ci_feat_val[1] = descs[2];
		ci->ci_feat_val[0] = descs[3];

		/* Determine family + class. */
		cpu_class = CPUID2FAMILY(ci->ci_signature) + (CPUCLASS_386 - 3);
		if (cpu_class > CPUCLASS_686)
			cpu_class = CPUCLASS_686;

		/* CLFLUSH line size is next 8 bits */
		if (ci->ci_feat_val[0] & CPUID_CFLUSH)
			ci->ci_cflush_lsize = ((miscbytes >> 8) & 0xff) << 3;
		ci->ci_initapicid = (miscbytes >> 24) & 0xff;
	}

	if (cpuid_level >= 2) {
		/* Parse the cache info from `cpuid', if we have it. */
		x86_cpuid(2, descs);
		iterations = descs[0] & 0xff;
		while (iterations-- > 0) {
			for (i = 0; i < 4; i++) {
				/* Bit 31 set means the register is invalid. */
				if (descs[i] & 0x80000000)
					continue;
				for (j = 0; j < 4; j++) {
					/* Low byte of %eax is the count. */
					if (i == 0 && j == 0)
						continue;
					desc = (descs[i] >> (j * 8)) & 0xff;
					if (desc == 0)
						continue;
					cai = cache_info_lookup(
					    intel_cpuid_cache_info, desc);
					if (cai != NULL) {
						ci->ci_cinfo[cai->cai_index] =
						    *cai;
					}
				}
			}
		}
	}

	/* Vendor-specific probes; each checks the vendor itself. */
	cpu_probe_k5(ci);
	cpu_probe_k678(ci);
	cpu_probe_cyrix(ci);
	cpu_probe_winchip(ci);
	cpu_probe_c3(ci);
	cpu_probe_geode(ci);
	cpu_probe_vortex86(ci);

	x86_cpu_topology(ci);

	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feat_val[0] & CPUID_TM) &&
	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
		/* Enable thermal monitor 1. */
		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1<<3));
	}

	if (ci == &cpu_info_primary) {
		/* If first. Boot Processor is the cpu_feature reference. */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			cpu_feature[i] = ci->ci_feat_val[i];
		}
#ifndef XEN
		/* Early patch of text segment. */
		x86_patch(true);
#endif
	} else {
		/*
		 * If not first. Warn about cpu_feature mismatch for
		 * secondary CPUs.
		 */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			if (cpu_feature[i] != ci->ci_feat_val[i])
				aprint_error_dev(ci->ci_dev,
				    "feature mismatch: cpu_feature[%d] is "
				    "%#x, but CPU reported %#x\n",
				    i, cpu_feature[i], ci->ci_feat_val[i]);
		}
	}
}
735 
/*
 * Report the identified CPU on the console and perform late,
 * identification-dependent setup: SVM/BIOS check, longrun and
 * FXSAVE/SSE setup (i386), and registration of the optional
 * frequency/temperature drivers selected by kernel options.
 */
void
cpu_identify(struct cpu_info *ci)
{

	snprintf(cpu_model, sizeof(cpu_model), "%s %d86-class",
	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
	if (cpu_brand_string[0] != '\0') {
		aprint_normal(": %s", cpu_brand_string);
	} else {
		aprint_normal(": %s", cpu_model);
		if (ci->ci_data.cpu_cc_freq != 0)
			aprint_normal(", %dMHz",
			    (int)(ci->ci_data.cpu_cc_freq / 1000000));
	}
	if (ci->ci_signature != 0)
		aprint_normal(", id 0x%x", ci->ci_signature);
	aprint_normal("\n");

	/* Fall back to the synthesized model name as the brand string. */
	if (cpu_brand_string[0] == '\0') {
		strlcpy(cpu_brand_string, cpu_model, sizeof(cpu_brand_string));
	}
	if (cpu_class == CPUCLASS_386) {
		panic("NetBSD requires an 80486DX or later processor");
	}
	if (cpu == CPU_486DLC) {
		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
	}

	if ((cpu_vendor == CPUVENDOR_AMD) /* check enablement of an */
	  && (device_unit(ci->ci_dev) == 0) /* AMD feature only once */
	  && ((cpu_feature[3] & CPUID_SVM) == CPUID_SVM)
#if defined(XEN) && !defined(DOM0OPS)
	  && (false)  /* on Xen rdmsr is for Dom0 only */
#endif
	  )
	{
		uint64_t val;

		/* Warn if the BIOS disabled and locked out SVM. */
		val = rdmsr(MSR_VMCR);
		if (((val & VMCR_SVMED) == VMCR_SVMED)
		  && ((val & VMCR_LOCK) == VMCR_LOCK))
		{
			aprint_normal_dev(ci->ci_dev,
				"SVM disabled by the BIOS\n");
		}
	}

#ifdef i386 /* XXX for now */
	/* Transmeta LongRun support, if cpuid 0x80860007 exists. */
	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
		u_int descs[4];
		x86_cpuid(0x80860000, descs);
		if (descs[0] >= 0x80860007)
			tmx86_init_longrun();
	}

	/* If we have FXSAVE/FXRESTOR, use them. */
	if (cpu_feature[0] & CPUID_FXSR) {
		i386_use_fxsave = 1;
		/*
		 * If we have SSE/SSE2, enable XMM exceptions, and
		 * notify userland.
		 */
		if (cpu_feature[0] & CPUID_SSE)
			i386_has_sse = 1;
		if (cpu_feature[0] & CPUID_SSE2)
			i386_has_sse2 = 1;
	} else
		i386_use_fxsave = 0;
#endif	/* i386 */

#ifdef ENHANCED_SPEEDSTEP
	/* Only if the BIOS enabled EST (MSR_MISC_ENABLE bit 16). */
	if (cpu_feature[1] & CPUID2_EST) {
		if (rdmsr(MSR_MISC_ENABLE) & (1 << 16))
			est_init(cpu_vendor);
	}
#endif /* ENHANCED_SPEEDSTEP */

#ifdef INTEL_CORETEMP
	if (cpu_vendor == CPUVENDOR_INTEL && cpuid_level >= 0x06)
		coretemp_register(ci);
#endif

#ifdef VIA_C7TEMP
	/* VIA C7 and later (family 6, model >= 9) with a temp sensor. */
	if (cpu_vendor == CPUVENDOR_IDT &&
	    CPUID2FAMILY(ci->ci_signature) == 6 &&
	    CPUID2MODEL(ci->ci_signature) >= 0x9) {
		uint32_t descs[4];

		x86_cpuid(0xc0000000, descs);
		if (descs[0] >= 0xc0000002)	/* has temp sensor */
			viac7temp_register(ci);
	}
#endif

#if defined(POWERNOW_K7) || defined(POWERNOW_K8)
	if (cpu_vendor == CPUVENDOR_AMD && powernow_probe(ci)) {
		switch (CPUID2FAMILY(ci->ci_signature)) {
#ifdef POWERNOW_K7
		case 6:
			k7_powernow_init();
			break;
#endif
#ifdef POWERNOW_K8
		case 15:
			k8_powernow_init();
			break;
#endif
		default:
			break;
		}
	}
#endif /* POWERNOW_K7 || POWERNOW_K8 */

#ifdef INTEL_ONDEMAND_CLOCKMOD
	if (cpuid_level >= 1) {
		clockmod_init();
	}
#endif
}
855