xref: /netbsd-src/sys/arch/x86/x86/identcpu.c (revision 213144e1de7024d4193d04aa51005ba3a5ad95e7)
1 /*	$NetBSD: identcpu.c,v 1.24 2011/02/20 13:42:46 jruoho Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Frank van der Linden,  and by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.24 2011/02/20 13:42:46 jruoho Exp $");
34 
35 #include "opt_enhanced_speedstep.h"
36 #include "opt_intel_odcm.h"
37 #include "opt_via_c7temp.h"
38 #include "opt_powernow_k8.h"
39 #include "opt_xen.h"
40 #ifdef i386	/* XXX */
41 #include "opt_powernow_k7.h"
42 #endif
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/device.h>
47 
48 #include <uvm/uvm_extern.h>
49 
50 #include <machine/specialreg.h>
51 #include <machine/pio.h>
52 #include <machine/cpu.h>
53 
54 #include <x86/cputypes.h>
55 #include <x86/cacheinfo.h>
56 #include <x86/cpuvar.h>
57 #include <x86/cpu_msr.h>
58 #include <x86/powernow.h>
59 
/* Intel cpuid leaf-2 cache/TLB descriptor decode table (x86/cacheinfo.h). */
static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;

/* AMD L2 associativity decode table for cpuid 0x80000006 %ecx encoding. */
static const struct x86_cache_info amd_cpuid_l2cache_assoc_info[] =
	AMD_L2CACHE_INFO;

/* AMD L3 associativity decode table for cpuid 0x80000006 %edx encoding. */
static const struct x86_cache_info amd_cpuid_l3cache_assoc_info[] =
	AMD_L3CACHE_INFO;

int cpu_vendor;			/* CPUVENDOR_* of the boot processor */
char cpu_brand_string[49];	/* 48-byte cpuid brand string + NUL */

/*
 * Info for CTL_HW
 */
char	cpu_model[120];

/*
 * Note: these are just the ones that may not have a cpuid instruction.
 * We deal with the rest in a different way.
 * Layout: pairs of { vendor, class }, indexed by (cpu << 1).
 */
const int i386_nocpuid_cpus[] = {
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386SX */
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386   */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486SX */
	CPUVENDOR_INTEL, CPUCLASS_486, 	/* CPU_486   */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_486DLC */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_6x86 */
	CPUVENDOR_NEXGEN, CPUCLASS_386,	/* CPU_NX586 */
};

/* Printable names indexed by CPUVENDOR_* (see cpu_identify()). */
static const char cpu_vendor_names[][10] = {
	"Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta",
	"Vortex86"
};
94 
95 static const struct x86_cache_info *
96 cache_info_lookup(const struct x86_cache_info *cai, uint8_t desc)
97 {
98 	int i;
99 
100 	for (i = 0; cai[i].cai_desc != 0; i++) {
101 		if (cai[i].cai_desc == desc)
102 			return (&cai[i]);
103 	}
104 
105 	return (NULL);
106 }
107 
108 
109 static void
110 cpu_probe_amd_cache(struct cpu_info *ci)
111 {
112 	const struct x86_cache_info *cp;
113 	struct x86_cache_info *cai;
114 	int family, model;
115 	u_int descs[4];
116 	u_int lfunc;
117 
118 	family = CPUID2FAMILY(ci->ci_signature);
119 	model = CPUID2MODEL(ci->ci_signature);
120 
121 	/*
122 	 * K5 model 0 has none of this info.
123 	 */
124 	if (family == 5 && model == 0)
125 		return;
126 
127 	/*
128 	 * Get extended values for K8 and up.
129 	 */
130 	if (family == 0xf) {
131 		family += CPUID2EXTFAMILY(ci->ci_signature);
132 		model += CPUID2EXTMODEL(ci->ci_signature);
133 	}
134 
135 	/*
136 	 * Determine the largest extended function value.
137 	 */
138 	x86_cpuid(0x80000000, descs);
139 	lfunc = descs[0];
140 
141 	/*
142 	 * Determine L1 cache/TLB info.
143 	 */
144 	if (lfunc < 0x80000005) {
145 		/* No L1 cache info available. */
146 		return;
147 	}
148 
149 	x86_cpuid(0x80000005, descs);
150 
151 	/*
152 	 * K6-III and higher have large page TLBs.
153 	 */
154 	if ((family == 5 && model >= 9) || family >= 6) {
155 		cai = &ci->ci_cinfo[CAI_ITLB2];
156 		cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
157 		cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
158 		cai->cai_linesize = (4 * 1024 * 1024);
159 
160 		cai = &ci->ci_cinfo[CAI_DTLB2];
161 		cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
162 		cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
163 		cai->cai_linesize = (4 * 1024 * 1024);
164 	}
165 
166 	cai = &ci->ci_cinfo[CAI_ITLB];
167 	cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
168 	cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
169 	cai->cai_linesize = (4 * 1024);
170 
171 	cai = &ci->ci_cinfo[CAI_DTLB];
172 	cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
173 	cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
174 	cai->cai_linesize = (4 * 1024);
175 
176 	cai = &ci->ci_cinfo[CAI_DCACHE];
177 	cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
178 	cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
179 	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[2]);
180 
181 	cai = &ci->ci_cinfo[CAI_ICACHE];
182 	cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
183 	cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
184 	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);
185 
186 	/*
187 	 * Determine L2 cache/TLB info.
188 	 */
189 	if (lfunc < 0x80000006) {
190 		/* No L2 cache info available. */
191 		return;
192 	}
193 
194 	x86_cpuid(0x80000006, descs);
195 
196 	cai = &ci->ci_cinfo[CAI_L2CACHE];
197 	cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
198 	cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
199 	cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);
200 
201 	cp = cache_info_lookup(amd_cpuid_l2cache_assoc_info,
202 	    cai->cai_associativity);
203 	if (cp != NULL)
204 		cai->cai_associativity = cp->cai_associativity;
205 	else
206 		cai->cai_associativity = 0;	/* XXX Unknown/reserved */
207 
208 	if (family < 0xf) {
209 		/* No L3 cache info available. */
210 		return;
211 	}
212 
213 	cai = &ci->ci_cinfo[CAI_L3CACHE];
214 	cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
215 	cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
216 	cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);
217 
218 	cp = cache_info_lookup(amd_cpuid_l3cache_assoc_info,
219 	    cai->cai_associativity);
220 	if (cp != NULL)
221 		cai->cai_associativity = cp->cai_associativity;
222 	else
223 		cai->cai_associativity = 0;	/* XXX Unknown reserved */
224 
225 	if (lfunc < 0x80000019) {
226 		/* No 1GB Page TLB */
227 		return;
228 	}
229 
230 	x86_cpuid(0x80000019, descs);
231 
232 	cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
233 	cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]);
234 	cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]);
235 	cai->cai_linesize = (1 * 1024);
236 
237 	cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
238 	cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
239 	cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
240 	cai->cai_linesize = (1 * 1024);
241 
242 	cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
243 	cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
244 	cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
245 	cai->cai_linesize = (1 * 1024);
246 
247 	cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
248 	cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]);
249 	cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]);
250 	cai->cai_linesize = (1 * 1024);
251 }
252 
253 static void
254 cpu_probe_k5(struct cpu_info *ci)
255 {
256 	int flag;
257 
258 	if (cpu_vendor != CPUVENDOR_AMD ||
259 	    CPUID2FAMILY(ci->ci_signature) != 5)
260 		return;
261 
262 	if (CPUID2MODEL(ci->ci_signature) == 0) {
263 		/*
264 		 * According to the AMD Processor Recognition App Note,
265 		 * the AMD-K5 Model 0 uses the wrong bit to indicate
266 		 * support for global PTEs, instead using bit 9 (APIC)
267 		 * rather than bit 13 (i.e. "0x200" vs. 0x2000".  Oops!).
268 		 */
269 		flag = ci->ci_feat_val[0];
270 		if ((flag & CPUID_APIC) != 0)
271 			flag = (flag & ~CPUID_APIC) | CPUID_PGE;
272 		ci->ci_feat_val[0] = flag;
273 	}
274 
275 	cpu_probe_amd_cache(ci);
276 }
277 
278 static void
279 cpu_probe_k678(struct cpu_info *ci)
280 {
281 	uint32_t descs[4];
282 
283 	if (cpu_vendor != CPUVENDOR_AMD ||
284 	    CPUID2FAMILY(ci->ci_signature) < 6)
285 		return;
286 
287 	/* Determine the extended feature flags. */
288 	x86_cpuid(0x80000000, descs);
289 	if (descs[0] >= 0x80000001) {
290 		x86_cpuid(0x80000001, descs);
291 		ci->ci_feat_val[3] = descs[2]; /* %ecx */
292 		ci->ci_feat_val[2] = descs[3]; /* %edx */
293 	}
294 
295 	cpu_probe_amd_cache(ci);
296 }
297 
/*
 * Read a Cyrix configuration register: write the register index to
 * the index port (0x22), then read the value from the data port (0x23).
 */
static inline uint8_t
cyrix_read_reg(uint8_t reg)
{

	outb(0x22, reg);
	return inb(0x23);
}
305 
/*
 * Write a Cyrix configuration register: write the register index to
 * the index port (0x22), then the value to the data port (0x23).
 */
static inline void
cyrix_write_reg(uint8_t reg, uint8_t data)
{

	outb(0x22, reg);
	outb(0x23, data);
}
313 
/*
 * Common Cyrix/NSC setup, shared between the Cyrix 6x86 family and
 * the NSC Geode: flag the broken i8254 latch, enable suspend-on-halt
 * (disabling the TSC where it would stop counting) and apply the
 * "coma bug" workaround via the Cyrix configuration registers.
 */
static void
cpu_probe_cyrix_cmn(struct cpu_info *ci)
{
	/*
	 * i8254 latch check routine:
	 *     National Geode (formerly Cyrix MediaGX) has a serious bug in
	 *     its built-in i8254-compatible clock module (cs5510 cs5520).
	 *     Set the variable 'clock_broken_latch' to indicate it.
	 *
	 * This bug is not present in the cs5530, and the flag
	 * is disabled again in sys/arch/i386/pci/pcib.c if this later
	 * model device is detected. Ideally, this work-around should not
	 * even be in here, it should be in there. XXX
	 */
	uint8_t c3;
#ifndef XEN
	extern int clock_broken_latch;

	switch (ci->ci_signature) {
	case 0x440:     /* Cyrix MediaGX */
	case 0x540:     /* GXm */
		clock_broken_latch = 1;
		break;
	}
#endif

	/* set up various cyrix registers */
	/*
	 * Enable suspend on halt (powersave mode).
	 * When powersave mode is enabled, the TSC stops counting
	 * while the CPU is halted in idle() waiting for an interrupt.
	 * This means we can't use the TSC for interval time in
	 * microtime(9), and thus it is disabled here.
	 *
	 * It still makes a perfectly good cycle counter
	 * for program profiling, so long as you remember you're
	 * counting cycles, and not time. Further, if you don't
	 * mind not using powersave mode, the TSC works just fine,
	 * so this should really be optional. XXX
	 */
	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);

	/*
	 * Do not disable the TSC on the Geode GX, it's reported to
	 * work fine.
	 */
	if (ci->ci_signature != 0x552)
		ci->ci_feat_val[0] &= ~CPUID_TSC;

	/* enable access to ccr4/ccr5 (MAPEN bit in CCR3) */
	c3 = cyrix_read_reg(0xC3);
	cyrix_write_reg(0xC3, c3 | 0x10);
	/* cyrix's workaround for the "coma bug" */
	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
	/* disable access to ccr4/ccr5 (restore the saved CCR3) */
	cyrix_write_reg(0xC3, c3);
}
374 
375 static void
376 cpu_probe_cyrix(struct cpu_info *ci)
377 {
378 
379 	if (cpu_vendor != CPUVENDOR_CYRIX ||
380 	    CPUID2FAMILY(ci->ci_signature) < 4 ||
381 	    CPUID2FAMILY(ci->ci_signature) > 6)
382 		return;
383 
384 	cpu_probe_cyrix_cmn(ci);
385 }
386 
387 static void
388 cpu_probe_winchip(struct cpu_info *ci)
389 {
390 
391 	if (cpu_vendor != CPUVENDOR_IDT ||
392 	    CPUID2FAMILY(ci->ci_signature) != 5)
393 	    	return;
394 
395 	if (CPUID2MODEL(ci->ci_signature) == 4) {
396 		/* WinChip C6 */
397 		ci->ci_feat_val[0] &= ~CPUID_TSC;
398 	}
399 }
400 
401 static void
402 cpu_probe_c3(struct cpu_info *ci)
403 {
404 	u_int family, model, stepping, descs[4], lfunc, msr;
405 	struct x86_cache_info *cai;
406 
407 	if (cpu_vendor != CPUVENDOR_IDT ||
408 	    CPUID2FAMILY(ci->ci_signature) < 6)
409 	    	return;
410 
411 	family = CPUID2FAMILY(ci->ci_signature);
412 	model = CPUID2MODEL(ci->ci_signature);
413 	stepping = CPUID2STEPPING(ci->ci_signature);
414 
415 	/* Determine the largest extended function value. */
416 	x86_cpuid(0x80000000, descs);
417 	lfunc = descs[0];
418 
419 	/* Determine the extended feature flags. */
420 	if (lfunc >= 0x80000001) {
421 		x86_cpuid(0x80000001, descs);
422 		ci->ci_feat_val[2] = descs[3];
423 	}
424 
425 	if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) {
426 		/* Nehemiah or Esther */
427 		x86_cpuid(0xc0000000, descs);
428 		lfunc = descs[0];
429 		if (lfunc >= 0xc0000001) {	/* has ACE, RNG */
430 		    int rng_enable = 0, ace_enable = 0;
431 		    x86_cpuid(0xc0000001, descs);
432 		    lfunc = descs[3];
433 		    ci->ci_feat_val[4] = lfunc;
434 		    /* Check for and enable RNG */
435 		    if (lfunc & CPUID_VIA_HAS_RNG) {
436 		    	if (!(lfunc & CPUID_VIA_DO_RNG)) {
437 			    rng_enable++;
438 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_RNG;
439 			}
440 		    }
441 		    /* Check for and enable ACE (AES-CBC) */
442 		    if (lfunc & CPUID_VIA_HAS_ACE) {
443 			if (!(lfunc & CPUID_VIA_DO_ACE)) {
444 			    ace_enable++;
445 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE;
446 			}
447 		    }
448 		    /* Check for and enable SHA */
449 		    if (lfunc & CPUID_VIA_HAS_PHE) {
450 			if (!(lfunc & CPUID_VIA_DO_PHE)) {
451 			    ace_enable++;
452 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PHE;
453 			}
454 		    }
455 		    /* Check for and enable ACE2 (AES-CTR) */
456 		    if (lfunc & CPUID_VIA_HAS_ACE2) {
457 			if (!(lfunc & CPUID_VIA_DO_ACE2)) {
458 			    ace_enable++;
459 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE2;
460 			}
461 		    }
462 		    /* Check for and enable PMM (modmult engine) */
463 		    if (lfunc & CPUID_VIA_HAS_PMM) {
464 			if (!(lfunc & CPUID_VIA_DO_PMM)) {
465 			    ace_enable++;
466 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PMM;
467 			}
468 		    }
469 
470 		    /* Actually do the enables. */
471 		    if (rng_enable) {
472 			msr = rdmsr(MSR_VIA_RNG);
473 			wrmsr(MSR_VIA_RNG, msr | MSR_VIA_RNG_ENABLE);
474 		    }
475 		    if (ace_enable) {
476 			msr = rdmsr(MSR_VIA_ACE);
477 			wrmsr(MSR_VIA_ACE, msr | MSR_VIA_ACE_ENABLE);
478 		    }
479 
480 		}
481 	}
482 
483 	/*
484 	 * Determine L1 cache/TLB info.
485 	 */
486 	if (lfunc < 0x80000005) {
487 		/* No L1 cache info available. */
488 		return;
489 	}
490 
491 	x86_cpuid(0x80000005, descs);
492 
493 	cai = &ci->ci_cinfo[CAI_ITLB];
494 	cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
495 	cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
496 	cai->cai_linesize = (4 * 1024);
497 
498 	cai = &ci->ci_cinfo[CAI_DTLB];
499 	cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
500 	cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
501 	cai->cai_linesize = (4 * 1024);
502 
503 	cai = &ci->ci_cinfo[CAI_DCACHE];
504 	cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
505 	cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
506 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]);
507 	if (family == 6 && model == 9 && stepping == 8) {
508 		/* Erratum: stepping 8 reports 4 when it should be 2 */
509 		cai->cai_associativity = 2;
510 	}
511 
512 	cai = &ci->ci_cinfo[CAI_ICACHE];
513 	cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
514 	cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
515 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
516 	if (family == 6 && model == 9 && stepping == 8) {
517 		/* Erratum: stepping 8 reports 4 when it should be 2 */
518 		cai->cai_associativity = 2;
519 	}
520 
521 	/*
522 	 * Determine L2 cache/TLB info.
523 	 */
524 	if (lfunc < 0x80000006) {
525 		/* No L2 cache info available. */
526 		return;
527 	}
528 
529 	x86_cpuid(0x80000006, descs);
530 
531 	cai = &ci->ci_cinfo[CAI_L2CACHE];
532 	if (family > 6 || model >= 9) {
533 		cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
534 		cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
535 		cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
536 	} else {
537 		cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
538 		cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
539 		cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
540 	}
541 }
542 
543 static void
544 cpu_probe_geode(struct cpu_info *ci)
545 {
546 
547 	if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
548 	    CPUID2FAMILY(ci->ci_signature) != 5)
549 	    	return;
550 
551 	cpu_probe_cyrix_cmn(ci);
552 	cpu_probe_amd_cache(ci);
553 }
554 
555 static void
556 cpu_probe_vortex86(struct cpu_info *ci)
557 {
558 #define PCI_MODE1_ADDRESS_REG	0x0cf8
559 #define PCI_MODE1_DATA_REG	0x0cfc
560 #define PCI_MODE1_ENABLE	0x80000000UL
561 
562 	uint32_t reg;
563 
564 	if (cpu_vendor != CPUVENDOR_VORTEX86)
565 		return;
566 	/*
567 	 * CPU model available from "Customer ID register" in
568 	 * North Bridge Function 0 PCI space
569 	 * we can't use pci_conf_read() because the PCI subsystem is not
570 	 * not initialised early enough
571 	 */
572 
573 	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE | 0x90);
574 	reg = inl(PCI_MODE1_DATA_REG);
575 
576 	switch(reg) {
577 	case 0x31504d44:
578 		strcpy(cpu_brand_string, "Vortex86SX");
579 		break;
580 	case 0x32504d44:
581 		strcpy(cpu_brand_string, "Vortex86DX");
582 		break;
583 	case 0x33504d44:
584 		strcpy(cpu_brand_string, "Vortex86MX");
585 		break;
586 	default:
587 		strcpy(cpu_brand_string, "Unknown Vortex86");
588 		break;
589 	}
590 
591 #undef PCI_MODE1_ENABLE
592 #undef PCI_MODE1_ADDRESS_REG
593 #undef PCI_MODE1_DATA_REG
594 }
595 
/*
 * Identify the running CPU via cpuid and run the vendor-specific
 * probe routines.  Fills in the global cpu_vendor/cpu_class/
 * cpu_brand_string and the per-CPU feature and cache info in ci.
 * Called once per CPU; the boot processor additionally seeds the
 * global cpu_feature[] array and patches the kernel text.
 *
 * NOTE(review): 'cpu' and 'cpuid_level' are globals presumably set
 * by early startup code (locore) before this runs — not visible here.
 */
void
cpu_probe(struct cpu_info *ci)
{
	const struct x86_cache_info *cai;
	u_int descs[4];
	int iterations, i, j;
	uint8_t desc;
	uint32_t miscbytes;
	uint32_t brand[12];

	/* Default vendor/class from the no-cpuid table, indexed by 'cpu'. */
	cpu_vendor = i386_nocpuid_cpus[cpu << 1];
	cpu_class = i386_nocpuid_cpus[(cpu << 1) + 1];

	/* cpuid_level < 0 means the cpuid instruction is not available. */
	if (cpuid_level < 0)
		return;

	for (i = 0; i < __arraycount(ci->ci_feat_val); i++) {
		ci->ci_feat_val[i] = 0;
	}

	/*
	 * Leaf 0: maximum basic leaf and the 12-byte vendor string,
	 * which comes back in %ebx, %edx, %ecx order (hence [0]=ebx,
	 * [1]=edx, [2]=ecx below).
	 */
	x86_cpuid(0, descs);
	cpuid_level = descs[0];
	ci->ci_vendor[0] = descs[1];
	ci->ci_vendor[2] = descs[2];
	ci->ci_vendor[1] = descs[3];
	ci->ci_vendor[3] = 0;

	if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
		cpu_vendor = CPUVENDOR_INTEL;
	else if (memcmp(ci->ci_vendor,  "AuthenticAMD", 12) == 0)
		cpu_vendor = CPUVENDOR_AMD;
	else if (memcmp(ci->ci_vendor,  "CyrixInstead", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor,  "Geode by NSC", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
		cpu_vendor = CPUVENDOR_IDT;
	else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
		cpu_vendor = CPUVENDOR_TRANSMETA;
	else if (memcmp(ci->ci_vendor, "Vortex86 SoC", 12) == 0)
		cpu_vendor = CPUVENDOR_VORTEX86;
	else
		cpu_vendor = CPUVENDOR_UNKNOWN;

	/*
	 * Leaves 0x80000002-4: the 48-byte brand string.  Skip leading
	 * spaces (some CPUs right-justify the string); the global
	 * cpu_brand_string is zeroed, so the copy stays NUL-terminated.
	 */
	x86_cpuid(0x80000000, brand);
	if (brand[0] >= 0x80000004) {
		x86_cpuid(0x80000002, brand);
		x86_cpuid(0x80000003, brand + 4);
		x86_cpuid(0x80000004, brand + 8);
		for (i = 0; i < 48; i++) {
			if (((char *) brand)[i] != ' ')
				break;
		}
		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
	}

	if (cpuid_level >= 1) {
		/* Leaf 1: signature, misc bytes and feature flags. */
		x86_cpuid(1, descs);
		ci->ci_signature = descs[0];
		miscbytes = descs[1];
		ci->ci_feat_val[1] = descs[2];
		ci->ci_feat_val[0] = descs[3];

		/* Determine family + class. */
		cpu_class = CPUID2FAMILY(ci->ci_signature) + (CPUCLASS_386 - 3);
		if (cpu_class > CPUCLASS_686)
			cpu_class = CPUCLASS_686;

		/* CLFLUSH line size is next 8 bits (in 8-byte units) */
		if (ci->ci_feat_val[0] & CPUID_CFLUSH)
			ci->ci_cflush_lsize = ((miscbytes >> 8) & 0xff) << 3;
		ci->ci_initapicid = (miscbytes >> 24) & 0xff;
	}

	if (cpuid_level >= 2) {
		/*
		 * Parse the cache info from `cpuid' leaf 2, if we have
		 * it.  %al gives the iteration count; each remaining
		 * byte is a cache/TLB descriptor unless the register's
		 * top bit is set (reserved).  Byte 0 of %eax is the
		 * count itself and is skipped.
		 */
		x86_cpuid(2, descs);
		iterations = descs[0] & 0xff;
		while (iterations-- > 0) {
			for (i = 0; i < 4; i++) {
				if (descs[i] & 0x80000000)
					continue;
				for (j = 0; j < 4; j++) {
					if (i == 0 && j == 0)
						continue;
					desc = (descs[i] >> (j * 8)) & 0xff;
					if (desc == 0)
						continue;
					cai = cache_info_lookup(
					    intel_cpuid_cache_info, desc);
					if (cai != NULL) {
						ci->ci_cinfo[cai->cai_index] =
						    *cai;
					}
				}
			}
		}
	}

	/* Vendor-specific fixups and cache probes (each checks vendor). */
	cpu_probe_k5(ci);
	cpu_probe_k678(ci);
	cpu_probe_cyrix(ci);
	cpu_probe_winchip(ci);
	cpu_probe_c3(ci);
	cpu_probe_geode(ci);
	cpu_probe_vortex86(ci);

	x86_cpu_topology(ci);

	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feat_val[0] & CPUID_TM) &&
	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
		/* Enable thermal monitor 1. */
		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1<<3));
	}

	if (ci == &cpu_info_primary) {
		/* If first. Boot Processor is the cpu_feature reference. */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			cpu_feature[i] = ci->ci_feat_val[i];
		}
#ifndef XEN
		/* Early patch of text segment. */
		x86_patch(true);
#endif
	} else {
		/*
		 * If not first. Warn about cpu_feature mismatch for
		 * secondary CPUs.
		 */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			if (cpu_feature[i] != ci->ci_feat_val[i])
				aprint_error_dev(ci->ci_dev,
				    "feature mismatch: cpu_feature[%d] is "
				    "%#x, but CPU reported %#x\n",
				    i, cpu_feature[i], ci->ci_feat_val[i]);
		}
	}
}
734 
/*
 * Print the CPU identification gathered by cpu_probe() and perform
 * identification-time setup: SVM-disabled warning, Transmeta LongRun,
 * FXSAVE/SSE enablement (i386), and the optional EST / C7 temperature
 * / PowerNow / clock-modulation driver hookups.
 */
void
cpu_identify(struct cpu_info *ci)
{

	snprintf(cpu_model, sizeof(cpu_model), "%s %d86-class",
	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
	/* Prefer the cpuid brand string; fall back to the synthetic model. */
	if (cpu_brand_string[0] != '\0') {
		aprint_normal(": %s", cpu_brand_string);
	} else {
		aprint_normal(": %s", cpu_model);
		if (ci->ci_data.cpu_cc_freq != 0)
			aprint_normal(", %dMHz",
			    (int)(ci->ci_data.cpu_cc_freq / 1000000));
	}
	if (ci->ci_signature != 0)
		aprint_normal(", id 0x%x", ci->ci_signature);
	aprint_normal("\n");

	if (cpu_brand_string[0] == '\0') {
		strlcpy(cpu_brand_string, cpu_model, sizeof(cpu_brand_string));
	}
	if (cpu_class == CPUCLASS_386) {
		panic("NetBSD requires an 80486DX or later processor");
	}
	if (cpu == CPU_486DLC) {
		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
	}

	/*
	 * Warn when the BIOS has disabled and locked AMD SVM.  Checked
	 * on the first CPU only; rdmsr of MSR_VMCR is skipped on Xen
	 * domU, where MSR access is Dom0-only.
	 */
	if ((cpu_vendor == CPUVENDOR_AMD) /* check enablement of an */
	  && (device_unit(ci->ci_dev) == 0) /* AMD feature only once */
	  && ((cpu_feature[3] & CPUID_SVM) == CPUID_SVM)
#if defined(XEN) && !defined(DOM0OPS)
	  && (false)  /* on Xen rdmsr is for Dom0 only */
#endif
	  )
	{
		uint64_t val;

		val = rdmsr(MSR_VMCR);
		if (((val & VMCR_SVMED) == VMCR_SVMED)
		  && ((val & VMCR_LOCK) == VMCR_LOCK))
		{
			aprint_normal_dev(ci->ci_dev,
				"SVM disabled by the BIOS\n");
		}
	}

#ifdef i386 /* XXX for now */
	/* Transmeta: initialise LongRun if leaf 0x80860007 exists. */
	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
		u_int descs[4];
		x86_cpuid(0x80860000, descs);
		if (descs[0] >= 0x80860007)
			tmx86_init_longrun();
	}

	/* If we have FXSAVE/FXRESTOR, use them. */
	if (cpu_feature[0] & CPUID_FXSR) {
		i386_use_fxsave = 1;
		/*
		 * If we have SSE/SSE2, enable XMM exceptions, and
		 * notify userland.
		 */
		if (cpu_feature[0] & CPUID_SSE)
			i386_has_sse = 1;
		if (cpu_feature[0] & CPUID_SSE2)
			i386_has_sse2 = 1;
	} else
		i386_use_fxsave = 0;
#endif	/* i386 */

#ifdef ENHANCED_SPEEDSTEP
	/* EST requires both the CPUID bit and the MISC_ENABLE bit 16. */
	if (cpu_feature[1] & CPUID2_EST) {
		if (rdmsr(MSR_MISC_ENABLE) & (1 << 16))
			est_init(cpu_vendor);
	}
#endif /* ENHANCED_SPEEDSTEP */

#ifdef VIA_C7TEMP
	/* VIA C7 and later (family 6, model >= 9) may have a temp sensor. */
	if (cpu_vendor == CPUVENDOR_IDT &&
	    CPUID2FAMILY(ci->ci_signature) == 6 &&
	    CPUID2MODEL(ci->ci_signature) >= 0x9) {
		uint32_t descs[4];

		x86_cpuid(0xc0000000, descs);
		if (descs[0] >= 0xc0000002)	/* has temp sensor */
			viac7temp_register(ci);
	}
#endif

#if defined(POWERNOW_K7) || defined(POWERNOW_K8)
	/* AMD frequency scaling: K7 is family 6, K8 is family 15. */
	if (cpu_vendor == CPUVENDOR_AMD && powernow_probe(ci)) {
		switch (CPUID2FAMILY(ci->ci_signature)) {
#ifdef POWERNOW_K7
		case 6:
			k7_powernow_init();
			break;
#endif
#ifdef POWERNOW_K8
		case 15:
			k8_powernow_init();
			break;
#endif
		default:
			break;
		}
	}
#endif /* POWERNOW_K7 || POWERNOW_K8 */

#ifdef INTEL_ONDEMAND_CLOCKMOD
	if (cpuid_level >= 1) {
		clockmod_init();
	}
#endif
}
849