xref: /netbsd-src/sys/arch/x86/x86/identcpu.c (revision ba65fde2d7fefa7d39838fa5fa855e62bd606b5e)
1 /*	$NetBSD: identcpu.c,v 1.32 2012/06/16 17:30:19 chs Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Frank van der Linden,  and by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.32 2012/06/16 17:30:19 chs Exp $");
34 
35 #include "opt_xen.h"
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/device.h>
40 
41 #include <uvm/uvm_extern.h>
42 
43 #include <machine/specialreg.h>
44 #include <machine/pio.h>
45 #include <machine/cpu.h>
46 
47 #include <x86/cputypes.h>
48 #include <x86/cacheinfo.h>
49 #include <x86/cpuvar.h>
50 #include <x86/cpu_msr.h>
51 
/* Intel CPUID leaf-2 cache descriptor table, keyed by descriptor byte. */
static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;

/* AMD L2/L3 associativity decode tables (CPUID 0x80000006 encodings). */
static const struct x86_cache_info amd_cpuid_l2cache_assoc_info[] =
	AMD_L2CACHE_INFO;

static const struct x86_cache_info amd_cpuid_l3cache_assoc_info[] =
	AMD_L3CACHE_INFO;

int cpu_vendor;			/* CPUVENDOR_* value set by cpu_probe() */
char cpu_brand_string[49];	/* 48 chars of CPUID brand string + NUL */

/*
 * Info for CTL_HW
 */
char	cpu_model[120];

/*
 * Note: these are just the ones that may not have a cpuid instruction.
 * We deal with the rest in a different way.
 * Indexed by cputype * 2: even slot is the vendor, odd slot the class.
 */
const int i386_nocpuid_cpus[] = {
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386SX */
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386   */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486SX */
	CPUVENDOR_INTEL, CPUCLASS_486, 	/* CPU_486   */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_486DLC */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_6x86 */
	CPUVENDOR_NEXGEN, CPUCLASS_386,	/* CPU_NX586 */
};

/* Printable vendor names, indexed by the CPUVENDOR_* constants. */
static const char cpu_vendor_names[][10] = {
	"Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta",
	"Vortex86"
};
86 
87 static const struct x86_cache_info *
88 cache_info_lookup(const struct x86_cache_info *cai, uint8_t desc)
89 {
90 	int i;
91 
92 	for (i = 0; cai[i].cai_desc != 0; i++) {
93 		if (cai[i].cai_desc == desc)
94 			return (&cai[i]);
95 	}
96 
97 	return (NULL);
98 }
99 
100 
101 static void
102 cpu_probe_amd_cache(struct cpu_info *ci)
103 {
104 	const struct x86_cache_info *cp;
105 	struct x86_cache_info *cai;
106 	int family, model;
107 	u_int descs[4];
108 	u_int lfunc;
109 
110 	family = CPUID2FAMILY(ci->ci_signature);
111 	model = CPUID2MODEL(ci->ci_signature);
112 
113 	/*
114 	 * K5 model 0 has none of this info.
115 	 */
116 	if (family == 5 && model == 0)
117 		return;
118 
119 	/*
120 	 * Get extended values for K8 and up.
121 	 */
122 	if (family == 0xf) {
123 		family += CPUID2EXTFAMILY(ci->ci_signature);
124 		model += CPUID2EXTMODEL(ci->ci_signature);
125 	}
126 
127 	/*
128 	 * Determine the largest extended function value.
129 	 */
130 	x86_cpuid(0x80000000, descs);
131 	lfunc = descs[0];
132 
133 	/*
134 	 * Determine L1 cache/TLB info.
135 	 */
136 	if (lfunc < 0x80000005) {
137 		/* No L1 cache info available. */
138 		return;
139 	}
140 
141 	x86_cpuid(0x80000005, descs);
142 
143 	/*
144 	 * K6-III and higher have large page TLBs.
145 	 */
146 	if ((family == 5 && model >= 9) || family >= 6) {
147 		cai = &ci->ci_cinfo[CAI_ITLB2];
148 		cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
149 		cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
150 		cai->cai_linesize = (4 * 1024 * 1024);
151 
152 		cai = &ci->ci_cinfo[CAI_DTLB2];
153 		cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
154 		cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
155 		cai->cai_linesize = (4 * 1024 * 1024);
156 	}
157 
158 	cai = &ci->ci_cinfo[CAI_ITLB];
159 	cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
160 	cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
161 	cai->cai_linesize = (4 * 1024);
162 
163 	cai = &ci->ci_cinfo[CAI_DTLB];
164 	cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
165 	cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
166 	cai->cai_linesize = (4 * 1024);
167 
168 	cai = &ci->ci_cinfo[CAI_DCACHE];
169 	cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
170 	cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
171 	cai->cai_linesize = AMD_L1_ECX_DC_LS(descs[2]);
172 
173 	cai = &ci->ci_cinfo[CAI_ICACHE];
174 	cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
175 	cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
176 	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);
177 
178 	/*
179 	 * Determine L2 cache/TLB info.
180 	 */
181 	if (lfunc < 0x80000006) {
182 		/* No L2 cache info available. */
183 		return;
184 	}
185 
186 	x86_cpuid(0x80000006, descs);
187 
188 	cai = &ci->ci_cinfo[CAI_L2CACHE];
189 	cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
190 	cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
191 	cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);
192 
193 	cp = cache_info_lookup(amd_cpuid_l2cache_assoc_info,
194 	    cai->cai_associativity);
195 	if (cp != NULL)
196 		cai->cai_associativity = cp->cai_associativity;
197 	else
198 		cai->cai_associativity = 0;	/* XXX Unknown/reserved */
199 
200 	if (family < 0xf) {
201 		/* No L3 cache info available. */
202 		return;
203 	}
204 
205 	cai = &ci->ci_cinfo[CAI_L3CACHE];
206 	cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
207 	cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
208 	cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);
209 
210 	cp = cache_info_lookup(amd_cpuid_l3cache_assoc_info,
211 	    cai->cai_associativity);
212 	if (cp != NULL)
213 		cai->cai_associativity = cp->cai_associativity;
214 	else
215 		cai->cai_associativity = 0;	/* XXX Unknown reserved */
216 
217 	if (lfunc < 0x80000019) {
218 		/* No 1GB Page TLB */
219 		return;
220 	}
221 
222 	x86_cpuid(0x80000019, descs);
223 
224 	cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
225 	cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]);
226 	cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]);
227 	cai->cai_linesize = (1 * 1024);
228 
229 	cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
230 	cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
231 	cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
232 	cai->cai_linesize = (1 * 1024);
233 
234 	cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
235 	cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
236 	cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
237 	cai->cai_linesize = (1 * 1024);
238 
239 	cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
240 	cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]);
241 	cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]);
242 	cai->cai_linesize = (1 * 1024);
243 }
244 
245 static void
246 cpu_probe_k5(struct cpu_info *ci)
247 {
248 	int flag;
249 
250 	if (cpu_vendor != CPUVENDOR_AMD ||
251 	    CPUID2FAMILY(ci->ci_signature) != 5)
252 		return;
253 
254 	if (CPUID2MODEL(ci->ci_signature) == 0) {
255 		/*
256 		 * According to the AMD Processor Recognition App Note,
257 		 * the AMD-K5 Model 0 uses the wrong bit to indicate
258 		 * support for global PTEs, instead using bit 9 (APIC)
259 		 * rather than bit 13 (i.e. "0x200" vs. 0x2000".  Oops!).
260 		 */
261 		flag = ci->ci_feat_val[0];
262 		if ((flag & CPUID_APIC) != 0)
263 			flag = (flag & ~CPUID_APIC) | CPUID_PGE;
264 		ci->ci_feat_val[0] = flag;
265 	}
266 
267 	cpu_probe_amd_cache(ci);
268 }
269 
270 static void
271 cpu_probe_k678(struct cpu_info *ci)
272 {
273 	uint32_t descs[4];
274 
275 	if (cpu_vendor != CPUVENDOR_AMD ||
276 	    CPUID2FAMILY(ci->ci_signature) < 6)
277 		return;
278 
279 	/* Determine the extended feature flags. */
280 	x86_cpuid(0x80000000, descs);
281 	if (descs[0] >= 0x80000001) {
282 		x86_cpuid(0x80000001, descs);
283 		ci->ci_feat_val[3] = descs[2]; /* %ecx */
284 		ci->ci_feat_val[2] = descs[3]; /* %edx */
285 	}
286 
287 	cpu_probe_amd_cache(ci);
288 }
289 
/*
 * Read a Cyrix configuration register: select it via port 0x22, then
 * read the value from port 0x23.  The two-step sequence must not be
 * interleaved with other 0x22/0x23 accesses.
 */
static inline uint8_t
cyrix_read_reg(uint8_t reg)
{

	outb(0x22, reg);
	return inb(0x23);
}
297 
/*
 * Write a Cyrix configuration register: select it via port 0x22, then
 * write the value to port 0x23 (companion to cyrix_read_reg()).
 */
static inline void
cyrix_write_reg(uint8_t reg, uint8_t data)
{

	outb(0x22, reg);
	outb(0x23, data);
}
305 
/*
 * Common Cyrix/National Geode setup: flag the broken i8254 latch,
 * enable suspend-on-halt, disable the (non-constant) TSC, and apply
 * the "coma bug" register workaround.  Shared by cpu_probe_cyrix()
 * and cpu_probe_geode().
 */
static void
cpu_probe_cyrix_cmn(struct cpu_info *ci)
{
	/*
	 * i8254 latch check routine:
	 *     National Geode (formerly Cyrix MediaGX) has a serious bug in
	 *     its built-in i8254-compatible clock module (cs5510 cs5520).
	 *     Set the variable 'clock_broken_latch' to indicate it.
	 *
	 * This bug is not present in the cs5530, and the flag
	 * is disabled again in sys/arch/i386/pci/pcib.c if this later
	 * model device is detected. Ideally, this work-around should not
	 * even be in here, it should be in there. XXX
	 */
	uint8_t c3;
#ifndef XEN
	extern int clock_broken_latch;

	switch (ci->ci_signature) {
	case 0x440:     /* Cyrix MediaGX */
	case 0x540:     /* GXm */
		clock_broken_latch = 1;
		break;
	}
#endif

	/* set up various cyrix registers */
	/*
	 * Enable suspend on halt (powersave mode).
	 * When powersave mode is enabled, the TSC stops counting
	 * while the CPU is halted in idle() waiting for an interrupt.
	 * This means we can't use the TSC for interval time in
	 * microtime(9), and thus it is disabled here.
	 *
	 * It still makes a perfectly good cycle counter
	 * for program profiling, so long as you remember you're
	 * counting cycles, and not time. Further, if you don't
	 * mind not using powersave mode, the TSC works just fine,
	 * so this should really be optional. XXX
	 */
	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);

	/*
	 * Do not disable the TSC on the Geode GX, it's reported to
	 * work fine.
	 */
	if (ci->ci_signature != 0x552)
		ci->ci_feat_val[0] &= ~CPUID_TSC;

	/* enable access to ccr4/ccr5 (c3 preserves the original value) */
	c3 = cyrix_read_reg(0xC3);
	cyrix_write_reg(0xC3, c3 | 0x10);
	/* cyrix's workaround  for the "coma bug" */
	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
	/* disable access to ccr4/ccr5 (restore saved value) */
	cyrix_write_reg(0xC3, c3);
}
366 
367 static void
368 cpu_probe_cyrix(struct cpu_info *ci)
369 {
370 
371 	if (cpu_vendor != CPUVENDOR_CYRIX ||
372 	    CPUID2FAMILY(ci->ci_signature) < 4 ||
373 	    CPUID2FAMILY(ci->ci_signature) > 6)
374 		return;
375 
376 	cpu_probe_cyrix_cmn(ci);
377 }
378 
379 static void
380 cpu_probe_winchip(struct cpu_info *ci)
381 {
382 
383 	if (cpu_vendor != CPUVENDOR_IDT)
384 	    	return;
385 
386 	switch (CPUID2FAMILY(ci->ci_signature)) {
387 	case 5:
388 		/* WinChip C6 */
389 		if (CPUID2MODEL(ci->ci_signature) == 4)
390 			ci->ci_feat_val[0] &= ~CPUID_TSC;
391 		break;
392 	case 6:
393 		/*
394 		 * VIA Eden ESP
395 		 *
396 		 * Quoting from page 3-4 of: "VIA Eden ESP Processor Datasheet"
397 		 * http://www.via.com.tw/download/mainboards/6/14/Eden20v115.pdf
398 		 *
399 		 * 1. The CMPXCHG8B instruction is provided and always enabled,
400 		 *    however, it appears disabled in the corresponding CPUID
401 		 *    function bit 0 to avoid a bug in an early version of
402 		 *    Windows NT. However, this default can be changed via a
403 		 *    bit in the FCR MSR.
404 		 */
405 		ci->ci_feat_val[0] |= CPUID_CX8;
406 		wrmsr(MSR_VIA_FCR, rdmsr(MSR_VIA_FCR) | 0x00000001);
407 		break;
408 	}
409 }
410 
411 static void
412 cpu_probe_c3(struct cpu_info *ci)
413 {
414 	u_int family, model, stepping, descs[4], lfunc, msr;
415 	struct x86_cache_info *cai;
416 
417 	if (cpu_vendor != CPUVENDOR_IDT ||
418 	    CPUID2FAMILY(ci->ci_signature) < 6)
419 	    	return;
420 
421 	family = CPUID2FAMILY(ci->ci_signature);
422 	model = CPUID2MODEL(ci->ci_signature);
423 	stepping = CPUID2STEPPING(ci->ci_signature);
424 
425 	/* Determine the largest extended function value. */
426 	x86_cpuid(0x80000000, descs);
427 	lfunc = descs[0];
428 
429 	/* Determine the extended feature flags. */
430 	if (lfunc >= 0x80000001) {
431 		x86_cpuid(0x80000001, descs);
432 		ci->ci_feat_val[2] = descs[3];
433 	}
434 
435 	if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) {
436 		/* Nehemiah or Esther */
437 		x86_cpuid(0xc0000000, descs);
438 		lfunc = descs[0];
439 		if (lfunc >= 0xc0000001) {	/* has ACE, RNG */
440 		    int rng_enable = 0, ace_enable = 0;
441 		    x86_cpuid(0xc0000001, descs);
442 		    lfunc = descs[3];
443 		    ci->ci_feat_val[4] = lfunc;
444 		    /* Check for and enable RNG */
445 		    if (lfunc & CPUID_VIA_HAS_RNG) {
446 		    	if (!(lfunc & CPUID_VIA_DO_RNG)) {
447 			    rng_enable++;
448 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_RNG;
449 			}
450 		    }
451 		    /* Check for and enable ACE (AES-CBC) */
452 		    if (lfunc & CPUID_VIA_HAS_ACE) {
453 			if (!(lfunc & CPUID_VIA_DO_ACE)) {
454 			    ace_enable++;
455 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE;
456 			}
457 		    }
458 		    /* Check for and enable SHA */
459 		    if (lfunc & CPUID_VIA_HAS_PHE) {
460 			if (!(lfunc & CPUID_VIA_DO_PHE)) {
461 			    ace_enable++;
462 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PHE;
463 			}
464 		    }
465 		    /* Check for and enable ACE2 (AES-CTR) */
466 		    if (lfunc & CPUID_VIA_HAS_ACE2) {
467 			if (!(lfunc & CPUID_VIA_DO_ACE2)) {
468 			    ace_enable++;
469 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE2;
470 			}
471 		    }
472 		    /* Check for and enable PMM (modmult engine) */
473 		    if (lfunc & CPUID_VIA_HAS_PMM) {
474 			if (!(lfunc & CPUID_VIA_DO_PMM)) {
475 			    ace_enable++;
476 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PMM;
477 			}
478 		    }
479 
480 		    /* Actually do the enables. */
481 		    if (rng_enable) {
482 			msr = rdmsr(MSR_VIA_RNG);
483 			wrmsr(MSR_VIA_RNG, msr | MSR_VIA_RNG_ENABLE);
484 		    }
485 		    if (ace_enable) {
486 			msr = rdmsr(MSR_VIA_ACE);
487 			wrmsr(MSR_VIA_ACE, msr | MSR_VIA_ACE_ENABLE);
488 		    }
489 
490 		}
491 	}
492 
493 	/*
494 	 * Determine L1 cache/TLB info.
495 	 */
496 	if (lfunc < 0x80000005) {
497 		/* No L1 cache info available. */
498 		return;
499 	}
500 
501 	x86_cpuid(0x80000005, descs);
502 
503 	cai = &ci->ci_cinfo[CAI_ITLB];
504 	cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
505 	cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
506 	cai->cai_linesize = (4 * 1024);
507 
508 	cai = &ci->ci_cinfo[CAI_DTLB];
509 	cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
510 	cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
511 	cai->cai_linesize = (4 * 1024);
512 
513 	cai = &ci->ci_cinfo[CAI_DCACHE];
514 	cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
515 	cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
516 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]);
517 	if (family == 6 && model == 9 && stepping == 8) {
518 		/* Erratum: stepping 8 reports 4 when it should be 2 */
519 		cai->cai_associativity = 2;
520 	}
521 
522 	cai = &ci->ci_cinfo[CAI_ICACHE];
523 	cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
524 	cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
525 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
526 	if (family == 6 && model == 9 && stepping == 8) {
527 		/* Erratum: stepping 8 reports 4 when it should be 2 */
528 		cai->cai_associativity = 2;
529 	}
530 
531 	/*
532 	 * Determine L2 cache/TLB info.
533 	 */
534 	if (lfunc < 0x80000006) {
535 		/* No L2 cache info available. */
536 		return;
537 	}
538 
539 	x86_cpuid(0x80000006, descs);
540 
541 	cai = &ci->ci_cinfo[CAI_L2CACHE];
542 	if (family > 6 || model >= 9) {
543 		cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
544 		cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
545 		cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
546 	} else {
547 		cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
548 		cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
549 		cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
550 	}
551 }
552 
553 static void
554 cpu_probe_geode(struct cpu_info *ci)
555 {
556 
557 	if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
558 	    CPUID2FAMILY(ci->ci_signature) != 5)
559 	    	return;
560 
561 	cpu_probe_cyrix_cmn(ci);
562 	cpu_probe_amd_cache(ci);
563 }
564 
565 static void
566 cpu_probe_vortex86(struct cpu_info *ci)
567 {
568 #define PCI_MODE1_ADDRESS_REG	0x0cf8
569 #define PCI_MODE1_DATA_REG	0x0cfc
570 #define PCI_MODE1_ENABLE	0x80000000UL
571 
572 	uint32_t reg;
573 
574 	if (cpu_vendor != CPUVENDOR_VORTEX86)
575 		return;
576 	/*
577 	 * CPU model available from "Customer ID register" in
578 	 * North Bridge Function 0 PCI space
579 	 * we can't use pci_conf_read() because the PCI subsystem is not
580 	 * not initialised early enough
581 	 */
582 
583 	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE | 0x90);
584 	reg = inl(PCI_MODE1_DATA_REG);
585 
586 	switch(reg) {
587 	case 0x31504d44:
588 		strcpy(cpu_brand_string, "Vortex86SX");
589 		break;
590 	case 0x32504d44:
591 		strcpy(cpu_brand_string, "Vortex86DX");
592 		break;
593 	case 0x33504d44:
594 		strcpy(cpu_brand_string, "Vortex86MX");
595 		break;
596 	default:
597 		strcpy(cpu_brand_string, "Unknown Vortex86");
598 		break;
599 	}
600 
601 #undef PCI_MODE1_ENABLE
602 #undef PCI_MODE1_ADDRESS_REG
603 #undef PCI_MODE1_DATA_REG
604 }
605 
/*
 * Probe the CPU via CPUID: identify the vendor, read the brand string,
 * determine family/class, collect feature flags and cache info, run the
 * vendor-specific quirk probes, and (on the boot processor) publish the
 * feature set in cpu_feature[].  Called for every CPU.
 */
void
cpu_probe(struct cpu_info *ci)
{
	const struct x86_cache_info *cai;
	u_int descs[4];
	int iterations, i, j;
	uint8_t desc;
	uint32_t miscbytes;
	uint32_t brand[12];

	/* Defaults from the no-cpuid table, overridden below if possible. */
	cpu_vendor = i386_nocpuid_cpus[cputype << 1];
	cpu_class = i386_nocpuid_cpus[(cputype << 1) + 1];

	if (cpuid_level < 0)
		return;

	for (i = 0; i < __arraycount(ci->ci_feat_val); i++) {
		ci->ci_feat_val[i] = 0;
	}

	x86_cpuid(0, descs);
	cpuid_level = descs[0];
	/* Vendor string is delivered in %ebx, %edx, %ecx order. */
	ci->ci_vendor[0] = descs[1];
	ci->ci_vendor[2] = descs[2];
	ci->ci_vendor[1] = descs[3];
	ci->ci_vendor[3] = 0;

	if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
		cpu_vendor = CPUVENDOR_INTEL;
	else if (memcmp(ci->ci_vendor,  "AuthenticAMD", 12) == 0)
		cpu_vendor = CPUVENDOR_AMD;
	else if (memcmp(ci->ci_vendor,  "CyrixInstead", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor,  "Geode by NSC", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
		cpu_vendor = CPUVENDOR_IDT;
	else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
		cpu_vendor = CPUVENDOR_TRANSMETA;
	else if (memcmp(ci->ci_vendor, "Vortex86 SoC", 12) == 0)
		cpu_vendor = CPUVENDOR_VORTEX86;
	else
		cpu_vendor = CPUVENDOR_UNKNOWN;

	/* Read the 48-byte brand string, skipping leading spaces. */
	x86_cpuid(0x80000000, brand);
	if (brand[0] >= 0x80000004) {
		x86_cpuid(0x80000002, brand);
		x86_cpuid(0x80000003, brand + 4);
		x86_cpuid(0x80000004, brand + 8);
		for (i = 0; i < 48; i++) {
			if (((char *) brand)[i] != ' ')
				break;
		}
		/* cpu_brand_string is 49 bytes; BSS zero provides the NUL. */
		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
	}

	if (cpuid_level >= 1) {
		x86_cpuid(1, descs);
		ci->ci_signature = descs[0];
		miscbytes = descs[1];
		ci->ci_feat_val[1] = descs[2];
		ci->ci_feat_val[0] = descs[3];

		/* Determine family + class. */
		cpu_class = CPUID2FAMILY(ci->ci_signature) + (CPUCLASS_386 - 3);
		if (cpu_class > CPUCLASS_686)
			cpu_class = CPUCLASS_686;

		/* CLFLUSH line size is next 8 bits (in 8-byte units) */
		if (ci->ci_feat_val[0] & CPUID_CFLUSH)
			ci->ci_cflush_lsize = ((miscbytes >> 8) & 0xff) << 3;
		ci->ci_initapicid = (miscbytes >> 24) & 0xff;
	}

	if (cpuid_level >= 2) {
		/* Parse the cache info from `cpuid', if we have it. */
		x86_cpuid(2, descs);
		/* Low byte of %eax is the number of iterations required. */
		iterations = descs[0] & 0xff;
		while (iterations-- > 0) {
			for (i = 0; i < 4; i++) {
				/* Bit 31 set means the register is invalid. */
				if (descs[i] & 0x80000000)
					continue;
				for (j = 0; j < 4; j++) {
					/* Byte 0 of %eax is the count above. */
					if (i == 0 && j == 0)
						continue;
					desc = (descs[i] >> (j * 8)) & 0xff;
					if (desc == 0)
						continue;
					cai = cache_info_lookup(
					    intel_cpuid_cache_info, desc);
					if (cai != NULL) {
						ci->ci_cinfo[cai->cai_index] =
						    *cai;
					}
				}
			}
		}
	}

	/* Vendor-specific quirks; each probe checks the vendor itself. */
	cpu_probe_k5(ci);
	cpu_probe_k678(ci);
	cpu_probe_cyrix(ci);
	cpu_probe_winchip(ci);
	cpu_probe_c3(ci);
	cpu_probe_geode(ci);
	cpu_probe_vortex86(ci);

	x86_cpu_topology(ci);

	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feat_val[0] & CPUID_TM) &&
	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
		/* Enable thermal monitor 1. */
		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1<<3));
	}

	ci->ci_feat_val[0] &= ~CPUID_FEAT_BLACKLIST;
	if (ci == &cpu_info_primary) {
		/* If first. Boot Processor is the cpu_feature reference. */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			cpu_feature[i] = ci->ci_feat_val[i];
		}
#ifndef XEN
		/* Early patch of text segment. */
		x86_patch(true);
#endif
	} else {
		/*
		 * If not first. Warn about cpu_feature mismatch for
		 * secondary CPUs.
		 */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			if (cpu_feature[i] != ci->ci_feat_val[i])
				aprint_error_dev(ci->ci_dev,
				    "feature mismatch: cpu_feature[%d] is "
				    "%#x, but CPU reported %#x\n",
				    i, cpu_feature[i], ci->ci_feat_val[i]);
		}
	}
}
745 
/*
 * Print the CPU identification gathered by cpu_probe(), sanity-check the
 * CPU class, warn about known-bad configurations (Cyrix cache, SVM
 * disabled by BIOS), and finish i386-specific setup (LongRun, FXSAVE/SSE).
 */
void
cpu_identify(struct cpu_info *ci)
{

	snprintf(cpu_model, sizeof(cpu_model), "%s %d86-class",
	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
	if (cpu_brand_string[0] != '\0') {
		aprint_normal(": %s", cpu_brand_string);
	} else {
		aprint_normal(": %s", cpu_model);
		if (ci->ci_data.cpu_cc_freq != 0)
			aprint_normal(", %dMHz",
			    (int)(ci->ci_data.cpu_cc_freq / 1000000));
	}
	if (ci->ci_signature != 0)
		aprint_normal(", id 0x%x", ci->ci_signature);
	aprint_normal("\n");

	/* Fall back to the generic model name for sysctl consumers. */
	if (cpu_brand_string[0] == '\0') {
		strlcpy(cpu_brand_string, cpu_model, sizeof(cpu_brand_string));
	}
	if (cpu_class == CPUCLASS_386) {
		panic("NetBSD requires an 80486DX or later processor");
	}
	if (cputype == CPU_486DLC) {
		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
	}

	/* Check once, on cpu0 only, whether the BIOS locked out SVM. */
	if ((cpu_vendor == CPUVENDOR_AMD) /* check enablement of an */
	  && (device_unit(ci->ci_dev) == 0) /* AMD feature only once */
	  && ((cpu_feature[3] & CPUID_SVM) == CPUID_SVM)
#if defined(XEN) && !defined(DOM0OPS)
	  && (false)  /* on Xen rdmsr is for Dom0 only */
#endif
	  )
	{
		uint64_t val;

		val = rdmsr(MSR_VMCR);
		if (((val & VMCR_SVMED) == VMCR_SVMED)
		  && ((val & VMCR_LOCK) == VMCR_LOCK))
		{
			aprint_normal_dev(ci->ci_dev,
				"SVM disabled by the BIOS\n");
		}
	}

#ifdef i386 /* XXX for now */
	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
		u_int descs[4];
		x86_cpuid(0x80860000, descs);
		if (descs[0] >= 0x80860007)
			tmx86_init_longrun();
	}

	/* If we have FXSAVE/FXRESTOR, use them. */
	if (cpu_feature[0] & CPUID_FXSR) {
		i386_use_fxsave = 1;
		/*
		 * If we have SSE/SSE2, enable XMM exceptions, and
		 * notify userland.
		 */
		if (cpu_feature[0] & CPUID_SSE)
			i386_has_sse = 1;
		if (cpu_feature[0] & CPUID_SSE2)
			i386_has_sse2 = 1;
	} else
		i386_use_fxsave = 0;
#endif	/* i386 */
}
816