xref: /netbsd-src/sys/arch/x86/x86/identcpu.c (revision 56bb44cae5b13a6b74792381ba1e6d930b26aa67)
1 /*	$NetBSD: identcpu.c,v 1.27 2011/02/24 13:58:39 jruoho Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Frank van der Linden,  and by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.27 2011/02/24 13:58:39 jruoho Exp $");
34 
35 #include "opt_intel_odcm.h"
36 #include "opt_xen.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 
42 #include <uvm/uvm_extern.h>
43 
44 #include <machine/specialreg.h>
45 #include <machine/pio.h>
46 #include <machine/cpu.h>
47 
48 #include <x86/cputypes.h>
49 #include <x86/cacheinfo.h>
50 #include <x86/cpuvar.h>
51 #include <x86/cpu_msr.h>
52 
53 static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;
54 
55 static const struct x86_cache_info amd_cpuid_l2cache_assoc_info[] =
56 	AMD_L2CACHE_INFO;
57 
58 static const struct x86_cache_info amd_cpuid_l3cache_assoc_info[] =
59 	AMD_L3CACHE_INFO;
60 
61 int cpu_vendor;
62 char cpu_brand_string[49];
63 
64 /*
65  * Info for CTL_HW
66  */
67 char	cpu_model[120];
68 
69 /*
70  * Note: these are just the ones that may not have a cpuid instruction.
71  * We deal with the rest in a different way.
72  */
73 const int i386_nocpuid_cpus[] = {
74 	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386SX */
75 	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386   */
76 	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486SX */
77 	CPUVENDOR_INTEL, CPUCLASS_486, 	/* CPU_486   */
78 	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_486DLC */
79 	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_6x86 */
80 	CPUVENDOR_NEXGEN, CPUCLASS_386,	/* CPU_NX586 */
81 };
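/*
 * Each entry above occupies two consecutive slots: the vendor, then the
 * CPU class.  cpu_probe() below indexes the table as
 * i386_nocpuid_cpus[cpu << 1] and i386_nocpuid_cpus[(cpu << 1) + 1],
 * where `cpu' holds the CPU_* constant noted in the per-line comments.
 */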
82 
83 static const char cpu_vendor_names[][10] = {
84 	"Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta",
85 	"Vortex86"
86 };
87 
88 static const struct x86_cache_info *
89 cache_info_lookup(const struct x86_cache_info *cai, uint8_t desc)
90 {
91 	int i;
92 
93 	for (i = 0; cai[i].cai_desc != 0; i++) {
94 		if (cai[i].cai_desc == desc)
95 			return (&cai[i]);
96 	}
97 
98 	return (NULL);
99 }
100 
101 
102 static void
103 cpu_probe_amd_cache(struct cpu_info *ci)
104 {
105 	const struct x86_cache_info *cp;
106 	struct x86_cache_info *cai;
107 	int family, model;
108 	u_int descs[4];
109 	u_int lfunc;
110 
111 	family = CPUID2FAMILY(ci->ci_signature);
112 	model = CPUID2MODEL(ci->ci_signature);
113 
114 	/*
115 	 * K5 model 0 has none of this info.
116 	 */
117 	if (family == 5 && model == 0)
118 		return;
119 
120 	/*
121 	 * Get extended values for K8 and up.
122 	 */
123 	if (family == 0xf) {
124 		family += CPUID2EXTFAMILY(ci->ci_signature);
125 		model += CPUID2EXTMODEL(ci->ci_signature);
126 	}
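	/*
	 * For example, a family-0x10 (K10) part reports a base family of
	 * 0xf and an extended family of 0x01 in its cpuid signature, so
	 * the effective family computed here is 0xf + 0x01 = 0x10; the
	 * extended model is likewise folded into the base model.
	 */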
127 
128 	/*
129 	 * Determine the largest extended function value.
130 	 */
131 	x86_cpuid(0x80000000, descs);
132 	lfunc = descs[0];
133 
134 	/*
135 	 * Determine L1 cache/TLB info.
136 	 */
137 	if (lfunc < 0x80000005) {
138 		/* No L1 cache info available. */
139 		return;
140 	}
141 
142 	x86_cpuid(0x80000005, descs);
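	/*
	 * Per AMD's CPUID documentation, leaf 0x80000005 describes the L1
	 * caches and TLBs: %eax/%ebx hold the 2M/4M and 4K TLB geometries
	 * (entries in the low byte of each halfword, associativity in the
	 * high byte), while %ecx/%edx describe the L1 data and instruction
	 * caches (size in KB in bits 31-24, associativity in bits 23-16,
	 * lines per tag in bits 15-8, line size in bits 7-0).
	 */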
143 
144 	/*
145 	 * K6-III and higher have large page TLBs.
146 	 */
147 	if ((family == 5 && model >= 9) || family >= 6) {
148 		cai = &ci->ci_cinfo[CAI_ITLB2];
149 		cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
150 		cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
151 		cai->cai_linesize = (4 * 1024 * 1024);
152 
153 		cai = &ci->ci_cinfo[CAI_DTLB2];
154 		cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
155 		cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
156 		cai->cai_linesize = (4 * 1024 * 1024);
157 	}
158 
159 	cai = &ci->ci_cinfo[CAI_ITLB];
160 	cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
161 	cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
162 	cai->cai_linesize = (4 * 1024);
163 
164 	cai = &ci->ci_cinfo[CAI_DTLB];
165 	cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
166 	cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
167 	cai->cai_linesize = (4 * 1024);
168 
169 	cai = &ci->ci_cinfo[CAI_DCACHE];
170 	cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
171 	cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
172 	cai->cai_linesize = AMD_L1_ECX_DC_LS(descs[2]);
173 
174 	cai = &ci->ci_cinfo[CAI_ICACHE];
175 	cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
176 	cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
177 	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);
178 
179 	/*
180 	 * Determine L2 cache/TLB info.
181 	 */
182 	if (lfunc < 0x80000006) {
183 		/* No L2 cache info available. */
184 		return;
185 	}
186 
187 	x86_cpuid(0x80000006, descs);
188 
189 	cai = &ci->ci_cinfo[CAI_L2CACHE];
190 	cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
191 	cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
192 	cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);
193 
194 	cp = cache_info_lookup(amd_cpuid_l2cache_assoc_info,
195 	    cai->cai_associativity);
196 	if (cp != NULL)
197 		cai->cai_associativity = cp->cai_associativity;
198 	else
199 		cai->cai_associativity = 0;	/* XXX Unknown/reserved */
200 
201 	if (family < 0xf) {
202 		/* No L3 cache info available. */
203 		return;
204 	}
205 
206 	cai = &ci->ci_cinfo[CAI_L3CACHE];
207 	cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
208 	cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
209 	cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);
210 
211 	cp = cache_info_lookup(amd_cpuid_l3cache_assoc_info,
212 	    cai->cai_associativity);
213 	if (cp != NULL)
214 		cai->cai_associativity = cp->cai_associativity;
215 	else
216 		cai->cai_associativity = 0;	/* XXX Unknown/reserved */
217 
218 	if (lfunc < 0x80000019) {
219 		/* No 1GB Page TLB */
220 		return;
221 	}
222 
223 	x86_cpuid(0x80000019, descs);
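	/*
	 * Leaf 0x80000019 reports the 1GB-page TLBs: %eax carries the L1
	 * entries (instruction TLB in bits 11-0 with associativity in
	 * bits 15-12, data TLB in bits 27-16/31-28) and %ebx the L2
	 * equivalents, which is why the L1 fields below come from descs[0]
	 * and the L2 fields from descs[1].
	 */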
224 
225 	cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
226 	cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[0]);
227 	cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[0]);
228 	cai->cai_linesize = (1024 * 1024 * 1024);
229 
230 	cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
231 	cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
232 	cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
233 	cai->cai_linesize = (1024 * 1024 * 1024);
234 
235 	cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
236 	cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
237 	cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
238 	cai->cai_linesize = (1024 * 1024 * 1024);
239 
240 	cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
241 	cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[1]);
242 	cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[1]);
243 	cai->cai_linesize = (1024 * 1024 * 1024);
244 }
245 
246 static void
247 cpu_probe_k5(struct cpu_info *ci)
248 {
249 	int flag;
250 
251 	if (cpu_vendor != CPUVENDOR_AMD ||
252 	    CPUID2FAMILY(ci->ci_signature) != 5)
253 		return;
254 
255 	if (CPUID2MODEL(ci->ci_signature) == 0) {
256 		/*
257 		 * According to the AMD Processor Recognition App Note,
258 		 * the AMD-K5 Model 0 uses the wrong bit to indicate
259 		 * support for global PTEs, instead using bit 9 (APIC)
260 	 * rather than bit 13 (i.e. "0x200" vs. "0x2000".  Oops!).
261 		 */
262 		flag = ci->ci_feat_val[0];
263 		if ((flag & CPUID_APIC) != 0)
264 			flag = (flag & ~CPUID_APIC) | CPUID_PGE;
265 		ci->ci_feat_val[0] = flag;
266 	}
267 
268 	cpu_probe_amd_cache(ci);
269 }
270 
271 static void
272 cpu_probe_k678(struct cpu_info *ci)
273 {
274 	uint32_t descs[4];
275 
276 	if (cpu_vendor != CPUVENDOR_AMD ||
277 	    CPUID2FAMILY(ci->ci_signature) < 6)
278 		return;
279 
280 	/* Determine the extended feature flags. */
281 	x86_cpuid(0x80000000, descs);
282 	if (descs[0] >= 0x80000001) {
283 		x86_cpuid(0x80000001, descs);
284 		ci->ci_feat_val[3] = descs[2]; /* %ecx */
285 		ci->ci_feat_val[2] = descs[3]; /* %edx */
286 	}
287 
288 	cpu_probe_amd_cache(ci);
289 }
290 
291 static inline uint8_t
292 cyrix_read_reg(uint8_t reg)
293 {
294 
295 	outb(0x22, reg);
296 	return inb(0x23);
297 }
298 
299 static inline void
300 cyrix_write_reg(uint8_t reg, uint8_t data)
301 {
302 
303 	outb(0x22, reg);
304 	outb(0x23, data);
305 }
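/*
 * The Cyrix configuration control registers (CCRs) are reached through the
 * classic index/data pair at I/O ports 0x22 and 0x23: write the register
 * index to port 0x22, then read or write its value at port 0x23.  For
 * example, cyrix_read_reg(0xC3) below fetches CCR3, and setting bit 4 of
 * CCR3 is what unlocks access to CCR4/CCR5.
 */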
306 
307 static void
308 cpu_probe_cyrix_cmn(struct cpu_info *ci)
309 {
310 	/*
311 	 * i8254 latch check routine:
312 	 *     National Geode (formerly Cyrix MediaGX) has a serious bug in
313 	 *     its built-in i8254-compatible clock module (cs5510 cs5520).
314 	 *     Set the variable 'clock_broken_latch' to indicate it.
315 	 *
316 	 * This bug is not present in the cs5530, and the flag
317 	 * is disabled again in sys/arch/i386/pci/pcib.c if this later
318 	 * model device is detected. Ideally, this work-around should not
319 	 * even be in here, it should be in there. XXX
320 	 */
321 	uint8_t c3;
322 #ifndef XEN
323 	extern int clock_broken_latch;
324 
325 	switch (ci->ci_signature) {
326 	case 0x440:     /* Cyrix MediaGX */
327 	case 0x540:     /* GXm */
328 		clock_broken_latch = 1;
329 		break;
330 	}
331 #endif
332 
333 	/* set up various cyrix registers */
334 	/*
335 	 * Enable suspend on halt (powersave mode).
336 	 * When powersave mode is enabled, the TSC stops counting
337 	 * while the CPU is halted in idle() waiting for an interrupt.
338 	 * This means we can't use the TSC for interval time in
339 	 * microtime(9), and thus it is disabled here.
340 	 *
341 	 * It still makes a perfectly good cycle counter
342 	 * for program profiling, so long as you remember you're
343 	 * counting cycles, and not time. Further, if you don't
344 	 * mind not using powersave mode, the TSC works just fine,
345 	 * so this should really be optional. XXX
346 	 */
347 	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);
348 
349 	/*
350 	 * Do not disable the TSC on the Geode GX, it's reported to
351 	 * work fine.
352 	 */
353 	if (ci->ci_signature != 0x552)
354 		ci->ci_feat_val[0] &= ~CPUID_TSC;
355 
356 	/* enable access to ccr4/ccr5 */
357 	c3 = cyrix_read_reg(0xC3);
358 	cyrix_write_reg(0xC3, c3 | 0x10);
359 	/* cyrix's workaround  for the "coma bug" */
360 	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
361 	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
362 	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
363 	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
364 	/* disable access to ccr4/ccr5 */
365 	cyrix_write_reg(0xC3, c3);
366 }
367 
368 static void
369 cpu_probe_cyrix(struct cpu_info *ci)
370 {
371 
372 	if (cpu_vendor != CPUVENDOR_CYRIX ||
373 	    CPUID2FAMILY(ci->ci_signature) < 4 ||
374 	    CPUID2FAMILY(ci->ci_signature) > 6)
375 		return;
376 
377 	cpu_probe_cyrix_cmn(ci);
378 }
379 
380 static void
381 cpu_probe_winchip(struct cpu_info *ci)
382 {
383 
384 	if (cpu_vendor != CPUVENDOR_IDT ||
385 	    CPUID2FAMILY(ci->ci_signature) != 5)
386 	    	return;
387 
388 	if (CPUID2MODEL(ci->ci_signature) == 4) {
389 		/* WinChip C6 */
390 		ci->ci_feat_val[0] &= ~CPUID_TSC;
391 	}
392 }
393 
394 static void
395 cpu_probe_c3(struct cpu_info *ci)
396 {
397 	u_int family, model, stepping, descs[4], lfunc, msr;
398 	struct x86_cache_info *cai;
399 
400 	if (cpu_vendor != CPUVENDOR_IDT ||
401 	    CPUID2FAMILY(ci->ci_signature) < 6)
402 	    	return;
403 
404 	family = CPUID2FAMILY(ci->ci_signature);
405 	model = CPUID2MODEL(ci->ci_signature);
406 	stepping = CPUID2STEPPING(ci->ci_signature);
407 
408 	/* Determine the largest extended function value. */
409 	x86_cpuid(0x80000000, descs);
410 	lfunc = descs[0];
411 
412 	/* Determine the extended feature flags. */
413 	if (lfunc >= 0x80000001) {
414 		x86_cpuid(0x80000001, descs);
415 		ci->ci_feat_val[2] = descs[3];
416 	}
417 
418 	if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) {
419 		/* Nehemiah or Esther */
420 		x86_cpuid(0xc0000000, descs);
421 		lfunc = descs[0];
422 		if (lfunc >= 0xc0000001) {	/* has ACE, RNG */
423 		    int rng_enable = 0, ace_enable = 0;
424 		    x86_cpuid(0xc0000001, descs);
425 		    lfunc = descs[3];
426 		    ci->ci_feat_val[4] = lfunc;
427 		    /* Check for and enable RNG */
428 		    if (lfunc & CPUID_VIA_HAS_RNG) {
429 		    	if (!(lfunc & CPUID_VIA_DO_RNG)) {
430 			    rng_enable++;
431 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_RNG;
432 			}
433 		    }
434 		    /* Check for and enable ACE (AES-CBC) */
435 		    if (lfunc & CPUID_VIA_HAS_ACE) {
436 			if (!(lfunc & CPUID_VIA_DO_ACE)) {
437 			    ace_enable++;
438 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE;
439 			}
440 		    }
441 		    /* Check for and enable SHA */
442 		    if (lfunc & CPUID_VIA_HAS_PHE) {
443 			if (!(lfunc & CPUID_VIA_DO_PHE)) {
444 			    ace_enable++;
445 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PHE;
446 			}
447 		    }
448 		    /* Check for and enable ACE2 (AES-CTR) */
449 		    if (lfunc & CPUID_VIA_HAS_ACE2) {
450 			if (!(lfunc & CPUID_VIA_DO_ACE2)) {
451 			    ace_enable++;
452 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE2;
453 			}
454 		    }
455 		    /* Check for and enable PMM (modmult engine) */
456 		    if (lfunc & CPUID_VIA_HAS_PMM) {
457 			if (!(lfunc & CPUID_VIA_DO_PMM)) {
458 			    ace_enable++;
459 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PMM;
460 			}
461 		    }
462 
463 		    /* Actually do the enables. */
464 		    if (rng_enable) {
465 			msr = rdmsr(MSR_VIA_RNG);
466 			wrmsr(MSR_VIA_RNG, msr | MSR_VIA_RNG_ENABLE);
467 		    }
468 		    if (ace_enable) {
469 			msr = rdmsr(MSR_VIA_ACE);
470 			wrmsr(MSR_VIA_ACE, msr | MSR_VIA_ACE_ENABLE);
471 		    }
472 
473 		}
474 	}
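	/*
	 * In the block above, the 0xc0000001 %edx word advertises each
	 * PadLock unit twice: a "HAS" bit saying the unit exists and a
	 * "DO" bit saying it is enabled.  Units that are present but idle
	 * are switched on through MSR_VIA_RNG and MSR_VIA_ACE (the ACE MSR
	 * also covers the SHA, ACE2 and PMM engines here), and
	 * ci_feat_val[4] is updated so the rest of the kernel sees them
	 * as enabled.
	 */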
475 
476 	/*
477 	 * Determine L1 cache/TLB info.
478 	 */
479 	if (lfunc < 0x80000005) {
480 		/* No L1 cache info available. */
481 		return;
482 	}
483 
484 	x86_cpuid(0x80000005, descs);
485 
486 	cai = &ci->ci_cinfo[CAI_ITLB];
487 	cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
488 	cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
489 	cai->cai_linesize = (4 * 1024);
490 
491 	cai = &ci->ci_cinfo[CAI_DTLB];
492 	cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
493 	cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
494 	cai->cai_linesize = (4 * 1024);
495 
496 	cai = &ci->ci_cinfo[CAI_DCACHE];
497 	cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
498 	cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
499 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]);
500 	if (family == 6 && model == 9 && stepping == 8) {
501 		/* Erratum: stepping 8 reports 4 when it should be 2 */
502 		cai->cai_associativity = 2;
503 	}
504 
505 	cai = &ci->ci_cinfo[CAI_ICACHE];
506 	cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
507 	cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
508 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
509 	if (family == 6 && model == 9 && stepping == 8) {
510 		/* Erratum: stepping 8 reports 4 when it should be 2 */
511 		cai->cai_associativity = 2;
512 	}
513 
514 	/*
515 	 * Determine L2 cache/TLB info.
516 	 */
517 	if (lfunc < 0x80000006) {
518 		/* No L2 cache info available. */
519 		return;
520 	}
521 
522 	x86_cpuid(0x80000006, descs);
523 
524 	cai = &ci->ci_cinfo[CAI_L2CACHE];
525 	if (family > 6 || model >= 9) {
526 		cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
527 		cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
528 		cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
529 	} else {
530 		cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
531 		cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
532 		cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
533 	}
534 }
535 
536 static void
537 cpu_probe_geode(struct cpu_info *ci)
538 {
539 
540 	if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
541 	    CPUID2FAMILY(ci->ci_signature) != 5)
542 	    	return;
543 
544 	cpu_probe_cyrix_cmn(ci);
545 	cpu_probe_amd_cache(ci);
546 }
547 
548 static void
549 cpu_probe_vortex86(struct cpu_info *ci)
550 {
551 #define PCI_MODE1_ADDRESS_REG	0x0cf8
552 #define PCI_MODE1_DATA_REG	0x0cfc
553 #define PCI_MODE1_ENABLE	0x80000000UL
554 
555 	uint32_t reg;
556 
557 	if (cpu_vendor != CPUVENDOR_VORTEX86)
558 		return;
559 	/*
560 	 * The CPU model is available from the "Customer ID register" in
561 	 * North Bridge Function 0 PCI space.
562 	 * We can't use pci_conf_read() because the PCI subsystem is
563 	 * not initialised early enough.
564 	 */
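	/*
	 * A mode-1 configuration address is (enable << 31) | (bus << 16) |
	 * (device << 11) | (function << 8) | register, so the access below,
	 * PCI_MODE1_ENABLE | 0x90, reads 32 bits from register 0x90 of
	 * bus 0, device 0, function 0 (the Vortex86 North Bridge).
	 */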
565 
566 	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE | 0x90);
567 	reg = inl(PCI_MODE1_DATA_REG);
568 
569 	switch(reg) {
570 	case 0x31504d44:
571 		strcpy(cpu_brand_string, "Vortex86SX");
572 		break;
573 	case 0x32504d44:
574 		strcpy(cpu_brand_string, "Vortex86DX");
575 		break;
576 	case 0x33504d44:
577 		strcpy(cpu_brand_string, "Vortex86MX");
578 		break;
579 	default:
580 		strcpy(cpu_brand_string, "Unknown Vortex86");
581 		break;
582 	}
583 
584 #undef PCI_MODE1_ENABLE
585 #undef PCI_MODE1_ADDRESS_REG
586 #undef PCI_MODE1_DATA_REG
587 }
588 
589 void
590 cpu_probe(struct cpu_info *ci)
591 {
592 	const struct x86_cache_info *cai;
593 	u_int descs[4];
594 	int iterations, i, j;
595 	uint8_t desc;
596 	uint32_t miscbytes;
597 	uint32_t brand[12];
598 
599 	cpu_vendor = i386_nocpuid_cpus[cpu << 1];
600 	cpu_class = i386_nocpuid_cpus[(cpu << 1) + 1];
601 
602 	if (cpuid_level < 0)
603 		return;
604 
605 	for (i = 0; i < __arraycount(ci->ci_feat_val); i++) {
606 		ci->ci_feat_val[i] = 0;
607 	}
608 
609 	x86_cpuid(0, descs);
610 	cpuid_level = descs[0];
611 	ci->ci_vendor[0] = descs[1];
612 	ci->ci_vendor[2] = descs[2];
613 	ci->ci_vendor[1] = descs[3];
614 	ci->ci_vendor[3] = 0;
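	/*
	 * cpuid leaf 0 returns the 12-byte vendor string in %ebx, %edx,
	 * %ecx order, hence the 0/2/1 assignment above, which reassembles
	 * it in memory as e.g. "GenuineIntel" for the memcmp()s below.
	 */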
615 
616 	if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
617 		cpu_vendor = CPUVENDOR_INTEL;
618 	else if (memcmp(ci->ci_vendor,  "AuthenticAMD", 12) == 0)
619 		cpu_vendor = CPUVENDOR_AMD;
620 	else if (memcmp(ci->ci_vendor,  "CyrixInstead", 12) == 0)
621 		cpu_vendor = CPUVENDOR_CYRIX;
622 	else if (memcmp(ci->ci_vendor,  "Geode by NSC", 12) == 0)
623 		cpu_vendor = CPUVENDOR_CYRIX;
624 	else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
625 		cpu_vendor = CPUVENDOR_IDT;
626 	else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
627 		cpu_vendor = CPUVENDOR_TRANSMETA;
628 	else if (memcmp(ci->ci_vendor, "Vortex86 SoC", 12) == 0)
629 		cpu_vendor = CPUVENDOR_VORTEX86;
630 	else
631 		cpu_vendor = CPUVENDOR_UNKNOWN;
632 
633 	x86_cpuid(0x80000000, brand);
634 	if (brand[0] >= 0x80000004) {
635 		x86_cpuid(0x80000002, brand);
636 		x86_cpuid(0x80000003, brand + 4);
637 		x86_cpuid(0x80000004, brand + 8);
638 		for (i = 0; i < 48; i++) {
639 			if (((char *) brand)[i] != ' ')
640 				break;
641 		}
642 		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
643 	}
644 
645 	if (cpuid_level >= 1) {
646 		x86_cpuid(1, descs);
647 		ci->ci_signature = descs[0];
648 		miscbytes = descs[1];
649 		ci->ci_feat_val[1] = descs[2];
650 		ci->ci_feat_val[0] = descs[3];
651 
652 		/* Determine family + class. */
653 		cpu_class = CPUID2FAMILY(ci->ci_signature) + (CPUCLASS_386 - 3);
654 		if (cpu_class > CPUCLASS_686)
655 			cpu_class = CPUCLASS_686;
656 
657 		/* CLFLUSH line size is next 8 bits */
658 		if (ci->ci_feat_val[0] & CPUID_CFLUSH)
659 			ci->ci_cflush_lsize = ((miscbytes >> 8) & 0xff) << 3;
660 		ci->ci_initapicid = (miscbytes >> 24) & 0xff;
661 	}
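	/*
	 * In the leaf 1 results above, %ebx ("miscbytes") packs the CLFLUSH
	 * line size in bits 15-8 as a count of 8-byte chunks (hence the
	 * "<< 3": a typical value of 8 yields a 64-byte ci_cflush_lsize)
	 * and the initial APIC ID in bits 31-24.
	 */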
662 
663 	if (cpuid_level >= 2) {
664 		/* Parse the cache info from `cpuid', if we have it. */
665 		x86_cpuid(2, descs);
666 		iterations = descs[0] & 0xff;
667 		while (iterations-- > 0) {
668 			for (i = 0; i < 4; i++) {
669 				if (descs[i] & 0x80000000)
670 					continue;
671 				for (j = 0; j < 4; j++) {
672 					if (i == 0 && j == 0)
673 						continue;
674 					desc = (descs[i] >> (j * 8)) & 0xff;
675 					if (desc == 0)
676 						continue;
677 					cai = cache_info_lookup(
678 					    intel_cpuid_cache_info, desc);
679 					if (cai != NULL) {
680 						ci->ci_cinfo[cai->cai_index] =
681 						    *cai;
682 					}
683 				}
684 			}
685 		}
686 	}
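	/*
	 * Leaf 2 returns one-byte cache/TLB descriptors packed four to a
	 * register; the low byte of %eax is the number of times the leaf
	 * must be queried, a set bit 31 marks a register as holding no
	 * descriptors, and each remaining byte is looked up in
	 * intel_cpuid_cache_info (for instance, descriptor 0x2c denotes a
	 * 32KB, 8-way L1 data cache with 64-byte lines).
	 */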
687 
688 	cpu_probe_k5(ci);
689 	cpu_probe_k678(ci);
690 	cpu_probe_cyrix(ci);
691 	cpu_probe_winchip(ci);
692 	cpu_probe_c3(ci);
693 	cpu_probe_geode(ci);
694 	cpu_probe_vortex86(ci);
695 
696 	x86_cpu_topology(ci);
697 
698 	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feat_val[0] & CPUID_TM) &&
699 	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
700 		/* Enable thermal monitor 1. */
701 		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1<<3));
702 	}
703 
704 	if (ci == &cpu_info_primary) {
705 		/* If first: the Boot Processor is the cpu_feature reference. */
706 		for (i = 0; i < __arraycount(cpu_feature); i++) {
707 			cpu_feature[i] = ci->ci_feat_val[i];
708 		}
709 #ifndef XEN
710 		/* Early patch of text segment. */
711 		x86_patch(true);
712 #endif
713 	} else {
714 		/*
715 		 * If not first: warn about any cpu_feature mismatch on
716 		 * the secondary CPUs.
717 		 */
718 		for (i = 0; i < __arraycount(cpu_feature); i++) {
719 			if (cpu_feature[i] != ci->ci_feat_val[i])
720 				aprint_error_dev(ci->ci_dev,
721 				    "feature mismatch: cpu_feature[%d] is "
722 				    "%#x, but CPU reported %#x\n",
723 				    i, cpu_feature[i], ci->ci_feat_val[i]);
724 		}
725 	}
726 }
727 
728 void
729 cpu_identify(struct cpu_info *ci)
730 {
731 
732 	snprintf(cpu_model, sizeof(cpu_model), "%s %d86-class",
733 	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
734 	if (cpu_brand_string[0] != '\0') {
735 		aprint_normal(": %s", cpu_brand_string);
736 	} else {
737 		aprint_normal(": %s", cpu_model);
738 		if (ci->ci_data.cpu_cc_freq != 0)
739 			aprint_normal(", %dMHz",
740 			    (int)(ci->ci_data.cpu_cc_freq / 1000000));
741 	}
742 	if (ci->ci_signature != 0)
743 		aprint_normal(", id 0x%x", ci->ci_signature);
744 	aprint_normal("\n");
745 
746 	if (cpu_brand_string[0] == '\0') {
747 		strlcpy(cpu_brand_string, cpu_model, sizeof(cpu_brand_string));
748 	}
749 	if (cpu_class == CPUCLASS_386) {
750 		panic("NetBSD requires an 80486DX or later processor");
751 	}
752 	if (cpu == CPU_486DLC) {
753 		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
754 	}
755 
756 	if ((cpu_vendor == CPUVENDOR_AMD) /* check enablement of an */
757 	  && (device_unit(ci->ci_dev) == 0) /* AMD feature only once */
758 	  && ((cpu_feature[3] & CPUID_SVM) == CPUID_SVM)
759 #if defined(XEN) && !defined(DOM0OPS)
760 	  && (false)  /* on Xen rdmsr is for Dom0 only */
761 #endif
762 	  )
763 	{
764 		uint64_t val;
765 
766 		val = rdmsr(MSR_VMCR);
767 		if (((val & VMCR_SVMED) == VMCR_SVMED)
768 		  && ((val & VMCR_LOCK) == VMCR_LOCK))
769 		{
770 			aprint_normal_dev(ci->ci_dev,
771 				"SVM disabled by the BIOS\n");
772 		}
773 	}
774 
775 #ifdef i386 /* XXX for now */
776 	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
777 		u_int descs[4];
778 		x86_cpuid(0x80860000, descs);
779 		if (descs[0] >= 0x80860007)
780 			tmx86_init_longrun();
781 	}
782 
783 	/* If we have FXSAVE/FXRESTOR, use them. */
784 	if (cpu_feature[0] & CPUID_FXSR) {
785 		i386_use_fxsave = 1;
786 		/*
787 		 * If we have SSE/SSE2, enable XMM exceptions, and
788 		 * notify userland.
789 		 */
790 		if (cpu_feature[0] & CPUID_SSE)
791 			i386_has_sse = 1;
792 		if (cpu_feature[0] & CPUID_SSE2)
793 			i386_has_sse2 = 1;
794 	} else
795 		i386_use_fxsave = 0;
796 #endif	/* i386 */
797 
798 #ifdef INTEL_ONDEMAND_CLOCKMOD
799 	if (cpuid_level >= 1) {
800 		clockmod_init();
801 	}
802 #endif
803 }
804