xref: /netbsd-src/sys/arch/x86/x86/identcpu.c (revision 4391d5e9d4f291db41e3b3ba26a01b5e51364aae)
1 /*	$NetBSD: identcpu.c,v 1.38 2013/11/15 08:47:55 msaitoh Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Frank van der Linden,  and by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.38 2013/11/15 08:47:55 msaitoh Exp $");
34 
35 #include "opt_xen.h"
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/device.h>
40 
41 #include <uvm/uvm_extern.h>
42 
43 #include <machine/specialreg.h>
44 #include <machine/pio.h>
45 #include <machine/cpu.h>
46 
47 #include <x86/cputypes.h>
48 #include <x86/cacheinfo.h>
49 #include <x86/cpuvar.h>
50 #include <x86/cpu_msr.h>
51 
/* Intel CPUID leaf-2 cache descriptor table, from <x86/cacheinfo.h>. */
static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;

/* AMD L2/L3 associativity encoding -> way-count translation tables. */
static const struct x86_cache_info amd_cpuid_l2cache_assoc_info[] =
	AMD_L2CACHE_INFO;

static const struct x86_cache_info amd_cpuid_l3cache_assoc_info[] =
	AMD_L3CACHE_INFO;

/* Detected CPU vendor (CPUVENDOR_*) and CPUID brand string. */
int cpu_vendor;
char cpu_brand_string[49];	/* 48 brand-string bytes + NUL */

/*
 * Info for CTL_HW
 */
char	cpu_model[120];

/*
 * Note: these are just the ones that may not have a cpuid instruction.
 * We deal with the rest in a different way.
 * Layout: pairs of { vendor, class } indexed by (cputype << 1).
 */
const int i386_nocpuid_cpus[] = {
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386SX */
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386   */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486SX */
	CPUVENDOR_INTEL, CPUCLASS_486, 	/* CPU_486   */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_486DLC */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_6x86 */
	CPUVENDOR_NEXGEN, CPUCLASS_386,	/* CPU_NX586 */
};

/* Printable vendor names, indexed by the CPUVENDOR_* constants. */
static const char cpu_vendor_names[][10] = {
	"Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta",
	"Vortex86"
};
86 
87 static const struct x86_cache_info *
88 cache_info_lookup(const struct x86_cache_info *cai, uint8_t desc)
89 {
90 	int i;
91 
92 	for (i = 0; cai[i].cai_desc != 0; i++) {
93 		if (cai[i].cai_desc == desc)
94 			return (&cai[i]);
95 	}
96 
97 	return (NULL);
98 }
99 
100 
101 static void
102 cpu_probe_amd_cache(struct cpu_info *ci)
103 {
104 	const struct x86_cache_info *cp;
105 	struct x86_cache_info *cai;
106 	int family, model;
107 	u_int descs[4];
108 	u_int lfunc;
109 
110 	family = CPUID_TO_FAMILY(ci->ci_signature);
111 	model = CPUID_TO_MODEL(ci->ci_signature);
112 
113 	/*
114 	 * K5 model 0 has none of this info.
115 	 */
116 	if (family == 5 && model == 0)
117 		return;
118 
119 	/*
120 	 * Determine the largest extended function value.
121 	 */
122 	x86_cpuid(0x80000000, descs);
123 	lfunc = descs[0];
124 
125 	/*
126 	 * Determine L1 cache/TLB info.
127 	 */
128 	if (lfunc < 0x80000005) {
129 		/* No L1 cache info available. */
130 		return;
131 	}
132 
133 	x86_cpuid(0x80000005, descs);
134 
135 	/*
136 	 * K6-III and higher have large page TLBs.
137 	 */
138 	if ((family == 5 && model >= 9) || family >= 6) {
139 		cai = &ci->ci_cinfo[CAI_ITLB2];
140 		cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
141 		cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
142 		cai->cai_linesize = (4 * 1024 * 1024);
143 
144 		cai = &ci->ci_cinfo[CAI_DTLB2];
145 		cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
146 		cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
147 		cai->cai_linesize = (4 * 1024 * 1024);
148 	}
149 
150 	cai = &ci->ci_cinfo[CAI_ITLB];
151 	cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
152 	cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
153 	cai->cai_linesize = (4 * 1024);
154 
155 	cai = &ci->ci_cinfo[CAI_DTLB];
156 	cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
157 	cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
158 	cai->cai_linesize = (4 * 1024);
159 
160 	cai = &ci->ci_cinfo[CAI_DCACHE];
161 	cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
162 	cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
163 	cai->cai_linesize = AMD_L1_ECX_DC_LS(descs[2]);
164 
165 	cai = &ci->ci_cinfo[CAI_ICACHE];
166 	cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
167 	cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
168 	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);
169 
170 	/*
171 	 * Determine L2 cache/TLB info.
172 	 */
173 	if (lfunc < 0x80000006) {
174 		/* No L2 cache info available. */
175 		return;
176 	}
177 
178 	x86_cpuid(0x80000006, descs);
179 
180 	cai = &ci->ci_cinfo[CAI_L2CACHE];
181 	cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
182 	cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
183 	cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);
184 
185 	cp = cache_info_lookup(amd_cpuid_l2cache_assoc_info,
186 	    cai->cai_associativity);
187 	if (cp != NULL)
188 		cai->cai_associativity = cp->cai_associativity;
189 	else
190 		cai->cai_associativity = 0;	/* XXX Unknown/reserved */
191 
192 	if (family < 0xf) {
193 		/* No L3 cache info available. */
194 		return;
195 	}
196 
197 	cai = &ci->ci_cinfo[CAI_L3CACHE];
198 	cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
199 	cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
200 	cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);
201 
202 	cp = cache_info_lookup(amd_cpuid_l3cache_assoc_info,
203 	    cai->cai_associativity);
204 	if (cp != NULL)
205 		cai->cai_associativity = cp->cai_associativity;
206 	else
207 		cai->cai_associativity = 0;	/* XXX Unknown reserved */
208 
209 	if (lfunc < 0x80000019) {
210 		/* No 1GB Page TLB */
211 		return;
212 	}
213 
214 	x86_cpuid(0x80000019, descs);
215 
216 	cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
217 	cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[1]);
218 	cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[1]);
219 	cai->cai_linesize = (1 * 1024);
220 
221 	cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
222 	cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
223 	cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
224 	cai->cai_linesize = (1 * 1024);
225 
226 	cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
227 	cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
228 	cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
229 	cai->cai_linesize = (1 * 1024);
230 
231 	cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
232 	cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[0]);
233 	cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[0]);
234 	cai->cai_linesize = (1 * 1024);
235 }
236 
237 static void
238 cpu_probe_k5(struct cpu_info *ci)
239 {
240 	int flag;
241 
242 	if (cpu_vendor != CPUVENDOR_AMD ||
243 	    CPUID_TO_FAMILY(ci->ci_signature) != 5)
244 		return;
245 
246 	if (CPUID_TO_MODEL(ci->ci_signature) == 0) {
247 		/*
248 		 * According to the AMD Processor Recognition App Note,
249 		 * the AMD-K5 Model 0 uses the wrong bit to indicate
250 		 * support for global PTEs, instead using bit 9 (APIC)
251 		 * rather than bit 13 (i.e. "0x200" vs. 0x2000".  Oops!).
252 		 */
253 		flag = ci->ci_feat_val[0];
254 		if ((flag & CPUID_APIC) != 0)
255 			flag = (flag & ~CPUID_APIC) | CPUID_PGE;
256 		ci->ci_feat_val[0] = flag;
257 	}
258 
259 	cpu_probe_amd_cache(ci);
260 }
261 
262 static void
263 cpu_probe_k678(struct cpu_info *ci)
264 {
265 	uint32_t descs[4];
266 
267 	if (cpu_vendor != CPUVENDOR_AMD ||
268 	    CPUID_TO_FAMILY(ci->ci_signature) < 6)
269 		return;
270 
271 	/* Determine the extended feature flags. */
272 	x86_cpuid(0x80000000, descs);
273 	if (descs[0] >= 0x80000001) {
274 		x86_cpuid(0x80000001, descs);
275 		ci->ci_feat_val[3] = descs[2]; /* %ecx */
276 		ci->ci_feat_val[2] = descs[3]; /* %edx */
277 	}
278 
279 	cpu_probe_amd_cache(ci);
280 }
281 
/*
 * Read a Cyrix configuration register: select it via port 0x22 and
 * read the value back from port 0x23.
 */
static inline uint8_t
cyrix_read_reg(uint8_t reg)
{

	outb(0x22, reg);
	return inb(0x23);
}
289 
/*
 * Write a Cyrix configuration register: select it via port 0x22 and
 * write the value through port 0x23.
 */
static inline void
cyrix_write_reg(uint8_t reg, uint8_t data)
{

	outb(0x22, reg);
	outb(0x23, data);
}
297 
/*
 * Common Cyrix/National Geode setup: flag the broken i8254 latch,
 * enable suspend-on-halt (disabling the TSC where it stops in halt),
 * and apply the "coma bug" register workaround.
 */
static void
cpu_probe_cyrix_cmn(struct cpu_info *ci)
{
	/*
	 * i8254 latch check routine:
	 *     National Geode (formerly Cyrix MediaGX) has a serious bug in
	 *     its built-in i8254-compatible clock module (cs5510 cs5520).
	 *     Set the variable 'clock_broken_latch' to indicate it.
	 *
	 * This bug is not present in the cs5530, and the flag
	 * is disabled again in sys/arch/i386/pci/pcib.c if this later
	 * model device is detected. Ideally, this work-around should not
	 * even be in here, it should be in there. XXX
	 */
	uint8_t c3;
#ifndef XEN
	extern int clock_broken_latch;

	switch (ci->ci_signature) {
	case 0x440:     /* Cyrix MediaGX */
	case 0x540:     /* GXm */
		clock_broken_latch = 1;
		break;
	}
#endif

	/* set up various cyrix registers */
	/*
	 * Enable suspend on halt (powersave mode).
	 * When powersave mode is enabled, the TSC stops counting
	 * while the CPU is halted in idle() waiting for an interrupt.
	 * This means we can't use the TSC for interval time in
	 * microtime(9), and thus it is disabled here.
	 *
	 * It still makes a perfectly good cycle counter
	 * for program profiling, so long as you remember you're
	 * counting cycles, and not time. Further, if you don't
	 * mind not using powersave mode, the TSC works just fine,
	 * so this should really be optional. XXX
	 */
	/* NOTE(review): 0xc2 looks like CCR2, bit 0x08 the suspend-on-halt
	 * enable — confirm against the Cyrix register reference. */
	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);

	/*
	 * Do not disable the TSC on the Geode GX, it's reported to
	 * work fine.
	 */
	if (ci->ci_signature != 0x552)
		ci->ci_feat_val[0] &= ~CPUID_TSC;

	/* enable access to ccr4/ccr5 */
	c3 = cyrix_read_reg(0xC3);
	cyrix_write_reg(0xC3, c3 | 0x10);
	/* cyrix's workaround  for the "coma bug" */
	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
	/* disable access to ccr4/ccr5 (restore previous CCR3 value) */
	cyrix_write_reg(0xC3, c3);
}
358 
359 static void
360 cpu_probe_cyrix(struct cpu_info *ci)
361 {
362 
363 	if (cpu_vendor != CPUVENDOR_CYRIX ||
364 	    CPUID_TO_FAMILY(ci->ci_signature) < 4 ||
365 	    CPUID_TO_FAMILY(ci->ci_signature) > 6)
366 		return;
367 
368 	cpu_probe_cyrix_cmn(ci);
369 }
370 
371 static void
372 cpu_probe_winchip(struct cpu_info *ci)
373 {
374 
375 	if (cpu_vendor != CPUVENDOR_IDT)
376 	    	return;
377 
378 	switch (CPUID_TO_FAMILY(ci->ci_signature)) {
379 	case 5:
380 		/* WinChip C6 */
381 		if (CPUID_TO_MODEL(ci->ci_signature) == 4)
382 			ci->ci_feat_val[0] &= ~CPUID_TSC;
383 		break;
384 	case 6:
385 		/*
386 		 * VIA Eden ESP
387 		 *
388 		 * Quoting from page 3-4 of: "VIA Eden ESP Processor Datasheet"
389 		 * http://www.via.com.tw/download/mainboards/6/14/Eden20v115.pdf
390 		 *
391 		 * 1. The CMPXCHG8B instruction is provided and always enabled,
392 		 *    however, it appears disabled in the corresponding CPUID
393 		 *    function bit 0 to avoid a bug in an early version of
394 		 *    Windows NT. However, this default can be changed via a
395 		 *    bit in the FCR MSR.
396 		 */
397 		ci->ci_feat_val[0] |= CPUID_CX8;
398 		wrmsr(MSR_VIA_FCR, rdmsr(MSR_VIA_FCR) | 0x00000001);
399 		break;
400 	}
401 }
402 
403 static void
404 cpu_probe_c3(struct cpu_info *ci)
405 {
406 	u_int family, model, stepping, descs[4], lfunc, msr;
407 	struct x86_cache_info *cai;
408 
409 	if (cpu_vendor != CPUVENDOR_IDT ||
410 	    CPUID_TO_FAMILY(ci->ci_signature) < 6)
411 	    	return;
412 
413 	family = CPUID_TO_FAMILY(ci->ci_signature);
414 	model = CPUID_TO_MODEL(ci->ci_signature);
415 	stepping = CPUID_TO_STEPPING(ci->ci_signature);
416 
417 	/* Determine the largest extended function value. */
418 	x86_cpuid(0x80000000, descs);
419 	lfunc = descs[0];
420 
421 	/* Determine the extended feature flags. */
422 	if (lfunc >= 0x80000001) {
423 		x86_cpuid(0x80000001, descs);
424 		ci->ci_feat_val[2] = descs[3];
425 	}
426 
427 	if (family > 6 || model > 0x9 || (model == 0x9 && stepping >= 3)) {
428 		/* Nehemiah or Esther */
429 		x86_cpuid(0xc0000000, descs);
430 		lfunc = descs[0];
431 		if (lfunc >= 0xc0000001) {	/* has ACE, RNG */
432 		    int rng_enable = 0, ace_enable = 0;
433 		    x86_cpuid(0xc0000001, descs);
434 		    lfunc = descs[3];
435 		    ci->ci_feat_val[4] = lfunc;
436 		    /* Check for and enable RNG */
437 		    if (lfunc & CPUID_VIA_HAS_RNG) {
438 		    	if (!(lfunc & CPUID_VIA_DO_RNG)) {
439 			    rng_enable++;
440 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_RNG;
441 			}
442 		    }
443 		    /* Check for and enable ACE (AES-CBC) */
444 		    if (lfunc & CPUID_VIA_HAS_ACE) {
445 			if (!(lfunc & CPUID_VIA_DO_ACE)) {
446 			    ace_enable++;
447 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE;
448 			}
449 		    }
450 		    /* Check for and enable SHA */
451 		    if (lfunc & CPUID_VIA_HAS_PHE) {
452 			if (!(lfunc & CPUID_VIA_DO_PHE)) {
453 			    ace_enable++;
454 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PHE;
455 			}
456 		    }
457 		    /* Check for and enable ACE2 (AES-CTR) */
458 		    if (lfunc & CPUID_VIA_HAS_ACE2) {
459 			if (!(lfunc & CPUID_VIA_DO_ACE2)) {
460 			    ace_enable++;
461 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_ACE2;
462 			}
463 		    }
464 		    /* Check for and enable PMM (modmult engine) */
465 		    if (lfunc & CPUID_VIA_HAS_PMM) {
466 			if (!(lfunc & CPUID_VIA_DO_PMM)) {
467 			    ace_enable++;
468 			    ci->ci_feat_val[4] |= CPUID_VIA_DO_PMM;
469 			}
470 		    }
471 
472 		    /* Actually do the enables. */
473 		    if (rng_enable) {
474 			msr = rdmsr(MSR_VIA_RNG);
475 			wrmsr(MSR_VIA_RNG, msr | MSR_VIA_RNG_ENABLE);
476 		    }
477 		    if (ace_enable) {
478 			msr = rdmsr(MSR_VIA_ACE);
479 			wrmsr(MSR_VIA_ACE, msr | MSR_VIA_ACE_ENABLE);
480 		    }
481 
482 		}
483 	}
484 
485 	/*
486 	 * Determine L1 cache/TLB info.
487 	 */
488 	if (lfunc < 0x80000005) {
489 		/* No L1 cache info available. */
490 		return;
491 	}
492 
493 	x86_cpuid(0x80000005, descs);
494 
495 	cai = &ci->ci_cinfo[CAI_ITLB];
496 	cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
497 	cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
498 	cai->cai_linesize = (4 * 1024);
499 
500 	cai = &ci->ci_cinfo[CAI_DTLB];
501 	cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
502 	cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
503 	cai->cai_linesize = (4 * 1024);
504 
505 	cai = &ci->ci_cinfo[CAI_DCACHE];
506 	cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
507 	cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
508 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[2]);
509 	if (family == 6 && model == 9 && stepping == 8) {
510 		/* Erratum: stepping 8 reports 4 when it should be 2 */
511 		cai->cai_associativity = 2;
512 	}
513 
514 	cai = &ci->ci_cinfo[CAI_ICACHE];
515 	cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
516 	cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
517 	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
518 	if (family == 6 && model == 9 && stepping == 8) {
519 		/* Erratum: stepping 8 reports 4 when it should be 2 */
520 		cai->cai_associativity = 2;
521 	}
522 
523 	/*
524 	 * Determine L2 cache/TLB info.
525 	 */
526 	if (lfunc < 0x80000006) {
527 		/* No L2 cache info available. */
528 		return;
529 	}
530 
531 	x86_cpuid(0x80000006, descs);
532 
533 	cai = &ci->ci_cinfo[CAI_L2CACHE];
534 	if (family > 6 || model >= 9) {
535 		cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
536 		cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
537 		cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
538 	} else {
539 		cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
540 		cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
541 		cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
542 	}
543 }
544 
545 static void
546 cpu_probe_geode(struct cpu_info *ci)
547 {
548 
549 	if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
550 	    CPUID_TO_FAMILY(ci->ci_signature) != 5)
551 	    	return;
552 
553 	cpu_probe_cyrix_cmn(ci);
554 	cpu_probe_amd_cache(ci);
555 }
556 
557 static void
558 cpu_probe_vortex86(struct cpu_info *ci)
559 {
560 #define PCI_MODE1_ADDRESS_REG	0x0cf8
561 #define PCI_MODE1_DATA_REG	0x0cfc
562 #define PCI_MODE1_ENABLE	0x80000000UL
563 
564 	uint32_t reg;
565 
566 	if (cpu_vendor != CPUVENDOR_VORTEX86)
567 		return;
568 	/*
569 	 * CPU model available from "Customer ID register" in
570 	 * North Bridge Function 0 PCI space
571 	 * we can't use pci_conf_read() because the PCI subsystem is not
572 	 * not initialised early enough
573 	 */
574 
575 	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE | 0x90);
576 	reg = inl(PCI_MODE1_DATA_REG);
577 
578 	switch(reg) {
579 	case 0x31504d44:
580 		strcpy(cpu_brand_string, "Vortex86SX");
581 		break;
582 	case 0x32504d44:
583 		strcpy(cpu_brand_string, "Vortex86DX");
584 		break;
585 	case 0x33504d44:
586 		strcpy(cpu_brand_string, "Vortex86MX");
587 		break;
588 	default:
589 		strcpy(cpu_brand_string, "Unknown Vortex86");
590 		break;
591 	}
592 
593 #undef PCI_MODE1_ENABLE
594 #undef PCI_MODE1_ADDRESS_REG
595 #undef PCI_MODE1_DATA_REG
596 }
597 
/*
 * Identify the CPU: vendor, brand string, signature, feature flags and
 * cache/TLB geometry, then apply vendor-specific fixups.  Fills in
 * ci->ci_* fields and the cpu_vendor/cpu_class/cpu_brand_string and
 * (for the boot CPU) cpu_feature[] globals.
 */
void
cpu_probe(struct cpu_info *ci)
{
	const struct x86_cache_info *cai;
	u_int descs[4];
	int iterations, i, j;
	uint8_t desc;
	uint32_t miscbytes;
	uint32_t brand[12];

	/* Defaults for pre-CPUID parts, from the { vendor, class } pairs. */
	cpu_vendor = i386_nocpuid_cpus[cputype << 1];
	cpu_class = i386_nocpuid_cpus[(cputype << 1) + 1];

	if (cpuid_level < 0)
		return;

	for (i = 0; i < __arraycount(ci->ci_feat_val); i++) {
		ci->ci_feat_val[i] = 0;
	}

	/* Leaf 0: max basic leaf and the 12-byte vendor string. */
	x86_cpuid(0, descs);
	cpuid_level = descs[0];
	/* Vendor string is returned in %ebx, %edx, %ecx order. */
	ci->ci_vendor[0] = descs[1];
	ci->ci_vendor[2] = descs[2];
	ci->ci_vendor[1] = descs[3];
	ci->ci_vendor[3] = 0;

	if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
		cpu_vendor = CPUVENDOR_INTEL;
	else if (memcmp(ci->ci_vendor,  "AuthenticAMD", 12) == 0)
		cpu_vendor = CPUVENDOR_AMD;
	else if (memcmp(ci->ci_vendor,  "CyrixInstead", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor,  "Geode by NSC", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
		cpu_vendor = CPUVENDOR_IDT;
	else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
		cpu_vendor = CPUVENDOR_TRANSMETA;
	else if (memcmp(ci->ci_vendor, "Vortex86 SoC", 12) == 0)
		cpu_vendor = CPUVENDOR_VORTEX86;
	else
		cpu_vendor = CPUVENDOR_UNKNOWN;

	/* Brand string from leaves 0x80000002-4, if available. */
	x86_cpuid(0x80000000, brand);
	if (brand[0] >= 0x80000004) {
		x86_cpuid(0x80000002, brand);
		x86_cpuid(0x80000003, brand + 4);
		x86_cpuid(0x80000004, brand + 8);
		/* Skip leading spaces some CPUs pad the string with. */
		for (i = 0; i < 48; i++) {
			if (((char *) brand)[i] != ' ')
				break;
		}
		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
	}

	if (cpuid_level >= 1) {
		/* Leaf 1: signature, misc bytes and feature flags. */
		x86_cpuid(1, descs);
		ci->ci_signature = descs[0];
		miscbytes = descs[1];
		ci->ci_feat_val[1] = descs[2];
		ci->ci_feat_val[0] = descs[3];

		/* Determine family + class. */
		cpu_class = CPUID_TO_FAMILY(ci->ci_signature)
		    + (CPUCLASS_386 - 3);
		if (cpu_class > CPUCLASS_686)
			cpu_class = CPUCLASS_686;

		/* CLFLUSH line size is next 8 bits */
		if (ci->ci_feat_val[0] & CPUID_CFLUSH)
			ci->ci_cflush_lsize = ((miscbytes >> 8) & 0xff) << 3;
		ci->ci_initapicid = (miscbytes >> 24) & 0xff;
	}

	if (cpuid_level >= 2) {
		/* Parse the cache info from `cpuid leaf 2', if we have it. */
		x86_cpuid(2, descs);
		/* Low byte of %eax says how many times to call leaf 2. */
		iterations = descs[0] & 0xff;
		while (iterations-- > 0) {
			for (i = 0; i < 4; i++) {
				/* Bit 31 set means the register is invalid. */
				if (descs[i] & 0x80000000)
					continue;
				for (j = 0; j < 4; j++) {
					/* Byte 0 of %eax is the count, not a descriptor. */
					if (i == 0 && j == 0)
						continue;
					desc = (descs[i] >> (j * 8)) & 0xff;
					if (desc == 0)
						continue;
					cai = cache_info_lookup(
					    intel_cpuid_cache_info, desc);
					if (cai != NULL) {
						ci->ci_cinfo[cai->cai_index] =
						    *cai;
					}
				}
			}
		}
	}

	if (cpuid_level >= 4) {
		int type, level;
		int ways, partitions, linesize, sets;
		int caitype = -1;
		int totalsize;

		/* Parse the cache info from `cpuid leaf 4', if we have it. */
		for (i = 0; ; i++) {
			x86_cpuid2(4, i, descs);
			type = __SHIFTOUT(descs[0], CPUID_DCP_CACHETYPE);
			/* Type "null" terminates the subleaf list. */
			if (type == CPUID_DCP_CACHETYPE_N)
				break;
			level = __SHIFTOUT(descs[0], CPUID_DCP_CACHELEVEL);
			/* Map (level, type) to our CAI_* slot; -1 = skip. */
			switch (level) {
			case 1:
				if (type == CPUID_DCP_CACHETYPE_I)
					caitype = CAI_ICACHE;
				else if (type == CPUID_DCP_CACHETYPE_D)
					caitype = CAI_DCACHE;
				else
					caitype = -1;
				break;
			case 2:
				if (type == CPUID_DCP_CACHETYPE_U)
					caitype = CAI_L2CACHE;
				else
					caitype = -1;
				break;
			case 3:
				if (type == CPUID_DCP_CACHETYPE_U)
					caitype = CAI_L3CACHE;
				else
					caitype = -1;
				break;
			default:
				caitype = -1;
				break;
			}
			if (caitype == -1)
				continue;

			/* All four fields are stored biased by one. */
			ways = __SHIFTOUT(descs[1], CPUID_DCP_WAYS) + 1;
			partitions =__SHIFTOUT(descs[1], CPUID_DCP_PARTITIONS)
			    + 1;
			linesize = __SHIFTOUT(descs[1], CPUID_DCP_LINESIZE)
			    + 1;
			sets = descs[2] + 1;
			totalsize = ways * partitions * linesize * sets;
			ci->ci_cinfo[caitype].cai_totalsize = totalsize;
			ci->ci_cinfo[caitype].cai_associativity = ways;
			ci->ci_cinfo[caitype].cai_linesize = linesize;
		}
	}

	/* Vendor-specific fixups; each one checks that it applies. */
	cpu_probe_k5(ci);
	cpu_probe_k678(ci);
	cpu_probe_cyrix(ci);
	cpu_probe_winchip(ci);
	cpu_probe_c3(ci);
	cpu_probe_geode(ci);
	cpu_probe_vortex86(ci);

	x86_cpu_topology(ci);

	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feat_val[0] & CPUID_TM) &&
	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
		/* Enable thermal monitor 1. */
		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1<<3));
	}

	ci->ci_feat_val[0] &= ~CPUID_FEAT_BLACKLIST;
	if (ci == &cpu_info_primary) {
		/* If first. Boot Processor is the cpu_feature reference. */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			cpu_feature[i] = ci->ci_feat_val[i];
		}
#ifndef XEN
		/* Early patch of text segment. */
		x86_patch(true);
#endif
	} else {
		/*
		 * If not first. Warn about cpu_feature mismatch for
		 * secondary CPUs.
		 */
		for (i = 0; i < __arraycount(cpu_feature); i++) {
			if (cpu_feature[i] != ci->ci_feat_val[i])
				aprint_error_dev(ci->ci_dev,
				    "feature mismatch: cpu_feature[%d] is "
				    "%#x, but CPU reported %#x\n",
				    i, cpu_feature[i], ci->ci_feat_val[i]);
		}
	}
}
792 
/*
 * Print the CPU identification gathered by cpu_probe() and perform
 * late identification-driven setup: SVM-disabled warning (AMD),
 * Transmeta LongRun init and FXSAVE/SSE enablement (i386 only).
 */
void
cpu_identify(struct cpu_info *ci)
{

	snprintf(cpu_model, sizeof(cpu_model), "%s %d86-class",
	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
	if (cpu_brand_string[0] != '\0') {
		aprint_normal(": %s", cpu_brand_string);
	} else {
		/* No brand string: fall back to the synthesized model. */
		aprint_normal(": %s", cpu_model);
		if (ci->ci_data.cpu_cc_freq != 0)
			aprint_normal(", %dMHz",
			    (int)(ci->ci_data.cpu_cc_freq / 1000000));
	}
	if (ci->ci_signature != 0)
		aprint_normal(", id 0x%x", ci->ci_signature);
	aprint_normal("\n");

	/* Keep cpu_brand_string usable even without CPUID brand info. */
	if (cpu_brand_string[0] == '\0') {
		strlcpy(cpu_brand_string, cpu_model, sizeof(cpu_brand_string));
	}
	if (cpu_class == CPUCLASS_386) {
		panic("NetBSD requires an 80486DX or later processor");
	}
	if (cputype == CPU_486DLC) {
		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
	}

	if ((cpu_vendor == CPUVENDOR_AMD) /* check enablement of an */
	  && (device_unit(ci->ci_dev) == 0) /* AMD feature only once */
	  && ((cpu_feature[3] & CPUID_SVM) == CPUID_SVM)
#if defined(XEN) && !defined(DOM0OPS)
	  && (false)  /* on Xen rdmsr is for Dom0 only */
#endif
	  )
	{
		uint64_t val;

		/* SVM present but locked off by firmware: tell the user. */
		val = rdmsr(MSR_VMCR);
		if (((val & VMCR_SVMED) == VMCR_SVMED)
		  && ((val & VMCR_LOCK) == VMCR_LOCK))
		{
			aprint_normal_dev(ci->ci_dev,
				"SVM disabled by the BIOS\n");
		}
	}

#ifdef i386 /* XXX for now */
	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
		u_int descs[4];
		x86_cpuid(0x80860000, descs);
		if (descs[0] >= 0x80860007)
			tmx86_init_longrun();
	}

	/* If we have FXSAVE/FXRESTOR, use them. */
	if (cpu_feature[0] & CPUID_FXSR) {
		i386_use_fxsave = 1;
		/*
		 * If we have SSE/SSE2, enable XMM exceptions, and
		 * notify userland.
		 */
		if (cpu_feature[0] & CPUID_SSE)
			i386_has_sse = 1;
		if (cpu_feature[0] & CPUID_SSE2)
			i386_has_sse2 = 1;
	} else
		i386_use_fxsave = 0;
#endif	/* i386 */
}
863