/*	$NetBSD: identcpu.c,v 1.13 2008/12/19 15:11:55 cegger Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden,  and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2008 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: identcpu.c,v 1.13 2008/12/19 15:11:55 cegger Exp $");

#include "opt_enhanced_speedstep.h"
#include "opt_intel_odcm.h"
#include "opt_intel_coretemp.h"
#include "opt_powernow_k8.h"
#include "opt_xen.h"
#ifdef i386	/* XXX */
#include "opt_powernow_k7.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitops.h>

#include <uvm/uvm_extern.h>

#include <machine/specialreg.h>
#include <machine/pio.h>
#include <machine/cpu.h>

#include <x86/cputypes.h>
#include <x86/cacheinfo.h>
#include <x86/cpuvar.h>
#include <x86/cpu_msr.h>
#include <x86/powernow.h>

static const struct x86_cache_info intel_cpuid_cache_info[] = INTEL_CACHE_INFO;

static const struct x86_cache_info amd_cpuid_l2cache_assoc_info[] =
	AMD_L2CACHE_INFO;

static const struct x86_cache_info amd_cpuid_l3cache_assoc_info[] =
	AMD_L3CACHE_INFO;

int cpu_vendor;
char cpu_brand_string[49];

/*
 * Info for CTL_HW
 */
char	cpu_model[120];

/*
 * Note: these are just the ones that may not have a cpuid instruction.
 * We deal with the rest in a different way.
 */
const int i386_nocpuid_cpus[] = {
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386SX */
	CPUVENDOR_INTEL, CPUCLASS_386,	/* CPU_386   */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486SX */
	CPUVENDOR_INTEL, CPUCLASS_486,	/* CPU_486   */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_486DLC */
	CPUVENDOR_CYRIX, CPUCLASS_486,	/* CPU_6x86 */
	CPUVENDOR_NEXGEN, CPUCLASS_386,	/* CPU_NX586 */
};

static const char cpu_vendor_names[][10] = {
	"Unknown", "Intel", "NS/Cyrix", "NexGen", "AMD", "IDT/VIA", "Transmeta"
};

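/*
 * Look up a one-byte cache descriptor in a table terminated by an
 * entry with cai_desc == 0; returns NULL if the descriptor is unknown.
 */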
static const struct x86_cache_info *
cache_info_lookup(const struct x86_cache_info *cai, uint8_t desc)
{
	int i;

	for (i = 0; cai[i].cai_desc != 0; i++) {
		if (cai[i].cai_desc == desc)
			return (&cai[i]);
	}

	return (NULL);
}

static void
cpu_probe_p6(struct cpu_info *ci)
{
	u_int lp_max = 1;	/* logical processors per package */
	u_int smt_max;		/* smt per core */
	u_int core_max = 1;	/* core per package */
	int smt_bits, core_bits;
	uint32_t descs[4];

	if (cpu_vendor != CPUVENDOR_INTEL ||
	    CPUID2FAMILY(ci->ci_signature) < 6)
		return;

	/* Determine the extended feature flags. */
	x86_cpuid(0x80000000, descs);
	if (descs[0] >= 0x80000001) {
		x86_cpuid(0x80000001, descs);
		ci->ci_feature3_flags |= descs[3]; /* %edx */
	}

	/* Determine topology. 253668.pdf 7.10.2. */
	ci->ci_packageid = ci->ci_initapicid;
	ci->ci_coreid = 0;
	ci->ci_smtid = 0;
	if ((ci->ci_feature_flags & CPUID_HTT) != 0) {
		x86_cpuid(1, descs);
		lp_max = (descs[1] >> 16) & 0xff;
	}
	x86_cpuid(0, descs);
	if (descs[0] >= 4) {
		x86_cpuid2(4, 0, descs);
		core_max = (descs[0] >> 26) + 1;
	}
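	/*
	 * The initial APIC ID decomposes as [package][core][smt]: each
	 * field is wide enough for its maximum count rounded up to a
	 * power of two, i.e. ceil(log2(n)) == ilog2(n - 1) + 1 bits,
	 * which is 0 when n == 1 since ilog2(0) evaluates to -1.
	 */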
	KASSERT(lp_max >= core_max);
	smt_max = lp_max / core_max;
	smt_bits = ilog2(smt_max - 1) + 1;
	core_bits = ilog2(core_max - 1) + 1;
	if (smt_bits + core_bits) {
		ci->ci_packageid = ci->ci_initapicid >> (smt_bits + core_bits);
	}
	if (core_bits) {
		u_int core_mask = __BITS(smt_bits, smt_bits + core_bits - 1);
		ci->ci_coreid = __SHIFTOUT(ci->ci_initapicid, core_mask);
	}
	if (smt_bits) {
		u_int smt_mask = __BITS(0, smt_bits - 1);
		ci->ci_smtid = __SHIFTOUT(ci->ci_initapicid, smt_mask);
	}
}

static void
cpu_probe_amd_cache(struct cpu_info *ci)
{
	const struct x86_cache_info *cp;
	struct x86_cache_info *cai;
	int family, model;
	u_int descs[4];
	u_int lfunc;

	family = CPUID2FAMILY(ci->ci_signature);
	model = CPUID2MODEL(ci->ci_signature);

	/*
	 * K5 model 0 has none of this info.
	 */
	if (family == 5 && model == 0)
		return;

	/*
	 * Get extended values for K8 and up.
	 */
	if (family == 0xf) {
		family += CPUID2EXTFAMILY(ci->ci_signature);
		model += CPUID2EXTMODEL(ci->ci_signature);
	}

	/*
	 * Determine the largest extended function value.
	 */
	x86_cpuid(0x80000000, descs);
	lfunc = descs[0];

	/*
	 * Determine L1 cache/TLB info.
	 */
	if (lfunc < 0x80000005) {
		/* No L1 cache info available. */
		return;
	}

	x86_cpuid(0x80000005, descs);

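	/*
	 * Leaf 0x80000005 layout (AMD): %eax describes the 2MB/4MB
	 * TLBs, %ebx the 4KB TLBs, %ecx the L1 data cache and %edx
	 * the L1 instruction cache.
	 */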
	/*
	 * K6-III and higher have large page TLBs.
	 */
	if ((family == 5 && model >= 9) || family >= 6) {
		cai = &ci->ci_cinfo[CAI_ITLB2];
		cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
		cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
		cai->cai_linesize = (4 * 1024 * 1024);

		cai = &ci->ci_cinfo[CAI_DTLB2];
		cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
		cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
		cai->cai_linesize = (4 * 1024 * 1024);
	}

	cai = &ci->ci_cinfo[CAI_ITLB];
	cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DTLB];
	cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DCACHE];
	cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
	cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
	cai->cai_linesize = AMD_L1_ECX_DC_LS(descs[2]);

	cai = &ci->ci_cinfo[CAI_ICACHE];
	cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
	cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
	cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);

	/*
	 * Determine L2 cache/TLB info.
	 */
	if (lfunc < 0x80000006) {
		/* No L2 cache info available. */
		return;
	}

	x86_cpuid(0x80000006, descs);

	cai = &ci->ci_cinfo[CAI_L2CACHE];
	cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
	cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
	cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);

	cp = cache_info_lookup(amd_cpuid_l2cache_assoc_info,
	    cai->cai_associativity);
	if (cp != NULL)
		cai->cai_associativity = cp->cai_associativity;
	else
		cai->cai_associativity = 0;	/* XXX Unknown/reserved */

	if (family < 0xf) {
		/* No L3 cache info available. */
		return;
	}

	cai = &ci->ci_cinfo[CAI_L3CACHE];
	cai->cai_totalsize = AMD_L3_EDX_C_SIZE(descs[3]);
	cai->cai_associativity = AMD_L3_EDX_C_ASSOC(descs[3]);
	cai->cai_linesize = AMD_L3_EDX_C_LS(descs[3]);

	cp = cache_info_lookup(amd_cpuid_l3cache_assoc_info,
	    cai->cai_associativity);
	if (cp != NULL)
		cai->cai_associativity = cp->cai_associativity;
	else
		cai->cai_associativity = 0;	/* XXX Unknown/reserved */

	if (lfunc < 0x80000019) {
		/* No 1GB Page TLB */
		return;
	}

	x86_cpuid(0x80000019, descs);

	cai = &ci->ci_cinfo[CAI_L1_1GBDTLB];
	cai->cai_totalsize = AMD_L1_1GB_EAX_DTLB_ENTRIES(descs[0]);
	cai->cai_associativity = AMD_L1_1GB_EAX_DTLB_ASSOC(descs[0]);
	cai->cai_linesize = (1024 * 1024 * 1024);

	cai = &ci->ci_cinfo[CAI_L1_1GBITLB];
	cai->cai_totalsize = AMD_L1_1GB_EAX_IUTLB_ENTRIES(descs[0]);
	cai->cai_associativity = AMD_L1_1GB_EAX_IUTLB_ASSOC(descs[0]);
	cai->cai_linesize = (1024 * 1024 * 1024);

	cai = &ci->ci_cinfo[CAI_L2_1GBDTLB];
	cai->cai_totalsize = AMD_L2_1GB_EBX_DUTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L2_1GB_EBX_DUTLB_ASSOC(descs[1]);
	cai->cai_linesize = (1024 * 1024 * 1024);

	cai = &ci->ci_cinfo[CAI_L2_1GBITLB];
	cai->cai_totalsize = AMD_L2_1GB_EBX_IUTLB_ENTRIES(descs[1]);
	cai->cai_associativity = AMD_L2_1GB_EBX_IUTLB_ASSOC(descs[1]);
	cai->cai_linesize = (1024 * 1024 * 1024);
}

static void
cpu_probe_k5(struct cpu_info *ci)
{
	int flag;

	if (cpu_vendor != CPUVENDOR_AMD ||
	    CPUID2FAMILY(ci->ci_signature) != 5)
		return;

	if (CPUID2MODEL(ci->ci_signature) == 0) {
		/*
		 * According to the AMD Processor Recognition App Note,
		 * the AMD-K5 Model 0 uses the wrong bit to indicate
		 * support for global PTEs, instead using bit 9 (APIC)
		 * rather than bit 13 (i.e. "0x200" vs. "0x2000".  Oops!).
		 */
		flag = ci->ci_feature_flags;
		if ((flag & CPUID_APIC) != 0)
			flag = (flag & ~CPUID_APIC) | CPUID_PGE;
		ci->ci_feature_flags = flag;
	}

	cpu_probe_amd_cache(ci);
}

static void
cpu_probe_k678(struct cpu_info *ci)
{
	uint32_t descs[4];

	if (cpu_vendor != CPUVENDOR_AMD ||
	    CPUID2FAMILY(ci->ci_signature) < 6)
		return;

	/* Determine the extended feature flags. */
	x86_cpuid(0x80000000, descs);
	if (descs[0] >= 0x80000001) {
		x86_cpuid(0x80000001, descs);
		ci->ci_feature3_flags |= descs[3]; /* %edx */
		ci->ci_feature4_flags = descs[2];  /* %ecx */
	}

	cpu_probe_amd_cache(ci);
}

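/*
 * Cyrix configuration control registers (CCRs) are reached through
 * the index/data port pair 0x22/0x23; the index must be written
 * before every data-port access.
 */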
static inline uint8_t
cyrix_read_reg(uint8_t reg)
{

	outb(0x22, reg);
	return inb(0x23);
}

static inline void
cyrix_write_reg(uint8_t reg, uint8_t data)
{

	outb(0x22, reg);
	outb(0x23, data);
}

static void
cpu_probe_cyrix_cmn(struct cpu_info *ci)
{
	/*
	 * i8254 latch check routine:
	 *     National Geode (formerly Cyrix MediaGX) has a serious bug in
	 *     its built-in i8254-compatible clock module (cs5510 cs5520).
	 *     Set the variable 'clock_broken_latch' to indicate it.
	 *
	 * This bug is not present in the cs5530, and the flag
	 * is disabled again in sys/arch/i386/pci/pcib.c if this later
	 * model device is detected. Ideally, this work-around should not
	 * even be in here, it should be in there. XXX
	 */
	uint8_t c3;
#ifndef XEN
	extern int clock_broken_latch;

	switch (ci->ci_signature) {
	case 0x440:     /* Cyrix MediaGX */
	case 0x540:     /* GXm */
		clock_broken_latch = 1;
		break;
	}
#endif

	/* set up various cyrix registers */
	/*
	 * Enable suspend on halt (powersave mode).
	 * When powersave mode is enabled, the TSC stops counting
	 * while the CPU is halted in idle() waiting for an interrupt.
	 * This means we can't use the TSC for interval time in
	 * microtime(9), and thus it is disabled here.
	 *
	 * It still makes a perfectly good cycle counter
	 * for program profiling, so long as you remember you're
	 * counting cycles, and not time. Further, if you don't
	 * mind not using powersave mode, the TSC works just fine,
	 * so this should really be optional. XXX
	 */
	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);

	/*
	 * Do not disable the TSC on the Geode GX, it's reported to
	 * work fine.
	 */
	if (ci->ci_signature != 0x552)
		ci->ci_feature_flags &= ~CPUID_TSC;

	/* enable access to ccr4/ccr5 */
	c3 = cyrix_read_reg(0xC3);
	cyrix_write_reg(0xC3, c3 | 0x10);
	/* cyrix's workaround for the "coma bug" */
	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
	/* disable access to ccr4/ccr5 */
	cyrix_write_reg(0xC3, c3);
}

static void
cpu_probe_cyrix(struct cpu_info *ci)
{

	if (cpu_vendor != CPUVENDOR_CYRIX ||
	    CPUID2FAMILY(ci->ci_signature) < 4 ||
	    CPUID2FAMILY(ci->ci_signature) > 6)
		return;

	cpu_probe_cyrix_cmn(ci);
}

static void
cpu_probe_winchip(struct cpu_info *ci)
{

	if (cpu_vendor != CPUVENDOR_IDT ||
	    CPUID2FAMILY(ci->ci_signature) != 5)
		return;

	if (CPUID2MODEL(ci->ci_signature) == 4) {
		/* WinChip C6 */
		ci->ci_feature_flags &= ~CPUID_TSC;
	}
}

static void
cpu_probe_c3(struct cpu_info *ci)
{
	u_int family, model, stepping, descs[4], lfunc;
	uint64_t msr;
	struct x86_cache_info *cai;

	if (cpu_vendor != CPUVENDOR_IDT ||
	    CPUID2FAMILY(ci->ci_signature) < 6)
		return;

	family = CPUID2FAMILY(ci->ci_signature);
	model = CPUID2MODEL(ci->ci_signature);
	stepping = CPUID2STEPPING(ci->ci_signature);

	/* Determine the largest extended function value. */
	x86_cpuid(0x80000000, descs);
	lfunc = descs[0];

	/* Determine the extended feature flags. */
	if (lfunc >= 0x80000001) {
		x86_cpuid(0x80000001, descs);
		ci->ci_feature_flags |= descs[3];
	}

	if (model >= 0x9) {
		/* Nehemiah or Esther */
		x86_cpuid(0xc0000000, descs);
		lfunc = descs[0];
		if (lfunc >= 0xc0000001) {	/* has ACE, RNG */
			x86_cpuid(0xc0000001, descs);
			lfunc = descs[3];
			if (model > 0x9 || stepping >= 8) {	/* ACE */
				if (lfunc & CPUID_VIA_HAS_ACE) {
					ci->ci_padlock_flags = lfunc;
					if ((lfunc & CPUID_VIA_DO_ACE) == 0) {
						msr = rdmsr(MSR_VIA_ACE);
						wrmsr(MSR_VIA_ACE, msr |
						    MSR_VIA_ACE_ENABLE);
						ci->ci_padlock_flags |=
						    CPUID_VIA_DO_ACE;
					}
				}
			}
		}
	}

	/*
	 * Determine L1 cache/TLB info.
	 */
	if (lfunc < 0x80000005) {
		/* No L1 cache info available. */
		return;
	}

	x86_cpuid(0x80000005, descs);

	cai = &ci->ci_cinfo[CAI_ITLB];
	cai->cai_totalsize = VIA_L1_EBX_ITLB_ENTRIES(descs[1]);
	cai->cai_associativity = VIA_L1_EBX_ITLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DTLB];
	cai->cai_totalsize = VIA_L1_EBX_DTLB_ENTRIES(descs[1]);
	cai->cai_associativity = VIA_L1_EBX_DTLB_ASSOC(descs[1]);
	cai->cai_linesize = (4 * 1024);

	cai = &ci->ci_cinfo[CAI_DCACHE];
	cai->cai_totalsize = VIA_L1_ECX_DC_SIZE(descs[2]);
	cai->cai_associativity = VIA_L1_ECX_DC_ASSOC(descs[2]);
	cai->cai_linesize = VIA_L1_ECX_DC_LS(descs[2]);
	if (model == 9 && stepping == 8) {
		/* Erratum: stepping 8 reports 4 when it should be 2 */
		cai->cai_associativity = 2;
	}

	cai = &ci->ci_cinfo[CAI_ICACHE];
	cai->cai_totalsize = VIA_L1_EDX_IC_SIZE(descs[3]);
	cai->cai_associativity = VIA_L1_EDX_IC_ASSOC(descs[3]);
	cai->cai_linesize = VIA_L1_EDX_IC_LS(descs[3]);
	if (model == 9 && stepping == 8) {
		/* Erratum: stepping 8 reports 4 when it should be 2 */
		cai->cai_associativity = 2;
	}

	/*
	 * Determine L2 cache/TLB info.
	 */
	if (lfunc < 0x80000006) {
		/* No L2 cache info available. */
		return;
	}

	x86_cpuid(0x80000006, descs);

	cai = &ci->ci_cinfo[CAI_L2CACHE];
	if (model >= 9) {
		cai->cai_totalsize = VIA_L2N_ECX_C_SIZE(descs[2]);
		cai->cai_associativity = VIA_L2N_ECX_C_ASSOC(descs[2]);
		cai->cai_linesize = VIA_L2N_ECX_C_LS(descs[2]);
	} else {
		cai->cai_totalsize = VIA_L2_ECX_C_SIZE(descs[2]);
		cai->cai_associativity = VIA_L2_ECX_C_ASSOC(descs[2]);
		cai->cai_linesize = VIA_L2_ECX_C_LS(descs[2]);
	}
}

static void
cpu_probe_geode(struct cpu_info *ci)
{

	if (memcmp("Geode by NSC", ci->ci_vendor, 12) != 0 ||
	    CPUID2FAMILY(ci->ci_signature) != 5)
		return;

	cpu_probe_cyrix_cmn(ci);
	cpu_probe_amd_cache(ci);
}

void
cpu_probe(struct cpu_info *ci)
{
	const struct x86_cache_info *cai;
	u_int descs[4];
	int iterations, i, j;
	uint8_t desc;
	uint32_t miscbytes;
	uint32_t brand[12];

	cpu_vendor = i386_nocpuid_cpus[cpu << 1];
	cpu_class = i386_nocpuid_cpus[(cpu << 1) + 1];

	if (cpuid_level < 0)
		return;

	x86_cpuid(0, descs);
	cpuid_level = descs[0];
	ci->ci_vendor[0] = descs[1];
	ci->ci_vendor[2] = descs[2];
	ci->ci_vendor[1] = descs[3];
	ci->ci_vendor[3] = 0;

	if (memcmp(ci->ci_vendor, "GenuineIntel", 12) == 0)
		cpu_vendor = CPUVENDOR_INTEL;
	else if (memcmp(ci->ci_vendor, "AuthenticAMD", 12) == 0)
		cpu_vendor = CPUVENDOR_AMD;
	else if (memcmp(ci->ci_vendor, "CyrixInstead", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor, "Geode by NSC", 12) == 0)
		cpu_vendor = CPUVENDOR_CYRIX;
	else if (memcmp(ci->ci_vendor, "CentaurHauls", 12) == 0)
		cpu_vendor = CPUVENDOR_IDT;
	else if (memcmp(ci->ci_vendor, "GenuineTMx86", 12) == 0)
		cpu_vendor = CPUVENDOR_TRANSMETA;
	else
		cpu_vendor = CPUVENDOR_UNKNOWN;

	x86_cpuid(0x80000000, brand);
	if (brand[0] >= 0x80000004) {
		x86_cpuid(0x80000002, brand);
		x86_cpuid(0x80000003, brand + 4);
		x86_cpuid(0x80000004, brand + 8);
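		/*
		 * The 48-byte brand string can be returned
		 * right-justified with leading blanks; strip them.
		 */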
		for (i = 0; i < 48; i++) {
			if (((char *) brand)[i] != ' ')
				break;
		}
		memcpy(cpu_brand_string, ((char *) brand) + i, 48 - i);
	}

	if (cpuid_level >= 1) {
		x86_cpuid(1, descs);
		ci->ci_signature = descs[0];
		miscbytes = descs[1];
		ci->ci_feature2_flags = descs[2];
		ci->ci_feature_flags = descs[3];

		/* Determine family + class. */
		cpu_class = CPUID2FAMILY(ci->ci_signature) + (CPUCLASS_386 - 3);
		if (cpu_class > CPUCLASS_686)
			cpu_class = CPUCLASS_686;

		/* CLFLUSH line size is next 8 bits */
		if (ci->ci_feature_flags & CPUID_CFLUSH)
			ci->ci_cflush_lsize = ((miscbytes >> 8) & 0xff) << 3;
		ci->ci_initapicid = (miscbytes >> 24) & 0xff;
	}

	if (cpuid_level >= 2) {
		/* Parse the cache info from `cpuid', if we have it. */
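		/*
		 * Leaf 2 packs one-byte descriptors into %eax-%edx.
		 * The low byte of %eax is the number of times to
		 * repeat the instruction, a register with bit 31 set
		 * carries no descriptors, and a zero byte means "no
		 * information".
		 */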
		x86_cpuid(2, descs);
		iterations = descs[0] & 0xff;
		while (iterations-- > 0) {
			for (i = 0; i < 4; i++) {
				if (descs[i] & 0x80000000)
					continue;
				for (j = 0; j < 4; j++) {
					if (i == 0 && j == 0)
						continue;
					desc = (descs[i] >> (j * 8)) & 0xff;
					if (desc == 0)
						continue;
					cai = cache_info_lookup(
					    intel_cpuid_cache_info, desc);
					if (cai != NULL) {
						ci->ci_cinfo[cai->cai_index] =
						    *cai;
					}
				}
			}
		}
	}

	cpu_probe_p6(ci);
	cpu_probe_k5(ci);
	cpu_probe_k678(ci);
	cpu_probe_cyrix(ci);
	cpu_probe_winchip(ci);
	cpu_probe_c3(ci);
	cpu_probe_geode(ci);

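	/*
	 * Bit 3 of MSR_MISC_ENABLE is the automatic thermal control
	 * circuit (TM1) enable.
	 */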
	if (cpu_vendor != CPUVENDOR_AMD && (ci->ci_feature_flags & CPUID_TM) &&
	    (rdmsr(MSR_MISC_ENABLE) & (1 << 3)) == 0) {
		/* Enable thermal monitor 1. */
		wrmsr(MSR_MISC_ENABLE, rdmsr(MSR_MISC_ENABLE) | (1 << 3));
	}

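	/*
	 * cpu_feature and cpu_feature2 accumulate the intersection of
	 * the flags of all CPUs probed so far, so only features common
	 * to every CPU are advertised.
	 */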
	if ((cpu_feature | cpu_feature2) == 0) {
		/* If first. */
		cpu_feature = ci->ci_feature_flags;
		cpu_feature2 = ci->ci_feature2_flags;
		/* Early patch of text segment. */
#ifndef XEN
		x86_patch(true);
#endif
	} else {
		/* If not first. */
		cpu_feature &= ci->ci_feature_flags;
		cpu_feature2 &= ci->ci_feature2_flags;
	}
}

void
cpu_identify(struct cpu_info *ci)
{

	snprintf(cpu_model, sizeof(cpu_model), "%s %d86-class",
	    cpu_vendor_names[cpu_vendor], cpu_class + 3);
	aprint_normal(": %s", cpu_model);
	if (ci->ci_data.cpu_cc_freq != 0)
		aprint_normal(", %dMHz", (int)(ci->ci_data.cpu_cc_freq / 1000000));
	if (ci->ci_signature != 0)
		aprint_normal(", id 0x%x", ci->ci_signature);
	aprint_normal("\n");

	if (cpu_brand_string[0] == '\0') {
		strlcpy(cpu_brand_string, cpu_model, sizeof(cpu_brand_string));
	}
	if (cpu_class == CPUCLASS_386) {
		panic("NetBSD requires an 80486DX or later processor");
	}
	if (cpu == CPU_486DLC) {
		aprint_error("WARNING: BUGGY CYRIX CACHE\n");
	}

	if ((cpu_vendor == CPUVENDOR_AMD) /* check enablement of an */
	  && (device_unit(ci->ci_dev) == 0) /* AMD feature only once */
	  && ((ci->ci_feature4_flags & CPUID_SVM) == CPUID_SVM)
#if defined(XEN) && !defined(DOM0OPS)
	  && (false)  /* on Xen rdmsr is for Dom0 only */
#endif
	  )
	{
		uint64_t val;

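		/*
		 * VM_CR.SVMDIS set together with the lock bit means the
		 * BIOS disabled SVM and the setting cannot be undone
		 * here.
		 */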
		val = rdmsr(MSR_VMCR);
		if (((val & VMCR_SVMED) == VMCR_SVMED)
		  && ((val & VMCR_LOCK) == VMCR_LOCK))
		{
			aprint_normal_dev(ci->ci_dev,
				"SVM disabled by the BIOS\n");
		}
	}

#ifdef i386 /* XXX for now */
	if (cpu_vendor == CPUVENDOR_TRANSMETA) {
		u_int descs[4];
		x86_cpuid(0x80860000, descs);
		if (descs[0] >= 0x80860007)
			tmx86_init_longrun();
	}

	/* If we have FXSAVE/FXRESTOR, use them. */
	if (cpu_feature & CPUID_FXSR) {
		i386_use_fxsave = 1;
		/*
		 * If we have SSE/SSE2, enable XMM exceptions, and
		 * notify userland.
		 */
		if (cpu_feature & CPUID_SSE)
			i386_has_sse = 1;
		if (cpu_feature & CPUID_SSE2)
			i386_has_sse2 = 1;
	} else
		i386_use_fxsave = 0;
#endif	/* i386 */

#ifdef ENHANCED_SPEEDSTEP
	if (cpu_feature2 & CPUID2_EST) {
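		/* Bit 16 of MSR_MISC_ENABLE: EST enabled by firmware. */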
		if (rdmsr(MSR_MISC_ENABLE) & (1 << 16))
			est_init(cpu_vendor);
	}
#endif /* ENHANCED_SPEEDSTEP */

#ifdef INTEL_CORETEMP
	if (cpu_vendor == CPUVENDOR_INTEL && cpuid_level >= 0x06)
		coretemp_register(ci);
#endif

#if defined(POWERNOW_K7) || defined(POWERNOW_K8)
	if (cpu_vendor == CPUVENDOR_AMD && powernow_probe(ci)) {
		switch (CPUID2FAMILY(ci->ci_signature)) {
#ifdef POWERNOW_K7
		case 6:
			k7_powernow_init();
			break;
#endif
#ifdef POWERNOW_K8
		case 15:
			k8_powernow_init();
			break;
#endif
		default:
			break;
		}
	}
#endif /* POWERNOW_K7 || POWERNOW_K8 */

#ifdef INTEL_ONDEMAND_CLOCKMOD
	if (cpuid_level >= 1) {
		clockmod_init();
	}
#endif
}
817