xref: /onnv-gate/usr/src/uts/i86pc/os/cpuid.c (revision 13134:8315ff49e22e)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51582Skchow  * Common Development and Distribution License (the "License").
61582Skchow  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
2212090SFrank.Vanderlinden@Sun.COM  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
230Sstevel@tonic-gate  */
249283SBill.Holler@Sun.COM /*
2513029SKrishnendu.Sadhukhan@Sun.COM  * Copyright (c) 2010, Intel Corporation.
269283SBill.Holler@Sun.COM  * All rights reserved.
279283SBill.Holler@Sun.COM  */
2810947SSrihari.Venkatesan@Sun.COM /*
2910947SSrihari.Venkatesan@Sun.COM  * Portions Copyright 2009 Advanced Micro Devices, Inc.
3010947SSrihari.Venkatesan@Sun.COM  */
310Sstevel@tonic-gate 
320Sstevel@tonic-gate /*
330Sstevel@tonic-gate  * Various routines to handle identification
340Sstevel@tonic-gate  * and classification of x86 processors.
350Sstevel@tonic-gate  */
360Sstevel@tonic-gate 
370Sstevel@tonic-gate #include <sys/types.h>
380Sstevel@tonic-gate #include <sys/archsystm.h>
390Sstevel@tonic-gate #include <sys/x86_archext.h>
400Sstevel@tonic-gate #include <sys/kmem.h>
410Sstevel@tonic-gate #include <sys/systm.h>
420Sstevel@tonic-gate #include <sys/cmn_err.h>
430Sstevel@tonic-gate #include <sys/sunddi.h>
440Sstevel@tonic-gate #include <sys/sunndi.h>
450Sstevel@tonic-gate #include <sys/cpuvar.h>
460Sstevel@tonic-gate #include <sys/processor.h>
475045Sbholler #include <sys/sysmacros.h>
483434Sesaxe #include <sys/pg.h>
490Sstevel@tonic-gate #include <sys/fp.h>
500Sstevel@tonic-gate #include <sys/controlregs.h>
510Sstevel@tonic-gate #include <sys/auxv_386.h>
520Sstevel@tonic-gate #include <sys/bitmap.h>
530Sstevel@tonic-gate #include <sys/memnode.h>
5410947SSrihari.Venkatesan@Sun.COM #include <sys/pci_cfgspace.h>
550Sstevel@tonic-gate 
567532SSean.Ye@Sun.COM #ifdef __xpv
577532SSean.Ye@Sun.COM #include <sys/hypervisor.h>
588930SBill.Holler@Sun.COM #else
598930SBill.Holler@Sun.COM #include <sys/ontrap.h>
607532SSean.Ye@Sun.COM #endif
617532SSean.Ye@Sun.COM 
620Sstevel@tonic-gate /*
630Sstevel@tonic-gate  * Pass 0 of cpuid feature analysis happens in locore. It contains special code
640Sstevel@tonic-gate  * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
650Sstevel@tonic-gate  * them accordingly. For most modern processors, feature detection occurs here
660Sstevel@tonic-gate  * in pass 1.
670Sstevel@tonic-gate  *
680Sstevel@tonic-gate  * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
690Sstevel@tonic-gate  * for the boot CPU and does the basic analysis that the early kernel needs.
7012826Skuriakose.kuruvilla@oracle.com  * x86_featureset is set based on the return value of cpuid_pass1() of the boot
710Sstevel@tonic-gate  * CPU.
720Sstevel@tonic-gate  *
730Sstevel@tonic-gate  * Pass 1 includes:
740Sstevel@tonic-gate  *
750Sstevel@tonic-gate  *	o Determining vendor/model/family/stepping and setting x86_type and
760Sstevel@tonic-gate  *	  x86_vendor accordingly.
770Sstevel@tonic-gate  *	o Processing the feature flags returned by the cpuid instruction while
780Sstevel@tonic-gate  *	  applying any workarounds or tricks for the specific processor.
790Sstevel@tonic-gate  *	o Mapping the feature flags into Solaris feature bits (X86_*).
800Sstevel@tonic-gate  *	o Processing extended feature flags if supported by the processor,
810Sstevel@tonic-gate  *	  again while applying specific processor knowledge.
820Sstevel@tonic-gate  *	o Determining the CMT characteristics of the system.
830Sstevel@tonic-gate  *
840Sstevel@tonic-gate  * Pass 1 is done on non-boot CPUs during their initialization and the results
850Sstevel@tonic-gate  * are used only as a meager attempt at ensuring that all processors within the
860Sstevel@tonic-gate  * system support the same features.
870Sstevel@tonic-gate  *
880Sstevel@tonic-gate  * Pass 2 of cpuid feature analysis happens just at the beginning
890Sstevel@tonic-gate  * of startup().  It just copies in and corrects the remainder
900Sstevel@tonic-gate  * of the cpuid data we depend on: standard cpuid functions that we didn't
910Sstevel@tonic-gate  * need for pass1 feature analysis, and extended cpuid functions beyond the
920Sstevel@tonic-gate  * simple feature processing done in pass1.
930Sstevel@tonic-gate  *
940Sstevel@tonic-gate  * Pass 3 of cpuid analysis is invoked after basic kernel services; in
950Sstevel@tonic-gate  * particular kernel memory allocation has been made available. It creates a
960Sstevel@tonic-gate  * readable brand string based on the data collected in the first two passes.
970Sstevel@tonic-gate  *
980Sstevel@tonic-gate  * Pass 4 of cpuid analysis is invoked after post_startup() when all
990Sstevel@tonic-gate  * the support infrastructure for various hardware features has been
1000Sstevel@tonic-gate  * initialized. It determines which processor features will be reported
1010Sstevel@tonic-gate  * to userland via the aux vector.
1020Sstevel@tonic-gate  *
1030Sstevel@tonic-gate  * All passes are executed on all CPUs, but only the boot CPU determines what
1040Sstevel@tonic-gate  * features the kernel will use.
1050Sstevel@tonic-gate  *
1060Sstevel@tonic-gate  * Much of the worst junk in this file is for the support of processors
1070Sstevel@tonic-gate  * that didn't really implement the cpuid instruction properly.
1080Sstevel@tonic-gate  *
1090Sstevel@tonic-gate  * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
1100Sstevel@tonic-gate  * the pass numbers.  Accordingly, changes to the pass code may require changes
1110Sstevel@tonic-gate  * to the accessor code.
1120Sstevel@tonic-gate  */
1130Sstevel@tonic-gate 
1140Sstevel@tonic-gate uint_t x86_vendor = X86_VENDOR_IntelClone;
1150Sstevel@tonic-gate uint_t x86_type = X86_TYPE_OTHER;
1167589SVikram.Hegde@Sun.COM uint_t x86_clflush_size = 0;
1170Sstevel@tonic-gate 
1180Sstevel@tonic-gate uint_t pentiumpro_bug4046376;
1190Sstevel@tonic-gate uint_t pentiumpro_bug4064495;
1200Sstevel@tonic-gate 
121*13134Skuriakose.kuruvilla@oracle.com #define	NUM_X86_FEATURES	35
12212826Skuriakose.kuruvilla@oracle.com void    *x86_featureset;
12312826Skuriakose.kuruvilla@oracle.com ulong_t x86_featureset0[BT_SIZEOFMAP(NUM_X86_FEATURES)];
12412826Skuriakose.kuruvilla@oracle.com 
/*
 * Human-readable names for the x86 feature bits, consumed by
 * print_x86_featureset().
 *
 * NOTE(review): the index of each entry appears to correspond 1:1 with
 * the feature's bit number in the featureset bitmap (0 ..
 * NUM_X86_FEATURES-1), so entries must NOT be reordered and new names
 * must be appended in bit order — confirm against the X86FSET_*
 * definitions in x86_archext.h.
 */
char *x86_feature_names[NUM_X86_FEATURES] = {
	"lgpg",
	"tsc",
	"msr",
	"mtrr",
	"pge",
	"de",
	"cmov",
	"mmx",
	"mca",
	"pae",
	"cv8",
	"pat",
	"sep",
	"sse",
	"sse2",
	"htt",
	"asysc",
	"nx",
	"sse3",
	"cx16",
	"cmp",
	"tscp",
	"mwait",
	"sse4a",
	"cpuid",
	"ssse3",
	"sse4_1",
	"sse4_2",
	"1gpg",
	"clfsh",
	"64",
	"aes",
	"pclmulqdq",
	"xsave",
	"avx" };
16112826Skuriakose.kuruvilla@oracle.com 
/*
 * Allocate a zero-filled feature bitmap big enough to hold
 * NUM_X86_FEATURES bits.  May sleep (KM_SLEEP).  The caller releases
 * it with free_x86_featureset().
 */
static void *
init_x86_featureset(void)
{
	return (kmem_zalloc(BT_SIZEOFMAP(NUM_X86_FEATURES), KM_SLEEP));
}
16712826Skuriakose.kuruvilla@oracle.com 
/*
 * Release a feature bitmap previously obtained from
 * init_x86_featureset().
 */
void
free_x86_featureset(void *featureset)
{
	kmem_free(featureset, BT_SIZEOFMAP(NUM_X86_FEATURES));
}
17312826Skuriakose.kuruvilla@oracle.com 
17412826Skuriakose.kuruvilla@oracle.com boolean_t
17512826Skuriakose.kuruvilla@oracle.com is_x86_feature(void *featureset, uint_t feature)
17612826Skuriakose.kuruvilla@oracle.com {
17712826Skuriakose.kuruvilla@oracle.com 	ASSERT(feature < NUM_X86_FEATURES);
17812826Skuriakose.kuruvilla@oracle.com 	return (BT_TEST((ulong_t *)featureset, feature));
17912826Skuriakose.kuruvilla@oracle.com }
18012826Skuriakose.kuruvilla@oracle.com 
18112826Skuriakose.kuruvilla@oracle.com void
18212826Skuriakose.kuruvilla@oracle.com add_x86_feature(void *featureset, uint_t feature)
18312826Skuriakose.kuruvilla@oracle.com {
18412826Skuriakose.kuruvilla@oracle.com 	ASSERT(feature < NUM_X86_FEATURES);
18512826Skuriakose.kuruvilla@oracle.com 	BT_SET((ulong_t *)featureset, feature);
18612826Skuriakose.kuruvilla@oracle.com }
18712826Skuriakose.kuruvilla@oracle.com 
18812826Skuriakose.kuruvilla@oracle.com void
18912826Skuriakose.kuruvilla@oracle.com remove_x86_feature(void *featureset, uint_t feature)
19012826Skuriakose.kuruvilla@oracle.com {
19112826Skuriakose.kuruvilla@oracle.com 	ASSERT(feature < NUM_X86_FEATURES);
19212826Skuriakose.kuruvilla@oracle.com 	BT_CLEAR((ulong_t *)featureset, feature);
19312826Skuriakose.kuruvilla@oracle.com }
19412826Skuriakose.kuruvilla@oracle.com 
19512826Skuriakose.kuruvilla@oracle.com boolean_t
19612826Skuriakose.kuruvilla@oracle.com compare_x86_featureset(void *setA, void *setB)
19712826Skuriakose.kuruvilla@oracle.com {
19812826Skuriakose.kuruvilla@oracle.com 	/*
19912826Skuriakose.kuruvilla@oracle.com 	 * We assume that the unused bits of the bitmap are always zero.
20012826Skuriakose.kuruvilla@oracle.com 	 */
20112826Skuriakose.kuruvilla@oracle.com 	if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
20212826Skuriakose.kuruvilla@oracle.com 		return (B_TRUE);
20312826Skuriakose.kuruvilla@oracle.com 	} else {
20412826Skuriakose.kuruvilla@oracle.com 		return (B_FALSE);
20512826Skuriakose.kuruvilla@oracle.com 	}
20612826Skuriakose.kuruvilla@oracle.com }
20712826Skuriakose.kuruvilla@oracle.com 
20812826Skuriakose.kuruvilla@oracle.com void
20912826Skuriakose.kuruvilla@oracle.com print_x86_featureset(void *featureset)
21012826Skuriakose.kuruvilla@oracle.com {
21112826Skuriakose.kuruvilla@oracle.com 	uint_t i;
21212826Skuriakose.kuruvilla@oracle.com 
21312826Skuriakose.kuruvilla@oracle.com 	for (i = 0; i < NUM_X86_FEATURES; i++) {
21412826Skuriakose.kuruvilla@oracle.com 		if (is_x86_feature(featureset, i)) {
21512826Skuriakose.kuruvilla@oracle.com 			cmn_err(CE_CONT, "?x86_feature: %s\n",
21612826Skuriakose.kuruvilla@oracle.com 			    x86_feature_names[i]);
21712826Skuriakose.kuruvilla@oracle.com 		}
21812826Skuriakose.kuruvilla@oracle.com 	}
21912826Skuriakose.kuruvilla@oracle.com }
22012826Skuriakose.kuruvilla@oracle.com 
2210Sstevel@tonic-gate uint_t enable486;
222*13134Skuriakose.kuruvilla@oracle.com 
223*13134Skuriakose.kuruvilla@oracle.com static size_t xsave_state_size = 0;
224*13134Skuriakose.kuruvilla@oracle.com uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
225*13134Skuriakose.kuruvilla@oracle.com boolean_t xsave_force_disable = B_FALSE;
226*13134Skuriakose.kuruvilla@oracle.com 
/*
 * This is set to the platform type that Solaris is running on.
 */
23010175SStuart.Maybee@Sun.COM static int platform_type = -1;
23110175SStuart.Maybee@Sun.COM 
23210175SStuart.Maybee@Sun.COM #if !defined(__xpv)
23310175SStuart.Maybee@Sun.COM /*
23410175SStuart.Maybee@Sun.COM  * Variable to patch if hypervisor platform detection needs to be
23510175SStuart.Maybee@Sun.COM  * disabled (e.g. platform_type will always be HW_NATIVE if this is 0).
23610175SStuart.Maybee@Sun.COM  */
23710175SStuart.Maybee@Sun.COM int enable_platform_detection = 1;
23810175SStuart.Maybee@Sun.COM #endif
2390Sstevel@tonic-gate 
/*
 * monitor/mwait info.
 *
 * size_actual and buf_actual are the real address and size allocated to get
 * proper mwait_buf alignment.  buf_actual and size_actual should be passed
 * to kmem_free().  Currently kmem_alloc() and mwait happen to both use
 * processor cache-line alignment, but this is not guaranteed in the future.
 */
/* Per-CPU monitor/mwait characteristics, filled in from cpuid fn 5. */
struct mwait_info {
	size_t		mon_min;	/* min size to avoid missed wakeups */
	size_t		mon_max;	/* size to avoid false wakeups */
	size_t		size_actual;	/* size actually allocated */
	void		*buf_actual;	/* memory actually allocated */
	uint32_t	support;	/* processor support of monitor/mwait */
};
2554481Sbholler 
/*
 * xsave/xrestor info.
 *
 * This structure contains HW feature bits and the size of the xsave save
 * area.  Note: the kernel will use the maximum size required for all
 * hardware features.  It is not optimized for potential memory savings if
 * features at the end of the save area are not enabled.
 */
struct xsave_info {
	uint32_t	xsav_hw_features_low;   /* Supported HW features (low 32 bits) */
	uint32_t	xsav_hw_features_high;  /* Supported HW features (high 32 bits) */
	size_t		xsav_max_size;  /* max size save area for HW features */
	size_t		ymm_size;	/* AVX: size of ymm save area */
	size_t		ymm_offset;	/* AVX: offset for ymm save area */
};
271*13134Skuriakose.kuruvilla@oracle.com 
272*13134Skuriakose.kuruvilla@oracle.com 
273*13134Skuriakose.kuruvilla@oracle.com /*
2740Sstevel@tonic-gate  * These constants determine how many of the elements of the
2750Sstevel@tonic-gate  * cpuid we cache in the cpuid_info data structure; the
2760Sstevel@tonic-gate  * remaining elements are accessible via the cpuid instruction.
2770Sstevel@tonic-gate  */
2780Sstevel@tonic-gate 
2790Sstevel@tonic-gate #define	NMAX_CPI_STD	6		/* eax = 0 .. 5 */
28010947SSrihari.Venkatesan@Sun.COM #define	NMAX_CPI_EXTD	0x1c		/* eax = 0x80000000 .. 0x8000001b */
28110947SSrihari.Venkatesan@Sun.COM 
28210947SSrihari.Venkatesan@Sun.COM /*
28310947SSrihari.Venkatesan@Sun.COM  * Some terminology needs to be explained:
28410947SSrihari.Venkatesan@Sun.COM  *  - Socket: Something that can be plugged into a motherboard.
28510947SSrihari.Venkatesan@Sun.COM  *  - Package: Same as socket
28610947SSrihari.Venkatesan@Sun.COM  *  - Chip: Same as socket. Note that AMD's documentation uses term "chip"
28710947SSrihari.Venkatesan@Sun.COM  *    differently: there, chip is the same as processor node (below)
28810947SSrihari.Venkatesan@Sun.COM  *  - Processor node: Some AMD processors have more than one
28910947SSrihari.Venkatesan@Sun.COM  *    "subprocessor" embedded in a package. These subprocessors (nodes)
29010947SSrihari.Venkatesan@Sun.COM  *    are fully-functional processors themselves with cores, caches,
29110947SSrihari.Venkatesan@Sun.COM  *    memory controllers, PCI configuration spaces. They are connected
29210947SSrihari.Venkatesan@Sun.COM  *    inside the package with Hypertransport links. On single-node
29310947SSrihari.Venkatesan@Sun.COM  *    processors, processor node is equivalent to chip/socket/package.
29410947SSrihari.Venkatesan@Sun.COM  */
2950Sstevel@tonic-gate 
/*
 * Cached cpuid state for one CPU.  Filled in incrementally by the
 * cpuid_pass*() functions; cpi_pass records the last pass completed
 * so that accessors can assert they are called late enough.
 */
struct cpuid_info {
	uint_t cpi_pass;		/* last pass completed */
	/*
	 * standard function information
	 */
	uint_t cpi_maxeax;		/* fn 0: %eax */
	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
	uint_t cpi_vendor;		/* enum of cpi_vendorstr */

	uint_t cpi_family;		/* fn 1: extended family */
	uint_t cpi_model;		/* fn 1: extended model */
	uint_t cpi_step;		/* fn 1: stepping */
	chipid_t cpi_chipid;		/* fn 1: %ebx:  Intel: chip # */
					/*		AMD: package/socket # */
	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
	int cpi_clogid;			/* fn 1: %ebx: thread # */
	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
	uint_t cpi_ncache;		/* fn 2: number of elements */
	uint_t cpi_ncpu_shr_last_cache;	/* fn 4: %eax: ncpus sharing cache */
	id_t cpi_last_lvl_cacheid;	/* fn 4: %eax: derived cache id */
	uint_t cpi_std_4_size;		/* fn 4: number of fn 4 elements */
	struct cpuid_regs **cpi_std_4;	/* fn 4: %ecx == 0 .. fn4_size */
	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 5 */
	/*
	 * extended function information
	 */
	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
	char cpi_brandstr[49];		/* fn 0x8000000[234] */
	uint8_t cpi_pabits;		/* fn 0x80000006: %eax */
	uint8_t	cpi_vabits;		/* fn 0x80000006: %eax */
	struct	cpuid_regs cpi_extd[NMAX_CPI_EXTD];	/* 0x800000XX */

	id_t cpi_coreid;		/* same coreid => strands share core */
	int cpi_pkgcoreid;		/* core number within single package */
	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
					/* Intel: fn 4: %eax[31-26] */
	/*
	 * supported feature information
	 */
	uint32_t cpi_support[5];
#define	STD_EDX_FEATURES	0
#define	AMD_EDX_FEATURES	1
#define	TM_EDX_FEATURES		2
#define	STD_ECX_FEATURES	3
#define	AMD_ECX_FEATURES	4
	/*
	 * Synthesized information, where known.
	 */
	uint32_t cpi_chiprev;		/* See X86_CHIPREV_* in x86_archext.h */
	const char *cpi_chiprevstr;	/* May be NULL if chiprev unknown */
	uint32_t cpi_socket;		/* Chip package/socket type */

	struct mwait_info cpi_mwait;	/* fn 5: monitor/mwait info */
	uint32_t cpi_apicid;		/* local APIC id; NOTE(review): fn 1 */
					/* %ebx[31:24] or fn B — confirm */
	uint_t cpi_procnodeid;		/* AMD: nodeID on HT, Intel: chipid */
	uint_t cpi_procnodes_per_pkg;	/* AMD: # of nodes in the package */
					/* Intel: 1 */

	struct xsave_info cpi_xsave;	/* fn D: xsave/xrestor info */
};
3570Sstevel@tonic-gate 
3580Sstevel@tonic-gate 
3590Sstevel@tonic-gate static struct cpuid_info cpuid_info0;
3600Sstevel@tonic-gate 
3610Sstevel@tonic-gate /*
3620Sstevel@tonic-gate  * These bit fields are defined by the Intel Application Note AP-485
3630Sstevel@tonic-gate  * "Intel Processor Identification and the CPUID Instruction"
3640Sstevel@tonic-gate  */
3650Sstevel@tonic-gate #define	CPI_FAMILY_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
3660Sstevel@tonic-gate #define	CPI_MODEL_XTD(cpi)	BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
3670Sstevel@tonic-gate #define	CPI_TYPE(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
3680Sstevel@tonic-gate #define	CPI_FAMILY(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
3690Sstevel@tonic-gate #define	CPI_STEP(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
3700Sstevel@tonic-gate #define	CPI_MODEL(cpi)		BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
3710Sstevel@tonic-gate 
3720Sstevel@tonic-gate #define	CPI_FEATURES_EDX(cpi)		((cpi)->cpi_std[1].cp_edx)
3730Sstevel@tonic-gate #define	CPI_FEATURES_ECX(cpi)		((cpi)->cpi_std[1].cp_ecx)
3740Sstevel@tonic-gate #define	CPI_FEATURES_XTD_EDX(cpi)	((cpi)->cpi_extd[1].cp_edx)
3750Sstevel@tonic-gate #define	CPI_FEATURES_XTD_ECX(cpi)	((cpi)->cpi_extd[1].cp_ecx)
3760Sstevel@tonic-gate 
3770Sstevel@tonic-gate #define	CPI_BRANDID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
3780Sstevel@tonic-gate #define	CPI_CHUNKS(cpi)		BITX((cpi)->cpi_std[1].cp_ebx, 15, 7)
3790Sstevel@tonic-gate #define	CPI_CPU_COUNT(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
3800Sstevel@tonic-gate #define	CPI_APIC_ID(cpi)	BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
3810Sstevel@tonic-gate 
3820Sstevel@tonic-gate #define	CPI_MAXEAX_MAX		0x100		/* sanity control */
3830Sstevel@tonic-gate #define	CPI_XMAXEAX_MAX		0x80000100
3844606Sesaxe #define	CPI_FN4_ECX_MAX		0x20		/* sanity: max fn 4 levels */
3857282Smishra #define	CPI_FNB_ECX_MAX		0x20		/* sanity: max fn B levels */
3864606Sesaxe 
3874606Sesaxe /*
3884606Sesaxe  * Function 4 (Deterministic Cache Parameters) macros
3894606Sesaxe  * Defined by Intel Application Note AP-485
3904606Sesaxe  */
3914606Sesaxe #define	CPI_NUM_CORES(regs)		BITX((regs)->cp_eax, 31, 26)
3924606Sesaxe #define	CPI_NTHR_SHR_CACHE(regs)	BITX((regs)->cp_eax, 25, 14)
3934606Sesaxe #define	CPI_FULL_ASSOC_CACHE(regs)	BITX((regs)->cp_eax, 9, 9)
3944606Sesaxe #define	CPI_SELF_INIT_CACHE(regs)	BITX((regs)->cp_eax, 8, 8)
3954606Sesaxe #define	CPI_CACHE_LVL(regs)		BITX((regs)->cp_eax, 7, 5)
3964606Sesaxe #define	CPI_CACHE_TYPE(regs)		BITX((regs)->cp_eax, 4, 0)
3977282Smishra #define	CPI_CPU_LEVEL_TYPE(regs)	BITX((regs)->cp_ecx, 15, 8)
3984606Sesaxe 
3994606Sesaxe #define	CPI_CACHE_WAYS(regs)		BITX((regs)->cp_ebx, 31, 22)
4004606Sesaxe #define	CPI_CACHE_PARTS(regs)		BITX((regs)->cp_ebx, 21, 12)
4014606Sesaxe #define	CPI_CACHE_COH_LN_SZ(regs)	BITX((regs)->cp_ebx, 11, 0)
4024606Sesaxe 
4034606Sesaxe #define	CPI_CACHE_SETS(regs)		BITX((regs)->cp_ecx, 31, 0)
4044606Sesaxe 
4054606Sesaxe #define	CPI_PREFCH_STRIDE(regs)		BITX((regs)->cp_edx, 9, 0)
4064606Sesaxe 
4070Sstevel@tonic-gate 
4080Sstevel@tonic-gate /*
4091975Sdmick  * A couple of shorthand macros to identify "later" P6-family chips
4101975Sdmick  * like the Pentium M and Core.  First, the "older" P6-based stuff
4111975Sdmick  * (loosely defined as "pre-Pentium-4"):
4121975Sdmick  * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
4131975Sdmick  */
4141975Sdmick 
4151975Sdmick #define	IS_LEGACY_P6(cpi) (			\
4161975Sdmick 	cpi->cpi_family == 6 && 		\
4171975Sdmick 		(cpi->cpi_model == 1 ||		\
4181975Sdmick 		cpi->cpi_model == 3 ||		\
4191975Sdmick 		cpi->cpi_model == 5 ||		\
4201975Sdmick 		cpi->cpi_model == 6 ||		\
4211975Sdmick 		cpi->cpi_model == 7 ||		\
4221975Sdmick 		cpi->cpi_model == 8 ||		\
4231975Sdmick 		cpi->cpi_model == 0xA ||	\
4241975Sdmick 		cpi->cpi_model == 0xB)		\
4251975Sdmick )
4261975Sdmick 
4271975Sdmick /* A "new F6" is everything with family 6 that's not the above */
4281975Sdmick #define	IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))
4291975Sdmick 
4304855Sksadhukh /* Extended family/model support */
4314855Sksadhukh #define	IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
4324855Sksadhukh 	cpi->cpi_family >= 0xf)
4334855Sksadhukh 
4341975Sdmick /*
4354481Sbholler  * Info for monitor/mwait idle loop.
4364481Sbholler  *
4374481Sbholler  * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
4384481Sbholler  * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
4394481Sbholler  * 2006.
4404481Sbholler  * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
4414481Sbholler  * Documentation Updates" #33633, Rev 2.05, December 2006.
4424481Sbholler  */
4434481Sbholler #define	MWAIT_SUPPORT		(0x00000001)	/* mwait supported */
#define	MWAIT_EXTENSIONS	(0x00000002)	/* extension supported */
4454481Sbholler #define	MWAIT_ECX_INT_ENABLE	(0x00000004)	/* ecx 1 extension supported */
4464481Sbholler #define	MWAIT_SUPPORTED(cpi)	((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
4474481Sbholler #define	MWAIT_INT_ENABLE(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x2)
4484481Sbholler #define	MWAIT_EXTENSION(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x1)
4494481Sbholler #define	MWAIT_SIZE_MIN(cpi)	BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
4504481Sbholler #define	MWAIT_SIZE_MAX(cpi)	BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)
4514481Sbholler /*
4524481Sbholler  * Number of sub-cstates for a given c-state.
4534481Sbholler  */
4544481Sbholler #define	MWAIT_NUM_SUBC_STATES(cpi, c_state)			\
4554481Sbholler 	BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
4564481Sbholler 
4577532SSean.Ye@Sun.COM /*
458*13134Skuriakose.kuruvilla@oracle.com  * XSAVE leaf 0xD enumeration
459*13134Skuriakose.kuruvilla@oracle.com  */
460*13134Skuriakose.kuruvilla@oracle.com #define	CPUID_LEAFD_2_YMM_OFFSET	576
461*13134Skuriakose.kuruvilla@oracle.com #define	CPUID_LEAFD_2_YMM_SIZE		256
462*13134Skuriakose.kuruvilla@oracle.com 
463*13134Skuriakose.kuruvilla@oracle.com /*
 * Functions we consume from cpuid_subr.c;  don't publish these in a header
4657532SSean.Ye@Sun.COM  * file to try and keep people using the expected cpuid_* interfaces.
4667532SSean.Ye@Sun.COM  */
4677532SSean.Ye@Sun.COM extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
4689482SKuriakose.Kuruvilla@Sun.COM extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
4697532SSean.Ye@Sun.COM extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
4707532SSean.Ye@Sun.COM extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
4717532SSean.Ye@Sun.COM extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
4722869Sgavinm 
4732869Sgavinm /*
4743446Smrj  * Apply up various platform-dependent restrictions where the
4753446Smrj  * underlying platform restrictions mean the CPU can be marked
4763446Smrj  * as less capable than its cpuid instruction would imply.
4773446Smrj  */
4785084Sjohnlev #if defined(__xpv)
4795084Sjohnlev static void
4805084Sjohnlev platform_cpuid_mangle(uint_t vendor, uint32_t eax, struct cpuid_regs *cp)
4815084Sjohnlev {
4825084Sjohnlev 	switch (eax) {
4837532SSean.Ye@Sun.COM 	case 1: {
4847532SSean.Ye@Sun.COM 		uint32_t mcamask = DOMAIN_IS_INITDOMAIN(xen_info) ?
4857532SSean.Ye@Sun.COM 		    0 : CPUID_INTC_EDX_MCA;
4865084Sjohnlev 		cp->cp_edx &=
4877532SSean.Ye@Sun.COM 		    ~(mcamask |
4887532SSean.Ye@Sun.COM 		    CPUID_INTC_EDX_PSE |
4895084Sjohnlev 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
4905084Sjohnlev 		    CPUID_INTC_EDX_SEP | CPUID_INTC_EDX_MTRR |
4915084Sjohnlev 		    CPUID_INTC_EDX_PGE | CPUID_INTC_EDX_PAT |
4925084Sjohnlev 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
4935084Sjohnlev 		    CPUID_INTC_EDX_PSE36 | CPUID_INTC_EDX_HTT);
4945084Sjohnlev 		break;
4957532SSean.Ye@Sun.COM 	}
4965084Sjohnlev 
4975084Sjohnlev 	case 0x80000001:
4985084Sjohnlev 		cp->cp_edx &=
4995084Sjohnlev 		    ~(CPUID_AMD_EDX_PSE |
5005084Sjohnlev 		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
5015084Sjohnlev 		    CPUID_AMD_EDX_MTRR | CPUID_AMD_EDX_PGE |
5025084Sjohnlev 		    CPUID_AMD_EDX_PAT | CPUID_AMD_EDX_PSE36 |
5035084Sjohnlev 		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
5045084Sjohnlev 		    CPUID_AMD_EDX_TSCP);
5055084Sjohnlev 		cp->cp_ecx &= ~CPUID_AMD_ECX_CMP_LGCY;
5065084Sjohnlev 		break;
5075084Sjohnlev 	default:
5085084Sjohnlev 		break;
5095084Sjohnlev 	}
5105084Sjohnlev 
5115084Sjohnlev 	switch (vendor) {
5125084Sjohnlev 	case X86_VENDOR_Intel:
5135084Sjohnlev 		switch (eax) {
5145084Sjohnlev 		case 4:
5155084Sjohnlev 			/*
5165084Sjohnlev 			 * Zero out the (ncores-per-chip - 1) field
5175084Sjohnlev 			 */
5185084Sjohnlev 			cp->cp_eax &= 0x03fffffff;
5195084Sjohnlev 			break;
5205084Sjohnlev 		default:
5215084Sjohnlev 			break;
5225084Sjohnlev 		}
5235084Sjohnlev 		break;
5245084Sjohnlev 	case X86_VENDOR_AMD:
5255084Sjohnlev 		switch (eax) {
52610080SJoe.Bonasera@sun.com 
52710080SJoe.Bonasera@sun.com 		case 0x80000001:
52810080SJoe.Bonasera@sun.com 			cp->cp_ecx &= ~CPUID_AMD_ECX_CR8D;
52910080SJoe.Bonasera@sun.com 			break;
53010080SJoe.Bonasera@sun.com 
5315084Sjohnlev 		case 0x80000008:
5325084Sjohnlev 			/*
5335084Sjohnlev 			 * Zero out the (ncores-per-chip - 1) field
5345084Sjohnlev 			 */
5355084Sjohnlev 			cp->cp_ecx &= 0xffffff00;
5365084Sjohnlev 			break;
5375084Sjohnlev 		default:
5385084Sjohnlev 			break;
5395084Sjohnlev 		}
5405084Sjohnlev 		break;
5415084Sjohnlev 	default:
5425084Sjohnlev 		break;
5435084Sjohnlev 	}
5445084Sjohnlev }
5455084Sjohnlev #else
5463446Smrj #define	platform_cpuid_mangle(vendor, eax, cp)	/* nothing */
5475084Sjohnlev #endif
5483446Smrj 
5493446Smrj /*
5500Sstevel@tonic-gate  *  Some undocumented ways of patching the results of the cpuid
5510Sstevel@tonic-gate  *  instruction to permit running Solaris 10 on future cpus that
5520Sstevel@tonic-gate  *  we don't currently support.  Could be set to non-zero values
5530Sstevel@tonic-gate  *  via settings in eeprom.
5540Sstevel@tonic-gate  */
5550Sstevel@tonic-gate 
5560Sstevel@tonic-gate uint32_t cpuid_feature_ecx_include;
5570Sstevel@tonic-gate uint32_t cpuid_feature_ecx_exclude;
5580Sstevel@tonic-gate uint32_t cpuid_feature_edx_include;
5590Sstevel@tonic-gate uint32_t cpuid_feature_edx_exclude;
5600Sstevel@tonic-gate 
56112004Sjiang.liu@intel.com /*
56212004Sjiang.liu@intel.com  * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
56312004Sjiang.liu@intel.com  */
5643446Smrj void
5653446Smrj cpuid_alloc_space(cpu_t *cpu)
5663446Smrj {
5673446Smrj 	/*
5683446Smrj 	 * By convention, cpu0 is the boot cpu, which is set up
5693446Smrj 	 * before memory allocation is available.  All other cpus get
5703446Smrj 	 * their cpuid_info struct allocated here.
5713446Smrj 	 */
5723446Smrj 	ASSERT(cpu->cpu_id != 0);
57312004Sjiang.liu@intel.com 	ASSERT(cpu->cpu_m.mcpu_cpi == NULL);
5743446Smrj 	cpu->cpu_m.mcpu_cpi =
5753446Smrj 	    kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP);
5763446Smrj }
5773446Smrj 
5783446Smrj void
5793446Smrj cpuid_free_space(cpu_t *cpu)
5803446Smrj {
5814606Sesaxe 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
5824606Sesaxe 	int i;
5834606Sesaxe 
58412004Sjiang.liu@intel.com 	ASSERT(cpi != NULL);
58512004Sjiang.liu@intel.com 	ASSERT(cpi != &cpuid_info0);
5864606Sesaxe 
5874606Sesaxe 	/*
5884606Sesaxe 	 * Free up any function 4 related dynamic storage
5894606Sesaxe 	 */
5904606Sesaxe 	for (i = 1; i < cpi->cpi_std_4_size; i++)
5914606Sesaxe 		kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs));
5924606Sesaxe 	if (cpi->cpi_std_4_size > 0)
5934606Sesaxe 		kmem_free(cpi->cpi_std_4,
5944606Sesaxe 		    cpi->cpi_std_4_size * sizeof (struct cpuid_regs *));
5954606Sesaxe 
59612004Sjiang.liu@intel.com 	kmem_free(cpi, sizeof (*cpi));
59712004Sjiang.liu@intel.com 	cpu->cpu_m.mcpu_cpi = NULL;
5983446Smrj }
5993446Smrj 
6005741Smrj #if !defined(__xpv)
6015741Smrj 
6025741Smrj static void
6039000SStuart.Maybee@Sun.COM determine_platform()
6045741Smrj {
6055741Smrj 	struct cpuid_regs cp;
6065741Smrj 	char *xen_str;
60712090SFrank.Vanderlinden@Sun.COM 	uint32_t xen_signature[4], base;
6085741Smrj 
60910175SStuart.Maybee@Sun.COM 	platform_type = HW_NATIVE;
61010175SStuart.Maybee@Sun.COM 
61110175SStuart.Maybee@Sun.COM 	if (!enable_platform_detection)
61210175SStuart.Maybee@Sun.COM 		return;
61310175SStuart.Maybee@Sun.COM 
6145741Smrj 	/*
6155741Smrj 	 * In a fully virtualized domain, Xen's pseudo-cpuid function
61612090SFrank.Vanderlinden@Sun.COM 	 * returns a string representing the Xen signature in %ebx, %ecx,
61712090SFrank.Vanderlinden@Sun.COM 	 * and %edx. %eax contains the maximum supported cpuid function.
61812090SFrank.Vanderlinden@Sun.COM 	 * We need at least a (base + 2) leaf value to do what we want
61912090SFrank.Vanderlinden@Sun.COM 	 * to do. Try different base values, since the hypervisor might
62012090SFrank.Vanderlinden@Sun.COM 	 * use a different one depending on whether hyper-v emulation
62112090SFrank.Vanderlinden@Sun.COM 	 * is switched on by default or not.
6225741Smrj 	 */
62312090SFrank.Vanderlinden@Sun.COM 	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
62412090SFrank.Vanderlinden@Sun.COM 		cp.cp_eax = base;
62512090SFrank.Vanderlinden@Sun.COM 		(void) __cpuid_insn(&cp);
62612090SFrank.Vanderlinden@Sun.COM 		xen_signature[0] = cp.cp_ebx;
62712090SFrank.Vanderlinden@Sun.COM 		xen_signature[1] = cp.cp_ecx;
62812090SFrank.Vanderlinden@Sun.COM 		xen_signature[2] = cp.cp_edx;
62912090SFrank.Vanderlinden@Sun.COM 		xen_signature[3] = 0;
63012090SFrank.Vanderlinden@Sun.COM 		xen_str = (char *)xen_signature;
63112090SFrank.Vanderlinden@Sun.COM 		if (strcmp("XenVMMXenVMM", xen_str) == 0 &&
63212090SFrank.Vanderlinden@Sun.COM 		    cp.cp_eax >= (base + 2)) {
63312090SFrank.Vanderlinden@Sun.COM 			platform_type = HW_XEN_HVM;
63412090SFrank.Vanderlinden@Sun.COM 			return;
63512090SFrank.Vanderlinden@Sun.COM 		}
63612090SFrank.Vanderlinden@Sun.COM 	}
63712090SFrank.Vanderlinden@Sun.COM 
63812090SFrank.Vanderlinden@Sun.COM 	if (vmware_platform()) /* running under vmware hypervisor? */
6399000SStuart.Maybee@Sun.COM 		platform_type = HW_VMWARE;
6409000SStuart.Maybee@Sun.COM }
6419000SStuart.Maybee@Sun.COM 
6429000SStuart.Maybee@Sun.COM int
6439000SStuart.Maybee@Sun.COM get_hwenv(void)
6449000SStuart.Maybee@Sun.COM {
64510175SStuart.Maybee@Sun.COM 	if (platform_type == -1)
64610175SStuart.Maybee@Sun.COM 		determine_platform();
64710175SStuart.Maybee@Sun.COM 
6489000SStuart.Maybee@Sun.COM 	return (platform_type);
6495741Smrj }
6509000SStuart.Maybee@Sun.COM 
6519000SStuart.Maybee@Sun.COM int
6529000SStuart.Maybee@Sun.COM is_controldom(void)
6539000SStuart.Maybee@Sun.COM {
6549000SStuart.Maybee@Sun.COM 	return (0);
6559000SStuart.Maybee@Sun.COM }
6569000SStuart.Maybee@Sun.COM 
6579000SStuart.Maybee@Sun.COM #else
6589000SStuart.Maybee@Sun.COM 
6599000SStuart.Maybee@Sun.COM int
6609000SStuart.Maybee@Sun.COM get_hwenv(void)
6619000SStuart.Maybee@Sun.COM {
6629000SStuart.Maybee@Sun.COM 	return (HW_XEN_PV);
6639000SStuart.Maybee@Sun.COM }
6649000SStuart.Maybee@Sun.COM 
6659000SStuart.Maybee@Sun.COM int
6669000SStuart.Maybee@Sun.COM is_controldom(void)
6679000SStuart.Maybee@Sun.COM {
6689000SStuart.Maybee@Sun.COM 	return (DOMAIN_IS_INITDOMAIN(xen_info));
6699000SStuart.Maybee@Sun.COM }
6709000SStuart.Maybee@Sun.COM 
6715741Smrj #endif	/* __xpv */
6725741Smrj 
67310947SSrihari.Venkatesan@Sun.COM static void
67412826Skuriakose.kuruvilla@oracle.com cpuid_intel_getids(cpu_t *cpu, void *feature)
67510947SSrihari.Venkatesan@Sun.COM {
67610947SSrihari.Venkatesan@Sun.COM 	uint_t i;
67710947SSrihari.Venkatesan@Sun.COM 	uint_t chipid_shift = 0;
67810947SSrihari.Venkatesan@Sun.COM 	uint_t coreid_shift = 0;
67910947SSrihari.Venkatesan@Sun.COM 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
68010947SSrihari.Venkatesan@Sun.COM 
68110947SSrihari.Venkatesan@Sun.COM 	for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1)
68210947SSrihari.Venkatesan@Sun.COM 		chipid_shift++;
68310947SSrihari.Venkatesan@Sun.COM 
68410947SSrihari.Venkatesan@Sun.COM 	cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift;
68510947SSrihari.Venkatesan@Sun.COM 	cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1);
68610947SSrihari.Venkatesan@Sun.COM 
68712826Skuriakose.kuruvilla@oracle.com 	if (is_x86_feature(feature, X86FSET_CMP)) {
68810947SSrihari.Venkatesan@Sun.COM 		/*
68910947SSrihari.Venkatesan@Sun.COM 		 * Multi-core (and possibly multi-threaded)
69010947SSrihari.Venkatesan@Sun.COM 		 * processors.
69110947SSrihari.Venkatesan@Sun.COM 		 */
69210947SSrihari.Venkatesan@Sun.COM 		uint_t ncpu_per_core;
69310947SSrihari.Venkatesan@Sun.COM 		if (cpi->cpi_ncore_per_chip == 1)
69410947SSrihari.Venkatesan@Sun.COM 			ncpu_per_core = cpi->cpi_ncpu_per_chip;
69510947SSrihari.Venkatesan@Sun.COM 		else if (cpi->cpi_ncore_per_chip > 1)
69610947SSrihari.Venkatesan@Sun.COM 			ncpu_per_core = cpi->cpi_ncpu_per_chip /
69710947SSrihari.Venkatesan@Sun.COM 			    cpi->cpi_ncore_per_chip;
69810947SSrihari.Venkatesan@Sun.COM 		/*
69910947SSrihari.Venkatesan@Sun.COM 		 * 8bit APIC IDs on dual core Pentiums
70010947SSrihari.Venkatesan@Sun.COM 		 * look like this:
70110947SSrihari.Venkatesan@Sun.COM 		 *
70210947SSrihari.Venkatesan@Sun.COM 		 * +-----------------------+------+------+
70310947SSrihari.Venkatesan@Sun.COM 		 * | Physical Package ID   |  MC  |  HT  |
70410947SSrihari.Venkatesan@Sun.COM 		 * +-----------------------+------+------+
70510947SSrihari.Venkatesan@Sun.COM 		 * <------- chipid -------->
70610947SSrihari.Venkatesan@Sun.COM 		 * <------- coreid --------------->
70710947SSrihari.Venkatesan@Sun.COM 		 *			   <--- clogid -->
70810947SSrihari.Venkatesan@Sun.COM 		 *			   <------>
70910947SSrihari.Venkatesan@Sun.COM 		 *			   pkgcoreid
71010947SSrihari.Venkatesan@Sun.COM 		 *
71110947SSrihari.Venkatesan@Sun.COM 		 * Where the number of bits necessary to
71210947SSrihari.Venkatesan@Sun.COM 		 * represent MC and HT fields together equals
71310947SSrihari.Venkatesan@Sun.COM 		 * to the minimum number of bits necessary to
71410947SSrihari.Venkatesan@Sun.COM 		 * store the value of cpi->cpi_ncpu_per_chip.
71510947SSrihari.Venkatesan@Sun.COM 		 * Of those bits, the MC part uses the number
71610947SSrihari.Venkatesan@Sun.COM 		 * of bits necessary to store the value of
71710947SSrihari.Venkatesan@Sun.COM 		 * cpi->cpi_ncore_per_chip.
71810947SSrihari.Venkatesan@Sun.COM 		 */
71910947SSrihari.Venkatesan@Sun.COM 		for (i = 1; i < ncpu_per_core; i <<= 1)
72010947SSrihari.Venkatesan@Sun.COM 			coreid_shift++;
72110947SSrihari.Venkatesan@Sun.COM 		cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift;
72210947SSrihari.Venkatesan@Sun.COM 		cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
72312826Skuriakose.kuruvilla@oracle.com 	} else if (is_x86_feature(feature, X86FSET_HTT)) {
72410947SSrihari.Venkatesan@Sun.COM 		/*
72510947SSrihari.Venkatesan@Sun.COM 		 * Single-core multi-threaded processors.
72610947SSrihari.Venkatesan@Sun.COM 		 */
72710947SSrihari.Venkatesan@Sun.COM 		cpi->cpi_coreid = cpi->cpi_chipid;
72810947SSrihari.Venkatesan@Sun.COM 		cpi->cpi_pkgcoreid = 0;
72910947SSrihari.Venkatesan@Sun.COM 	}
73010947SSrihari.Venkatesan@Sun.COM 	cpi->cpi_procnodeid = cpi->cpi_chipid;
73110947SSrihari.Venkatesan@Sun.COM }
73210947SSrihari.Venkatesan@Sun.COM 
73310947SSrihari.Venkatesan@Sun.COM static void
73410947SSrihari.Venkatesan@Sun.COM cpuid_amd_getids(cpu_t *cpu)
73510947SSrihari.Venkatesan@Sun.COM {
73611013SSrihari.Venkatesan@Sun.COM 	int i, first_half, coreidsz;
73710947SSrihari.Venkatesan@Sun.COM 	uint32_t nb_caps_reg;
73810947SSrihari.Venkatesan@Sun.COM 	uint_t node2_1;
73910947SSrihari.Venkatesan@Sun.COM 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
74010947SSrihari.Venkatesan@Sun.COM 
74110947SSrihari.Venkatesan@Sun.COM 	/*
74210947SSrihari.Venkatesan@Sun.COM 	 * AMD CMP chips currently have a single thread per core.
74310947SSrihari.Venkatesan@Sun.COM 	 *
74410947SSrihari.Venkatesan@Sun.COM 	 * Since no two cpus share a core we must assign a distinct coreid
74510947SSrihari.Venkatesan@Sun.COM 	 * per cpu, and we do this by using the cpu_id.  This scheme does not,
74610947SSrihari.Venkatesan@Sun.COM 	 * however, guarantee that sibling cores of a chip will have sequential
74710947SSrihari.Venkatesan@Sun.COM 	 * coreids starting at a multiple of the number of cores per chip -
74810947SSrihari.Venkatesan@Sun.COM 	 * that is usually the case, but if the ACPI MADT table is presented
74910947SSrihari.Venkatesan@Sun.COM 	 * in a different order then we need to perform a few more gymnastics
75010947SSrihari.Venkatesan@Sun.COM 	 * for the pkgcoreid.
75110947SSrihari.Venkatesan@Sun.COM 	 *
75210947SSrihari.Venkatesan@Sun.COM 	 * All processors in the system have the same number of enabled
75310947SSrihari.Venkatesan@Sun.COM 	 * cores. Cores within a processor are always numbered sequentially
75410947SSrihari.Venkatesan@Sun.COM 	 * from 0 regardless of how many or which are disabled, and there
75510947SSrihari.Venkatesan@Sun.COM 	 * is no way for operating system to discover the real core id when some
75610947SSrihari.Venkatesan@Sun.COM 	 * are disabled.
75710947SSrihari.Venkatesan@Sun.COM 	 */
75810947SSrihari.Venkatesan@Sun.COM 
75910947SSrihari.Venkatesan@Sun.COM 	cpi->cpi_coreid = cpu->cpu_id;
76010947SSrihari.Venkatesan@Sun.COM 
76110947SSrihari.Venkatesan@Sun.COM 	if (cpi->cpi_xmaxeax >= 0x80000008) {
76210947SSrihari.Venkatesan@Sun.COM 
76310947SSrihari.Venkatesan@Sun.COM 		coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12);
76410947SSrihari.Venkatesan@Sun.COM 
76510947SSrihari.Venkatesan@Sun.COM 		/*
76610947SSrihari.Venkatesan@Sun.COM 		 * In AMD parlance chip is really a node while Solaris
76710947SSrihari.Venkatesan@Sun.COM 		 * sees chip as equivalent to socket/package.
76810947SSrihari.Venkatesan@Sun.COM 		 */
76910947SSrihari.Venkatesan@Sun.COM 		cpi->cpi_ncore_per_chip =
77010947SSrihari.Venkatesan@Sun.COM 		    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
77111013SSrihari.Venkatesan@Sun.COM 		if (coreidsz == 0) {
77210947SSrihari.Venkatesan@Sun.COM 			/* Use legacy method */
77311013SSrihari.Venkatesan@Sun.COM 			for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1)
77411013SSrihari.Venkatesan@Sun.COM 				coreidsz++;
77511013SSrihari.Venkatesan@Sun.COM 			if (coreidsz == 0)
77611013SSrihari.Venkatesan@Sun.COM 				coreidsz = 1;
77711013SSrihari.Venkatesan@Sun.COM 		}
77810947SSrihari.Venkatesan@Sun.COM 	} else {
77910947SSrihari.Venkatesan@Sun.COM 		/* Assume single-core part */
78011013SSrihari.Venkatesan@Sun.COM 		cpi->cpi_ncore_per_chip = 1;
78112726SJakub.Jermar@Sun.COM 		coreidsz = 1;
78210947SSrihari.Venkatesan@Sun.COM 	}
78310947SSrihari.Venkatesan@Sun.COM 
78411013SSrihari.Venkatesan@Sun.COM 	cpi->cpi_clogid = cpi->cpi_pkgcoreid =
78511013SSrihari.Venkatesan@Sun.COM 	    cpi->cpi_apicid & ((1<<coreidsz) - 1);
78610947SSrihari.Venkatesan@Sun.COM 	cpi->cpi_ncpu_per_chip = cpi->cpi_ncore_per_chip;
78710947SSrihari.Venkatesan@Sun.COM 
78810947SSrihari.Venkatesan@Sun.COM 	/* Get nodeID */
78910947SSrihari.Venkatesan@Sun.COM 	if (cpi->cpi_family == 0xf) {
79011013SSrihari.Venkatesan@Sun.COM 		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
79110947SSrihari.Venkatesan@Sun.COM 		cpi->cpi_chipid = cpi->cpi_procnodeid;
79210947SSrihari.Venkatesan@Sun.COM 	} else if (cpi->cpi_family == 0x10) {
79310947SSrihari.Venkatesan@Sun.COM 		/*
79410947SSrihari.Venkatesan@Sun.COM 		 * See if we are a multi-node processor.
79510947SSrihari.Venkatesan@Sun.COM 		 * All processors in the system have the same number of nodes
79610947SSrihari.Venkatesan@Sun.COM 		 */
79710947SSrihari.Venkatesan@Sun.COM 		nb_caps_reg =  pci_getl_func(0, 24, 3, 0xe8);
79810947SSrihari.Venkatesan@Sun.COM 		if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) {
79910947SSrihari.Venkatesan@Sun.COM 			/* Single-node */
80011013SSrihari.Venkatesan@Sun.COM 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5,
80111013SSrihari.Venkatesan@Sun.COM 			    coreidsz);
80210947SSrihari.Venkatesan@Sun.COM 			cpi->cpi_chipid = cpi->cpi_procnodeid;
80310947SSrihari.Venkatesan@Sun.COM 		} else {
80410947SSrihari.Venkatesan@Sun.COM 
80510947SSrihari.Venkatesan@Sun.COM 			/*
80610947SSrihari.Venkatesan@Sun.COM 			 * Multi-node revision D (2 nodes per package
80710947SSrihari.Venkatesan@Sun.COM 			 * are supported)
80810947SSrihari.Venkatesan@Sun.COM 			 */
80910947SSrihari.Venkatesan@Sun.COM 			cpi->cpi_procnodes_per_pkg = 2;
81010947SSrihari.Venkatesan@Sun.COM 
81110947SSrihari.Venkatesan@Sun.COM 			first_half = (cpi->cpi_pkgcoreid <=
81210947SSrihari.Venkatesan@Sun.COM 			    (cpi->cpi_ncore_per_chip/2 - 1));
81310947SSrihari.Venkatesan@Sun.COM 
81410947SSrihari.Venkatesan@Sun.COM 			if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) {
81510947SSrihari.Venkatesan@Sun.COM 				/* We are BSP */
81610947SSrihari.Venkatesan@Sun.COM 				cpi->cpi_procnodeid = (first_half ? 0 : 1);
81710947SSrihari.Venkatesan@Sun.COM 				cpi->cpi_chipid = cpi->cpi_procnodeid >> 1;
81810947SSrihari.Venkatesan@Sun.COM 			} else {
81910947SSrihari.Venkatesan@Sun.COM 
82010947SSrihari.Venkatesan@Sun.COM 				/* We are AP */
82110947SSrihari.Venkatesan@Sun.COM 				/* NodeId[2:1] bits to use for reading F3xe8 */
82210947SSrihari.Venkatesan@Sun.COM 				node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1;
82310947SSrihari.Venkatesan@Sun.COM 
82410947SSrihari.Venkatesan@Sun.COM 				nb_caps_reg =
82510947SSrihari.Venkatesan@Sun.COM 				    pci_getl_func(0, 24 + node2_1, 3, 0xe8);
82610947SSrihari.Venkatesan@Sun.COM 
82710947SSrihari.Venkatesan@Sun.COM 				/*
82810947SSrihari.Venkatesan@Sun.COM 				 * Check IntNodeNum bit (31:30, but bit 31 is
82910947SSrihari.Venkatesan@Sun.COM 				 * always 0 on dual-node processors)
83010947SSrihari.Venkatesan@Sun.COM 				 */
83110947SSrihari.Venkatesan@Sun.COM 				if (BITX(nb_caps_reg, 30, 30) == 0)
83210947SSrihari.Venkatesan@Sun.COM 					cpi->cpi_procnodeid = node2_1 +
83310947SSrihari.Venkatesan@Sun.COM 					    !first_half;
83410947SSrihari.Venkatesan@Sun.COM 				else
83510947SSrihari.Venkatesan@Sun.COM 					cpi->cpi_procnodeid = node2_1 +
83610947SSrihari.Venkatesan@Sun.COM 					    first_half;
83710947SSrihari.Venkatesan@Sun.COM 
83810947SSrihari.Venkatesan@Sun.COM 				cpi->cpi_chipid = cpi->cpi_procnodeid >> 1;
83910947SSrihari.Venkatesan@Sun.COM 			}
84010947SSrihari.Venkatesan@Sun.COM 		}
84110947SSrihari.Venkatesan@Sun.COM 	} else if (cpi->cpi_family >= 0x11) {
84210947SSrihari.Venkatesan@Sun.COM 		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
84310947SSrihari.Venkatesan@Sun.COM 		cpi->cpi_chipid = cpi->cpi_procnodeid;
84410947SSrihari.Venkatesan@Sun.COM 	} else {
84510947SSrihari.Venkatesan@Sun.COM 		cpi->cpi_procnodeid = 0;
84610947SSrihari.Venkatesan@Sun.COM 		cpi->cpi_chipid = cpi->cpi_procnodeid;
84710947SSrihari.Venkatesan@Sun.COM 	}
84810947SSrihari.Venkatesan@Sun.COM }
84910947SSrihari.Venkatesan@Sun.COM 
850*13134Skuriakose.kuruvilla@oracle.com /*
851*13134Skuriakose.kuruvilla@oracle.com  * Setup XFeature_Enabled_Mask register. Required by xsave feature.
852*13134Skuriakose.kuruvilla@oracle.com  */
853*13134Skuriakose.kuruvilla@oracle.com void
854*13134Skuriakose.kuruvilla@oracle.com setup_xfem(void)
855*13134Skuriakose.kuruvilla@oracle.com {
856*13134Skuriakose.kuruvilla@oracle.com 	uint64_t flags = XFEATURE_LEGACY_FP;
857*13134Skuriakose.kuruvilla@oracle.com 
858*13134Skuriakose.kuruvilla@oracle.com 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
859*13134Skuriakose.kuruvilla@oracle.com 
860*13134Skuriakose.kuruvilla@oracle.com 	if (is_x86_feature(x86_featureset, X86FSET_SSE))
861*13134Skuriakose.kuruvilla@oracle.com 		flags |= XFEATURE_SSE;
862*13134Skuriakose.kuruvilla@oracle.com 
863*13134Skuriakose.kuruvilla@oracle.com 	if (is_x86_feature(x86_featureset, X86FSET_AVX))
864*13134Skuriakose.kuruvilla@oracle.com 		flags |= XFEATURE_AVX;
865*13134Skuriakose.kuruvilla@oracle.com 
866*13134Skuriakose.kuruvilla@oracle.com 	set_xcr(XFEATURE_ENABLED_MASK, flags);
867*13134Skuriakose.kuruvilla@oracle.com 
868*13134Skuriakose.kuruvilla@oracle.com 	xsave_bv_all = flags;
869*13134Skuriakose.kuruvilla@oracle.com }
870*13134Skuriakose.kuruvilla@oracle.com 
87112826Skuriakose.kuruvilla@oracle.com void *
8720Sstevel@tonic-gate cpuid_pass1(cpu_t *cpu)
8730Sstevel@tonic-gate {
8740Sstevel@tonic-gate 	uint32_t mask_ecx, mask_edx;
87512826Skuriakose.kuruvilla@oracle.com 	void *featureset;
8760Sstevel@tonic-gate 	struct cpuid_info *cpi;
8771228Sandrei 	struct cpuid_regs *cp;
8780Sstevel@tonic-gate 	int xcpuid;
8795084Sjohnlev #if !defined(__xpv)
8805045Sbholler 	extern int idle_cpu_prefer_mwait;
8815084Sjohnlev #endif
8823446Smrj 
8839482SKuriakose.Kuruvilla@Sun.COM #if !defined(__xpv)
8849482SKuriakose.Kuruvilla@Sun.COM 	determine_platform();
8859482SKuriakose.Kuruvilla@Sun.COM #endif
8860Sstevel@tonic-gate 	/*
88712004Sjiang.liu@intel.com 	 * Space statically allocated for BSP, ensure pointer is set
8880Sstevel@tonic-gate 	 */
88912826Skuriakose.kuruvilla@oracle.com 	if (cpu->cpu_id == 0) {
89012826Skuriakose.kuruvilla@oracle.com 		if (cpu->cpu_m.mcpu_cpi == NULL)
89112826Skuriakose.kuruvilla@oracle.com 			cpu->cpu_m.mcpu_cpi = &cpuid_info0;
89212826Skuriakose.kuruvilla@oracle.com 		featureset = x86_featureset0;
89312826Skuriakose.kuruvilla@oracle.com 	} else {
89412826Skuriakose.kuruvilla@oracle.com 		featureset = init_x86_featureset();
89512826Skuriakose.kuruvilla@oracle.com 	}
89612826Skuriakose.kuruvilla@oracle.com 
89712826Skuriakose.kuruvilla@oracle.com 	add_x86_feature(featureset, X86FSET_CPUID);
89812826Skuriakose.kuruvilla@oracle.com 
8993446Smrj 	cpi = cpu->cpu_m.mcpu_cpi;
9003446Smrj 	ASSERT(cpi != NULL);
9010Sstevel@tonic-gate 	cp = &cpi->cpi_std[0];
9021228Sandrei 	cp->cp_eax = 0;
9031228Sandrei 	cpi->cpi_maxeax = __cpuid_insn(cp);
9040Sstevel@tonic-gate 	{
9050Sstevel@tonic-gate 		uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr;
9060Sstevel@tonic-gate 		*iptr++ = cp->cp_ebx;
9070Sstevel@tonic-gate 		*iptr++ = cp->cp_edx;
9080Sstevel@tonic-gate 		*iptr++ = cp->cp_ecx;
9090Sstevel@tonic-gate 		*(char *)&cpi->cpi_vendorstr[12] = '\0';
9100Sstevel@tonic-gate 	}
9110Sstevel@tonic-gate 
9127532SSean.Ye@Sun.COM 	cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr);
9130Sstevel@tonic-gate 	x86_vendor = cpi->cpi_vendor; /* for compatibility */
9140Sstevel@tonic-gate 
9150Sstevel@tonic-gate 	/*
9160Sstevel@tonic-gate 	 * Limit the range in case of weird hardware
9170Sstevel@tonic-gate 	 */
9180Sstevel@tonic-gate 	if (cpi->cpi_maxeax > CPI_MAXEAX_MAX)
9190Sstevel@tonic-gate 		cpi->cpi_maxeax = CPI_MAXEAX_MAX;
9200Sstevel@tonic-gate 	if (cpi->cpi_maxeax < 1)
9210Sstevel@tonic-gate 		goto pass1_done;
9220Sstevel@tonic-gate 
9230Sstevel@tonic-gate 	cp = &cpi->cpi_std[1];
9241228Sandrei 	cp->cp_eax = 1;
9251228Sandrei 	(void) __cpuid_insn(cp);
9260Sstevel@tonic-gate 
9270Sstevel@tonic-gate 	/*
9280Sstevel@tonic-gate 	 * Extract identifying constants for easy access.
9290Sstevel@tonic-gate 	 */
9300Sstevel@tonic-gate 	cpi->cpi_model = CPI_MODEL(cpi);
9310Sstevel@tonic-gate 	cpi->cpi_family = CPI_FAMILY(cpi);
9320Sstevel@tonic-gate 
9331975Sdmick 	if (cpi->cpi_family == 0xf)
9340Sstevel@tonic-gate 		cpi->cpi_family += CPI_FAMILY_XTD(cpi);
9351975Sdmick 
9362001Sdmick 	/*
9374265Skchow 	 * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf.
9382001Sdmick 	 * Intel, and presumably everyone else, uses model == 0xf, as
9392001Sdmick 	 * one would expect (max value means possible overflow).  Sigh.
9402001Sdmick 	 */
9412001Sdmick 
9422001Sdmick 	switch (cpi->cpi_vendor) {
9434855Sksadhukh 	case X86_VENDOR_Intel:
9444855Sksadhukh 		if (IS_EXTENDED_MODEL_INTEL(cpi))
9454855Sksadhukh 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
9464858Sksadhukh 		break;
9472001Sdmick 	case X86_VENDOR_AMD:
9484265Skchow 		if (CPI_FAMILY(cpi) == 0xf)
9492001Sdmick 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
9502001Sdmick 		break;
9512001Sdmick 	default:
9522001Sdmick 		if (cpi->cpi_model == 0xf)
9532001Sdmick 			cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
9542001Sdmick 		break;
9552001Sdmick 	}
9560Sstevel@tonic-gate 
9570Sstevel@tonic-gate 	cpi->cpi_step = CPI_STEP(cpi);
9580Sstevel@tonic-gate 	cpi->cpi_brandid = CPI_BRANDID(cpi);
9590Sstevel@tonic-gate 
9600Sstevel@tonic-gate 	/*
9610Sstevel@tonic-gate 	 * *default* assumptions:
9620Sstevel@tonic-gate 	 * - believe %edx feature word
9630Sstevel@tonic-gate 	 * - ignore %ecx feature word
9640Sstevel@tonic-gate 	 * - 32-bit virtual and physical addressing
9650Sstevel@tonic-gate 	 */
9660Sstevel@tonic-gate 	mask_edx = 0xffffffff;
9670Sstevel@tonic-gate 	mask_ecx = 0;
9680Sstevel@tonic-gate 
9690Sstevel@tonic-gate 	cpi->cpi_pabits = cpi->cpi_vabits = 32;
9700Sstevel@tonic-gate 
9710Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
9720Sstevel@tonic-gate 	case X86_VENDOR_Intel:
9730Sstevel@tonic-gate 		if (cpi->cpi_family == 5)
9740Sstevel@tonic-gate 			x86_type = X86_TYPE_P5;
9751975Sdmick 		else if (IS_LEGACY_P6(cpi)) {
9760Sstevel@tonic-gate 			x86_type = X86_TYPE_P6;
9770Sstevel@tonic-gate 			pentiumpro_bug4046376 = 1;
9780Sstevel@tonic-gate 			pentiumpro_bug4064495 = 1;
9790Sstevel@tonic-gate 			/*
9800Sstevel@tonic-gate 			 * Clear the SEP bit when it was set erroneously
9810Sstevel@tonic-gate 			 */
9820Sstevel@tonic-gate 			if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
9830Sstevel@tonic-gate 				cp->cp_edx &= ~CPUID_INTC_EDX_SEP;
9841975Sdmick 		} else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) {
9850Sstevel@tonic-gate 			x86_type = X86_TYPE_P4;
9860Sstevel@tonic-gate 			/*
9870Sstevel@tonic-gate 			 * We don't currently depend on any of the %ecx
9880Sstevel@tonic-gate 			 * features until Prescott, so we'll only check
9890Sstevel@tonic-gate 			 * this from P4 onwards.  We might want to revisit
9900Sstevel@tonic-gate 			 * that idea later.
9910Sstevel@tonic-gate 			 */
9920Sstevel@tonic-gate 			mask_ecx = 0xffffffff;
9930Sstevel@tonic-gate 		} else if (cpi->cpi_family > 0xf)
9940Sstevel@tonic-gate 			mask_ecx = 0xffffffff;
9954636Sbholler 		/*
9964636Sbholler 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
9974636Sbholler 		 * to obtain the monitor linesize.
9984636Sbholler 		 */
9994636Sbholler 		if (cpi->cpi_maxeax < 5)
10004636Sbholler 			mask_ecx &= ~CPUID_INTC_ECX_MON;
10010Sstevel@tonic-gate 		break;
10020Sstevel@tonic-gate 	case X86_VENDOR_IntelClone:
10030Sstevel@tonic-gate 	default:
10040Sstevel@tonic-gate 		break;
10050Sstevel@tonic-gate 	case X86_VENDOR_AMD:
10060Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_108)
10070Sstevel@tonic-gate 		if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) {
10080Sstevel@tonic-gate 			cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0;
10090Sstevel@tonic-gate 			cpi->cpi_model = 0xc;
10100Sstevel@tonic-gate 		} else
10110Sstevel@tonic-gate #endif
10120Sstevel@tonic-gate 		if (cpi->cpi_family == 5) {
10130Sstevel@tonic-gate 			/*
10140Sstevel@tonic-gate 			 * AMD K5 and K6
10150Sstevel@tonic-gate 			 *
10160Sstevel@tonic-gate 			 * These CPUs have an incomplete implementation
10170Sstevel@tonic-gate 			 * of MCA/MCE which we mask away.
10180Sstevel@tonic-gate 			 */
10191228Sandrei 			mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA);
10201228Sandrei 
10211228Sandrei 			/*
10221228Sandrei 			 * Model 0 uses the wrong (APIC) bit
10231228Sandrei 			 * to indicate PGE.  Fix it here.
10241228Sandrei 			 */
10250Sstevel@tonic-gate 			if (cpi->cpi_model == 0) {
10260Sstevel@tonic-gate 				if (cp->cp_edx & 0x200) {
10270Sstevel@tonic-gate 					cp->cp_edx &= ~0x200;
10280Sstevel@tonic-gate 					cp->cp_edx |= CPUID_INTC_EDX_PGE;
10290Sstevel@tonic-gate 				}
10301228Sandrei 			}
10311228Sandrei 
10321228Sandrei 			/*
10331228Sandrei 			 * Early models had problems w/ MMX; disable.
10341228Sandrei 			 */
10351228Sandrei 			if (cpi->cpi_model < 6)
10361228Sandrei 				mask_edx &= ~CPUID_INTC_EDX_MMX;
10371228Sandrei 		}
10381228Sandrei 
10391228Sandrei 		/*
10401228Sandrei 		 * For newer families, SSE3 and CX16, at least, are valid;
10411228Sandrei 		 * enable all
10421228Sandrei 		 */
10431228Sandrei 		if (cpi->cpi_family >= 0xf)
1044771Sdmick 			mask_ecx = 0xffffffff;
10454636Sbholler 		/*
10464636Sbholler 		 * We don't support MONITOR/MWAIT if leaf 5 is not available
10474636Sbholler 		 * to obtain the monitor linesize.
10484636Sbholler 		 */
10494636Sbholler 		if (cpi->cpi_maxeax < 5)
10504636Sbholler 			mask_ecx &= ~CPUID_INTC_ECX_MON;
10515045Sbholler 
10525084Sjohnlev #if !defined(__xpv)
10535045Sbholler 		/*
10545045Sbholler 		 * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD
10555045Sbholler 		 * processors.  AMD does not intend MWAIT to be used in the cpu
10565045Sbholler 		 * idle loop on current and future processors.  10h and future
10575045Sbholler 		 * AMD processors use more power in MWAIT than HLT.
10585045Sbholler 		 * Pre-family-10h Opterons do not have the MWAIT instruction.
10595045Sbholler 		 */
10605045Sbholler 		idle_cpu_prefer_mwait = 0;
10615084Sjohnlev #endif
10625045Sbholler 
10630Sstevel@tonic-gate 		break;
10640Sstevel@tonic-gate 	case X86_VENDOR_TM:
10650Sstevel@tonic-gate 		/*
10660Sstevel@tonic-gate 		 * workaround the NT workaround in CMS 4.1
10670Sstevel@tonic-gate 		 */
10680Sstevel@tonic-gate 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4 &&
10690Sstevel@tonic-gate 		    (cpi->cpi_step == 2 || cpi->cpi_step == 3))
10700Sstevel@tonic-gate 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
10710Sstevel@tonic-gate 		break;
10720Sstevel@tonic-gate 	case X86_VENDOR_Centaur:
10730Sstevel@tonic-gate 		/*
10740Sstevel@tonic-gate 		 * workaround the NT workarounds again
10750Sstevel@tonic-gate 		 */
10760Sstevel@tonic-gate 		if (cpi->cpi_family == 6)
10770Sstevel@tonic-gate 			cp->cp_edx |= CPUID_INTC_EDX_CX8;
10780Sstevel@tonic-gate 		break;
10790Sstevel@tonic-gate 	case X86_VENDOR_Cyrix:
10800Sstevel@tonic-gate 		/*
10810Sstevel@tonic-gate 		 * We rely heavily on the probing in locore
10820Sstevel@tonic-gate 		 * to actually figure out what parts, if any,
10830Sstevel@tonic-gate 		 * of the Cyrix cpuid instruction to believe.
10840Sstevel@tonic-gate 		 */
10850Sstevel@tonic-gate 		switch (x86_type) {
10860Sstevel@tonic-gate 		case X86_TYPE_CYRIX_486:
10870Sstevel@tonic-gate 			mask_edx = 0;
10880Sstevel@tonic-gate 			break;
10890Sstevel@tonic-gate 		case X86_TYPE_CYRIX_6x86:
10900Sstevel@tonic-gate 			mask_edx = 0;
10910Sstevel@tonic-gate 			break;
10920Sstevel@tonic-gate 		case X86_TYPE_CYRIX_6x86L:
10930Sstevel@tonic-gate 			mask_edx =
10940Sstevel@tonic-gate 			    CPUID_INTC_EDX_DE |
10950Sstevel@tonic-gate 			    CPUID_INTC_EDX_CX8;
10960Sstevel@tonic-gate 			break;
10970Sstevel@tonic-gate 		case X86_TYPE_CYRIX_6x86MX:
10980Sstevel@tonic-gate 			mask_edx =
10990Sstevel@tonic-gate 			    CPUID_INTC_EDX_DE |
11000Sstevel@tonic-gate 			    CPUID_INTC_EDX_MSR |
11010Sstevel@tonic-gate 			    CPUID_INTC_EDX_CX8 |
11020Sstevel@tonic-gate 			    CPUID_INTC_EDX_PGE |
11030Sstevel@tonic-gate 			    CPUID_INTC_EDX_CMOV |
11040Sstevel@tonic-gate 			    CPUID_INTC_EDX_MMX;
11050Sstevel@tonic-gate 			break;
11060Sstevel@tonic-gate 		case X86_TYPE_CYRIX_GXm:
11070Sstevel@tonic-gate 			mask_edx =
11080Sstevel@tonic-gate 			    CPUID_INTC_EDX_MSR |
11090Sstevel@tonic-gate 			    CPUID_INTC_EDX_CX8 |
11100Sstevel@tonic-gate 			    CPUID_INTC_EDX_CMOV |
11110Sstevel@tonic-gate 			    CPUID_INTC_EDX_MMX;
11120Sstevel@tonic-gate 			break;
11130Sstevel@tonic-gate 		case X86_TYPE_CYRIX_MediaGX:
11140Sstevel@tonic-gate 			break;
11150Sstevel@tonic-gate 		case X86_TYPE_CYRIX_MII:
11160Sstevel@tonic-gate 		case X86_TYPE_VIA_CYRIX_III:
11170Sstevel@tonic-gate 			mask_edx =
11180Sstevel@tonic-gate 			    CPUID_INTC_EDX_DE |
11190Sstevel@tonic-gate 			    CPUID_INTC_EDX_TSC |
11200Sstevel@tonic-gate 			    CPUID_INTC_EDX_MSR |
11210Sstevel@tonic-gate 			    CPUID_INTC_EDX_CX8 |
11220Sstevel@tonic-gate 			    CPUID_INTC_EDX_PGE |
11230Sstevel@tonic-gate 			    CPUID_INTC_EDX_CMOV |
11240Sstevel@tonic-gate 			    CPUID_INTC_EDX_MMX;
11250Sstevel@tonic-gate 			break;
11260Sstevel@tonic-gate 		default:
11270Sstevel@tonic-gate 			break;
11280Sstevel@tonic-gate 		}
11290Sstevel@tonic-gate 		break;
11300Sstevel@tonic-gate 	}
11310Sstevel@tonic-gate 
11325084Sjohnlev #if defined(__xpv)
11335084Sjohnlev 	/*
11345084Sjohnlev 	 * Do not support MONITOR/MWAIT under a hypervisor
11355084Sjohnlev 	 */
11365084Sjohnlev 	mask_ecx &= ~CPUID_INTC_ECX_MON;
1137*13134Skuriakose.kuruvilla@oracle.com 	/*
1138*13134Skuriakose.kuruvilla@oracle.com 	 * Do not support XSAVE under a hypervisor for now
1139*13134Skuriakose.kuruvilla@oracle.com 	 */
1140*13134Skuriakose.kuruvilla@oracle.com 	xsave_force_disable = B_TRUE;
1141*13134Skuriakose.kuruvilla@oracle.com 
11425084Sjohnlev #endif	/* __xpv */
11435084Sjohnlev 
1144*13134Skuriakose.kuruvilla@oracle.com 	if (xsave_force_disable) {
1145*13134Skuriakose.kuruvilla@oracle.com 		mask_ecx &= ~CPUID_INTC_ECX_XSAVE;
1146*13134Skuriakose.kuruvilla@oracle.com 		mask_ecx &= ~CPUID_INTC_ECX_AVX;
1147*13134Skuriakose.kuruvilla@oracle.com 	}
1148*13134Skuriakose.kuruvilla@oracle.com 
11490Sstevel@tonic-gate 	/*
11500Sstevel@tonic-gate 	 * Now we've figured out the masks that determine
11510Sstevel@tonic-gate 	 * which bits we choose to believe, apply the masks
11520Sstevel@tonic-gate 	 * to the feature words, then map the kernel's view
11530Sstevel@tonic-gate 	 * of these feature words into its feature word.
11540Sstevel@tonic-gate 	 */
11550Sstevel@tonic-gate 	cp->cp_edx &= mask_edx;
11560Sstevel@tonic-gate 	cp->cp_ecx &= mask_ecx;
11570Sstevel@tonic-gate 
11580Sstevel@tonic-gate 	/*
11593446Smrj 	 * apply any platform restrictions (we don't call this
11603446Smrj 	 * immediately after __cpuid_insn here, because we need the
11613446Smrj 	 * workarounds applied above first)
11620Sstevel@tonic-gate 	 */
11633446Smrj 	platform_cpuid_mangle(cpi->cpi_vendor, 1, cp);
11640Sstevel@tonic-gate 
11653446Smrj 	/*
11663446Smrj 	 * fold in overrides from the "eeprom" mechanism
11673446Smrj 	 */
11680Sstevel@tonic-gate 	cp->cp_edx |= cpuid_feature_edx_include;
11690Sstevel@tonic-gate 	cp->cp_edx &= ~cpuid_feature_edx_exclude;
11700Sstevel@tonic-gate 
11710Sstevel@tonic-gate 	cp->cp_ecx |= cpuid_feature_ecx_include;
11720Sstevel@tonic-gate 	cp->cp_ecx &= ~cpuid_feature_ecx_exclude;
11730Sstevel@tonic-gate 
117412826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_edx & CPUID_INTC_EDX_PSE) {
117512826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_LARGEPAGE);
117612826Skuriakose.kuruvilla@oracle.com 	}
117712826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_edx & CPUID_INTC_EDX_TSC) {
117812826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_TSC);
117912826Skuriakose.kuruvilla@oracle.com 	}
118012826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_edx & CPUID_INTC_EDX_MSR) {
118112826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_MSR);
118212826Skuriakose.kuruvilla@oracle.com 	}
118312826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_edx & CPUID_INTC_EDX_MTRR) {
118412826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_MTRR);
118512826Skuriakose.kuruvilla@oracle.com 	}
118612826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_edx & CPUID_INTC_EDX_PGE) {
118712826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_PGE);
118812826Skuriakose.kuruvilla@oracle.com 	}
118912826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_edx & CPUID_INTC_EDX_CMOV) {
119012826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_CMOV);
119112826Skuriakose.kuruvilla@oracle.com 	}
119212826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_edx & CPUID_INTC_EDX_MMX) {
119312826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_MMX);
119412826Skuriakose.kuruvilla@oracle.com 	}
11950Sstevel@tonic-gate 	if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 &&
119612826Skuriakose.kuruvilla@oracle.com 	    (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) {
119712826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_MCA);
119812826Skuriakose.kuruvilla@oracle.com 	}
119912826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_edx & CPUID_INTC_EDX_PAE) {
120012826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_PAE);
120112826Skuriakose.kuruvilla@oracle.com 	}
120212826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_edx & CPUID_INTC_EDX_CX8) {
120312826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_CX8);
120412826Skuriakose.kuruvilla@oracle.com 	}
120512826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_ecx & CPUID_INTC_ECX_CX16) {
120612826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_CX16);
120712826Skuriakose.kuruvilla@oracle.com 	}
120812826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_edx & CPUID_INTC_EDX_PAT) {
120912826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_PAT);
121012826Skuriakose.kuruvilla@oracle.com 	}
121112826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_edx & CPUID_INTC_EDX_SEP) {
121212826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_SEP);
121312826Skuriakose.kuruvilla@oracle.com 	}
12140Sstevel@tonic-gate 	if (cp->cp_edx & CPUID_INTC_EDX_FXSR) {
12150Sstevel@tonic-gate 		/*
12160Sstevel@tonic-gate 		 * In our implementation, fxsave/fxrstor
12170Sstevel@tonic-gate 		 * are prerequisites before we'll even
12180Sstevel@tonic-gate 		 * try and do SSE things.
12190Sstevel@tonic-gate 		 */
122012826Skuriakose.kuruvilla@oracle.com 		if (cp->cp_edx & CPUID_INTC_EDX_SSE) {
122112826Skuriakose.kuruvilla@oracle.com 			add_x86_feature(featureset, X86FSET_SSE);
122212826Skuriakose.kuruvilla@oracle.com 		}
122312826Skuriakose.kuruvilla@oracle.com 		if (cp->cp_edx & CPUID_INTC_EDX_SSE2) {
122412826Skuriakose.kuruvilla@oracle.com 			add_x86_feature(featureset, X86FSET_SSE2);
122512826Skuriakose.kuruvilla@oracle.com 		}
122612826Skuriakose.kuruvilla@oracle.com 		if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) {
122712826Skuriakose.kuruvilla@oracle.com 			add_x86_feature(featureset, X86FSET_SSE3);
122812826Skuriakose.kuruvilla@oracle.com 		}
12295269Skk208521 		if (cpi->cpi_vendor == X86_VENDOR_Intel) {
123012826Skuriakose.kuruvilla@oracle.com 			if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) {
123112826Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_SSSE3);
123212826Skuriakose.kuruvilla@oracle.com 			}
123312826Skuriakose.kuruvilla@oracle.com 			if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) {
123412826Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_SSE4_1);
123512826Skuriakose.kuruvilla@oracle.com 			}
123612826Skuriakose.kuruvilla@oracle.com 			if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) {
123712826Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_SSE4_2);
123812826Skuriakose.kuruvilla@oracle.com 			}
123912826Skuriakose.kuruvilla@oracle.com 			if (cp->cp_ecx & CPUID_INTC_ECX_AES) {
124012826Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_AES);
124112826Skuriakose.kuruvilla@oracle.com 			}
124212826Skuriakose.kuruvilla@oracle.com 			if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
124312826Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_PCLMULQDQ);
124412826Skuriakose.kuruvilla@oracle.com 			}
1245*13134Skuriakose.kuruvilla@oracle.com 
1246*13134Skuriakose.kuruvilla@oracle.com 			if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) {
1247*13134Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_XSAVE);
1248*13134Skuriakose.kuruvilla@oracle.com 				/* We only test AVX when there is XSAVE */
1249*13134Skuriakose.kuruvilla@oracle.com 				if (cp->cp_ecx & CPUID_INTC_ECX_AVX) {
1250*13134Skuriakose.kuruvilla@oracle.com 					add_x86_feature(featureset,
1251*13134Skuriakose.kuruvilla@oracle.com 					    X86FSET_AVX);
1252*13134Skuriakose.kuruvilla@oracle.com 				}
1253*13134Skuriakose.kuruvilla@oracle.com 			}
12545269Skk208521 		}
12550Sstevel@tonic-gate 	}
125612826Skuriakose.kuruvilla@oracle.com 	if (cp->cp_edx & CPUID_INTC_EDX_DE) {
125712826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_DE);
125812826Skuriakose.kuruvilla@oracle.com 	}
12597716SBill.Holler@Sun.COM #if !defined(__xpv)
12604481Sbholler 	if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
12617716SBill.Holler@Sun.COM 
12627716SBill.Holler@Sun.COM 		/*
12637716SBill.Holler@Sun.COM 		 * We require the CLFLUSH instruction for erratum workaround
12647716SBill.Holler@Sun.COM 		 * to use MONITOR/MWAIT.
12657716SBill.Holler@Sun.COM 		 */
12667716SBill.Holler@Sun.COM 		if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
12677716SBill.Holler@Sun.COM 			cpi->cpi_mwait.support |= MWAIT_SUPPORT;
126812826Skuriakose.kuruvilla@oracle.com 			add_x86_feature(featureset, X86FSET_MWAIT);
12697716SBill.Holler@Sun.COM 		} else {
12707716SBill.Holler@Sun.COM 			extern int idle_cpu_assert_cflush_monitor;
12717716SBill.Holler@Sun.COM 
12727716SBill.Holler@Sun.COM 			/*
12737716SBill.Holler@Sun.COM 			 * All processors we are aware of which have
12747716SBill.Holler@Sun.COM 			 * MONITOR/MWAIT also have CLFLUSH.
12757716SBill.Holler@Sun.COM 			 */
12767716SBill.Holler@Sun.COM 			if (idle_cpu_assert_cflush_monitor) {
12777716SBill.Holler@Sun.COM 				ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) &&
12787716SBill.Holler@Sun.COM 				    (cp->cp_edx & CPUID_INTC_EDX_CLFSH));
12797716SBill.Holler@Sun.COM 			}
12807716SBill.Holler@Sun.COM 		}
12814481Sbholler 	}
12827716SBill.Holler@Sun.COM #endif	/* __xpv */
12830Sstevel@tonic-gate 
12847589SVikram.Hegde@Sun.COM 	/*
12857589SVikram.Hegde@Sun.COM 	 * Only needed the first time; the rest of the cpus follow suit.
12867589SVikram.Hegde@Sun.COM 	 * We only capture this for the bootcpu.
12877589SVikram.Hegde@Sun.COM 	 */
12887589SVikram.Hegde@Sun.COM 	if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
128912826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_CLFSH);
12907589SVikram.Hegde@Sun.COM 		x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8);
12917589SVikram.Hegde@Sun.COM 	}
129212826Skuriakose.kuruvilla@oracle.com 	if (is_x86_feature(featureset, X86FSET_PAE))
12930Sstevel@tonic-gate 		cpi->cpi_pabits = 36;
12940Sstevel@tonic-gate 
12950Sstevel@tonic-gate 	/*
12960Sstevel@tonic-gate 	 * Hyperthreading configuration is slightly tricky on Intel
12970Sstevel@tonic-gate 	 * and pure clones, and even trickier on AMD.
12980Sstevel@tonic-gate 	 *
12990Sstevel@tonic-gate 	 * (AMD chose to set the HTT bit on their CMP processors,
13000Sstevel@tonic-gate 	 * even though they're not actually hyperthreaded.  Thus it
13010Sstevel@tonic-gate 	 * takes a bit more work to figure out what's really going
13023446Smrj 	 * on ... see the handling of the CMP_LGCY bit below)
13030Sstevel@tonic-gate 	 */
13040Sstevel@tonic-gate 	if (cp->cp_edx & CPUID_INTC_EDX_HTT) {
13050Sstevel@tonic-gate 		cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
13060Sstevel@tonic-gate 		if (cpi->cpi_ncpu_per_chip > 1)
130712826Skuriakose.kuruvilla@oracle.com 			add_x86_feature(featureset, X86FSET_HTT);
13081228Sandrei 	} else {
13091228Sandrei 		cpi->cpi_ncpu_per_chip = 1;
13100Sstevel@tonic-gate 	}
13110Sstevel@tonic-gate 
13120Sstevel@tonic-gate 	/*
13130Sstevel@tonic-gate 	 * Work on the "extended" feature information, doing
13140Sstevel@tonic-gate 	 * some basic initialization for cpuid_pass2()
13150Sstevel@tonic-gate 	 */
13160Sstevel@tonic-gate 	xcpuid = 0;
13170Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
13180Sstevel@tonic-gate 	case X86_VENDOR_Intel:
13191975Sdmick 		if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf)
13200Sstevel@tonic-gate 			xcpuid++;
13210Sstevel@tonic-gate 		break;
13220Sstevel@tonic-gate 	case X86_VENDOR_AMD:
13230Sstevel@tonic-gate 		if (cpi->cpi_family > 5 ||
13240Sstevel@tonic-gate 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
13250Sstevel@tonic-gate 			xcpuid++;
13260Sstevel@tonic-gate 		break;
13270Sstevel@tonic-gate 	case X86_VENDOR_Cyrix:
13280Sstevel@tonic-gate 		/*
13290Sstevel@tonic-gate 		 * Only these Cyrix CPUs are -known- to support
13300Sstevel@tonic-gate 		 * extended cpuid operations.
13310Sstevel@tonic-gate 		 */
13320Sstevel@tonic-gate 		if (x86_type == X86_TYPE_VIA_CYRIX_III ||
13330Sstevel@tonic-gate 		    x86_type == X86_TYPE_CYRIX_GXm)
13340Sstevel@tonic-gate 			xcpuid++;
13350Sstevel@tonic-gate 		break;
13360Sstevel@tonic-gate 	case X86_VENDOR_Centaur:
13370Sstevel@tonic-gate 	case X86_VENDOR_TM:
13380Sstevel@tonic-gate 	default:
13390Sstevel@tonic-gate 		xcpuid++;
13400Sstevel@tonic-gate 		break;
13410Sstevel@tonic-gate 	}
13420Sstevel@tonic-gate 
13430Sstevel@tonic-gate 	if (xcpuid) {
13440Sstevel@tonic-gate 		cp = &cpi->cpi_extd[0];
13451228Sandrei 		cp->cp_eax = 0x80000000;
13461228Sandrei 		cpi->cpi_xmaxeax = __cpuid_insn(cp);
13470Sstevel@tonic-gate 	}
13480Sstevel@tonic-gate 
13490Sstevel@tonic-gate 	if (cpi->cpi_xmaxeax & 0x80000000) {
13500Sstevel@tonic-gate 
13510Sstevel@tonic-gate 		if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX)
13520Sstevel@tonic-gate 			cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX;
13530Sstevel@tonic-gate 
13540Sstevel@tonic-gate 		switch (cpi->cpi_vendor) {
13550Sstevel@tonic-gate 		case X86_VENDOR_Intel:
13560Sstevel@tonic-gate 		case X86_VENDOR_AMD:
13570Sstevel@tonic-gate 			if (cpi->cpi_xmaxeax < 0x80000001)
13580Sstevel@tonic-gate 				break;
13590Sstevel@tonic-gate 			cp = &cpi->cpi_extd[1];
13601228Sandrei 			cp->cp_eax = 0x80000001;
13611228Sandrei 			(void) __cpuid_insn(cp);
13623446Smrj 
13630Sstevel@tonic-gate 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
13640Sstevel@tonic-gate 			    cpi->cpi_family == 5 &&
13650Sstevel@tonic-gate 			    cpi->cpi_model == 6 &&
13660Sstevel@tonic-gate 			    cpi->cpi_step == 6) {
13670Sstevel@tonic-gate 				/*
13680Sstevel@tonic-gate 				 * K6 model 6 uses bit 10 to indicate SYSC
13690Sstevel@tonic-gate 				 * Later models use bit 11. Fix it here.
13700Sstevel@tonic-gate 				 */
13710Sstevel@tonic-gate 				if (cp->cp_edx & 0x400) {
13720Sstevel@tonic-gate 					cp->cp_edx &= ~0x400;
13730Sstevel@tonic-gate 					cp->cp_edx |= CPUID_AMD_EDX_SYSC;
13740Sstevel@tonic-gate 				}
13750Sstevel@tonic-gate 			}
13760Sstevel@tonic-gate 
13773446Smrj 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp);
13783446Smrj 
13790Sstevel@tonic-gate 			/*
13800Sstevel@tonic-gate 			 * Compute the additions to the kernel's feature word.
13810Sstevel@tonic-gate 			 */
138212826Skuriakose.kuruvilla@oracle.com 			if (cp->cp_edx & CPUID_AMD_EDX_NX) {
138312826Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_NX);
138412826Skuriakose.kuruvilla@oracle.com 			}
13850Sstevel@tonic-gate 
13867656SSherry.Moore@Sun.COM 			/*
13877656SSherry.Moore@Sun.COM 			 * Regardless whether or not we boot 64-bit,
13887656SSherry.Moore@Sun.COM 			 * we should have a way to identify whether
13897656SSherry.Moore@Sun.COM 			 * the CPU is capable of running 64-bit.
13907656SSherry.Moore@Sun.COM 			 */
139112826Skuriakose.kuruvilla@oracle.com 			if (cp->cp_edx & CPUID_AMD_EDX_LM) {
139212826Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_64);
139312826Skuriakose.kuruvilla@oracle.com 			}
13947656SSherry.Moore@Sun.COM 
13955349Skchow #if defined(__amd64)
13965349Skchow 			/* 1 GB large page - enable only for 64 bit kernel */
139712826Skuriakose.kuruvilla@oracle.com 			if (cp->cp_edx & CPUID_AMD_EDX_1GPG) {
139812826Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_1GPG);
139912826Skuriakose.kuruvilla@oracle.com 			}
14005349Skchow #endif
14015349Skchow 
14024628Skk208521 			if ((cpi->cpi_vendor == X86_VENDOR_AMD) &&
14034628Skk208521 			    (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) &&
140412826Skuriakose.kuruvilla@oracle.com 			    (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) {
140512826Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_SSE4A);
140612826Skuriakose.kuruvilla@oracle.com 			}
14074628Skk208521 
14080Sstevel@tonic-gate 			/*
14093446Smrj 			 * If both the HTT and CMP_LGCY bits are set,
14101228Sandrei 			 * then we're not actually HyperThreaded.  Read
14111228Sandrei 			 * "AMD CPUID Specification" for more details.
14120Sstevel@tonic-gate 			 */
14130Sstevel@tonic-gate 			if (cpi->cpi_vendor == X86_VENDOR_AMD &&
141412826Skuriakose.kuruvilla@oracle.com 			    is_x86_feature(featureset, X86FSET_HTT) &&
14153446Smrj 			    (cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) {
141612826Skuriakose.kuruvilla@oracle.com 				remove_x86_feature(featureset, X86FSET_HTT);
141712826Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_CMP);
14181228Sandrei 			}
14193446Smrj #if defined(__amd64)
14200Sstevel@tonic-gate 			/*
14210Sstevel@tonic-gate 			 * It's really tricky to support syscall/sysret in
14220Sstevel@tonic-gate 			 * the i386 kernel; we rely on sysenter/sysexit
14230Sstevel@tonic-gate 			 * instead.  In the amd64 kernel, things are -way-
14240Sstevel@tonic-gate 			 * better.
14250Sstevel@tonic-gate 			 */
142612826Skuriakose.kuruvilla@oracle.com 			if (cp->cp_edx & CPUID_AMD_EDX_SYSC) {
142712826Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_ASYSC);
142812826Skuriakose.kuruvilla@oracle.com 			}
14290Sstevel@tonic-gate 
14300Sstevel@tonic-gate 			/*
14310Sstevel@tonic-gate 			 * While we're thinking about system calls, note
14320Sstevel@tonic-gate 			 * that AMD processors don't support sysenter
14330Sstevel@tonic-gate 			 * in long mode at all, so don't try to program them.
14340Sstevel@tonic-gate 			 */
143512826Skuriakose.kuruvilla@oracle.com 			if (x86_vendor == X86_VENDOR_AMD) {
143612826Skuriakose.kuruvilla@oracle.com 				remove_x86_feature(featureset, X86FSET_SEP);
143712826Skuriakose.kuruvilla@oracle.com 			}
14380Sstevel@tonic-gate #endif
143912826Skuriakose.kuruvilla@oracle.com 			if (cp->cp_edx & CPUID_AMD_EDX_TSCP) {
144012826Skuriakose.kuruvilla@oracle.com 				add_x86_feature(featureset, X86FSET_TSCP);
144112826Skuriakose.kuruvilla@oracle.com 			}
14420Sstevel@tonic-gate 			break;
14430Sstevel@tonic-gate 		default:
14440Sstevel@tonic-gate 			break;
14450Sstevel@tonic-gate 		}
14460Sstevel@tonic-gate 
14471228Sandrei 		/*
14481228Sandrei 		 * Get CPUID data about processor cores and hyperthreads.
14491228Sandrei 		 */
14500Sstevel@tonic-gate 		switch (cpi->cpi_vendor) {
14510Sstevel@tonic-gate 		case X86_VENDOR_Intel:
14521228Sandrei 			if (cpi->cpi_maxeax >= 4) {
14531228Sandrei 				cp = &cpi->cpi_std[4];
14541228Sandrei 				cp->cp_eax = 4;
14551228Sandrei 				cp->cp_ecx = 0;
14561228Sandrei 				(void) __cpuid_insn(cp);
14573446Smrj 				platform_cpuid_mangle(cpi->cpi_vendor, 4, cp);
14581228Sandrei 			}
14591228Sandrei 			/*FALLTHROUGH*/
14600Sstevel@tonic-gate 		case X86_VENDOR_AMD:
14610Sstevel@tonic-gate 			if (cpi->cpi_xmaxeax < 0x80000008)
14620Sstevel@tonic-gate 				break;
14630Sstevel@tonic-gate 			cp = &cpi->cpi_extd[8];
14641228Sandrei 			cp->cp_eax = 0x80000008;
14651228Sandrei 			(void) __cpuid_insn(cp);
14663446Smrj 			platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp);
14673446Smrj 
14680Sstevel@tonic-gate 			/*
14690Sstevel@tonic-gate 			 * Virtual and physical address limits from
14700Sstevel@tonic-gate 			 * cpuid override previously guessed values.
14710Sstevel@tonic-gate 			 */
14720Sstevel@tonic-gate 			cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0);
14730Sstevel@tonic-gate 			cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8);
14740Sstevel@tonic-gate 			break;
14750Sstevel@tonic-gate 		default:
14760Sstevel@tonic-gate 			break;
14770Sstevel@tonic-gate 		}
14781228Sandrei 
14794606Sesaxe 		/*
14804606Sesaxe 		 * Derive the number of cores per chip
14814606Sesaxe 		 */
14821228Sandrei 		switch (cpi->cpi_vendor) {
14831228Sandrei 		case X86_VENDOR_Intel:
14841228Sandrei 			if (cpi->cpi_maxeax < 4) {
14851228Sandrei 				cpi->cpi_ncore_per_chip = 1;
14861228Sandrei 				break;
14871228Sandrei 			} else {
14881228Sandrei 				cpi->cpi_ncore_per_chip =
14891228Sandrei 				    BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1;
14901228Sandrei 			}
14911228Sandrei 			break;
14921228Sandrei 		case X86_VENDOR_AMD:
14931228Sandrei 			if (cpi->cpi_xmaxeax < 0x80000008) {
14941228Sandrei 				cpi->cpi_ncore_per_chip = 1;
14951228Sandrei 				break;
14961228Sandrei 			} else {
14975870Sgavinm 				/*
14985870Sgavinm 				 * On family 0xf cpuid fn 0x80000008 ECX[7:0] "NC" is
14995870Sgavinm 				 * 1 less than the number of physical cores on
15005870Sgavinm 				 * the chip.  In family 0x10 this value can
15015870Sgavinm 				 * be affected by "downcoring" - it reflects
15025870Sgavinm 				 * 1 less than the number of cores actually
15035870Sgavinm 				 * enabled on this node.
15045870Sgavinm 				 */
15051228Sandrei 				cpi->cpi_ncore_per_chip =
15061228Sandrei 				    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
15071228Sandrei 			}
15081228Sandrei 			break;
15091228Sandrei 		default:
15101228Sandrei 			cpi->cpi_ncore_per_chip = 1;
15111228Sandrei 			break;
15121228Sandrei 		}
15138906SEric.Saxe@Sun.COM 
15148906SEric.Saxe@Sun.COM 		/*
15158906SEric.Saxe@Sun.COM 		 * Get CPUID data about TSC Invariance in Deep C-State.
15168906SEric.Saxe@Sun.COM 		 */
15178906SEric.Saxe@Sun.COM 		switch (cpi->cpi_vendor) {
15188906SEric.Saxe@Sun.COM 		case X86_VENDOR_Intel:
15198906SEric.Saxe@Sun.COM 			if (cpi->cpi_maxeax >= 7) {
15208906SEric.Saxe@Sun.COM 				cp = &cpi->cpi_extd[7];
15218906SEric.Saxe@Sun.COM 				cp->cp_eax = 0x80000007;
15228906SEric.Saxe@Sun.COM 				cp->cp_ecx = 0;
15238906SEric.Saxe@Sun.COM 				(void) __cpuid_insn(cp);
15248906SEric.Saxe@Sun.COM 			}
15258906SEric.Saxe@Sun.COM 			break;
15268906SEric.Saxe@Sun.COM 		default:
15278906SEric.Saxe@Sun.COM 			break;
15288906SEric.Saxe@Sun.COM 		}
15295284Sgavinm 	} else {
15305284Sgavinm 		cpi->cpi_ncore_per_chip = 1;
15310Sstevel@tonic-gate 	}
15320Sstevel@tonic-gate 
15331228Sandrei 	/*
15341228Sandrei 	 * If more than one core, then this processor is CMP.
15351228Sandrei 	 */
153612826Skuriakose.kuruvilla@oracle.com 	if (cpi->cpi_ncore_per_chip > 1) {
153712826Skuriakose.kuruvilla@oracle.com 		add_x86_feature(featureset, X86FSET_CMP);
153812826Skuriakose.kuruvilla@oracle.com 	}
15393446Smrj 
15401228Sandrei 	/*
15411228Sandrei 	 * If the number of cores is the same as the number
15421228Sandrei 	 * of CPUs, then we cannot have HyperThreading.
15431228Sandrei 	 */
154412826Skuriakose.kuruvilla@oracle.com 	if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) {
154512826Skuriakose.kuruvilla@oracle.com 		remove_x86_feature(featureset, X86FSET_HTT);
154612826Skuriakose.kuruvilla@oracle.com 	}
15471228Sandrei 
154810947SSrihari.Venkatesan@Sun.COM 	cpi->cpi_apicid = CPI_APIC_ID(cpi);
154910947SSrihari.Venkatesan@Sun.COM 	cpi->cpi_procnodes_per_pkg = 1;
155012826Skuriakose.kuruvilla@oracle.com 	if (is_x86_feature(featureset, X86FSET_HTT) == B_FALSE &&
155112826Skuriakose.kuruvilla@oracle.com 	    is_x86_feature(featureset, X86FSET_CMP) == B_FALSE) {
15521228Sandrei 		/*
15531228Sandrei 		 * Single-core single-threaded processors.
15541228Sandrei 		 */
15550Sstevel@tonic-gate 		cpi->cpi_chipid = -1;
15560Sstevel@tonic-gate 		cpi->cpi_clogid = 0;
15571228Sandrei 		cpi->cpi_coreid = cpu->cpu_id;
15585870Sgavinm 		cpi->cpi_pkgcoreid = 0;
155910947SSrihari.Venkatesan@Sun.COM 		if (cpi->cpi_vendor == X86_VENDOR_AMD)
156010947SSrihari.Venkatesan@Sun.COM 			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0);
156110947SSrihari.Venkatesan@Sun.COM 		else
156210947SSrihari.Venkatesan@Sun.COM 			cpi->cpi_procnodeid = cpi->cpi_chipid;
15630Sstevel@tonic-gate 	} else if (cpi->cpi_ncpu_per_chip > 1) {
156410947SSrihari.Venkatesan@Sun.COM 		if (cpi->cpi_vendor == X86_VENDOR_Intel)
156512826Skuriakose.kuruvilla@oracle.com 			cpuid_intel_getids(cpu, featureset);
156610947SSrihari.Venkatesan@Sun.COM 		else if (cpi->cpi_vendor == X86_VENDOR_AMD)
156710947SSrihari.Venkatesan@Sun.COM 			cpuid_amd_getids(cpu);
156810947SSrihari.Venkatesan@Sun.COM 		else {
15691228Sandrei 			/*
15701228Sandrei 			 * All other processors are currently
15711228Sandrei 			 * assumed to have single cores.
15721228Sandrei 			 */
15731228Sandrei 			cpi->cpi_coreid = cpi->cpi_chipid;
15745870Sgavinm 			cpi->cpi_pkgcoreid = 0;
157510947SSrihari.Venkatesan@Sun.COM 			cpi->cpi_procnodeid = cpi->cpi_chipid;
15761228Sandrei 		}
15770Sstevel@tonic-gate 	}
15780Sstevel@tonic-gate 
15792869Sgavinm 	/*
15802869Sgavinm 	 * Synthesize chip "revision" and socket type
15812869Sgavinm 	 */
15827532SSean.Ye@Sun.COM 	cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family,
15837532SSean.Ye@Sun.COM 	    cpi->cpi_model, cpi->cpi_step);
15847532SSean.Ye@Sun.COM 	cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor,
15857532SSean.Ye@Sun.COM 	    cpi->cpi_family, cpi->cpi_model, cpi->cpi_step);
15867532SSean.Ye@Sun.COM 	cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family,
15877532SSean.Ye@Sun.COM 	    cpi->cpi_model, cpi->cpi_step);
15882869Sgavinm 
15890Sstevel@tonic-gate pass1_done:
15900Sstevel@tonic-gate 	cpi->cpi_pass = 1;
159112826Skuriakose.kuruvilla@oracle.com 	return (featureset);
15920Sstevel@tonic-gate }
15930Sstevel@tonic-gate 
15940Sstevel@tonic-gate /*
15950Sstevel@tonic-gate  * Make copies of the cpuid table entries we depend on, in
15960Sstevel@tonic-gate  * part for ease of parsing now, in part so that we have only
15970Sstevel@tonic-gate  * one place to correct any of it, in part for ease of
15980Sstevel@tonic-gate  * later export to userland, and in part so we can look at
15990Sstevel@tonic-gate  * this stuff in a crash dump.
16000Sstevel@tonic-gate  */
16010Sstevel@tonic-gate 
16020Sstevel@tonic-gate /*ARGSUSED*/
16030Sstevel@tonic-gate void
16040Sstevel@tonic-gate cpuid_pass2(cpu_t *cpu)
16050Sstevel@tonic-gate {
16060Sstevel@tonic-gate 	uint_t n, nmax;
16070Sstevel@tonic-gate 	int i;
16081228Sandrei 	struct cpuid_regs *cp;
16090Sstevel@tonic-gate 	uint8_t *dp;
16100Sstevel@tonic-gate 	uint32_t *iptr;
16110Sstevel@tonic-gate 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
16120Sstevel@tonic-gate 
16130Sstevel@tonic-gate 	ASSERT(cpi->cpi_pass == 1);
16140Sstevel@tonic-gate 
16150Sstevel@tonic-gate 	if (cpi->cpi_maxeax < 1)
16160Sstevel@tonic-gate 		goto pass2_done;
16170Sstevel@tonic-gate 
16180Sstevel@tonic-gate 	if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD)
16190Sstevel@tonic-gate 		nmax = NMAX_CPI_STD;
16200Sstevel@tonic-gate 	/*
16210Sstevel@tonic-gate 	 * (We already handled n == 0 and n == 1 in pass 1)
16220Sstevel@tonic-gate 	 */
16230Sstevel@tonic-gate 	for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) {
16241228Sandrei 		cp->cp_eax = n;
16254606Sesaxe 
16264606Sesaxe 		/*
16274606Sesaxe 		 * CPUID function 4 expects %ecx to be initialized
16284606Sesaxe 		 * with an index which indicates which cache to return
16294606Sesaxe 		 * information about. The OS is expected to call function 4
16304606Sesaxe 		 * with %ecx set to 0, 1, 2, ... until it returns with
16314606Sesaxe 		 * EAX[4:0] set to 0, which indicates there are no more
16324606Sesaxe 		 * caches.
16334606Sesaxe 		 *
16344606Sesaxe 		 * Here, populate cpi_std[4] with the information returned by
16354606Sesaxe 		 * function 4 when %ecx == 0, and do the rest in cpuid_pass3()
16364606Sesaxe 		 * when dynamic memory allocation becomes available.
16374606Sesaxe 		 *
16384606Sesaxe 		 * Note: we need to explicitly initialize %ecx here, since
16394606Sesaxe 		 * function 4 may have been previously invoked.
16404606Sesaxe 		 */
16414606Sesaxe 		if (n == 4)
16424606Sesaxe 			cp->cp_ecx = 0;
16434606Sesaxe 
16441228Sandrei 		(void) __cpuid_insn(cp);
16453446Smrj 		platform_cpuid_mangle(cpi->cpi_vendor, n, cp);
16460Sstevel@tonic-gate 		switch (n) {
16470Sstevel@tonic-gate 		case 2:
16480Sstevel@tonic-gate 			/*
16490Sstevel@tonic-gate 			 * "the lower 8 bits of the %eax register
16500Sstevel@tonic-gate 			 * contain a value that identifies the number
16510Sstevel@tonic-gate 			 * of times the cpuid [instruction] has to be
16520Sstevel@tonic-gate 			 * executed to obtain a complete image of the
16530Sstevel@tonic-gate 			 * processor's caching systems."
16540Sstevel@tonic-gate 			 *
16550Sstevel@tonic-gate 			 * How *do* they make this stuff up?
16560Sstevel@tonic-gate 			 */
16570Sstevel@tonic-gate 			cpi->cpi_ncache = sizeof (*cp) *
16580Sstevel@tonic-gate 			    BITX(cp->cp_eax, 7, 0);
16590Sstevel@tonic-gate 			if (cpi->cpi_ncache == 0)
16600Sstevel@tonic-gate 				break;
16610Sstevel@tonic-gate 			cpi->cpi_ncache--;	/* skip count byte */
16620Sstevel@tonic-gate 
16630Sstevel@tonic-gate 			/*
16640Sstevel@tonic-gate 			 * Well, for now, rather than attempt to implement
16650Sstevel@tonic-gate 			 * this slightly dubious algorithm, we just look
16660Sstevel@tonic-gate 			 * at the first 15 ..
16670Sstevel@tonic-gate 			 */
16680Sstevel@tonic-gate 			if (cpi->cpi_ncache > (sizeof (*cp) - 1))
16690Sstevel@tonic-gate 				cpi->cpi_ncache = sizeof (*cp) - 1;
16700Sstevel@tonic-gate 
16710Sstevel@tonic-gate 			dp = cpi->cpi_cacheinfo;
16720Sstevel@tonic-gate 			if (BITX(cp->cp_eax, 31, 31) == 0) {
16730Sstevel@tonic-gate 				uint8_t *p = (void *)&cp->cp_eax;
16746317Skk208521 				for (i = 1; i < 4; i++)
16750Sstevel@tonic-gate 					if (p[i] != 0)
16760Sstevel@tonic-gate 						*dp++ = p[i];
16770Sstevel@tonic-gate 			}
16780Sstevel@tonic-gate 			if (BITX(cp->cp_ebx, 31, 31) == 0) {
16790Sstevel@tonic-gate 				uint8_t *p = (void *)&cp->cp_ebx;
16800Sstevel@tonic-gate 				for (i = 0; i < 4; i++)
16810Sstevel@tonic-gate 					if (p[i] != 0)
16820Sstevel@tonic-gate 						*dp++ = p[i];
16830Sstevel@tonic-gate 			}
16840Sstevel@tonic-gate 			if (BITX(cp->cp_ecx, 31, 31) == 0) {
16850Sstevel@tonic-gate 				uint8_t *p = (void *)&cp->cp_ecx;
16860Sstevel@tonic-gate 				for (i = 0; i < 4; i++)
16870Sstevel@tonic-gate 					if (p[i] != 0)
16880Sstevel@tonic-gate 						*dp++ = p[i];
16890Sstevel@tonic-gate 			}
16900Sstevel@tonic-gate 			if (BITX(cp->cp_edx, 31, 31) == 0) {
16910Sstevel@tonic-gate 				uint8_t *p = (void *)&cp->cp_edx;
16920Sstevel@tonic-gate 				for (i = 0; i < 4; i++)
16930Sstevel@tonic-gate 					if (p[i] != 0)
16940Sstevel@tonic-gate 						*dp++ = p[i];
16950Sstevel@tonic-gate 			}
16960Sstevel@tonic-gate 			break;
16974481Sbholler 
16980Sstevel@tonic-gate 		case 3:	/* Processor serial number, if PSN supported */
16994481Sbholler 			break;
17004481Sbholler 
17010Sstevel@tonic-gate 		case 4:	/* Deterministic cache parameters */
17024481Sbholler 			break;
17034481Sbholler 
17040Sstevel@tonic-gate 		case 5:	/* Monitor/Mwait parameters */
17055045Sbholler 		{
17065045Sbholler 			size_t mwait_size;
17074481Sbholler 
17084481Sbholler 			/*
17094481Sbholler 			 * check cpi_mwait.support which was set in cpuid_pass1
17104481Sbholler 			 */
17114481Sbholler 			if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT))
17124481Sbholler 				break;
17134481Sbholler 
17145045Sbholler 			/*
17155045Sbholler 			 * Protect ourselves from an insane mwait line size.
17165045Sbholler 			 * Workaround for incomplete hardware emulator(s).
17175045Sbholler 			 */
17185045Sbholler 			mwait_size = (size_t)MWAIT_SIZE_MAX(cpi);
17195045Sbholler 			if (mwait_size < sizeof (uint32_t) ||
17205045Sbholler 			    !ISP2(mwait_size)) {
17215045Sbholler #if DEBUG
17225045Sbholler 				cmn_err(CE_NOTE, "Cannot handle cpu %d mwait "
17237798SSaurabh.Mishra@Sun.COM 				    "size %ld", cpu->cpu_id, (long)mwait_size);
17245045Sbholler #endif
17255045Sbholler 				break;
17265045Sbholler 			}
17275045Sbholler 
17284481Sbholler 			cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi);
17295045Sbholler 			cpi->cpi_mwait.mon_max = mwait_size;
17304481Sbholler 			if (MWAIT_EXTENSION(cpi)) {
17314481Sbholler 				cpi->cpi_mwait.support |= MWAIT_EXTENSIONS;
17324481Sbholler 				if (MWAIT_INT_ENABLE(cpi))
17334481Sbholler 					cpi->cpi_mwait.support |=
17344481Sbholler 					    MWAIT_ECX_INT_ENABLE;
17354481Sbholler 			}
17364481Sbholler 			break;
17375045Sbholler 		}
17380Sstevel@tonic-gate 		default:
17390Sstevel@tonic-gate 			break;
17400Sstevel@tonic-gate 		}
17410Sstevel@tonic-gate 	}
17420Sstevel@tonic-gate 
17437282Smishra 	if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) {
17447798SSaurabh.Mishra@Sun.COM 		struct cpuid_regs regs;
17457798SSaurabh.Mishra@Sun.COM 
17467798SSaurabh.Mishra@Sun.COM 		cp = &regs;
17477282Smishra 		cp->cp_eax = 0xB;
17487798SSaurabh.Mishra@Sun.COM 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
17497282Smishra 
17507282Smishra 		(void) __cpuid_insn(cp);
17517282Smishra 
17527282Smishra 		/*
17537282Smishra 		 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
17547282Smishra 		 * indicates that the extended topology enumeration leaf is
17557282Smishra 		 * available.
17567282Smishra 		 */
17577282Smishra 		if (cp->cp_ebx) {
17587282Smishra 			uint32_t x2apic_id;
17597282Smishra 			uint_t coreid_shift = 0;
17607282Smishra 			uint_t ncpu_per_core = 1;
17617282Smishra 			uint_t chipid_shift = 0;
17627282Smishra 			uint_t ncpu_per_chip = 1;
17637282Smishra 			uint_t i;
17647282Smishra 			uint_t level;
17657282Smishra 
17667282Smishra 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
17677282Smishra 				cp->cp_eax = 0xB;
17687282Smishra 				cp->cp_ecx = i;
17697282Smishra 
17707282Smishra 				(void) __cpuid_insn(cp);
17717282Smishra 				level = CPI_CPU_LEVEL_TYPE(cp);
17727282Smishra 
17737282Smishra 				if (level == 1) {
17747282Smishra 					x2apic_id = cp->cp_edx;
17757282Smishra 					coreid_shift = BITX(cp->cp_eax, 4, 0);
17767282Smishra 					ncpu_per_core = BITX(cp->cp_ebx, 15, 0);
17777282Smishra 				} else if (level == 2) {
17787282Smishra 					x2apic_id = cp->cp_edx;
17797282Smishra 					chipid_shift = BITX(cp->cp_eax, 4, 0);
17807282Smishra 					ncpu_per_chip = BITX(cp->cp_ebx, 15, 0);
17817282Smishra 				}
17827282Smishra 			}
17837282Smishra 
17847282Smishra 			cpi->cpi_apicid = x2apic_id;
17857282Smishra 			cpi->cpi_ncpu_per_chip = ncpu_per_chip;
17867282Smishra 			cpi->cpi_ncore_per_chip = ncpu_per_chip /
17877282Smishra 			    ncpu_per_core;
17887282Smishra 			cpi->cpi_chipid = x2apic_id >> chipid_shift;
17897282Smishra 			cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1);
17907282Smishra 			cpi->cpi_coreid = x2apic_id >> coreid_shift;
17917282Smishra 			cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
17927282Smishra 		}
17937798SSaurabh.Mishra@Sun.COM 
17947798SSaurabh.Mishra@Sun.COM 		/* Make cp NULL so that we don't stumble on others */
17957798SSaurabh.Mishra@Sun.COM 		cp = NULL;
17967282Smishra 	}
17977282Smishra 
1798*13134Skuriakose.kuruvilla@oracle.com 	/*
1799*13134Skuriakose.kuruvilla@oracle.com 	 * XSAVE enumeration
1800*13134Skuriakose.kuruvilla@oracle.com 	 */
1801*13134Skuriakose.kuruvilla@oracle.com 	if (cpi->cpi_maxeax >= 0xD && cpi->cpi_vendor == X86_VENDOR_Intel) {
1802*13134Skuriakose.kuruvilla@oracle.com 		struct cpuid_regs regs;
1803*13134Skuriakose.kuruvilla@oracle.com 		boolean_t cpuid_d_valid = B_TRUE;
1804*13134Skuriakose.kuruvilla@oracle.com 
1805*13134Skuriakose.kuruvilla@oracle.com 		cp = &regs;
1806*13134Skuriakose.kuruvilla@oracle.com 		cp->cp_eax = 0xD;
1807*13134Skuriakose.kuruvilla@oracle.com 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
1808*13134Skuriakose.kuruvilla@oracle.com 
1809*13134Skuriakose.kuruvilla@oracle.com 		(void) __cpuid_insn(cp);
1810*13134Skuriakose.kuruvilla@oracle.com 
1811*13134Skuriakose.kuruvilla@oracle.com 		/*
1812*13134Skuriakose.kuruvilla@oracle.com 		 * Sanity checks for debug
1813*13134Skuriakose.kuruvilla@oracle.com 		 */
1814*13134Skuriakose.kuruvilla@oracle.com 		if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 ||
1815*13134Skuriakose.kuruvilla@oracle.com 		    (cp->cp_eax & XFEATURE_SSE) == 0) {
1816*13134Skuriakose.kuruvilla@oracle.com 			cpuid_d_valid = B_FALSE;
1817*13134Skuriakose.kuruvilla@oracle.com 		}
1818*13134Skuriakose.kuruvilla@oracle.com 
1819*13134Skuriakose.kuruvilla@oracle.com 		cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
1820*13134Skuriakose.kuruvilla@oracle.com 		cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
1821*13134Skuriakose.kuruvilla@oracle.com 		cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;
1822*13134Skuriakose.kuruvilla@oracle.com 
1823*13134Skuriakose.kuruvilla@oracle.com 		/*
1824*13134Skuriakose.kuruvilla@oracle.com 		 * If the hw supports AVX, get the size and offset in the save
1825*13134Skuriakose.kuruvilla@oracle.com 		 * area for the ymm state.
1826*13134Skuriakose.kuruvilla@oracle.com 		 */
1827*13134Skuriakose.kuruvilla@oracle.com 		if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) {
1828*13134Skuriakose.kuruvilla@oracle.com 			cp->cp_eax = 0xD;
1829*13134Skuriakose.kuruvilla@oracle.com 			cp->cp_ecx = 2;
1830*13134Skuriakose.kuruvilla@oracle.com 			cp->cp_edx = cp->cp_ebx = 0;
1831*13134Skuriakose.kuruvilla@oracle.com 
1832*13134Skuriakose.kuruvilla@oracle.com 			(void) __cpuid_insn(cp);
1833*13134Skuriakose.kuruvilla@oracle.com 
1834*13134Skuriakose.kuruvilla@oracle.com 			if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET ||
1835*13134Skuriakose.kuruvilla@oracle.com 			    cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) {
1836*13134Skuriakose.kuruvilla@oracle.com 				cpuid_d_valid = B_FALSE;
1837*13134Skuriakose.kuruvilla@oracle.com 			}
1838*13134Skuriakose.kuruvilla@oracle.com 
1839*13134Skuriakose.kuruvilla@oracle.com 			cpi->cpi_xsave.ymm_size = cp->cp_eax;
1840*13134Skuriakose.kuruvilla@oracle.com 			cpi->cpi_xsave.ymm_offset = cp->cp_ebx;
1841*13134Skuriakose.kuruvilla@oracle.com 		}
1842*13134Skuriakose.kuruvilla@oracle.com 
1843*13134Skuriakose.kuruvilla@oracle.com 		if (is_x86_feature(x86_featureset, X86FSET_XSAVE)) {
1844*13134Skuriakose.kuruvilla@oracle.com 			xsave_state_size = 0;
1845*13134Skuriakose.kuruvilla@oracle.com 		} else if (cpuid_d_valid) {
1846*13134Skuriakose.kuruvilla@oracle.com 			xsave_state_size = cpi->cpi_xsave.xsav_max_size;
1847*13134Skuriakose.kuruvilla@oracle.com 		} else {
1848*13134Skuriakose.kuruvilla@oracle.com 			/* Broken CPUID 0xD, probably in HVM */
1849*13134Skuriakose.kuruvilla@oracle.com 			cmn_err(CE_WARN, "cpu%d: CPUID.0xD returns invalid "
1850*13134Skuriakose.kuruvilla@oracle.com 			    "value: hw_low = %d, hw_high = %d, xsave_size = %d"
1851*13134Skuriakose.kuruvilla@oracle.com 			    ", ymm_size = %d, ymm_offset = %d\n",
1852*13134Skuriakose.kuruvilla@oracle.com 			    cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low,
1853*13134Skuriakose.kuruvilla@oracle.com 			    cpi->cpi_xsave.xsav_hw_features_high,
1854*13134Skuriakose.kuruvilla@oracle.com 			    (int)cpi->cpi_xsave.xsav_max_size,
1855*13134Skuriakose.kuruvilla@oracle.com 			    (int)cpi->cpi_xsave.ymm_size,
1856*13134Skuriakose.kuruvilla@oracle.com 			    (int)cpi->cpi_xsave.ymm_offset);
1857*13134Skuriakose.kuruvilla@oracle.com 
1858*13134Skuriakose.kuruvilla@oracle.com 			if (xsave_state_size != 0) {
1859*13134Skuriakose.kuruvilla@oracle.com 				/*
1860*13134Skuriakose.kuruvilla@oracle.com 				 * This must be a non-boot CPU. We cannot
1861*13134Skuriakose.kuruvilla@oracle.com 				 * continue, because boot cpu has already
1862*13134Skuriakose.kuruvilla@oracle.com 				 * enabled XSAVE.
1863*13134Skuriakose.kuruvilla@oracle.com 				 */
1864*13134Skuriakose.kuruvilla@oracle.com 				ASSERT(cpu->cpu_id != 0);
1865*13134Skuriakose.kuruvilla@oracle.com 				cmn_err(CE_PANIC, "cpu%d: we have already "
1866*13134Skuriakose.kuruvilla@oracle.com 				    "enabled XSAVE on boot cpu, cannot "
1867*13134Skuriakose.kuruvilla@oracle.com 				    "continue.", cpu->cpu_id);
1868*13134Skuriakose.kuruvilla@oracle.com 			} else {
1869*13134Skuriakose.kuruvilla@oracle.com 				/*
1870*13134Skuriakose.kuruvilla@oracle.com 				 * Must be from boot CPU, OK to disable XSAVE.
1871*13134Skuriakose.kuruvilla@oracle.com 				 */
1872*13134Skuriakose.kuruvilla@oracle.com 				ASSERT(cpu->cpu_id == 0);
1873*13134Skuriakose.kuruvilla@oracle.com 				remove_x86_feature(x86_featureset,
1874*13134Skuriakose.kuruvilla@oracle.com 				    X86FSET_XSAVE);
1875*13134Skuriakose.kuruvilla@oracle.com 				remove_x86_feature(x86_featureset, X86FSET_AVX);
1876*13134Skuriakose.kuruvilla@oracle.com 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_XSAVE;
1877*13134Skuriakose.kuruvilla@oracle.com 				CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_AVX;
1878*13134Skuriakose.kuruvilla@oracle.com 				xsave_force_disable = B_TRUE;
1879*13134Skuriakose.kuruvilla@oracle.com 			}
1880*13134Skuriakose.kuruvilla@oracle.com 		}
1881*13134Skuriakose.kuruvilla@oracle.com 	}
1882*13134Skuriakose.kuruvilla@oracle.com 
1883*13134Skuriakose.kuruvilla@oracle.com 
18840Sstevel@tonic-gate 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0)
18850Sstevel@tonic-gate 		goto pass2_done;
18860Sstevel@tonic-gate 
18870Sstevel@tonic-gate 	if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD)
18880Sstevel@tonic-gate 		nmax = NMAX_CPI_EXTD;
18890Sstevel@tonic-gate 	/*
18900Sstevel@tonic-gate 	 * Copy the extended properties, fixing them as we go.
18910Sstevel@tonic-gate 	 * (We already handled n == 0 and n == 1 in pass 1)
18920Sstevel@tonic-gate 	 */
18930Sstevel@tonic-gate 	iptr = (void *)cpi->cpi_brandstr;
18940Sstevel@tonic-gate 	for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
18951228Sandrei 		cp->cp_eax = 0x80000000 + n;
18961228Sandrei 		(void) __cpuid_insn(cp);
18973446Smrj 		platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp);
18980Sstevel@tonic-gate 		switch (n) {
18990Sstevel@tonic-gate 		case 2:
19000Sstevel@tonic-gate 		case 3:
19010Sstevel@tonic-gate 		case 4:
19020Sstevel@tonic-gate 			/*
19030Sstevel@tonic-gate 			 * Extract the brand string
19040Sstevel@tonic-gate 			 */
19050Sstevel@tonic-gate 			*iptr++ = cp->cp_eax;
19060Sstevel@tonic-gate 			*iptr++ = cp->cp_ebx;
19070Sstevel@tonic-gate 			*iptr++ = cp->cp_ecx;
19080Sstevel@tonic-gate 			*iptr++ = cp->cp_edx;
19090Sstevel@tonic-gate 			break;
19100Sstevel@tonic-gate 		case 5:
19110Sstevel@tonic-gate 			switch (cpi->cpi_vendor) {
19120Sstevel@tonic-gate 			case X86_VENDOR_AMD:
19130Sstevel@tonic-gate 				/*
19140Sstevel@tonic-gate 				 * The Athlon and Duron were the first
19150Sstevel@tonic-gate 				 * parts to report the sizes of the
19160Sstevel@tonic-gate 				 * TLB for large pages. Before then,
19170Sstevel@tonic-gate 				 * we don't trust the data.
19180Sstevel@tonic-gate 				 */
19190Sstevel@tonic-gate 				if (cpi->cpi_family < 6 ||
19200Sstevel@tonic-gate 				    (cpi->cpi_family == 6 &&
19210Sstevel@tonic-gate 				    cpi->cpi_model < 1))
19220Sstevel@tonic-gate 					cp->cp_eax = 0;
19230Sstevel@tonic-gate 				break;
19240Sstevel@tonic-gate 			default:
19250Sstevel@tonic-gate 				break;
19260Sstevel@tonic-gate 			}
19270Sstevel@tonic-gate 			break;
19280Sstevel@tonic-gate 		case 6:
19290Sstevel@tonic-gate 			switch (cpi->cpi_vendor) {
19300Sstevel@tonic-gate 			case X86_VENDOR_AMD:
19310Sstevel@tonic-gate 				/*
19320Sstevel@tonic-gate 				 * The Athlon and Duron were the first
19330Sstevel@tonic-gate 				 * AMD parts with L2 TLB's.
19340Sstevel@tonic-gate 				 * Before then, don't trust the data.
19350Sstevel@tonic-gate 				 */
19360Sstevel@tonic-gate 				if (cpi->cpi_family < 6 ||
19370Sstevel@tonic-gate 				    cpi->cpi_family == 6 &&
19380Sstevel@tonic-gate 				    cpi->cpi_model < 1)
19390Sstevel@tonic-gate 					cp->cp_eax = cp->cp_ebx = 0;
19400Sstevel@tonic-gate 				/*
19410Sstevel@tonic-gate 				 * AMD Duron rev A0 reports L2
19420Sstevel@tonic-gate 				 * cache size incorrectly as 1K
19430Sstevel@tonic-gate 				 * when it is really 64K
19440Sstevel@tonic-gate 				 */
19450Sstevel@tonic-gate 				if (cpi->cpi_family == 6 &&
19460Sstevel@tonic-gate 				    cpi->cpi_model == 3 &&
19470Sstevel@tonic-gate 				    cpi->cpi_step == 0) {
19480Sstevel@tonic-gate 					cp->cp_ecx &= 0xffff;
19490Sstevel@tonic-gate 					cp->cp_ecx |= 0x400000;
19500Sstevel@tonic-gate 				}
19510Sstevel@tonic-gate 				break;
19520Sstevel@tonic-gate 			case X86_VENDOR_Cyrix:	/* VIA C3 */
19530Sstevel@tonic-gate 				/*
19540Sstevel@tonic-gate 				 * VIA C3 processors are a bit messed
19550Sstevel@tonic-gate 				 * up w.r.t. encoding cache sizes in %ecx
19560Sstevel@tonic-gate 				 */
19570Sstevel@tonic-gate 				if (cpi->cpi_family != 6)
19580Sstevel@tonic-gate 					break;
19590Sstevel@tonic-gate 				/*
19600Sstevel@tonic-gate 				 * model 7 and 8 were incorrectly encoded
19610Sstevel@tonic-gate 				 *
19620Sstevel@tonic-gate 				 * xxx is model 8 really broken?
19630Sstevel@tonic-gate 				 */
19640Sstevel@tonic-gate 				if (cpi->cpi_model == 7 ||
19650Sstevel@tonic-gate 				    cpi->cpi_model == 8)
19660Sstevel@tonic-gate 					cp->cp_ecx =
19670Sstevel@tonic-gate 					    BITX(cp->cp_ecx, 31, 24) << 16 |
19680Sstevel@tonic-gate 					    BITX(cp->cp_ecx, 23, 16) << 12 |
19690Sstevel@tonic-gate 					    BITX(cp->cp_ecx, 15, 8) << 8 |
19700Sstevel@tonic-gate 					    BITX(cp->cp_ecx, 7, 0);
19710Sstevel@tonic-gate 				/*
19720Sstevel@tonic-gate 				 * model 9 stepping 1 has wrong associativity
19730Sstevel@tonic-gate 				 */
19740Sstevel@tonic-gate 				if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
19750Sstevel@tonic-gate 					cp->cp_ecx |= 8 << 12;
19760Sstevel@tonic-gate 				break;
19770Sstevel@tonic-gate 			case X86_VENDOR_Intel:
19780Sstevel@tonic-gate 				/*
19790Sstevel@tonic-gate 				 * Extended L2 Cache features function.
19800Sstevel@tonic-gate 				 * First appeared on Prescott.
19810Sstevel@tonic-gate 				 */
19820Sstevel@tonic-gate 			default:
19830Sstevel@tonic-gate 				break;
19840Sstevel@tonic-gate 			}
19850Sstevel@tonic-gate 			break;
19860Sstevel@tonic-gate 		default:
19870Sstevel@tonic-gate 			break;
19880Sstevel@tonic-gate 		}
19890Sstevel@tonic-gate 	}
19900Sstevel@tonic-gate 
19910Sstevel@tonic-gate pass2_done:
19920Sstevel@tonic-gate 	cpi->cpi_pass = 2;
19930Sstevel@tonic-gate }
19940Sstevel@tonic-gate 
19950Sstevel@tonic-gate static const char *
19960Sstevel@tonic-gate intel_cpubrand(const struct cpuid_info *cpi)
19970Sstevel@tonic-gate {
19980Sstevel@tonic-gate 	int i;
19990Sstevel@tonic-gate 
200012826Skuriakose.kuruvilla@oracle.com 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
20010Sstevel@tonic-gate 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
20020Sstevel@tonic-gate 		return ("i486");
20030Sstevel@tonic-gate 
20040Sstevel@tonic-gate 	switch (cpi->cpi_family) {
20050Sstevel@tonic-gate 	case 5:
20060Sstevel@tonic-gate 		return ("Intel Pentium(r)");
20070Sstevel@tonic-gate 	case 6:
20080Sstevel@tonic-gate 		switch (cpi->cpi_model) {
20090Sstevel@tonic-gate 			uint_t celeron, xeon;
20101228Sandrei 			const struct cpuid_regs *cp;
20110Sstevel@tonic-gate 		case 0:
20120Sstevel@tonic-gate 		case 1:
20130Sstevel@tonic-gate 		case 2:
20140Sstevel@tonic-gate 			return ("Intel Pentium(r) Pro");
20150Sstevel@tonic-gate 		case 3:
20160Sstevel@tonic-gate 		case 4:
20170Sstevel@tonic-gate 			return ("Intel Pentium(r) II");
20180Sstevel@tonic-gate 		case 6:
20190Sstevel@tonic-gate 			return ("Intel Celeron(r)");
20200Sstevel@tonic-gate 		case 5:
20210Sstevel@tonic-gate 		case 7:
20220Sstevel@tonic-gate 			celeron = xeon = 0;
20230Sstevel@tonic-gate 			cp = &cpi->cpi_std[2];	/* cache info */
20240Sstevel@tonic-gate 
20256317Skk208521 			for (i = 1; i < 4; i++) {
20260Sstevel@tonic-gate 				uint_t tmp;
20270Sstevel@tonic-gate 
20280Sstevel@tonic-gate 				tmp = (cp->cp_eax >> (8 * i)) & 0xff;
20290Sstevel@tonic-gate 				if (tmp == 0x40)
20300Sstevel@tonic-gate 					celeron++;
20310Sstevel@tonic-gate 				if (tmp >= 0x44 && tmp <= 0x45)
20320Sstevel@tonic-gate 					xeon++;
20330Sstevel@tonic-gate 			}
20340Sstevel@tonic-gate 
20350Sstevel@tonic-gate 			for (i = 0; i < 2; i++) {
20360Sstevel@tonic-gate 				uint_t tmp;
20370Sstevel@tonic-gate 
20380Sstevel@tonic-gate 				tmp = (cp->cp_ebx >> (8 * i)) & 0xff;
20390Sstevel@tonic-gate 				if (tmp == 0x40)
20400Sstevel@tonic-gate 					celeron++;
20410Sstevel@tonic-gate 				else if (tmp >= 0x44 && tmp <= 0x45)
20420Sstevel@tonic-gate 					xeon++;
20430Sstevel@tonic-gate 			}
20440Sstevel@tonic-gate 
20450Sstevel@tonic-gate 			for (i = 0; i < 4; i++) {
20460Sstevel@tonic-gate 				uint_t tmp;
20470Sstevel@tonic-gate 
20480Sstevel@tonic-gate 				tmp = (cp->cp_ecx >> (8 * i)) & 0xff;
20490Sstevel@tonic-gate 				if (tmp == 0x40)
20500Sstevel@tonic-gate 					celeron++;
20510Sstevel@tonic-gate 				else if (tmp >= 0x44 && tmp <= 0x45)
20520Sstevel@tonic-gate 					xeon++;
20530Sstevel@tonic-gate 			}
20540Sstevel@tonic-gate 
20550Sstevel@tonic-gate 			for (i = 0; i < 4; i++) {
20560Sstevel@tonic-gate 				uint_t tmp;
20570Sstevel@tonic-gate 
20580Sstevel@tonic-gate 				tmp = (cp->cp_edx >> (8 * i)) & 0xff;
20590Sstevel@tonic-gate 				if (tmp == 0x40)
20600Sstevel@tonic-gate 					celeron++;
20610Sstevel@tonic-gate 				else if (tmp >= 0x44 && tmp <= 0x45)
20620Sstevel@tonic-gate 					xeon++;
20630Sstevel@tonic-gate 			}
20640Sstevel@tonic-gate 
20650Sstevel@tonic-gate 			if (celeron)
20660Sstevel@tonic-gate 				return ("Intel Celeron(r)");
20670Sstevel@tonic-gate 			if (xeon)
20680Sstevel@tonic-gate 				return (cpi->cpi_model == 5 ?
20690Sstevel@tonic-gate 				    "Intel Pentium(r) II Xeon(tm)" :
20700Sstevel@tonic-gate 				    "Intel Pentium(r) III Xeon(tm)");
20710Sstevel@tonic-gate 			return (cpi->cpi_model == 5 ?
20720Sstevel@tonic-gate 			    "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
20730Sstevel@tonic-gate 			    "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
20740Sstevel@tonic-gate 		default:
20750Sstevel@tonic-gate 			break;
20760Sstevel@tonic-gate 		}
20770Sstevel@tonic-gate 	default:
20780Sstevel@tonic-gate 		break;
20790Sstevel@tonic-gate 	}
20800Sstevel@tonic-gate 
20811975Sdmick 	/* BrandID is present if the field is nonzero */
20821975Sdmick 	if (cpi->cpi_brandid != 0) {
20830Sstevel@tonic-gate 		static const struct {
20840Sstevel@tonic-gate 			uint_t bt_bid;
20850Sstevel@tonic-gate 			const char *bt_str;
20860Sstevel@tonic-gate 		} brand_tbl[] = {
20870Sstevel@tonic-gate 			{ 0x1,	"Intel(r) Celeron(r)" },
20880Sstevel@tonic-gate 			{ 0x2,	"Intel(r) Pentium(r) III" },
20890Sstevel@tonic-gate 			{ 0x3,	"Intel(r) Pentium(r) III Xeon(tm)" },
20900Sstevel@tonic-gate 			{ 0x4,	"Intel(r) Pentium(r) III" },
20910Sstevel@tonic-gate 			{ 0x6,	"Mobile Intel(r) Pentium(r) III" },
20920Sstevel@tonic-gate 			{ 0x7,	"Mobile Intel(r) Celeron(r)" },
20930Sstevel@tonic-gate 			{ 0x8,	"Intel(r) Pentium(r) 4" },
20940Sstevel@tonic-gate 			{ 0x9,	"Intel(r) Pentium(r) 4" },
20950Sstevel@tonic-gate 			{ 0xa,	"Intel(r) Celeron(r)" },
20960Sstevel@tonic-gate 			{ 0xb,	"Intel(r) Xeon(tm)" },
20970Sstevel@tonic-gate 			{ 0xc,	"Intel(r) Xeon(tm) MP" },
20980Sstevel@tonic-gate 			{ 0xe,	"Mobile Intel(r) Pentium(r) 4" },
20991975Sdmick 			{ 0xf,	"Mobile Intel(r) Celeron(r)" },
21001975Sdmick 			{ 0x11, "Mobile Genuine Intel(r)" },
21011975Sdmick 			{ 0x12, "Intel(r) Celeron(r) M" },
21021975Sdmick 			{ 0x13, "Mobile Intel(r) Celeron(r)" },
21031975Sdmick 			{ 0x14, "Intel(r) Celeron(r)" },
21041975Sdmick 			{ 0x15, "Mobile Genuine Intel(r)" },
21051975Sdmick 			{ 0x16,	"Intel(r) Pentium(r) M" },
21061975Sdmick 			{ 0x17, "Mobile Intel(r) Celeron(r)" }
21070Sstevel@tonic-gate 		};
21080Sstevel@tonic-gate 		uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]);
21090Sstevel@tonic-gate 		uint_t sgn;
21100Sstevel@tonic-gate 
21110Sstevel@tonic-gate 		sgn = (cpi->cpi_family << 8) |
21120Sstevel@tonic-gate 		    (cpi->cpi_model << 4) | cpi->cpi_step;
21130Sstevel@tonic-gate 
21140Sstevel@tonic-gate 		for (i = 0; i < btblmax; i++)
21150Sstevel@tonic-gate 			if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
21160Sstevel@tonic-gate 				break;
21170Sstevel@tonic-gate 		if (i < btblmax) {
21180Sstevel@tonic-gate 			if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
21190Sstevel@tonic-gate 				return ("Intel(r) Celeron(r)");
21200Sstevel@tonic-gate 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
21210Sstevel@tonic-gate 				return ("Intel(r) Xeon(tm) MP");
21220Sstevel@tonic-gate 			if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
21230Sstevel@tonic-gate 				return ("Intel(r) Xeon(tm)");
21240Sstevel@tonic-gate 			return (brand_tbl[i].bt_str);
21250Sstevel@tonic-gate 		}
21260Sstevel@tonic-gate 	}
21270Sstevel@tonic-gate 
21280Sstevel@tonic-gate 	return (NULL);
21290Sstevel@tonic-gate }
21300Sstevel@tonic-gate 
21310Sstevel@tonic-gate static const char *
21320Sstevel@tonic-gate amd_cpubrand(const struct cpuid_info *cpi)
21330Sstevel@tonic-gate {
213412826Skuriakose.kuruvilla@oracle.com 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
21350Sstevel@tonic-gate 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
21360Sstevel@tonic-gate 		return ("i486 compatible");
21370Sstevel@tonic-gate 
21380Sstevel@tonic-gate 	switch (cpi->cpi_family) {
21390Sstevel@tonic-gate 	case 5:
21400Sstevel@tonic-gate 		switch (cpi->cpi_model) {
21410Sstevel@tonic-gate 		case 0:
21420Sstevel@tonic-gate 		case 1:
21430Sstevel@tonic-gate 		case 2:
21440Sstevel@tonic-gate 		case 3:
21450Sstevel@tonic-gate 		case 4:
21460Sstevel@tonic-gate 		case 5:
21470Sstevel@tonic-gate 			return ("AMD-K5(r)");
21480Sstevel@tonic-gate 		case 6:
21490Sstevel@tonic-gate 		case 7:
21500Sstevel@tonic-gate 			return ("AMD-K6(r)");
21510Sstevel@tonic-gate 		case 8:
21520Sstevel@tonic-gate 			return ("AMD-K6(r)-2");
21530Sstevel@tonic-gate 		case 9:
21540Sstevel@tonic-gate 			return ("AMD-K6(r)-III");
21550Sstevel@tonic-gate 		default:
21560Sstevel@tonic-gate 			return ("AMD (family 5)");
21570Sstevel@tonic-gate 		}
21580Sstevel@tonic-gate 	case 6:
21590Sstevel@tonic-gate 		switch (cpi->cpi_model) {
21600Sstevel@tonic-gate 		case 1:
21610Sstevel@tonic-gate 			return ("AMD-K7(tm)");
21620Sstevel@tonic-gate 		case 0:
21630Sstevel@tonic-gate 		case 2:
21640Sstevel@tonic-gate 		case 4:
21650Sstevel@tonic-gate 			return ("AMD Athlon(tm)");
21660Sstevel@tonic-gate 		case 3:
21670Sstevel@tonic-gate 		case 7:
21680Sstevel@tonic-gate 			return ("AMD Duron(tm)");
21690Sstevel@tonic-gate 		case 6:
21700Sstevel@tonic-gate 		case 8:
21710Sstevel@tonic-gate 		case 10:
21720Sstevel@tonic-gate 			/*
21730Sstevel@tonic-gate 			 * Use the L2 cache size to distinguish
21740Sstevel@tonic-gate 			 */
21750Sstevel@tonic-gate 			return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
21760Sstevel@tonic-gate 			    "AMD Athlon(tm)" : "AMD Duron(tm)");
21770Sstevel@tonic-gate 		default:
21780Sstevel@tonic-gate 			return ("AMD (family 6)");
21790Sstevel@tonic-gate 		}
21800Sstevel@tonic-gate 	default:
21810Sstevel@tonic-gate 		break;
21820Sstevel@tonic-gate 	}
21830Sstevel@tonic-gate 
21840Sstevel@tonic-gate 	if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
21850Sstevel@tonic-gate 	    cpi->cpi_brandid != 0) {
21860Sstevel@tonic-gate 		switch (BITX(cpi->cpi_brandid, 7, 5)) {
21870Sstevel@tonic-gate 		case 3:
21880Sstevel@tonic-gate 			return ("AMD Opteron(tm) UP 1xx");
21890Sstevel@tonic-gate 		case 4:
21900Sstevel@tonic-gate 			return ("AMD Opteron(tm) DP 2xx");
21910Sstevel@tonic-gate 		case 5:
21920Sstevel@tonic-gate 			return ("AMD Opteron(tm) MP 8xx");
21930Sstevel@tonic-gate 		default:
21940Sstevel@tonic-gate 			return ("AMD Opteron(tm)");
21950Sstevel@tonic-gate 		}
21960Sstevel@tonic-gate 	}
21970Sstevel@tonic-gate 
21980Sstevel@tonic-gate 	return (NULL);
21990Sstevel@tonic-gate }
22000Sstevel@tonic-gate 
22010Sstevel@tonic-gate static const char *
22020Sstevel@tonic-gate cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
22030Sstevel@tonic-gate {
220412826Skuriakose.kuruvilla@oracle.com 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
22050Sstevel@tonic-gate 	    cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
22060Sstevel@tonic-gate 	    type == X86_TYPE_CYRIX_486)
22070Sstevel@tonic-gate 		return ("i486 compatible");
22080Sstevel@tonic-gate 
22090Sstevel@tonic-gate 	switch (type) {
22100Sstevel@tonic-gate 	case X86_TYPE_CYRIX_6x86:
22110Sstevel@tonic-gate 		return ("Cyrix 6x86");
22120Sstevel@tonic-gate 	case X86_TYPE_CYRIX_6x86L:
22130Sstevel@tonic-gate 		return ("Cyrix 6x86L");
22140Sstevel@tonic-gate 	case X86_TYPE_CYRIX_6x86MX:
22150Sstevel@tonic-gate 		return ("Cyrix 6x86MX");
22160Sstevel@tonic-gate 	case X86_TYPE_CYRIX_GXm:
22170Sstevel@tonic-gate 		return ("Cyrix GXm");
22180Sstevel@tonic-gate 	case X86_TYPE_CYRIX_MediaGX:
22190Sstevel@tonic-gate 		return ("Cyrix MediaGX");
22200Sstevel@tonic-gate 	case X86_TYPE_CYRIX_MII:
22210Sstevel@tonic-gate 		return ("Cyrix M2");
22220Sstevel@tonic-gate 	case X86_TYPE_VIA_CYRIX_III:
22230Sstevel@tonic-gate 		return ("VIA Cyrix M3");
22240Sstevel@tonic-gate 	default:
22250Sstevel@tonic-gate 		/*
22260Sstevel@tonic-gate 		 * Have another wild guess ..
22270Sstevel@tonic-gate 		 */
22280Sstevel@tonic-gate 		if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
22290Sstevel@tonic-gate 			return ("Cyrix 5x86");
22300Sstevel@tonic-gate 		else if (cpi->cpi_family == 5) {
22310Sstevel@tonic-gate 			switch (cpi->cpi_model) {
22320Sstevel@tonic-gate 			case 2:
22330Sstevel@tonic-gate 				return ("Cyrix 6x86");	/* Cyrix M1 */
22340Sstevel@tonic-gate 			case 4:
22350Sstevel@tonic-gate 				return ("Cyrix MediaGX");
22360Sstevel@tonic-gate 			default:
22370Sstevel@tonic-gate 				break;
22380Sstevel@tonic-gate 			}
22390Sstevel@tonic-gate 		} else if (cpi->cpi_family == 6) {
22400Sstevel@tonic-gate 			switch (cpi->cpi_model) {
22410Sstevel@tonic-gate 			case 0:
22420Sstevel@tonic-gate 				return ("Cyrix 6x86MX"); /* Cyrix M2? */
22430Sstevel@tonic-gate 			case 5:
22440Sstevel@tonic-gate 			case 6:
22450Sstevel@tonic-gate 			case 7:
22460Sstevel@tonic-gate 			case 8:
22470Sstevel@tonic-gate 			case 9:
22480Sstevel@tonic-gate 				return ("VIA C3");
22490Sstevel@tonic-gate 			default:
22500Sstevel@tonic-gate 				break;
22510Sstevel@tonic-gate 			}
22520Sstevel@tonic-gate 		}
22530Sstevel@tonic-gate 		break;
22540Sstevel@tonic-gate 	}
22550Sstevel@tonic-gate 	return (NULL);
22560Sstevel@tonic-gate }
22570Sstevel@tonic-gate 
22580Sstevel@tonic-gate /*
22590Sstevel@tonic-gate  * This only gets called in the case that the CPU extended
22600Sstevel@tonic-gate  * feature brand string (0x80000002, 0x80000003, 0x80000004)
22610Sstevel@tonic-gate  * aren't available, or contain null bytes for some reason.
22620Sstevel@tonic-gate  */
22630Sstevel@tonic-gate static void
22640Sstevel@tonic-gate fabricate_brandstr(struct cpuid_info *cpi)
22650Sstevel@tonic-gate {
22660Sstevel@tonic-gate 	const char *brand = NULL;
22670Sstevel@tonic-gate 
22680Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
22690Sstevel@tonic-gate 	case X86_VENDOR_Intel:
22700Sstevel@tonic-gate 		brand = intel_cpubrand(cpi);
22710Sstevel@tonic-gate 		break;
22720Sstevel@tonic-gate 	case X86_VENDOR_AMD:
22730Sstevel@tonic-gate 		brand = amd_cpubrand(cpi);
22740Sstevel@tonic-gate 		break;
22750Sstevel@tonic-gate 	case X86_VENDOR_Cyrix:
22760Sstevel@tonic-gate 		brand = cyrix_cpubrand(cpi, x86_type);
22770Sstevel@tonic-gate 		break;
22780Sstevel@tonic-gate 	case X86_VENDOR_NexGen:
22790Sstevel@tonic-gate 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
22800Sstevel@tonic-gate 			brand = "NexGen Nx586";
22810Sstevel@tonic-gate 		break;
22820Sstevel@tonic-gate 	case X86_VENDOR_Centaur:
22830Sstevel@tonic-gate 		if (cpi->cpi_family == 5)
22840Sstevel@tonic-gate 			switch (cpi->cpi_model) {
22850Sstevel@tonic-gate 			case 4:
22860Sstevel@tonic-gate 				brand = "Centaur C6";
22870Sstevel@tonic-gate 				break;
22880Sstevel@tonic-gate 			case 8:
22890Sstevel@tonic-gate 				brand = "Centaur C2";
22900Sstevel@tonic-gate 				break;
22910Sstevel@tonic-gate 			case 9:
22920Sstevel@tonic-gate 				brand = "Centaur C3";
22930Sstevel@tonic-gate 				break;
22940Sstevel@tonic-gate 			default:
22950Sstevel@tonic-gate 				break;
22960Sstevel@tonic-gate 			}
22970Sstevel@tonic-gate 		break;
22980Sstevel@tonic-gate 	case X86_VENDOR_Rise:
22990Sstevel@tonic-gate 		if (cpi->cpi_family == 5 &&
23000Sstevel@tonic-gate 		    (cpi->cpi_model == 0 || cpi->cpi_model == 2))
23010Sstevel@tonic-gate 			brand = "Rise mP6";
23020Sstevel@tonic-gate 		break;
23030Sstevel@tonic-gate 	case X86_VENDOR_SiS:
23040Sstevel@tonic-gate 		if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
23050Sstevel@tonic-gate 			brand = "SiS 55x";
23060Sstevel@tonic-gate 		break;
23070Sstevel@tonic-gate 	case X86_VENDOR_TM:
23080Sstevel@tonic-gate 		if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
23090Sstevel@tonic-gate 			brand = "Transmeta Crusoe TM3x00 or TM5x00";
23100Sstevel@tonic-gate 		break;
23110Sstevel@tonic-gate 	case X86_VENDOR_NSC:
23120Sstevel@tonic-gate 	case X86_VENDOR_UMC:
23130Sstevel@tonic-gate 	default:
23140Sstevel@tonic-gate 		break;
23150Sstevel@tonic-gate 	}
23160Sstevel@tonic-gate 	if (brand) {
23170Sstevel@tonic-gate 		(void) strcpy((char *)cpi->cpi_brandstr, brand);
23180Sstevel@tonic-gate 		return;
23190Sstevel@tonic-gate 	}
23200Sstevel@tonic-gate 
23210Sstevel@tonic-gate 	/*
23220Sstevel@tonic-gate 	 * If all else fails ...
23230Sstevel@tonic-gate 	 */
23240Sstevel@tonic-gate 	(void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
23250Sstevel@tonic-gate 	    "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
23260Sstevel@tonic-gate 	    cpi->cpi_model, cpi->cpi_step);
23270Sstevel@tonic-gate }
23280Sstevel@tonic-gate 
23290Sstevel@tonic-gate /*
23300Sstevel@tonic-gate  * This routine is called just after kernel memory allocation
23310Sstevel@tonic-gate  * becomes available on cpu0, and as part of mp_startup() on
23320Sstevel@tonic-gate  * the other cpus.
23330Sstevel@tonic-gate  *
23344606Sesaxe  * Fixup the brand string, and collect any information from cpuid
23354606Sesaxe  * that requires dynamicically allocated storage to represent.
23360Sstevel@tonic-gate  */
23370Sstevel@tonic-gate /*ARGSUSED*/
23380Sstevel@tonic-gate void
23390Sstevel@tonic-gate cpuid_pass3(cpu_t *cpu)
23400Sstevel@tonic-gate {
23414606Sesaxe 	int	i, max, shft, level, size;
23424606Sesaxe 	struct cpuid_regs regs;
23434606Sesaxe 	struct cpuid_regs *cp;
23440Sstevel@tonic-gate 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
23450Sstevel@tonic-gate 
23460Sstevel@tonic-gate 	ASSERT(cpi->cpi_pass == 2);
23470Sstevel@tonic-gate 
23484606Sesaxe 	/*
23494606Sesaxe 	 * Function 4: Deterministic cache parameters
23504606Sesaxe 	 *
23514606Sesaxe 	 * Take this opportunity to detect the number of threads
23524606Sesaxe 	 * sharing the last level cache, and construct a corresponding
23534606Sesaxe 	 * cache id. The respective cpuid_info members are initialized
23544606Sesaxe 	 * to the default case of "no last level cache sharing".
23554606Sesaxe 	 */
23564606Sesaxe 	cpi->cpi_ncpu_shr_last_cache = 1;
23574606Sesaxe 	cpi->cpi_last_lvl_cacheid = cpu->cpu_id;
23584606Sesaxe 
23594606Sesaxe 	if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) {
23604606Sesaxe 
23614606Sesaxe 		/*
23624606Sesaxe 		 * Find the # of elements (size) returned by fn 4, and along
23634606Sesaxe 		 * the way detect last level cache sharing details.
23644606Sesaxe 		 */
23654606Sesaxe 		bzero(&regs, sizeof (regs));
23664606Sesaxe 		cp = &regs;
23674606Sesaxe 		for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) {
23684606Sesaxe 			cp->cp_eax = 4;
23694606Sesaxe 			cp->cp_ecx = i;
23704606Sesaxe 
23714606Sesaxe 			(void) __cpuid_insn(cp);
23724606Sesaxe 
23734606Sesaxe 			if (CPI_CACHE_TYPE(cp) == 0)
23744606Sesaxe 				break;
23754606Sesaxe 			level = CPI_CACHE_LVL(cp);
23764606Sesaxe 			if (level > max) {
23774606Sesaxe 				max = level;
23784606Sesaxe 				cpi->cpi_ncpu_shr_last_cache =
23794606Sesaxe 				    CPI_NTHR_SHR_CACHE(cp) + 1;
23804606Sesaxe 			}
23814606Sesaxe 		}
23824606Sesaxe 		cpi->cpi_std_4_size = size = i;
23834606Sesaxe 
23844606Sesaxe 		/*
23854606Sesaxe 		 * Allocate the cpi_std_4 array. The first element
23864606Sesaxe 		 * references the regs for fn 4, %ecx == 0, which
23874606Sesaxe 		 * cpuid_pass2() stashed in cpi->cpi_std[4].
23884606Sesaxe 		 */
23894606Sesaxe 		if (size > 0) {
23904606Sesaxe 			cpi->cpi_std_4 =
23914606Sesaxe 			    kmem_alloc(size * sizeof (cp), KM_SLEEP);
23924606Sesaxe 			cpi->cpi_std_4[0] = &cpi->cpi_std[4];
23934606Sesaxe 
23944606Sesaxe 			/*
23954606Sesaxe 			 * Allocate storage to hold the additional regs
23964606Sesaxe 			 * for function 4, %ecx == 1 .. cpi_std_4_size.
23974606Sesaxe 			 *
23984606Sesaxe 			 * The regs for fn 4, %ecx == 0 has already
23994606Sesaxe 			 * been allocated as indicated above.
24004606Sesaxe 			 */
24014606Sesaxe 			for (i = 1; i < size; i++) {
24024606Sesaxe 				cp = cpi->cpi_std_4[i] =
24034606Sesaxe 				    kmem_zalloc(sizeof (regs), KM_SLEEP);
24044606Sesaxe 				cp->cp_eax = 4;
24054606Sesaxe 				cp->cp_ecx = i;
24064606Sesaxe 
24074606Sesaxe 				(void) __cpuid_insn(cp);
24084606Sesaxe 			}
24094606Sesaxe 		}
24104606Sesaxe 		/*
24114606Sesaxe 		 * Determine the number of bits needed to represent
24124606Sesaxe 		 * the number of CPUs sharing the last level cache.
24134606Sesaxe 		 *
24144606Sesaxe 		 * Shift off that number of bits from the APIC id to
24154606Sesaxe 		 * derive the cache id.
24164606Sesaxe 		 */
24174606Sesaxe 		shft = 0;
24184606Sesaxe 		for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
24194606Sesaxe 			shft++;
24207282Smishra 		cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
24210Sstevel@tonic-gate 	}
24220Sstevel@tonic-gate 
24230Sstevel@tonic-gate 	/*
24244606Sesaxe 	 * Now fixup the brand string
24250Sstevel@tonic-gate 	 */
24264606Sesaxe 	if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
24274606Sesaxe 		fabricate_brandstr(cpi);
24284606Sesaxe 	} else {
24290Sstevel@tonic-gate 
24300Sstevel@tonic-gate 		/*
24314606Sesaxe 		 * If we successfully extracted a brand string from the cpuid
24324606Sesaxe 		 * instruction, clean it up by removing leading spaces and
24334606Sesaxe 		 * similar junk.
24340Sstevel@tonic-gate 		 */
24354606Sesaxe 		if (cpi->cpi_brandstr[0]) {
24364606Sesaxe 			size_t maxlen = sizeof (cpi->cpi_brandstr);
24374606Sesaxe 			char *src, *dst;
24384606Sesaxe 
24394606Sesaxe 			dst = src = (char *)cpi->cpi_brandstr;
24404606Sesaxe 			src[maxlen - 1] = '\0';
24414606Sesaxe 			/*
24424606Sesaxe 			 * strip leading spaces
24434606Sesaxe 			 */
24444606Sesaxe 			while (*src == ' ')
24454606Sesaxe 				src++;
24464606Sesaxe 			/*
24474606Sesaxe 			 * Remove any 'Genuine' or "Authentic" prefixes
24484606Sesaxe 			 */
24494606Sesaxe 			if (strncmp(src, "Genuine ", 8) == 0)
24504606Sesaxe 				src += 8;
24514606Sesaxe 			if (strncmp(src, "Authentic ", 10) == 0)
24524606Sesaxe 				src += 10;
24534606Sesaxe 
24544606Sesaxe 			/*
24554606Sesaxe 			 * Now do an in-place copy.
24564606Sesaxe 			 * Map (R) to (r) and (TM) to (tm).
24574606Sesaxe 			 * The era of teletypes is long gone, and there's
24584606Sesaxe 			 * -really- no need to shout.
24594606Sesaxe 			 */
24604606Sesaxe 			while (*src != '\0') {
24614606Sesaxe 				if (src[0] == '(') {
24624606Sesaxe 					if (strncmp(src + 1, "R)", 2) == 0) {
24634606Sesaxe 						(void) strncpy(dst, "(r)", 3);
24644606Sesaxe 						src += 3;
24654606Sesaxe 						dst += 3;
24664606Sesaxe 						continue;
24674606Sesaxe 					}
24684606Sesaxe 					if (strncmp(src + 1, "TM)", 3) == 0) {
24694606Sesaxe 						(void) strncpy(dst, "(tm)", 4);
24704606Sesaxe 						src += 4;
24714606Sesaxe 						dst += 4;
24724606Sesaxe 						continue;
24734606Sesaxe 					}
24740Sstevel@tonic-gate 				}
24754606Sesaxe 				*dst++ = *src++;
24760Sstevel@tonic-gate 			}
24774606Sesaxe 			*dst = '\0';
24784606Sesaxe 
24794606Sesaxe 			/*
24804606Sesaxe 			 * Finally, remove any trailing spaces
24814606Sesaxe 			 */
24824606Sesaxe 			while (--dst > cpi->cpi_brandstr)
24834606Sesaxe 				if (*dst == ' ')
24844606Sesaxe 					*dst = '\0';
24854606Sesaxe 				else
24864606Sesaxe 					break;
24874606Sesaxe 		} else
24884606Sesaxe 			fabricate_brandstr(cpi);
24894606Sesaxe 	}
24900Sstevel@tonic-gate 	cpi->cpi_pass = 3;
24910Sstevel@tonic-gate }
24920Sstevel@tonic-gate 
24930Sstevel@tonic-gate /*
24940Sstevel@tonic-gate  * This routine is called out of bind_hwcap() much later in the life
24950Sstevel@tonic-gate  * of the kernel (post_startup()).  The job of this routine is to resolve
24960Sstevel@tonic-gate  * the hardware feature support and kernel support for those features into
24970Sstevel@tonic-gate  * what we're actually going to tell applications via the aux vector.
24980Sstevel@tonic-gate  */
uint_t
cpuid_pass4(cpu_t *cpu)
{
	struct cpuid_info *cpi;
	uint_t hwcap_flags = 0;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	/* Passes 1-3 must have completed on this CPU before we run. */
	ASSERT(cpi->cpi_pass == 3);

	/*
	 * Standard features: mask the cached leaf-1 %edx/%ecx words down
	 * to what the kernel actually supports, then translate the
	 * surviving bits into AV_386_* aux-vector flags.
	 */
	if (cpi->cpi_maxeax >= 1) {
		uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
		uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];

		*edx = CPI_FEATURES_EDX(cpi);
		*ecx = CPI_FEATURES_ECX(cpi);

		/*
		 * [these require explicit kernel support]
		 */
		if (!is_x86_feature(x86_featureset, X86FSET_SEP))
			*edx &= ~CPUID_INTC_EDX_SEP;

		if (!is_x86_feature(x86_featureset, X86FSET_SSE))
			*edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
		if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
			*edx &= ~CPUID_INTC_EDX_SSE2;

		if (!is_x86_feature(x86_featureset, X86FSET_HTT))
			*edx &= ~CPUID_INTC_EDX_HTT;

		if (!is_x86_feature(x86_featureset, X86FSET_SSE3))
			*ecx &= ~CPUID_INTC_ECX_SSE3;

		/*
		 * These %ecx features are only masked/advertised on Intel
		 * parts here; see the matching vendor check below.
		 */
		if (cpi->cpi_vendor == X86_VENDOR_Intel) {
			if (!is_x86_feature(x86_featureset, X86FSET_SSSE3))
				*ecx &= ~CPUID_INTC_ECX_SSSE3;
			if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1))
				*ecx &= ~CPUID_INTC_ECX_SSE4_1;
			if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2))
				*ecx &= ~CPUID_INTC_ECX_SSE4_2;
			if (!is_x86_feature(x86_featureset, X86FSET_AES))
				*ecx &= ~CPUID_INTC_ECX_AES;
			if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ))
				*ecx &= ~CPUID_INTC_ECX_PCLMULQDQ;
			if (!is_x86_feature(x86_featureset, X86FSET_XSAVE))
				*ecx &= ~(CPUID_INTC_ECX_XSAVE |
				    CPUID_INTC_ECX_OSXSAVE);
			if (!is_x86_feature(x86_featureset, X86FSET_AVX))
				*ecx &= ~CPUID_INTC_ECX_AVX;
		}

		/*
		 * [no explicit support required beyond x87 fp context]
		 */
		if (!fpu_exists)
			*edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);

		/*
		 * Now map the supported feature vector to things that we
		 * think userland will care about.
		 */
		if (*edx & CPUID_INTC_EDX_SEP)
			hwcap_flags |= AV_386_SEP;
		if (*edx & CPUID_INTC_EDX_SSE)
			hwcap_flags |= AV_386_FXSR | AV_386_SSE;
		if (*edx & CPUID_INTC_EDX_SSE2)
			hwcap_flags |= AV_386_SSE2;
		if (*ecx & CPUID_INTC_ECX_SSE3)
			hwcap_flags |= AV_386_SSE3;
		if (cpi->cpi_vendor == X86_VENDOR_Intel) {
			if (*ecx & CPUID_INTC_ECX_SSSE3)
				hwcap_flags |= AV_386_SSSE3;
			if (*ecx & CPUID_INTC_ECX_SSE4_1)
				hwcap_flags |= AV_386_SSE4_1;
			if (*ecx & CPUID_INTC_ECX_SSE4_2)
				hwcap_flags |= AV_386_SSE4_2;
			if (*ecx & CPUID_INTC_ECX_MOVBE)
				hwcap_flags |= AV_386_MOVBE;
			if (*ecx & CPUID_INTC_ECX_AES)
				hwcap_flags |= AV_386_AES;
			if (*ecx & CPUID_INTC_ECX_PCLMULQDQ)
				hwcap_flags |= AV_386_PCLMULQDQ;
			/*
			 * XSAVE is advertised only when the OS has also
			 * enabled it (OSXSAVE set by the kernel via CR4).
			 */
			if ((*ecx & CPUID_INTC_ECX_XSAVE) &&
			    (*ecx & CPUID_INTC_ECX_OSXSAVE))
				hwcap_flags |= AV_386_XSAVE;
		}
		if (*ecx & CPUID_INTC_ECX_POPCNT)
			hwcap_flags |= AV_386_POPCNT;
		if (*edx & CPUID_INTC_EDX_FPU)
			hwcap_flags |= AV_386_FPU;
		if (*edx & CPUID_INTC_EDX_MMX)
			hwcap_flags |= AV_386_MMX;

		if (*edx & CPUID_INTC_EDX_TSC)
			hwcap_flags |= AV_386_TSC;
		if (*edx & CPUID_INTC_EDX_CX8)
			hwcap_flags |= AV_386_CX8;
		if (*edx & CPUID_INTC_EDX_CMOV)
			hwcap_flags |= AV_386_CMOV;
		if (*ecx & CPUID_INTC_ECX_CX16)
			hwcap_flags |= AV_386_CX16;
	}

	/* No extended leaves cached: nothing more to resolve. */
	if (cpi->cpi_xmaxeax < 0x80000001)
		goto pass4_done;

	/*
	 * Extended features (leaf 0x80000001).  Note the declarations
	 * are scoped to the switch; each case initializes what it uses.
	 */
	switch (cpi->cpi_vendor) {
		struct cpuid_regs cp;
		uint32_t *edx, *ecx;

	case X86_VENDOR_Intel:
		/*
		 * Seems like Intel duplicated what we necessary
		 * here to make the initial crop of 64-bit OS's work.
		 * Hopefully, those are the only "extended" bits
		 * they'll add.
		 */
		/*FALLTHROUGH*/

	case X86_VENDOR_AMD:
		edx = &cpi->cpi_support[AMD_EDX_FEATURES];
		ecx = &cpi->cpi_support[AMD_ECX_FEATURES];

		*edx = CPI_FEATURES_XTD_EDX(cpi);
		*ecx = CPI_FEATURES_XTD_ECX(cpi);

		/*
		 * [these features require explicit kernel support]
		 */
		switch (cpi->cpi_vendor) {
		case X86_VENDOR_Intel:
			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
				*edx &= ~CPUID_AMD_EDX_TSCP;
			break;

		case X86_VENDOR_AMD:
			if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
				*edx &= ~CPUID_AMD_EDX_TSCP;
			if (!is_x86_feature(x86_featureset, X86FSET_SSE4A))
				*ecx &= ~CPUID_AMD_ECX_SSE4A;
			break;

		default:
			break;
		}

		/*
		 * [no explicit support required beyond
		 * x87 fp context and exception handlers]
		 */
		if (!fpu_exists)
			*edx &= ~(CPUID_AMD_EDX_MMXamd |
			    CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);

		if (!is_x86_feature(x86_featureset, X86FSET_NX))
			*edx &= ~CPUID_AMD_EDX_NX;
#if !defined(__amd64)
		/* 32-bit kernels never expose long mode to userland. */
		*edx &= ~CPUID_AMD_EDX_LM;
#endif
		/*
		 * Now map the supported feature vector to
		 * things that we think userland will care about.
		 */
#if defined(__amd64)
		if (*edx & CPUID_AMD_EDX_SYSC)
			hwcap_flags |= AV_386_AMD_SYSC;
#endif
		if (*edx & CPUID_AMD_EDX_MMXamd)
			hwcap_flags |= AV_386_AMD_MMX;
		if (*edx & CPUID_AMD_EDX_3DNow)
			hwcap_flags |= AV_386_AMD_3DNow;
		if (*edx & CPUID_AMD_EDX_3DNowx)
			hwcap_flags |= AV_386_AMD_3DNowx;

		switch (cpi->cpi_vendor) {
		case X86_VENDOR_AMD:
			if (*edx & CPUID_AMD_EDX_TSCP)
				hwcap_flags |= AV_386_TSCP;
			if (*ecx & CPUID_AMD_ECX_AHF64)
				hwcap_flags |= AV_386_AHF;
			if (*ecx & CPUID_AMD_ECX_SSE4A)
				hwcap_flags |= AV_386_AMD_SSE4A;
			if (*ecx & CPUID_AMD_ECX_LZCNT)
				hwcap_flags |= AV_386_AMD_LZCNT;
			break;

		case X86_VENDOR_Intel:
			if (*edx & CPUID_AMD_EDX_TSCP)
				hwcap_flags |= AV_386_TSCP;
			/*
			 * Aarrgh.
			 * Intel uses a different bit in the same word.
			 */
			if (*ecx & CPUID_INTC_ECX_AHF64)
				hwcap_flags |= AV_386_AHF;
			break;

		default:
			break;
		}
		break;

	case X86_VENDOR_TM:
		/*
		 * Transmeta: cache the 0x80860001 feature word; no hwcap
		 * bits are derived from it here.
		 */
		cp.cp_eax = 0x80860001;
		(void) __cpuid_insn(&cp);
		cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
		break;

	default:
		break;
	}

pass4_done:
	cpi->cpi_pass = 4;
	return (hwcap_flags);
}
27180Sstevel@tonic-gate 
27190Sstevel@tonic-gate 
27200Sstevel@tonic-gate /*
27210Sstevel@tonic-gate  * Simulate the cpuid instruction using the data we previously
27220Sstevel@tonic-gate  * captured about this CPU.  We try our best to return the truth
27230Sstevel@tonic-gate  * about the hardware, independently of kernel support.
27240Sstevel@tonic-gate  */
27250Sstevel@tonic-gate uint32_t
27261228Sandrei cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
27270Sstevel@tonic-gate {
27280Sstevel@tonic-gate 	struct cpuid_info *cpi;
27291228Sandrei 	struct cpuid_regs *xcp;
27300Sstevel@tonic-gate 
27310Sstevel@tonic-gate 	if (cpu == NULL)
27320Sstevel@tonic-gate 		cpu = CPU;
27330Sstevel@tonic-gate 	cpi = cpu->cpu_m.mcpu_cpi;
27340Sstevel@tonic-gate 
27350Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 3));
27360Sstevel@tonic-gate 
27370Sstevel@tonic-gate 	/*
27380Sstevel@tonic-gate 	 * CPUID data is cached in two separate places: cpi_std for standard
27390Sstevel@tonic-gate 	 * CPUID functions, and cpi_extd for extended CPUID functions.
27400Sstevel@tonic-gate 	 */
27411228Sandrei 	if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
27421228Sandrei 		xcp = &cpi->cpi_std[cp->cp_eax];
27431228Sandrei 	else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
27441228Sandrei 	    cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD)
27451228Sandrei 		xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
27460Sstevel@tonic-gate 	else
27470Sstevel@tonic-gate 		/*
27480Sstevel@tonic-gate 		 * The caller is asking for data from an input parameter which
27490Sstevel@tonic-gate 		 * the kernel has not cached.  In this case we go fetch from
27500Sstevel@tonic-gate 		 * the hardware and return the data directly to the user.
27510Sstevel@tonic-gate 		 */
27521228Sandrei 		return (__cpuid_insn(cp));
27531228Sandrei 
27541228Sandrei 	cp->cp_eax = xcp->cp_eax;
27551228Sandrei 	cp->cp_ebx = xcp->cp_ebx;
27561228Sandrei 	cp->cp_ecx = xcp->cp_ecx;
27571228Sandrei 	cp->cp_edx = xcp->cp_edx;
27580Sstevel@tonic-gate 	return (cp->cp_eax);
27590Sstevel@tonic-gate }
27600Sstevel@tonic-gate 
27610Sstevel@tonic-gate int
27620Sstevel@tonic-gate cpuid_checkpass(cpu_t *cpu, int pass)
27630Sstevel@tonic-gate {
27640Sstevel@tonic-gate 	return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
27650Sstevel@tonic-gate 	    cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
27660Sstevel@tonic-gate }
27670Sstevel@tonic-gate 
27680Sstevel@tonic-gate int
27690Sstevel@tonic-gate cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
27700Sstevel@tonic-gate {
27710Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 3));
27720Sstevel@tonic-gate 
27730Sstevel@tonic-gate 	return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
27740Sstevel@tonic-gate }
27750Sstevel@tonic-gate 
27760Sstevel@tonic-gate int
27771228Sandrei cpuid_is_cmt(cpu_t *cpu)
27780Sstevel@tonic-gate {
27790Sstevel@tonic-gate 	if (cpu == NULL)
27800Sstevel@tonic-gate 		cpu = CPU;
27810Sstevel@tonic-gate 
27820Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 1));
27830Sstevel@tonic-gate 
27840Sstevel@tonic-gate 	return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
27850Sstevel@tonic-gate }
27860Sstevel@tonic-gate 
27870Sstevel@tonic-gate /*
27880Sstevel@tonic-gate  * AMD and Intel both implement the 64-bit variant of the syscall
27890Sstevel@tonic-gate  * instruction (syscallq), so if there's -any- support for syscall,
27900Sstevel@tonic-gate  * cpuid currently says "yes, we support this".
27910Sstevel@tonic-gate  *
27920Sstevel@tonic-gate  * However, Intel decided to -not- implement the 32-bit variant of the
27930Sstevel@tonic-gate  * syscall instruction, so we provide a predicate to allow our caller
27940Sstevel@tonic-gate  * to test that subtlety here.
27955084Sjohnlev  *
27965084Sjohnlev  * XXPV	Currently, 32-bit syscall instructions don't work via the hypervisor,
27975084Sjohnlev  *	even in the case where the hardware would in fact support it.
27980Sstevel@tonic-gate  */
27990Sstevel@tonic-gate /*ARGSUSED*/
28000Sstevel@tonic-gate int
28010Sstevel@tonic-gate cpuid_syscall32_insn(cpu_t *cpu)
28020Sstevel@tonic-gate {
28030Sstevel@tonic-gate 	ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1));
28040Sstevel@tonic-gate 
28055084Sjohnlev #if !defined(__xpv)
28063446Smrj 	if (cpu == NULL)
28073446Smrj 		cpu = CPU;
28083446Smrj 
28093446Smrj 	/*CSTYLED*/
28103446Smrj 	{
28113446Smrj 		struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
28123446Smrj 
28133446Smrj 		if (cpi->cpi_vendor == X86_VENDOR_AMD &&
28143446Smrj 		    cpi->cpi_xmaxeax >= 0x80000001 &&
28153446Smrj 		    (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
28163446Smrj 			return (1);
28173446Smrj 	}
28185084Sjohnlev #endif
28190Sstevel@tonic-gate 	return (0);
28200Sstevel@tonic-gate }
28210Sstevel@tonic-gate 
28220Sstevel@tonic-gate int
28230Sstevel@tonic-gate cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
28240Sstevel@tonic-gate {
28250Sstevel@tonic-gate 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
28260Sstevel@tonic-gate 
28270Sstevel@tonic-gate 	static const char fmt[] =
28283779Sdmick 	    "x86 (%s %X family %d model %d step %d clock %d MHz)";
28290Sstevel@tonic-gate 	static const char fmt_ht[] =
28303779Sdmick 	    "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)";
28310Sstevel@tonic-gate 
28320Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 1));
28330Sstevel@tonic-gate 
28341228Sandrei 	if (cpuid_is_cmt(cpu))
28350Sstevel@tonic-gate 		return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
28363779Sdmick 		    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
28373779Sdmick 		    cpi->cpi_family, cpi->cpi_model,
28380Sstevel@tonic-gate 		    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
28390Sstevel@tonic-gate 	return (snprintf(s, n, fmt,
28403779Sdmick 	    cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
28413779Sdmick 	    cpi->cpi_family, cpi->cpi_model,
28420Sstevel@tonic-gate 	    cpi->cpi_step, cpu->cpu_type_info.pi_clock));
28430Sstevel@tonic-gate }
28440Sstevel@tonic-gate 
28450Sstevel@tonic-gate const char *
28460Sstevel@tonic-gate cpuid_getvendorstr(cpu_t *cpu)
28470Sstevel@tonic-gate {
28480Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 1));
28490Sstevel@tonic-gate 	return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
28500Sstevel@tonic-gate }
28510Sstevel@tonic-gate 
28520Sstevel@tonic-gate uint_t
28530Sstevel@tonic-gate cpuid_getvendor(cpu_t *cpu)
28540Sstevel@tonic-gate {
28550Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 1));
28560Sstevel@tonic-gate 	return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
28570Sstevel@tonic-gate }
28580Sstevel@tonic-gate 
28590Sstevel@tonic-gate uint_t
28600Sstevel@tonic-gate cpuid_getfamily(cpu_t *cpu)
28610Sstevel@tonic-gate {
28620Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 1));
28630Sstevel@tonic-gate 	return (cpu->cpu_m.mcpu_cpi->cpi_family);
28640Sstevel@tonic-gate }
28650Sstevel@tonic-gate 
28660Sstevel@tonic-gate uint_t
28670Sstevel@tonic-gate cpuid_getmodel(cpu_t *cpu)
28680Sstevel@tonic-gate {
28690Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 1));
28700Sstevel@tonic-gate 	return (cpu->cpu_m.mcpu_cpi->cpi_model);
28710Sstevel@tonic-gate }
28720Sstevel@tonic-gate 
28730Sstevel@tonic-gate uint_t
28740Sstevel@tonic-gate cpuid_get_ncpu_per_chip(cpu_t *cpu)
28750Sstevel@tonic-gate {
28760Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 1));
28770Sstevel@tonic-gate 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
28780Sstevel@tonic-gate }
28790Sstevel@tonic-gate 
28800Sstevel@tonic-gate uint_t
28811228Sandrei cpuid_get_ncore_per_chip(cpu_t *cpu)
28821228Sandrei {
28831228Sandrei 	ASSERT(cpuid_checkpass(cpu, 1));
28841228Sandrei 	return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
28851228Sandrei }
28861228Sandrei 
28871228Sandrei uint_t
28884606Sesaxe cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu)
28894606Sesaxe {
28904606Sesaxe 	ASSERT(cpuid_checkpass(cpu, 2));
28914606Sesaxe 	return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache);
28924606Sesaxe }
28934606Sesaxe 
28944606Sesaxe id_t
28954606Sesaxe cpuid_get_last_lvl_cacheid(cpu_t *cpu)
28964606Sesaxe {
28974606Sesaxe 	ASSERT(cpuid_checkpass(cpu, 2));
28984606Sesaxe 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
28994606Sesaxe }
29004606Sesaxe 
29014606Sesaxe uint_t
29020Sstevel@tonic-gate cpuid_getstep(cpu_t *cpu)
29030Sstevel@tonic-gate {
29040Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 1));
29050Sstevel@tonic-gate 	return (cpu->cpu_m.mcpu_cpi->cpi_step);
29060Sstevel@tonic-gate }
29070Sstevel@tonic-gate 
29084581Ssherrym uint_t
29094581Ssherrym cpuid_getsig(struct cpu *cpu)
29104581Ssherrym {
29114581Ssherrym 	ASSERT(cpuid_checkpass(cpu, 1));
29124581Ssherrym 	return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax);
29134581Ssherrym }
29144581Ssherrym 
29152869Sgavinm uint32_t
29162869Sgavinm cpuid_getchiprev(struct cpu *cpu)
29172869Sgavinm {
29182869Sgavinm 	ASSERT(cpuid_checkpass(cpu, 1));
29192869Sgavinm 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprev);
29202869Sgavinm }
29212869Sgavinm 
29222869Sgavinm const char *
29232869Sgavinm cpuid_getchiprevstr(struct cpu *cpu)
29242869Sgavinm {
29252869Sgavinm 	ASSERT(cpuid_checkpass(cpu, 1));
29262869Sgavinm 	return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr);
29272869Sgavinm }
29282869Sgavinm 
29292869Sgavinm uint32_t
29302869Sgavinm cpuid_getsockettype(struct cpu *cpu)
29312869Sgavinm {
29322869Sgavinm 	ASSERT(cpuid_checkpass(cpu, 1));
29332869Sgavinm 	return (cpu->cpu_m.mcpu_cpi->cpi_socket);
29342869Sgavinm }
29352869Sgavinm 
/*
 * Return a human-readable socket/package string for this CPU, computed
 * once via _cpuid_sktstr() and cached in a file-scope static.
 *
 * NOTE(review): the static cache is written without synchronization;
 * presumably all CPUs resolve to the same string and concurrent first
 * callers race benignly — confirm against callers/startup ordering.
 */
const char *
cpuid_getsocketstr(cpu_t *cpu)
{
	static const char *socketstr = NULL;
	struct cpuid_info *cpi;

	ASSERT(cpuid_checkpass(cpu, 1));
	cpi = cpu->cpu_m.mcpu_cpi;

	/* Assume that socket types are the same across the system */
	if (socketstr == NULL)
		socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
		    cpi->cpi_model, cpi->cpi_step);


	return (socketstr);
}
29539482SKuriakose.Kuruvilla@Sun.COM 
29543434Sesaxe int
29553434Sesaxe cpuid_get_chipid(cpu_t *cpu)
29560Sstevel@tonic-gate {
29570Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 1));
29580Sstevel@tonic-gate 
29591228Sandrei 	if (cpuid_is_cmt(cpu))
29600Sstevel@tonic-gate 		return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
29610Sstevel@tonic-gate 	return (cpu->cpu_id);
29620Sstevel@tonic-gate }
29630Sstevel@tonic-gate 
29641228Sandrei id_t
29653434Sesaxe cpuid_get_coreid(cpu_t *cpu)
29661228Sandrei {
29671228Sandrei 	ASSERT(cpuid_checkpass(cpu, 1));
29681228Sandrei 	return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
29691228Sandrei }
29701228Sandrei 
29710Sstevel@tonic-gate int
29725870Sgavinm cpuid_get_pkgcoreid(cpu_t *cpu)
29735870Sgavinm {
29745870Sgavinm 	ASSERT(cpuid_checkpass(cpu, 1));
29755870Sgavinm 	return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid);
29765870Sgavinm }
29775870Sgavinm 
29785870Sgavinm int
29793434Sesaxe cpuid_get_clogid(cpu_t *cpu)
29800Sstevel@tonic-gate {
29810Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 1));
29820Sstevel@tonic-gate 	return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
29830Sstevel@tonic-gate }
29840Sstevel@tonic-gate 
298511389SAlexander.Kolbasov@Sun.COM int
298611389SAlexander.Kolbasov@Sun.COM cpuid_get_cacheid(cpu_t *cpu)
298711389SAlexander.Kolbasov@Sun.COM {
298811389SAlexander.Kolbasov@Sun.COM 	ASSERT(cpuid_checkpass(cpu, 1));
298911389SAlexander.Kolbasov@Sun.COM 	return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
299011389SAlexander.Kolbasov@Sun.COM }
299111389SAlexander.Kolbasov@Sun.COM 
299210947SSrihari.Venkatesan@Sun.COM uint_t
299310947SSrihari.Venkatesan@Sun.COM cpuid_get_procnodeid(cpu_t *cpu)
299410947SSrihari.Venkatesan@Sun.COM {
299510947SSrihari.Venkatesan@Sun.COM 	ASSERT(cpuid_checkpass(cpu, 1));
299610947SSrihari.Venkatesan@Sun.COM 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid);
299710947SSrihari.Venkatesan@Sun.COM }
299810947SSrihari.Venkatesan@Sun.COM 
299910947SSrihari.Venkatesan@Sun.COM uint_t
300010947SSrihari.Venkatesan@Sun.COM cpuid_get_procnodes_per_pkg(cpu_t *cpu)
300110947SSrihari.Venkatesan@Sun.COM {
300210947SSrihari.Venkatesan@Sun.COM 	ASSERT(cpuid_checkpass(cpu, 1));
300310947SSrihari.Venkatesan@Sun.COM 	return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg);
300410947SSrihari.Venkatesan@Sun.COM }
300510947SSrihari.Venkatesan@Sun.COM 
300610080SJoe.Bonasera@sun.com /*ARGSUSED*/
300710080SJoe.Bonasera@sun.com int
300810080SJoe.Bonasera@sun.com cpuid_have_cr8access(cpu_t *cpu)
300910080SJoe.Bonasera@sun.com {
301010080SJoe.Bonasera@sun.com #if defined(__amd64)
301110080SJoe.Bonasera@sun.com 	return (1);
301210080SJoe.Bonasera@sun.com #else
301310080SJoe.Bonasera@sun.com 	struct cpuid_info *cpi;
301410080SJoe.Bonasera@sun.com 
301510080SJoe.Bonasera@sun.com 	ASSERT(cpu != NULL);
301610080SJoe.Bonasera@sun.com 	cpi = cpu->cpu_m.mcpu_cpi;
301710080SJoe.Bonasera@sun.com 	if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 &&
301810080SJoe.Bonasera@sun.com 	    (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0)
301910080SJoe.Bonasera@sun.com 		return (1);
302010080SJoe.Bonasera@sun.com 	return (0);
302110080SJoe.Bonasera@sun.com #endif
302210080SJoe.Bonasera@sun.com }
302310080SJoe.Bonasera@sun.com 
30249652SMichael.Corcoran@Sun.COM uint32_t
30259652SMichael.Corcoran@Sun.COM cpuid_get_apicid(cpu_t *cpu)
30269652SMichael.Corcoran@Sun.COM {
30279652SMichael.Corcoran@Sun.COM 	ASSERT(cpuid_checkpass(cpu, 1));
30289652SMichael.Corcoran@Sun.COM 	if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) {
30299652SMichael.Corcoran@Sun.COM 		return (UINT32_MAX);
30309652SMichael.Corcoran@Sun.COM 	} else {
30319652SMichael.Corcoran@Sun.COM 		return (cpu->cpu_m.mcpu_cpi->cpi_apicid);
30329652SMichael.Corcoran@Sun.COM 	}
30339652SMichael.Corcoran@Sun.COM }
30349652SMichael.Corcoran@Sun.COM 
30350Sstevel@tonic-gate void
30360Sstevel@tonic-gate cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
30370Sstevel@tonic-gate {
30380Sstevel@tonic-gate 	struct cpuid_info *cpi;
30390Sstevel@tonic-gate 
30400Sstevel@tonic-gate 	if (cpu == NULL)
30410Sstevel@tonic-gate 		cpu = CPU;
30420Sstevel@tonic-gate 	cpi = cpu->cpu_m.mcpu_cpi;
30430Sstevel@tonic-gate 
30440Sstevel@tonic-gate 	ASSERT(cpuid_checkpass(cpu, 1));
30450Sstevel@tonic-gate 
30460Sstevel@tonic-gate 	if (pabits)
30470Sstevel@tonic-gate 		*pabits = cpi->cpi_pabits;
30480Sstevel@tonic-gate 	if (vabits)
30490Sstevel@tonic-gate 		*vabits = cpi->cpi_vabits;
30500Sstevel@tonic-gate }
30510Sstevel@tonic-gate 
30520Sstevel@tonic-gate /*
30530Sstevel@tonic-gate  * Returns the number of data TLB entries for a corresponding
30540Sstevel@tonic-gate  * pagesize.  If it can't be computed, or isn't known, the
30550Sstevel@tonic-gate  * routine returns zero.  If you ask about an architecturally
30560Sstevel@tonic-gate  * impossible pagesize, the routine will panic (so that the
30570Sstevel@tonic-gate  * hat implementor knows that things are inconsistent.)
30580Sstevel@tonic-gate  */
uint_t
cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
{
	struct cpuid_info *cpi;
	uint_t dtlb_nent = 0;

	if (cpu == NULL)
		cpu = CPU;
	cpi = cpu->cpu_m.mcpu_cpi;

	ASSERT(cpuid_checkpass(cpu, 1));

	/*
	 * Check the L2 TLB info
	 *
	 * Decoded from the cached extended leaf 0x80000006; the bit
	 * layouts below follow the AMD extended-leaf format — TODO
	 * confirm applicability for non-AMD vendors reaching here.
	 */
	if (cpi->cpi_xmaxeax >= 0x80000006) {
		struct cpuid_regs *cp = &cpi->cpi_extd[6];

		switch (pagesize) {

		case 4 * 1024:
			/*
			 * All zero in the top 16 bits of the register
			 * indicates a unified TLB. Size is in low 16 bits.
			 */
			if ((cp->cp_ebx & 0xffff0000) == 0)
				dtlb_nent = cp->cp_ebx & 0x0000ffff;
			else
				dtlb_nent = BITX(cp->cp_ebx, 27, 16);
			break;

		case 2 * 1024 * 1024:
			/* Same unified-vs-split decode, 2M info in %eax. */
			if ((cp->cp_eax & 0xffff0000) == 0)
				dtlb_nent = cp->cp_eax & 0x0000ffff;
			else
				dtlb_nent = BITX(cp->cp_eax, 27, 16);
			break;

		default:
			panic("unknown L2 pagesize");
			/*NOTREACHED*/
		}
	}

	if (dtlb_nent != 0)
		return (dtlb_nent);

	/*
	 * No L2 TLB support for this size, try L1.
	 * (L1 d-TLB counts come from cached extended leaf 0x80000005.)
	 */
	if (cpi->cpi_xmaxeax >= 0x80000005) {
		struct cpuid_regs *cp = &cpi->cpi_extd[5];

		switch (pagesize) {
		case 4 * 1024:
			dtlb_nent = BITX(cp->cp_ebx, 23, 16);
			break;
		case 2 * 1024 * 1024:
			dtlb_nent = BITX(cp->cp_eax, 23, 16);
			break;
		default:
			panic("unknown L1 d-TLB pagesize");
			/*NOTREACHED*/
		}
	}

	return (dtlb_nent);
}
31270Sstevel@tonic-gate 
/*
 * Return 0 if the erratum is not present or not applicable, positive
 * if it is, and negative if the status of the erratum is unknown.
 *
 * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
 * Processors" #25759, Rev 3.57, August 2005
 *
 * Each numeric case below is an erratum number from that guide; the
 * very large case values (6336786 etc.) do not match the guide's
 * numbering and appear to be Sun-internal bug identifiers instead —
 * TODO confirm against bug database.
 */
int
cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	uint_t eax;

	/*
	 * Bail out if this CPU isn't an AMD CPU, or if it's
	 * a legacy (32-bit) AMD CPU.
	 */
	if (cpi->cpi_vendor != X86_VENDOR_AMD ||
	    cpi->cpi_family == 4 || cpi->cpi_family == 5 ||
	    cpi->cpi_family == 6)

		return (0);

	/* processor signature (family/model/stepping) from cpuid fn 1 */
	eax = cpi->cpi_std[1].cp_eax;

	/*
	 * The macros below match the processor signature against the
	 * specific silicon revisions named in the revision guide
	 * (e.g. SH_B0 is revision B0 of the SH core).  The composite
	 * macros (B, CG, D0, EX) group all revisions of one stepping.
	 */
#define	SH_B0(eax)	(eax == 0xf40 || eax == 0xf50)
#define	SH_B3(eax) 	(eax == 0xf51)
#define	B(eax)		(SH_B0(eax) || SH_B3(eax))

#define	SH_C0(eax)	(eax == 0xf48 || eax == 0xf58)

#define	SH_CG(eax)	(eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
#define	DH_CG(eax)	(eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
#define	CH_CG(eax)	(eax == 0xf82 || eax == 0xfb2)
#define	CG(eax)		(SH_CG(eax) || DH_CG(eax) || CH_CG(eax))

#define	SH_D0(eax)	(eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
#define	DH_D0(eax)	(eax == 0x10fc0 || eax == 0x10ff0)
#define	CH_D0(eax)	(eax == 0x10f80 || eax == 0x10fb0)
#define	D0(eax)		(SH_D0(eax) || DH_D0(eax) || CH_D0(eax))

#define	SH_E0(eax)	(eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
#define	JH_E1(eax)	(eax == 0x20f10)	/* JH8_E0 had 0x20f30 */
#define	DH_E3(eax)	(eax == 0x20fc0 || eax == 0x20ff0)
#define	SH_E4(eax)	(eax == 0x20f51 || eax == 0x20f71)
#define	BH_E4(eax)	(eax == 0x20fb1)
#define	SH_E5(eax)	(eax == 0x20f42)
#define	DH_E6(eax)	(eax == 0x20ff2 || eax == 0x20fc2)
#define	JH_E6(eax)	(eax == 0x20f12 || eax == 0x20f32)
#define	EX(eax)		(SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
			    SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
			    DH_E6(eax) || JH_E6(eax))

	/* Family 0x10 (Barcelona/Shanghai) revisions */
#define	DR_AX(eax)	(eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02)
#define	DR_B0(eax)	(eax == 0x100f20)
#define	DR_B1(eax)	(eax == 0x100f21)
#define	DR_BA(eax)	(eax == 0x100f2a)
#define	DR_B2(eax)	(eax == 0x100f22)
#define	DR_B3(eax)	(eax == 0x100f23)
#define	RB_C0(eax)	(eax == 0x100f40)

	switch (erratum) {
	case 1:
		return (cpi->cpi_family < 0x10);
	case 51:	/* what does the asterisk mean? */
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 52:
		return (B(eax));
	case 57:
		return (cpi->cpi_family <= 0x11);
	case 58:
		return (B(eax));
	case 60:
		return (cpi->cpi_family <= 0x11);
	case 61:
	case 62:
	case 63:
	case 64:
	case 65:
	case 66:
	case 68:
	case 69:
	case 70:
	case 71:
		return (B(eax));
	case 72:
		return (SH_B0(eax));
	case 74:
		return (B(eax));
	case 75:
		return (cpi->cpi_family < 0x10);
	case 76:
		return (B(eax));
	case 77:
		return (cpi->cpi_family <= 0x11);
	case 78:
		return (B(eax) || SH_C0(eax));
	case 79:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
	case 80:
	case 81:
	case 82:
		return (B(eax));
	case 83:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 85:
		return (cpi->cpi_family < 0x10);
	case 86:
		return (SH_C0(eax) || CG(eax));
	case 88:
		/* erratum only matters in 64-bit (long) mode */
#if !defined(__amd64)
		return (0);
#else
		return (B(eax) || SH_C0(eax));
#endif
	case 89:
		return (cpi->cpi_family < 0x10);
	case 90:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 91:
	case 92:
		return (B(eax) || SH_C0(eax));
	case 93:
		return (SH_C0(eax));
	case 94:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 95:
		/* erratum only matters in 64-bit (long) mode */
#if !defined(__amd64)
		return (0);
#else
		return (B(eax) || SH_C0(eax));
#endif
	case 96:
		return (B(eax) || SH_C0(eax) || CG(eax));
	case 97:
	case 98:
		return (SH_C0(eax) || CG(eax));
	case 99:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 100:
		return (B(eax) || SH_C0(eax));
	case 101:
	case 103:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 104:
		return (SH_C0(eax) || CG(eax) || D0(eax));
	case 105:
	case 106:
	case 107:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 108:
		return (DH_CG(eax));
	case 109:
		return (SH_C0(eax) || CG(eax) || D0(eax));
	case 110:
		return (D0(eax) || EX(eax));
	case 111:
		return (CG(eax));
	case 112:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
	case 113:
		return (eax == 0x20fc0);
	case 114:
		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
	case 115:
		return (SH_E0(eax) || JH_E1(eax));
	case 116:
		return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
	case 117:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
	case 118:
		return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
		    JH_E6(eax));
	case 121:
		return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
	case 122:
		return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11);
	case 123:
		return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
	case 131:
		return (cpi->cpi_family < 0x10);
	case 6336786:
		/*
		 * Test for AdvPowerMgmtInfo.TscPStateInvariant
		 * if this is a K8 family or newer processor
		 */
		if (CPI_FAMILY(cpi) == 0xf) {
			struct cpuid_regs regs;
			regs.cp_eax = 0x80000007;
			(void) __cpuid_insn(&regs);
			/* edx bit 8: TSC invariant across P-states */
			return (!(regs.cp_edx & 0x100));
		}
		return (0);
	case 6323525:
		/*
		 * Reassemble extended family/model/stepping fields of
		 * the signature and compare against revision 0xf40;
		 * present on anything older.
		 */
		return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
		    (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);

	case 6671130:
		/*
		 * check for processors (pre-Shanghai) that do not provide
		 * optimal management of 1gb ptes in its tlb.
		 */
		return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4);

	case 298:
		return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) ||
		    DR_B2(eax) || RB_C0(eax));

	default:
		return (-1);

	}
}
33416691Skchow 
33426691Skchow /*
33436691Skchow  * Determine if specified erratum is present via OSVW (OS Visible Workaround).
33446691Skchow  * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
33456691Skchow  */
33466691Skchow int
33476691Skchow osvw_opteron_erratum(cpu_t *cpu, uint_t erratum)
33486691Skchow {
33496691Skchow 	struct cpuid_info	*cpi;
33506691Skchow 	uint_t			osvwid;
33516691Skchow 	static int		osvwfeature = -1;
33526691Skchow 	uint64_t		osvwlength;
33536691Skchow 
33546691Skchow 
33556691Skchow 	cpi = cpu->cpu_m.mcpu_cpi;
33566691Skchow 
33576691Skchow 	/* confirm OSVW supported */
33586691Skchow 	if (osvwfeature == -1) {
33596691Skchow 		osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
33606691Skchow 	} else {
33616691Skchow 		/* assert that osvw feature setting is consistent on all cpus */
33626691Skchow 		ASSERT(osvwfeature ==
33636691Skchow 		    (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
33646691Skchow 	}
33656691Skchow 	if (!osvwfeature)
33666691Skchow 		return (-1);
33676691Skchow 
33686691Skchow 	osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK;
33696691Skchow 
33706691Skchow 	switch (erratum) {
33716691Skchow 	case 298:	/* osvwid is 0 */
33726691Skchow 		osvwid = 0;
33736691Skchow 		if (osvwlength <= (uint64_t)osvwid) {
33746691Skchow 			/* osvwid 0 is unknown */
33756691Skchow 			return (-1);
33766691Skchow 		}
33776691Skchow 
33786691Skchow 		/*
33796691Skchow 		 * Check the OSVW STATUS MSR to determine the state
33806691Skchow 		 * of the erratum where:
33816691Skchow 		 *   0 - fixed by HW
33826691Skchow 		 *   1 - BIOS has applied the workaround when BIOS
33836691Skchow 		 *   workaround is available. (Or for other errata,
33846691Skchow 		 *   OS workaround is required.)
33856691Skchow 		 * For a value of 1, caller will confirm that the
33866691Skchow 		 * erratum 298 workaround has indeed been applied by BIOS.
33876691Skchow 		 *
33886691Skchow 		 * A 1 may be set in cpus that have a HW fix
33896691Skchow 		 * in a mixed cpu system. Regarding erratum 298:
33906691Skchow 		 *   In a multiprocessor platform, the workaround above
33916691Skchow 		 *   should be applied to all processors regardless of
33926691Skchow 		 *   silicon revision when an affected processor is
33936691Skchow 		 *   present.
33946691Skchow 		 */
33956691Skchow 
33966691Skchow 		return (rdmsr(MSR_AMD_OSVW_STATUS +
33976691Skchow 		    (osvwid / OSVW_ID_CNT_PER_MSR)) &
33986691Skchow 		    (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR)));
33996691Skchow 
34000Sstevel@tonic-gate 	default:
34010Sstevel@tonic-gate 		return (-1);
34020Sstevel@tonic-gate 	}
34030Sstevel@tonic-gate }
34040Sstevel@tonic-gate 
/* property-name suffixes composed with a cache label by add_cache_prop() */
static const char assoc_str[] = "associativity";
static const char line_str[] = "line-size";
static const char size_str[] = "size";
34080Sstevel@tonic-gate 
34090Sstevel@tonic-gate static void
34100Sstevel@tonic-gate add_cache_prop(dev_info_t *devi, const char *label, const char *type,
34110Sstevel@tonic-gate     uint32_t val)
34120Sstevel@tonic-gate {
34130Sstevel@tonic-gate 	char buf[128];
34140Sstevel@tonic-gate 
34150Sstevel@tonic-gate 	/*
34160Sstevel@tonic-gate 	 * ndi_prop_update_int() is used because it is desirable for
34170Sstevel@tonic-gate 	 * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
34180Sstevel@tonic-gate 	 */
34190Sstevel@tonic-gate 	if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
34200Sstevel@tonic-gate 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
34210Sstevel@tonic-gate }
34220Sstevel@tonic-gate 
34230Sstevel@tonic-gate /*
34240Sstevel@tonic-gate  * Intel-style cache/tlb description
34250Sstevel@tonic-gate  *
34260Sstevel@tonic-gate  * Standard cpuid level 2 gives a randomly ordered
34270Sstevel@tonic-gate  * selection of tags that index into a table that describes
34280Sstevel@tonic-gate  * cache and tlb properties.
34290Sstevel@tonic-gate  */
34300Sstevel@tonic-gate 
/* labels under which cache/TLB properties are published */
static const char l1_icache_str[] = "l1-icache";
static const char l1_dcache_str[] = "l1-dcache";
static const char l2_cache_str[] = "l2-cache";
static const char l3_cache_str[] = "l3-cache";
static const char itlb4k_str[] = "itlb-4K";
static const char dtlb4k_str[] = "dtlb-4K";
static const char itlb2M_str[] = "itlb-2M";
static const char itlb4M_str[] = "itlb-4M";
static const char dtlb4M_str[] = "dtlb-4M";
static const char dtlb24_str[] = "dtlb0-2M-4M";
static const char itlb424_str[] = "itlb-4K-2M-4M";
static const char itlb24_str[] = "itlb-2M-4M";
static const char dtlb44_str[] = "dtlb-4K-4M";
static const char sl1_dcache_str[] = "sectored-l1-dcache";
static const char sl2_cache_str[] = "sectored-l2-cache";
static const char itrace_str[] = "itrace-cache";
static const char sl3_cache_str[] = "sectored-l3-cache";
static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k";

/*
 * One entry per Intel cpuid function 2 descriptor byte; ct_size is
 * bytes for caches and number of entries for TLBs (ct_line_size 0).
 */
static const struct cachetab {
	uint8_t 	ct_code;
	uint8_t		ct_assoc;
	uint16_t 	ct_line_size;
	size_t		ct_size;
	const char	*ct_label;
} intel_ctab[] = {
	/*
	 * maintain descending order!
	 * (find_cacheent() depends on it to terminate its scan)
	 *
	 * Codes ignored - Reason
	 * ----------------------
	 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
	 * f0H/f1H - Currently we do not interpret prefetch size by design
	 */
	{ 0xe4, 16, 64, 8*1024*1024, l3_cache_str},
	{ 0xe3, 16, 64, 4*1024*1024, l3_cache_str},
	{ 0xe2, 16, 64, 2*1024*1024, l3_cache_str},
	{ 0xde, 12, 64, 6*1024*1024, l3_cache_str},
	{ 0xdd, 12, 64, 3*1024*1024, l3_cache_str},
	{ 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str},
	{ 0xd8, 8, 64, 4*1024*1024, l3_cache_str},
	{ 0xd7, 8, 64, 2*1024*1024, l3_cache_str},
	{ 0xd6, 8, 64, 1*1024*1024, l3_cache_str},
	{ 0xd2, 4, 64, 2*1024*1024, l3_cache_str},
	{ 0xd1, 4, 64, 1*1024*1024, l3_cache_str},
	{ 0xd0, 4, 64, 512*1024, l3_cache_str},
	{ 0xca, 4, 0, 512, sh_l2_tlb4k_str},
	{ 0xc0, 4, 0, 8, dtlb44_str },
	{ 0xba, 4, 0, 64, dtlb4k_str },
	{ 0xb4, 4, 0, 256, dtlb4k_str },
	{ 0xb3, 4, 0, 128, dtlb4k_str },
	{ 0xb2, 4, 0, 64, itlb4k_str },
	{ 0xb0, 4, 0, 128, itlb4k_str },
	{ 0x87, 8, 64, 1024*1024, l2_cache_str},
	{ 0x86, 4, 64, 512*1024, l2_cache_str},
	{ 0x85, 8, 32, 2*1024*1024, l2_cache_str},
	{ 0x84, 8, 32, 1024*1024, l2_cache_str},
	{ 0x83, 8, 32, 512*1024, l2_cache_str},
	{ 0x82, 8, 32, 256*1024, l2_cache_str},
	{ 0x80, 8, 64, 512*1024, l2_cache_str},
	{ 0x7f, 2, 64, 512*1024, l2_cache_str},
	{ 0x7d, 8, 64, 2*1024*1024, sl2_cache_str},
	{ 0x7c, 8, 64, 1024*1024, sl2_cache_str},
	{ 0x7b, 8, 64, 512*1024, sl2_cache_str},
	{ 0x7a, 8, 64, 256*1024, sl2_cache_str},
	{ 0x79, 8, 64, 128*1024, sl2_cache_str},
	{ 0x78, 8, 64, 1024*1024, l2_cache_str},
	{ 0x73, 8, 0, 64*1024, itrace_str},
	{ 0x72, 8, 0, 32*1024, itrace_str},
	{ 0x71, 8, 0, 16*1024, itrace_str},
	{ 0x70, 8, 0, 12*1024, itrace_str},
	{ 0x68, 4, 64, 32*1024, sl1_dcache_str},
	{ 0x67, 4, 64, 16*1024, sl1_dcache_str},
	{ 0x66, 4, 64, 8*1024, sl1_dcache_str},
	{ 0x60, 8, 64, 16*1024, sl1_dcache_str},
	{ 0x5d, 0, 0, 256, dtlb44_str},
	{ 0x5c, 0, 0, 128, dtlb44_str},
	{ 0x5b, 0, 0, 64, dtlb44_str},
	{ 0x5a, 4, 0, 32, dtlb24_str},
	{ 0x59, 0, 0, 16, dtlb4k_str},
	{ 0x57, 4, 0, 16, dtlb4k_str},
	{ 0x56, 4, 0, 16, dtlb4M_str},
	{ 0x55, 0, 0, 7, itlb24_str},
	{ 0x52, 0, 0, 256, itlb424_str},
	{ 0x51, 0, 0, 128, itlb424_str},
	{ 0x50, 0, 0, 64, itlb424_str},
	{ 0x4f, 0, 0, 32, itlb4k_str},
	{ 0x4e, 24, 64, 6*1024*1024, l2_cache_str},
	{ 0x4d, 16, 64, 16*1024*1024, l3_cache_str},
	{ 0x4c, 12, 64, 12*1024*1024, l3_cache_str},
	{ 0x4b, 16, 64, 8*1024*1024, l3_cache_str},
	{ 0x4a, 12, 64, 6*1024*1024, l3_cache_str},
	{ 0x49, 16, 64, 4*1024*1024, l3_cache_str},
	{ 0x48, 12, 64, 3*1024*1024, l2_cache_str},
	{ 0x47, 8, 64, 8*1024*1024, l3_cache_str},
	{ 0x46, 4, 64, 4*1024*1024, l3_cache_str},
	{ 0x45, 4, 32, 2*1024*1024, l2_cache_str},
	{ 0x44, 4, 32, 1024*1024, l2_cache_str},
	{ 0x43, 4, 32, 512*1024, l2_cache_str},
	{ 0x42, 4, 32, 256*1024, l2_cache_str},
	{ 0x41, 4, 32, 128*1024, l2_cache_str},
	{ 0x3e, 4, 64, 512*1024, sl2_cache_str},
	{ 0x3d, 6, 64, 384*1024, sl2_cache_str},
	{ 0x3c, 4, 64, 256*1024, sl2_cache_str},
	{ 0x3b, 2, 64, 128*1024, sl2_cache_str},
	{ 0x3a, 6, 64, 192*1024, sl2_cache_str},
	{ 0x39, 4, 64, 128*1024, sl2_cache_str},
	{ 0x30, 8, 64, 32*1024, l1_icache_str},
	{ 0x2c, 8, 64, 32*1024, l1_dcache_str},
	{ 0x29, 8, 64, 4096*1024, sl3_cache_str},
	{ 0x25, 8, 64, 2048*1024, sl3_cache_str},
	{ 0x23, 8, 64, 1024*1024, sl3_cache_str},
	{ 0x22, 4, 64, 512*1024, sl3_cache_str},
	{ 0x0e, 6, 64, 24*1024, l1_dcache_str},
	{ 0x0d, 4, 32, 16*1024, l1_dcache_str},
	{ 0x0c, 4, 32, 16*1024, l1_dcache_str},
	{ 0x0b, 4, 0, 4, itlb4M_str},
	{ 0x0a, 2, 32, 8*1024, l1_dcache_str},
	{ 0x08, 4, 32, 16*1024, l1_icache_str},
	{ 0x06, 4, 32, 8*1024, l1_icache_str},
	{ 0x05, 4, 0, 32, dtlb4M_str},
	{ 0x04, 4, 0, 8, dtlb4M_str},
	{ 0x03, 4, 0, 64, dtlb4k_str},
	{ 0x02, 4, 0, 2, itlb4M_str},
	{ 0x01, 4, 0, 32, itlb4k_str},
	{ 0 }
};
35580Sstevel@tonic-gate 
35590Sstevel@tonic-gate static const struct cachetab cyrix_ctab[] = {
35600Sstevel@tonic-gate 	{ 0x70, 4, 0, 32, "tlb-4K" },
35610Sstevel@tonic-gate 	{ 0x80, 4, 16, 16*1024, "l1-cache" },
35620Sstevel@tonic-gate 	{ 0 }
35630Sstevel@tonic-gate };
35640Sstevel@tonic-gate 
35650Sstevel@tonic-gate /*
35660Sstevel@tonic-gate  * Search a cache table for a matching entry
35670Sstevel@tonic-gate  */
35680Sstevel@tonic-gate static const struct cachetab *
35690Sstevel@tonic-gate find_cacheent(const struct cachetab *ct, uint_t code)
35700Sstevel@tonic-gate {
35710Sstevel@tonic-gate 	if (code != 0) {
35720Sstevel@tonic-gate 		for (; ct->ct_code != 0; ct++)
35730Sstevel@tonic-gate 			if (ct->ct_code <= code)
35740Sstevel@tonic-gate 				break;
35750Sstevel@tonic-gate 		if (ct->ct_code == code)
35760Sstevel@tonic-gate 			return (ct);
35770Sstevel@tonic-gate 	}
35780Sstevel@tonic-gate 	return (NULL);
35790Sstevel@tonic-gate }
35800Sstevel@tonic-gate 
35810Sstevel@tonic-gate /*
35825438Sksadhukh  * Populate cachetab entry with L2 or L3 cache-information using
35835438Sksadhukh  * cpuid function 4. This function is called from intel_walk_cacheinfo()
35845438Sksadhukh  * when descriptor 0x49 is encountered. It returns 0 if no such cache
35855438Sksadhukh  * information is found.
35865438Sksadhukh  */
35875438Sksadhukh static int
35885438Sksadhukh intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi)
35895438Sksadhukh {
35905438Sksadhukh 	uint32_t level, i;
35915438Sksadhukh 	int ret = 0;
35925438Sksadhukh 
35935438Sksadhukh 	for (i = 0; i < cpi->cpi_std_4_size; i++) {
35945438Sksadhukh 		level = CPI_CACHE_LVL(cpi->cpi_std_4[i]);
35955438Sksadhukh 
35965438Sksadhukh 		if (level == 2 || level == 3) {
35975438Sksadhukh 			ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1;
35985438Sksadhukh 			ct->ct_line_size =
35995438Sksadhukh 			    CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1;
36005438Sksadhukh 			ct->ct_size = ct->ct_assoc *
36015438Sksadhukh 			    (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) *
36025438Sksadhukh 			    ct->ct_line_size *
36035438Sksadhukh 			    (cpi->cpi_std_4[i]->cp_ecx + 1);
36045438Sksadhukh 
36055438Sksadhukh 			if (level == 2) {
36065438Sksadhukh 				ct->ct_label = l2_cache_str;
36075438Sksadhukh 			} else if (level == 3) {
36085438Sksadhukh 				ct->ct_label = l3_cache_str;
36095438Sksadhukh 			}
36105438Sksadhukh 			ret = 1;
36115438Sksadhukh 		}
36125438Sksadhukh 	}
36135438Sksadhukh 
36145438Sksadhukh 	return (ret);
36155438Sksadhukh }
36165438Sksadhukh 
36175438Sksadhukh /*
36180Sstevel@tonic-gate  * Walk the cacheinfo descriptor, applying 'func' to every valid element
36190Sstevel@tonic-gate  * The walk is terminated if the walker returns non-zero.
36200Sstevel@tonic-gate  */
36210Sstevel@tonic-gate static void
36220Sstevel@tonic-gate intel_walk_cacheinfo(struct cpuid_info *cpi,
36230Sstevel@tonic-gate     void *arg, int (*func)(void *, const struct cachetab *))
36240Sstevel@tonic-gate {
36250Sstevel@tonic-gate 	const struct cachetab *ct;
36266964Svd224797 	struct cachetab des_49_ct, des_b1_ct;
36270Sstevel@tonic-gate 	uint8_t *dp;
36280Sstevel@tonic-gate 	int i;
36290Sstevel@tonic-gate 
36300Sstevel@tonic-gate 	if ((dp = cpi->cpi_cacheinfo) == NULL)
36310Sstevel@tonic-gate 		return;
36324797Sksadhukh 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
36334797Sksadhukh 		/*
36344797Sksadhukh 		 * For overloaded descriptor 0x49 we use cpuid function 4
36355438Sksadhukh 		 * if supported by the current processor, to create
36364797Sksadhukh 		 * cache information.
36376964Svd224797 		 * For overloaded descriptor 0xb1 we use X86_PAE flag
36386964Svd224797 		 * to disambiguate the cache information.
36394797Sksadhukh 		 */
36405438Sksadhukh 		if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 &&
36415438Sksadhukh 		    intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) {
36425438Sksadhukh 				ct = &des_49_ct;
36436964Svd224797 		} else if (*dp == 0xb1) {
36446964Svd224797 			des_b1_ct.ct_code = 0xb1;
36456964Svd224797 			des_b1_ct.ct_assoc = 4;
36466964Svd224797 			des_b1_ct.ct_line_size = 0;
364712826Skuriakose.kuruvilla@oracle.com 			if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
36486964Svd224797 				des_b1_ct.ct_size = 8;
36496964Svd224797 				des_b1_ct.ct_label = itlb2M_str;
36506964Svd224797 			} else {
36516964Svd224797 				des_b1_ct.ct_size = 4;
36526964Svd224797 				des_b1_ct.ct_label = itlb4M_str;
36536964Svd224797 			}
36546964Svd224797 			ct = &des_b1_ct;
36555438Sksadhukh 		} else {
36565438Sksadhukh 			if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) {
36575438Sksadhukh 				continue;
36585438Sksadhukh 			}
36594797Sksadhukh 		}
36604797Sksadhukh 
36615438Sksadhukh 		if (func(arg, ct) != 0) {
36625438Sksadhukh 			break;
36630Sstevel@tonic-gate 		}
36644797Sksadhukh 	}
36650Sstevel@tonic-gate }
36660Sstevel@tonic-gate 
36670Sstevel@tonic-gate /*
36680Sstevel@tonic-gate  * (Like the Intel one, except for Cyrix CPUs)
36690Sstevel@tonic-gate  */
36700Sstevel@tonic-gate static void
36710Sstevel@tonic-gate cyrix_walk_cacheinfo(struct cpuid_info *cpi,
36720Sstevel@tonic-gate     void *arg, int (*func)(void *, const struct cachetab *))
36730Sstevel@tonic-gate {
36740Sstevel@tonic-gate 	const struct cachetab *ct;
36750Sstevel@tonic-gate 	uint8_t *dp;
36760Sstevel@tonic-gate 	int i;
36770Sstevel@tonic-gate 
36780Sstevel@tonic-gate 	if ((dp = cpi->cpi_cacheinfo) == NULL)
36790Sstevel@tonic-gate 		return;
36800Sstevel@tonic-gate 	for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
36810Sstevel@tonic-gate 		/*
36820Sstevel@tonic-gate 		 * Search Cyrix-specific descriptor table first ..
36830Sstevel@tonic-gate 		 */
36840Sstevel@tonic-gate 		if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) {
36850Sstevel@tonic-gate 			if (func(arg, ct) != 0)
36860Sstevel@tonic-gate 				break;
36870Sstevel@tonic-gate 			continue;
36880Sstevel@tonic-gate 		}
36890Sstevel@tonic-gate 		/*
36900Sstevel@tonic-gate 		 * .. else fall back to the Intel one
36910Sstevel@tonic-gate 		 */
36920Sstevel@tonic-gate 		if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
36930Sstevel@tonic-gate 			if (func(arg, ct) != 0)
36940Sstevel@tonic-gate 				break;
36950Sstevel@tonic-gate 			continue;
36960Sstevel@tonic-gate 		}
36970Sstevel@tonic-gate 	}
36980Sstevel@tonic-gate }
36990Sstevel@tonic-gate 
37000Sstevel@tonic-gate /*
37010Sstevel@tonic-gate  * A cacheinfo walker that adds associativity, line-size, and size properties
37020Sstevel@tonic-gate  * to the devinfo node it is passed as an argument.
37030Sstevel@tonic-gate  */
37040Sstevel@tonic-gate static int
37050Sstevel@tonic-gate add_cacheent_props(void *arg, const struct cachetab *ct)
37060Sstevel@tonic-gate {
37070Sstevel@tonic-gate 	dev_info_t *devi = arg;
37080Sstevel@tonic-gate 
37090Sstevel@tonic-gate 	add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc);
37100Sstevel@tonic-gate 	if (ct->ct_line_size != 0)
37110Sstevel@tonic-gate 		add_cache_prop(devi, ct->ct_label, line_str,
37120Sstevel@tonic-gate 		    ct->ct_line_size);
37130Sstevel@tonic-gate 	add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size);
37140Sstevel@tonic-gate 	return (0);
37150Sstevel@tonic-gate }
37160Sstevel@tonic-gate 
37174797Sksadhukh 
/* boolean-style property name used for fully-associative caches/TLBs */
static const char fully_assoc[] = "fully-associative?";
37190Sstevel@tonic-gate 
37200Sstevel@tonic-gate /*
37210Sstevel@tonic-gate  * AMD style cache/tlb description
37220Sstevel@tonic-gate  *
37230Sstevel@tonic-gate  * Extended functions 5 and 6 directly describe properties of
37240Sstevel@tonic-gate  * tlbs and various cache levels.
37250Sstevel@tonic-gate  */
37260Sstevel@tonic-gate static void
37270Sstevel@tonic-gate add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc)
37280Sstevel@tonic-gate {
37290Sstevel@tonic-gate 	switch (assoc) {
37300Sstevel@tonic-gate 	case 0:	/* reserved; ignore */
37310Sstevel@tonic-gate 		break;
37320Sstevel@tonic-gate 	default:
37330Sstevel@tonic-gate 		add_cache_prop(devi, label, assoc_str, assoc);
37340Sstevel@tonic-gate 		break;
37350Sstevel@tonic-gate 	case 0xff:
37360Sstevel@tonic-gate 		add_cache_prop(devi, label, fully_assoc, 1);
37370Sstevel@tonic-gate 		break;
37380Sstevel@tonic-gate 	}
37390Sstevel@tonic-gate }
37400Sstevel@tonic-gate 
37410Sstevel@tonic-gate static void
37420Sstevel@tonic-gate add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
37430Sstevel@tonic-gate {
37440Sstevel@tonic-gate 	if (size == 0)
37450Sstevel@tonic-gate 		return;
37460Sstevel@tonic-gate 	add_cache_prop(devi, label, size_str, size);
37470Sstevel@tonic-gate 	add_amd_assoc(devi, label, assoc);
37480Sstevel@tonic-gate }
37490Sstevel@tonic-gate 
37500Sstevel@tonic-gate static void
37510Sstevel@tonic-gate add_amd_cache(dev_info_t *devi, const char *label,
37520Sstevel@tonic-gate     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
37530Sstevel@tonic-gate {
37540Sstevel@tonic-gate 	if (size == 0 || line_size == 0)
37550Sstevel@tonic-gate 		return;
37560Sstevel@tonic-gate 	add_amd_assoc(devi, label, assoc);
37570Sstevel@tonic-gate 	/*
37580Sstevel@tonic-gate 	 * Most AMD parts have a sectored cache. Multiple cache lines are
37590Sstevel@tonic-gate 	 * associated with each tag. A sector consists of all cache lines
37600Sstevel@tonic-gate 	 * associated with a tag. For example, the AMD K6-III has a sector
37610Sstevel@tonic-gate 	 * size of 2 cache lines per tag.
37620Sstevel@tonic-gate 	 */
37630Sstevel@tonic-gate 	if (lines_per_tag != 0)
37640Sstevel@tonic-gate 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
37650Sstevel@tonic-gate 	add_cache_prop(devi, label, line_str, line_size);
37660Sstevel@tonic-gate 	add_cache_prop(devi, label, size_str, size * 1024);
37670Sstevel@tonic-gate }
37680Sstevel@tonic-gate 
37690Sstevel@tonic-gate static void
37700Sstevel@tonic-gate add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc)
37710Sstevel@tonic-gate {
37720Sstevel@tonic-gate 	switch (assoc) {
37730Sstevel@tonic-gate 	case 0:	/* off */
37740Sstevel@tonic-gate 		break;
37750Sstevel@tonic-gate 	case 1:
37760Sstevel@tonic-gate 	case 2:
37770Sstevel@tonic-gate 	case 4:
37780Sstevel@tonic-gate 		add_cache_prop(devi, label, assoc_str, assoc);
37790Sstevel@tonic-gate 		break;
37800Sstevel@tonic-gate 	case 6:
37810Sstevel@tonic-gate 		add_cache_prop(devi, label, assoc_str, 8);
37820Sstevel@tonic-gate 		break;
37830Sstevel@tonic-gate 	case 8:
37840Sstevel@tonic-gate 		add_cache_prop(devi, label, assoc_str, 16);
37850Sstevel@tonic-gate 		break;
37860Sstevel@tonic-gate 	case 0xf:
37870Sstevel@tonic-gate 		add_cache_prop(devi, label, fully_assoc, 1);
37880Sstevel@tonic-gate 		break;
37890Sstevel@tonic-gate 	default: /* reserved; ignore */
37900Sstevel@tonic-gate 		break;
37910Sstevel@tonic-gate 	}
37920Sstevel@tonic-gate }
37930Sstevel@tonic-gate 
37940Sstevel@tonic-gate static void
37950Sstevel@tonic-gate add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
37960Sstevel@tonic-gate {
37970Sstevel@tonic-gate 	if (size == 0 || assoc == 0)
37980Sstevel@tonic-gate 		return;
37990Sstevel@tonic-gate 	add_amd_l2_assoc(devi, label, assoc);
38000Sstevel@tonic-gate 	add_cache_prop(devi, label, size_str, size);
38010Sstevel@tonic-gate }
38020Sstevel@tonic-gate 
38030Sstevel@tonic-gate static void
38040Sstevel@tonic-gate add_amd_l2_cache(dev_info_t *devi, const char *label,
38050Sstevel@tonic-gate     uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
38060Sstevel@tonic-gate {
38070Sstevel@tonic-gate 	if (size == 0 || assoc == 0 || line_size == 0)
38080Sstevel@tonic-gate 		return;
38090Sstevel@tonic-gate 	add_amd_l2_assoc(devi, label, assoc);
38100Sstevel@tonic-gate 	if (lines_per_tag != 0)
38110Sstevel@tonic-gate 		add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
38120Sstevel@tonic-gate 	add_cache_prop(devi, label, line_str, line_size);
38130Sstevel@tonic-gate 	add_cache_prop(devi, label, size_str, size * 1024);
38140Sstevel@tonic-gate }
38150Sstevel@tonic-gate 
38160Sstevel@tonic-gate static void
38170Sstevel@tonic-gate amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
38180Sstevel@tonic-gate {
38191228Sandrei 	struct cpuid_regs *cp;
38200Sstevel@tonic-gate 
38210Sstevel@tonic-gate 	if (cpi->cpi_xmaxeax < 0x80000005)
38220Sstevel@tonic-gate 		return;
38230Sstevel@tonic-gate 	cp = &cpi->cpi_extd[5];
38240Sstevel@tonic-gate 
38250Sstevel@tonic-gate 	/*
38260Sstevel@tonic-gate 	 * 4M/2M L1 TLB configuration
38270Sstevel@tonic-gate 	 *
38280Sstevel@tonic-gate 	 * We report the size for 2M pages because AMD uses two
38290Sstevel@tonic-gate 	 * TLB entries for one 4M page.
38300Sstevel@tonic-gate 	 */
38310Sstevel@tonic-gate 	add_amd_tlb(devi, "dtlb-2M",
38320Sstevel@tonic-gate 	    BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16));
38330Sstevel@tonic-gate 	add_amd_tlb(devi, "itlb-2M",
38340Sstevel@tonic-gate 	    BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0));
38350Sstevel@tonic-gate 
38360Sstevel@tonic-gate 	/*
38370Sstevel@tonic-gate 	 * 4K L1 TLB configuration
38380Sstevel@tonic-gate 	 */
38390Sstevel@tonic-gate 
38400Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
38410Sstevel@tonic-gate 		uint_t nentries;
38420Sstevel@tonic-gate 	case X86_VENDOR_TM:
38430Sstevel@tonic-gate 		if (cpi->cpi_family >= 5) {
38440Sstevel@tonic-gate 			/*
38450Sstevel@tonic-gate 			 * Crusoe processors have 256 TLB entries, but
38460Sstevel@tonic-gate 			 * cpuid data format constrains them to only
38470Sstevel@tonic-gate 			 * reporting 255 of them.
38480Sstevel@tonic-gate 			 */
38490Sstevel@tonic-gate 			if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
38500Sstevel@tonic-gate 				nentries = 256;
38510Sstevel@tonic-gate 			/*
38520Sstevel@tonic-gate 			 * Crusoe processors also have a unified TLB
38530Sstevel@tonic-gate 			 */
38540Sstevel@tonic-gate 			add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
38550Sstevel@tonic-gate 			    nentries);
38560Sstevel@tonic-gate 			break;
38570Sstevel@tonic-gate 		}
38580Sstevel@tonic-gate 		/*FALLTHROUGH*/
38590Sstevel@tonic-gate 	default:
38600Sstevel@tonic-gate 		add_amd_tlb(devi, itlb4k_str,
38610Sstevel@tonic-gate 		    BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
38620Sstevel@tonic-gate 		add_amd_tlb(devi, dtlb4k_str,
38630Sstevel@tonic-gate 		    BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
38640Sstevel@tonic-gate 		break;
38650Sstevel@tonic-gate 	}
38660Sstevel@tonic-gate 
38670Sstevel@tonic-gate 	/*
38680Sstevel@tonic-gate 	 * data L1 cache configuration
38690Sstevel@tonic-gate 	 */
38700Sstevel@tonic-gate 
38710Sstevel@tonic-gate 	add_amd_cache(devi, l1_dcache_str,
38720Sstevel@tonic-gate 	    BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
38730Sstevel@tonic-gate 	    BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));
38740Sstevel@tonic-gate 
38750Sstevel@tonic-gate 	/*
38760Sstevel@tonic-gate 	 * code L1 cache configuration
38770Sstevel@tonic-gate 	 */
38780Sstevel@tonic-gate 
38790Sstevel@tonic-gate 	add_amd_cache(devi, l1_icache_str,
38800Sstevel@tonic-gate 	    BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
38810Sstevel@tonic-gate 	    BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));
38820Sstevel@tonic-gate 
38830Sstevel@tonic-gate 	if (cpi->cpi_xmaxeax < 0x80000006)
38840Sstevel@tonic-gate 		return;
38850Sstevel@tonic-gate 	cp = &cpi->cpi_extd[6];
38860Sstevel@tonic-gate 
38870Sstevel@tonic-gate 	/* Check for a unified L2 TLB for large pages */
38880Sstevel@tonic-gate 
38890Sstevel@tonic-gate 	if (BITX(cp->cp_eax, 31, 16) == 0)
38900Sstevel@tonic-gate 		add_amd_l2_tlb(devi, "l2-tlb-2M",
38910Sstevel@tonic-gate 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
38920Sstevel@tonic-gate 	else {
38930Sstevel@tonic-gate 		add_amd_l2_tlb(devi, "l2-dtlb-2M",
38940Sstevel@tonic-gate 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
38950Sstevel@tonic-gate 		add_amd_l2_tlb(devi, "l2-itlb-2M",
38960Sstevel@tonic-gate 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
38970Sstevel@tonic-gate 	}
38980Sstevel@tonic-gate 
38990Sstevel@tonic-gate 	/* Check for a unified L2 TLB for 4K pages */
39000Sstevel@tonic-gate 
39010Sstevel@tonic-gate 	if (BITX(cp->cp_ebx, 31, 16) == 0) {
39020Sstevel@tonic-gate 		add_amd_l2_tlb(devi, "l2-tlb-4K",
39030Sstevel@tonic-gate 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
39040Sstevel@tonic-gate 	} else {
39050Sstevel@tonic-gate 		add_amd_l2_tlb(devi, "l2-dtlb-4K",
39060Sstevel@tonic-gate 		    BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
39070Sstevel@tonic-gate 		add_amd_l2_tlb(devi, "l2-itlb-4K",
39080Sstevel@tonic-gate 		    BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
39090Sstevel@tonic-gate 	}
39100Sstevel@tonic-gate 
39110Sstevel@tonic-gate 	add_amd_l2_cache(devi, l2_cache_str,
39120Sstevel@tonic-gate 	    BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
39130Sstevel@tonic-gate 	    BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
39140Sstevel@tonic-gate }
39150Sstevel@tonic-gate 
39160Sstevel@tonic-gate /*
39170Sstevel@tonic-gate  * There are two basic ways that the x86 world describes it cache
39180Sstevel@tonic-gate  * and tlb architecture - Intel's way and AMD's way.
39190Sstevel@tonic-gate  *
39200Sstevel@tonic-gate  * Return which flavor of cache architecture we should use
39210Sstevel@tonic-gate  */
39220Sstevel@tonic-gate static int
39230Sstevel@tonic-gate x86_which_cacheinfo(struct cpuid_info *cpi)
39240Sstevel@tonic-gate {
39250Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
39260Sstevel@tonic-gate 	case X86_VENDOR_Intel:
39270Sstevel@tonic-gate 		if (cpi->cpi_maxeax >= 2)
39280Sstevel@tonic-gate 			return (X86_VENDOR_Intel);
39290Sstevel@tonic-gate 		break;
39300Sstevel@tonic-gate 	case X86_VENDOR_AMD:
39310Sstevel@tonic-gate 		/*
39320Sstevel@tonic-gate 		 * The K5 model 1 was the first part from AMD that reported
39330Sstevel@tonic-gate 		 * cache sizes via extended cpuid functions.
39340Sstevel@tonic-gate 		 */
39350Sstevel@tonic-gate 		if (cpi->cpi_family > 5 ||
39360Sstevel@tonic-gate 		    (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
39370Sstevel@tonic-gate 			return (X86_VENDOR_AMD);
39380Sstevel@tonic-gate 		break;
39390Sstevel@tonic-gate 	case X86_VENDOR_TM:
39400Sstevel@tonic-gate 		if (cpi->cpi_family >= 5)
39410Sstevel@tonic-gate 			return (X86_VENDOR_AMD);
39420Sstevel@tonic-gate 		/*FALLTHROUGH*/
39430Sstevel@tonic-gate 	default:
39440Sstevel@tonic-gate 		/*
39450Sstevel@tonic-gate 		 * If they have extended CPU data for 0x80000005
39460Sstevel@tonic-gate 		 * then we assume they have AMD-format cache
39470Sstevel@tonic-gate 		 * information.
39480Sstevel@tonic-gate 		 *
39490Sstevel@tonic-gate 		 * If not, and the vendor happens to be Cyrix,
39500Sstevel@tonic-gate 		 * then try our-Cyrix specific handler.
39510Sstevel@tonic-gate 		 *
39520Sstevel@tonic-gate 		 * If we're not Cyrix, then assume we're using Intel's
39530Sstevel@tonic-gate 		 * table-driven format instead.
39540Sstevel@tonic-gate 		 */
39550Sstevel@tonic-gate 		if (cpi->cpi_xmaxeax >= 0x80000005)
39560Sstevel@tonic-gate 			return (X86_VENDOR_AMD);
39570Sstevel@tonic-gate 		else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
39580Sstevel@tonic-gate 			return (X86_VENDOR_Cyrix);
39590Sstevel@tonic-gate 		else if (cpi->cpi_maxeax >= 2)
39600Sstevel@tonic-gate 			return (X86_VENDOR_Intel);
39610Sstevel@tonic-gate 		break;
39620Sstevel@tonic-gate 	}
39630Sstevel@tonic-gate 	return (-1);
39640Sstevel@tonic-gate }
39650Sstevel@tonic-gate 
39660Sstevel@tonic-gate void
39679652SMichael.Corcoran@Sun.COM cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
39689652SMichael.Corcoran@Sun.COM     struct cpuid_info *cpi)
39690Sstevel@tonic-gate {
39700Sstevel@tonic-gate 	dev_info_t *cpu_devi;
39710Sstevel@tonic-gate 	int create;
39720Sstevel@tonic-gate 
39739652SMichael.Corcoran@Sun.COM 	cpu_devi = (dev_info_t *)dip;
39740Sstevel@tonic-gate 
39750Sstevel@tonic-gate 	/* device_type */
39760Sstevel@tonic-gate 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
39770Sstevel@tonic-gate 	    "device_type", "cpu");
39780Sstevel@tonic-gate 
39790Sstevel@tonic-gate 	/* reg */
39800Sstevel@tonic-gate 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
39810Sstevel@tonic-gate 	    "reg", cpu_id);
39820Sstevel@tonic-gate 
39830Sstevel@tonic-gate 	/* cpu-mhz, and clock-frequency */
39840Sstevel@tonic-gate 	if (cpu_freq > 0) {
39850Sstevel@tonic-gate 		long long mul;
39860Sstevel@tonic-gate 
39870Sstevel@tonic-gate 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
39880Sstevel@tonic-gate 		    "cpu-mhz", cpu_freq);
39890Sstevel@tonic-gate 		if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
39900Sstevel@tonic-gate 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
39910Sstevel@tonic-gate 			    "clock-frequency", (int)mul);
39920Sstevel@tonic-gate 	}
39930Sstevel@tonic-gate 
399412826Skuriakose.kuruvilla@oracle.com 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) {
39950Sstevel@tonic-gate 		return;
39960Sstevel@tonic-gate 	}
39970Sstevel@tonic-gate 
39980Sstevel@tonic-gate 	/* vendor-id */
39990Sstevel@tonic-gate 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
40004481Sbholler 	    "vendor-id", cpi->cpi_vendorstr);
40010Sstevel@tonic-gate 
40020Sstevel@tonic-gate 	if (cpi->cpi_maxeax == 0) {
40030Sstevel@tonic-gate 		return;
40040Sstevel@tonic-gate 	}
40050Sstevel@tonic-gate 
40060Sstevel@tonic-gate 	/*
40070Sstevel@tonic-gate 	 * family, model, and step
40080Sstevel@tonic-gate 	 */
40090Sstevel@tonic-gate 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40104481Sbholler 	    "family", CPI_FAMILY(cpi));
40110Sstevel@tonic-gate 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40124481Sbholler 	    "cpu-model", CPI_MODEL(cpi));
40130Sstevel@tonic-gate 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40144481Sbholler 	    "stepping-id", CPI_STEP(cpi));
40150Sstevel@tonic-gate 
40160Sstevel@tonic-gate 	/* type */
40170Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
40180Sstevel@tonic-gate 	case X86_VENDOR_Intel:
40190Sstevel@tonic-gate 		create = 1;
40200Sstevel@tonic-gate 		break;
40210Sstevel@tonic-gate 	default:
40220Sstevel@tonic-gate 		create = 0;
40230Sstevel@tonic-gate 		break;
40240Sstevel@tonic-gate 	}
40250Sstevel@tonic-gate 	if (create)
40260Sstevel@tonic-gate 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40274481Sbholler 		    "type", CPI_TYPE(cpi));
40280Sstevel@tonic-gate 
40290Sstevel@tonic-gate 	/* ext-family */
40300Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
40310Sstevel@tonic-gate 	case X86_VENDOR_Intel:
40320Sstevel@tonic-gate 	case X86_VENDOR_AMD:
40330Sstevel@tonic-gate 		create = cpi->cpi_family >= 0xf;
40340Sstevel@tonic-gate 		break;
40350Sstevel@tonic-gate 	default:
40360Sstevel@tonic-gate 		create = 0;
40370Sstevel@tonic-gate 		break;
40380Sstevel@tonic-gate 	}
40390Sstevel@tonic-gate 	if (create)
40400Sstevel@tonic-gate 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40410Sstevel@tonic-gate 		    "ext-family", CPI_FAMILY_XTD(cpi));
40420Sstevel@tonic-gate 
40430Sstevel@tonic-gate 	/* ext-model */
40440Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
40450Sstevel@tonic-gate 	case X86_VENDOR_Intel:
40466317Skk208521 		create = IS_EXTENDED_MODEL_INTEL(cpi);
40472001Sdmick 		break;
40480Sstevel@tonic-gate 	case X86_VENDOR_AMD:
40491582Skchow 		create = CPI_FAMILY(cpi) == 0xf;
40500Sstevel@tonic-gate 		break;
40510Sstevel@tonic-gate 	default:
40520Sstevel@tonic-gate 		create = 0;
40530Sstevel@tonic-gate 		break;
40540Sstevel@tonic-gate 	}
40550Sstevel@tonic-gate 	if (create)
40560Sstevel@tonic-gate 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40574481Sbholler 		    "ext-model", CPI_MODEL_XTD(cpi));
40580Sstevel@tonic-gate 
40590Sstevel@tonic-gate 	/* generation */
40600Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
40610Sstevel@tonic-gate 	case X86_VENDOR_AMD:
40620Sstevel@tonic-gate 		/*
40630Sstevel@tonic-gate 		 * AMD K5 model 1 was the first part to support this
40640Sstevel@tonic-gate 		 */
40650Sstevel@tonic-gate 		create = cpi->cpi_xmaxeax >= 0x80000001;
40660Sstevel@tonic-gate 		break;
40670Sstevel@tonic-gate 	default:
40680Sstevel@tonic-gate 		create = 0;
40690Sstevel@tonic-gate 		break;
40700Sstevel@tonic-gate 	}
40710Sstevel@tonic-gate 	if (create)
40720Sstevel@tonic-gate 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40730Sstevel@tonic-gate 		    "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));
40740Sstevel@tonic-gate 
40750Sstevel@tonic-gate 	/* brand-id */
40760Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
40770Sstevel@tonic-gate 	case X86_VENDOR_Intel:
40780Sstevel@tonic-gate 		/*
40790Sstevel@tonic-gate 		 * brand id first appeared on Pentium III Xeon model 8,
40800Sstevel@tonic-gate 		 * and Celeron model 8 processors and Opteron
40810Sstevel@tonic-gate 		 */
40820Sstevel@tonic-gate 		create = cpi->cpi_family > 6 ||
40830Sstevel@tonic-gate 		    (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
40840Sstevel@tonic-gate 		break;
40850Sstevel@tonic-gate 	case X86_VENDOR_AMD:
40860Sstevel@tonic-gate 		create = cpi->cpi_family >= 0xf;
40870Sstevel@tonic-gate 		break;
40880Sstevel@tonic-gate 	default:
40890Sstevel@tonic-gate 		create = 0;
40900Sstevel@tonic-gate 		break;
40910Sstevel@tonic-gate 	}
40920Sstevel@tonic-gate 	if (create && cpi->cpi_brandid != 0) {
40930Sstevel@tonic-gate 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40940Sstevel@tonic-gate 		    "brand-id", cpi->cpi_brandid);
40950Sstevel@tonic-gate 	}
40960Sstevel@tonic-gate 
40970Sstevel@tonic-gate 	/* chunks, and apic-id */
40980Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
40990Sstevel@tonic-gate 		/*
41000Sstevel@tonic-gate 		 * first available on Pentium IV and Opteron (K8)
41010Sstevel@tonic-gate 		 */
41021975Sdmick 	case X86_VENDOR_Intel:
41031975Sdmick 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
41041975Sdmick 		break;
41051975Sdmick 	case X86_VENDOR_AMD:
41060Sstevel@tonic-gate 		create = cpi->cpi_family >= 0xf;
41070Sstevel@tonic-gate 		break;
41080Sstevel@tonic-gate 	default:
41090Sstevel@tonic-gate 		create = 0;
41100Sstevel@tonic-gate 		break;
41110Sstevel@tonic-gate 	}
41120Sstevel@tonic-gate 	if (create) {
41130Sstevel@tonic-gate 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41144481Sbholler 		    "chunks", CPI_CHUNKS(cpi));
41150Sstevel@tonic-gate 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41167282Smishra 		    "apic-id", cpi->cpi_apicid);
41171414Scindi 		if (cpi->cpi_chipid >= 0) {
41180Sstevel@tonic-gate 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41190Sstevel@tonic-gate 			    "chip#", cpi->cpi_chipid);
41201414Scindi 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41211414Scindi 			    "clog#", cpi->cpi_clogid);
41221414Scindi 		}
41230Sstevel@tonic-gate 	}
41240Sstevel@tonic-gate 
41250Sstevel@tonic-gate 	/* cpuid-features */
41260Sstevel@tonic-gate 	(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41270Sstevel@tonic-gate 	    "cpuid-features", CPI_FEATURES_EDX(cpi));
41280Sstevel@tonic-gate 
41290Sstevel@tonic-gate 
41300Sstevel@tonic-gate 	/* cpuid-features-ecx */
41310Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
41320Sstevel@tonic-gate 	case X86_VENDOR_Intel:
41331975Sdmick 		create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
41340Sstevel@tonic-gate 		break;
41350Sstevel@tonic-gate 	default:
41360Sstevel@tonic-gate 		create = 0;
41370Sstevel@tonic-gate 		break;
41380Sstevel@tonic-gate 	}
41390Sstevel@tonic-gate 	if (create)
41400Sstevel@tonic-gate 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41410Sstevel@tonic-gate 		    "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));
41420Sstevel@tonic-gate 
41430Sstevel@tonic-gate 	/* ext-cpuid-features */
41440Sstevel@tonic-gate 	switch (cpi->cpi_vendor) {
41451975Sdmick 	case X86_VENDOR_Intel:
41460Sstevel@tonic-gate 	case X86_VENDOR_AMD:
41470Sstevel@tonic-gate 	case X86_VENDOR_Cyrix:
41480Sstevel@tonic-gate 	case X86_VENDOR_TM:
41490Sstevel@tonic-gate 	case X86_VENDOR_Centaur:
41500Sstevel@tonic-gate 		create = cpi->cpi_xmaxeax >= 0x80000001;
41510Sstevel@tonic-gate 		break;
41520Sstevel@tonic-gate 	default:
41530Sstevel@tonic-gate 		create = 0;
41540Sstevel@tonic-gate 		break;
41550Sstevel@tonic-gate 	}
41561975Sdmick 	if (create) {
41570Sstevel@tonic-gate 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41584481Sbholler 		    "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
41591975Sdmick 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41604481Sbholler 		    "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
41611975Sdmick 	}
41620Sstevel@tonic-gate 
41630Sstevel@tonic-gate 	/*
41640Sstevel@tonic-gate 	 * Brand String first appeared in Intel Pentium IV, AMD K5
41650Sstevel@tonic-gate 	 * model 1, and Cyrix GXm.  On earlier models we try and
41660Sstevel@tonic-gate 	 * simulate something similar .. so this string should always
41670Sstevel@tonic-gate 	 * same -something- about the processor, however lame.
41680Sstevel@tonic-gate 	 */
41690Sstevel@tonic-gate 	(void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
41700Sstevel@tonic-gate 	    "brand-string", cpi->cpi_brandstr);
41710Sstevel@tonic-gate 
41720Sstevel@tonic-gate 	/*
41730Sstevel@tonic-gate 	 * Finally, cache and tlb information
41740Sstevel@tonic-gate 	 */
41750Sstevel@tonic-gate 	switch (x86_which_cacheinfo(cpi)) {
41760Sstevel@tonic-gate 	case X86_VENDOR_Intel:
41770Sstevel@tonic-gate 		intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
41780Sstevel@tonic-gate 		break;
41790Sstevel@tonic-gate 	case X86_VENDOR_Cyrix:
41800Sstevel@tonic-gate 		cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
41810Sstevel@tonic-gate 		break;
41820Sstevel@tonic-gate 	case X86_VENDOR_AMD:
41830Sstevel@tonic-gate 		amd_cache_info(cpi, cpu_devi);
41840Sstevel@tonic-gate 		break;
41850Sstevel@tonic-gate 	default:
41860Sstevel@tonic-gate 		break;
41870Sstevel@tonic-gate 	}
41880Sstevel@tonic-gate }
41890Sstevel@tonic-gate 
41900Sstevel@tonic-gate struct l2info {
41910Sstevel@tonic-gate 	int *l2i_csz;
41920Sstevel@tonic-gate 	int *l2i_lsz;
41930Sstevel@tonic-gate 	int *l2i_assoc;
41940Sstevel@tonic-gate 	int l2i_ret;
41950Sstevel@tonic-gate };
41960Sstevel@tonic-gate 
41970Sstevel@tonic-gate /*
41980Sstevel@tonic-gate  * A cacheinfo walker that fetches the size, line-size and associativity
41990Sstevel@tonic-gate  * of the L2 cache
42000Sstevel@tonic-gate  */
42010Sstevel@tonic-gate static int
42020Sstevel@tonic-gate intel_l2cinfo(void *arg, const struct cachetab *ct)
42030Sstevel@tonic-gate {
42040Sstevel@tonic-gate 	struct l2info *l2i = arg;
42050Sstevel@tonic-gate 	int *ip;
42060Sstevel@tonic-gate 
42070Sstevel@tonic-gate 	if (ct->ct_label != l2_cache_str &&
42080Sstevel@tonic-gate 	    ct->ct_label != sl2_cache_str)
42090Sstevel@tonic-gate 		return (0);	/* not an L2 -- keep walking */
42100Sstevel@tonic-gate 
42110Sstevel@tonic-gate 	if ((ip = l2i->l2i_csz) != NULL)
42120Sstevel@tonic-gate 		*ip = ct->ct_size;
42130Sstevel@tonic-gate 	if ((ip = l2i->l2i_lsz) != NULL)
42140Sstevel@tonic-gate 		*ip = ct->ct_line_size;
42150Sstevel@tonic-gate 	if ((ip = l2i->l2i_assoc) != NULL)
42160Sstevel@tonic-gate 		*ip = ct->ct_assoc;
42170Sstevel@tonic-gate 	l2i->l2i_ret = ct->ct_size;
42180Sstevel@tonic-gate 	return (1);		/* was an L2 -- terminate walk */
42190Sstevel@tonic-gate }
42200Sstevel@tonic-gate 
42215070Skchow /*
42225070Skchow  * AMD L2/L3 Cache and TLB Associativity Field Definition:
42235070Skchow  *
42245070Skchow  *	Unlike the associativity for the L1 cache and tlb where the 8 bit
42255070Skchow  *	value is the associativity, the associativity for the L2 cache and
42265070Skchow  *	tlb is encoded in the following table. The 4 bit L2 value serves as
42275070Skchow  *	an index into the amd_afd[] array to determine the associativity.
42285070Skchow  *	-1 is undefined. 0 is fully associative.
42295070Skchow  */
42305070Skchow 
42315070Skchow static int amd_afd[] =
42325070Skchow 	{-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0};
42335070Skchow 
42340Sstevel@tonic-gate static void
42350Sstevel@tonic-gate amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
42360Sstevel@tonic-gate {
42371228Sandrei 	struct cpuid_regs *cp;
42380Sstevel@tonic-gate 	uint_t size, assoc;
42395070Skchow 	int i;
42400Sstevel@tonic-gate 	int *ip;
42410Sstevel@tonic-gate 
42420Sstevel@tonic-gate 	if (cpi->cpi_xmaxeax < 0x80000006)
42430Sstevel@tonic-gate 		return;
42440Sstevel@tonic-gate 	cp = &cpi->cpi_extd[6];
42450Sstevel@tonic-gate 
42465070Skchow 	if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 &&
42470Sstevel@tonic-gate 	    (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
42480Sstevel@tonic-gate 		uint_t cachesz = size * 1024;
42495070Skchow 		assoc = amd_afd[i];
42505070Skchow 
42515070Skchow 		ASSERT(assoc != -1);
42520Sstevel@tonic-gate 
42530Sstevel@tonic-gate 		if ((ip = l2i->l2i_csz) != NULL)
42540Sstevel@tonic-gate 			*ip = cachesz;
42550Sstevel@tonic-gate 		if ((ip = l2i->l2i_lsz) != NULL)
42560Sstevel@tonic-gate 			*ip = BITX(cp->cp_ecx, 7, 0);
42570Sstevel@tonic-gate 		if ((ip = l2i->l2i_assoc) != NULL)
42580Sstevel@tonic-gate 			*ip = assoc;
42590Sstevel@tonic-gate 		l2i->l2i_ret = cachesz;
42600Sstevel@tonic-gate 	}
42610Sstevel@tonic-gate }
42620Sstevel@tonic-gate 
42630Sstevel@tonic-gate int
42640Sstevel@tonic-gate getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
42650Sstevel@tonic-gate {
42660Sstevel@tonic-gate 	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
42670Sstevel@tonic-gate 	struct l2info __l2info, *l2i = &__l2info;
42680Sstevel@tonic-gate 
42690Sstevel@tonic-gate 	l2i->l2i_csz = csz;
42700Sstevel@tonic-gate 	l2i->l2i_lsz = lsz;
42710Sstevel@tonic-gate 	l2i->l2i_assoc = assoc;
42720Sstevel@tonic-gate 	l2i->l2i_ret = -1;
42730Sstevel@tonic-gate 
42740Sstevel@tonic-gate 	switch (x86_which_cacheinfo(cpi)) {
42750Sstevel@tonic-gate 	case X86_VENDOR_Intel:
42760Sstevel@tonic-gate 		intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
42770Sstevel@tonic-gate 		break;
42780Sstevel@tonic-gate 	case X86_VENDOR_Cyrix:
42790Sstevel@tonic-gate 		cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
42800Sstevel@tonic-gate 		break;
42810Sstevel@tonic-gate 	case X86_VENDOR_AMD:
42820Sstevel@tonic-gate 		amd_l2cacheinfo(cpi, l2i);
42830Sstevel@tonic-gate 		break;
42840Sstevel@tonic-gate 	default:
42850Sstevel@tonic-gate 		break;
42860Sstevel@tonic-gate 	}
42870Sstevel@tonic-gate 	return (l2i->l2i_ret);
42880Sstevel@tonic-gate }
42894481Sbholler 
42905084Sjohnlev #if !defined(__xpv)
42915084Sjohnlev 
42925045Sbholler uint32_t *
42935045Sbholler cpuid_mwait_alloc(cpu_t *cpu)
42945045Sbholler {
42955045Sbholler 	uint32_t	*ret;
42965045Sbholler 	size_t		mwait_size;
42975045Sbholler 
429812004Sjiang.liu@intel.com 	ASSERT(cpuid_checkpass(CPU, 2));
429912004Sjiang.liu@intel.com 
430012004Sjiang.liu@intel.com 	mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
43015045Sbholler 	if (mwait_size == 0)
43025045Sbholler 		return (NULL);
43035045Sbholler 
43045045Sbholler 	/*
43055045Sbholler 	 * kmem_alloc() returns cache line size aligned data for mwait_size
43065045Sbholler 	 * allocations.  mwait_size is currently cache line sized.  Neither
43075045Sbholler 	 * of these implementation details are guarantied to be true in the
43085045Sbholler 	 * future.
43095045Sbholler 	 *
43105045Sbholler 	 * First try allocating mwait_size as kmem_alloc() currently returns
43115045Sbholler 	 * correctly aligned memory.  If kmem_alloc() does not return
43125045Sbholler 	 * mwait_size aligned memory, then use mwait_size ROUNDUP.
43135045Sbholler 	 *
43145045Sbholler 	 * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
43155045Sbholler 	 * decide to free this memory.
43165045Sbholler 	 */
43175045Sbholler 	ret = kmem_zalloc(mwait_size, KM_SLEEP);
43185045Sbholler 	if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
43195045Sbholler 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
43205045Sbholler 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size;
43215045Sbholler 		*ret = MWAIT_RUNNING;
43225045Sbholler 		return (ret);
43235045Sbholler 	} else {
43245045Sbholler 		kmem_free(ret, mwait_size);
43255045Sbholler 		ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
43265045Sbholler 		cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
43275045Sbholler 		cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2;
43285045Sbholler 		ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
43295045Sbholler 		*ret = MWAIT_RUNNING;
43305045Sbholler 		return (ret);
43315045Sbholler 	}
43325045Sbholler }
43335045Sbholler 
43345045Sbholler void
43355045Sbholler cpuid_mwait_free(cpu_t *cpu)
43364481Sbholler {
433712004Sjiang.liu@intel.com 	if (cpu->cpu_m.mcpu_cpi == NULL) {
433812004Sjiang.liu@intel.com 		return;
433912004Sjiang.liu@intel.com 	}
43405045Sbholler 
43415045Sbholler 	if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
43425045Sbholler 	    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
43435045Sbholler 		kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
43445045Sbholler 		    cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
43455045Sbholler 	}
43465045Sbholler 
43475045Sbholler 	cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
43485045Sbholler 	cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
43494481Sbholler }
43505084Sjohnlev 
43515322Ssudheer void
43525322Ssudheer patch_tsc_read(int flag)
43535322Ssudheer {
43545322Ssudheer 	size_t cnt;
43557532SSean.Ye@Sun.COM 
43565322Ssudheer 	switch (flag) {
43575322Ssudheer 	case X86_NO_TSC:
43585322Ssudheer 		cnt = &_no_rdtsc_end - &_no_rdtsc_start;
43595338Ssudheer 		(void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
43605322Ssudheer 		break;
43615322Ssudheer 	case X86_HAVE_TSCP:
43625322Ssudheer 		cnt = &_tscp_end - &_tscp_start;
43635338Ssudheer 		(void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
43645322Ssudheer 		break;
43655322Ssudheer 	case X86_TSC_MFENCE:
43665322Ssudheer 		cnt = &_tsc_mfence_end - &_tsc_mfence_start;
43675338Ssudheer 		(void) memcpy((void *)tsc_read,
43685338Ssudheer 		    (void *)&_tsc_mfence_start, cnt);
43695322Ssudheer 		break;
43706642Ssudheer 	case X86_TSC_LFENCE:
43716642Ssudheer 		cnt = &_tsc_lfence_end - &_tsc_lfence_start;
43726642Ssudheer 		(void) memcpy((void *)tsc_read,
43736642Ssudheer 		    (void *)&_tsc_lfence_start, cnt);
43746642Ssudheer 		break;
43755322Ssudheer 	default:
43765322Ssudheer 		break;
43775322Ssudheer 	}
43785322Ssudheer }
43795322Ssudheer 
43808906SEric.Saxe@Sun.COM int
43818906SEric.Saxe@Sun.COM cpuid_deep_cstates_supported(void)
43828906SEric.Saxe@Sun.COM {
43838906SEric.Saxe@Sun.COM 	struct cpuid_info *cpi;
43848906SEric.Saxe@Sun.COM 	struct cpuid_regs regs;
43858906SEric.Saxe@Sun.COM 
43868906SEric.Saxe@Sun.COM 	ASSERT(cpuid_checkpass(CPU, 1));
43878906SEric.Saxe@Sun.COM 
43888906SEric.Saxe@Sun.COM 	cpi = CPU->cpu_m.mcpu_cpi;
43898906SEric.Saxe@Sun.COM 
439012826Skuriakose.kuruvilla@oracle.com 	if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
43918906SEric.Saxe@Sun.COM 		return (0);
43928906SEric.Saxe@Sun.COM 
43938906SEric.Saxe@Sun.COM 	switch (cpi->cpi_vendor) {
43948906SEric.Saxe@Sun.COM 	case X86_VENDOR_Intel:
43958906SEric.Saxe@Sun.COM 		if (cpi->cpi_xmaxeax < 0x80000007)
43968906SEric.Saxe@Sun.COM 			return (0);
43978906SEric.Saxe@Sun.COM 
43988906SEric.Saxe@Sun.COM 		/*
43998906SEric.Saxe@Sun.COM 		 * TSC run at a constant rate in all ACPI C-states?
44008906SEric.Saxe@Sun.COM 		 */
44018906SEric.Saxe@Sun.COM 		regs.cp_eax = 0x80000007;
44028906SEric.Saxe@Sun.COM 		(void) __cpuid_insn(&regs);
44038906SEric.Saxe@Sun.COM 		return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);
44048906SEric.Saxe@Sun.COM 
44058906SEric.Saxe@Sun.COM 	default:
44068906SEric.Saxe@Sun.COM 		return (0);
44078906SEric.Saxe@Sun.COM 	}
44088906SEric.Saxe@Sun.COM }
44098906SEric.Saxe@Sun.COM 
44108930SBill.Holler@Sun.COM #endif	/* !__xpv */
44118930SBill.Holler@Sun.COM 
44128930SBill.Holler@Sun.COM void
44138930SBill.Holler@Sun.COM post_startup_cpu_fixups(void)
44148930SBill.Holler@Sun.COM {
44158930SBill.Holler@Sun.COM #ifndef __xpv
44168930SBill.Holler@Sun.COM 	/*
44178930SBill.Holler@Sun.COM 	 * Some AMD processors support C1E state. Entering this state will
44188930SBill.Holler@Sun.COM 	 * cause the local APIC timer to stop, which we can't deal with at
44198930SBill.Holler@Sun.COM 	 * this time.
44208930SBill.Holler@Sun.COM 	 */
44218930SBill.Holler@Sun.COM 	if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
44228930SBill.Holler@Sun.COM 		on_trap_data_t otd;
44238930SBill.Holler@Sun.COM 		uint64_t reg;
44248930SBill.Holler@Sun.COM 
44258930SBill.Holler@Sun.COM 		if (!on_trap(&otd, OT_DATA_ACCESS)) {
44268930SBill.Holler@Sun.COM 			reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
44278930SBill.Holler@Sun.COM 			/* Disable C1E state if it is enabled by BIOS */
44288930SBill.Holler@Sun.COM 			if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
44298930SBill.Holler@Sun.COM 			    AMD_ACTONCMPHALT_MASK) {
44308930SBill.Holler@Sun.COM 				reg &= ~(AMD_ACTONCMPHALT_MASK <<
44318930SBill.Holler@Sun.COM 				    AMD_ACTONCMPHALT_SHIFT);
44328930SBill.Holler@Sun.COM 				wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
44338930SBill.Holler@Sun.COM 			}
44348930SBill.Holler@Sun.COM 		}
44358930SBill.Holler@Sun.COM 		no_trap();
44368930SBill.Holler@Sun.COM 	}
44378930SBill.Holler@Sun.COM #endif	/* !__xpv */
44388930SBill.Holler@Sun.COM }
44398930SBill.Holler@Sun.COM 
44409283SBill.Holler@Sun.COM /*
4441*13134Skuriakose.kuruvilla@oracle.com  * Setup necessary registers to enable XSAVE feature on this processor.
4442*13134Skuriakose.kuruvilla@oracle.com  * This function needs to be called early enough, so that no xsave/xrstor
4443*13134Skuriakose.kuruvilla@oracle.com  * ops will execute on the processor before the MSRs are properly set up.
4444*13134Skuriakose.kuruvilla@oracle.com  *
4445*13134Skuriakose.kuruvilla@oracle.com  * Current implementation has the following assumption:
4446*13134Skuriakose.kuruvilla@oracle.com  * - cpuid_pass1() is done, so that X86 features are known.
4447*13134Skuriakose.kuruvilla@oracle.com  * - fpu_probe() is done, so that fp_save_mech is chosen.
4448*13134Skuriakose.kuruvilla@oracle.com  */
4449*13134Skuriakose.kuruvilla@oracle.com void
4450*13134Skuriakose.kuruvilla@oracle.com xsave_setup_msr(cpu_t *cpu)
4451*13134Skuriakose.kuruvilla@oracle.com {
4452*13134Skuriakose.kuruvilla@oracle.com 	ASSERT(fp_save_mech == FP_XSAVE);
4453*13134Skuriakose.kuruvilla@oracle.com 	ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
4454*13134Skuriakose.kuruvilla@oracle.com 
4455*13134Skuriakose.kuruvilla@oracle.com 	/* Enable OSXSAVE in CR4. */
4456*13134Skuriakose.kuruvilla@oracle.com 	setcr4(getcr4() | CR4_OSXSAVE);
4457*13134Skuriakose.kuruvilla@oracle.com 	/*
4458*13134Skuriakose.kuruvilla@oracle.com 	 * Update SW copy of ECX, so that /dev/cpu/self/cpuid will report
4459*13134Skuriakose.kuruvilla@oracle.com 	 * correct value.
4460*13134Skuriakose.kuruvilla@oracle.com 	 */
4461*13134Skuriakose.kuruvilla@oracle.com 	cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;
4462*13134Skuriakose.kuruvilla@oracle.com 	setup_xfem();
4463*13134Skuriakose.kuruvilla@oracle.com }
4464*13134Skuriakose.kuruvilla@oracle.com 
4465*13134Skuriakose.kuruvilla@oracle.com /*
44669283SBill.Holler@Sun.COM  * Starting with the Westmere processor the local
44679283SBill.Holler@Sun.COM  * APIC timer will continue running in all C-states,
44689283SBill.Holler@Sun.COM  * including the deepest C-states.
44699283SBill.Holler@Sun.COM  */
44709283SBill.Holler@Sun.COM int
44719283SBill.Holler@Sun.COM cpuid_arat_supported(void)
44729283SBill.Holler@Sun.COM {
44739283SBill.Holler@Sun.COM 	struct cpuid_info *cpi;
44749283SBill.Holler@Sun.COM 	struct cpuid_regs regs;
44759283SBill.Holler@Sun.COM 
44769283SBill.Holler@Sun.COM 	ASSERT(cpuid_checkpass(CPU, 1));
447712826Skuriakose.kuruvilla@oracle.com 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
44789283SBill.Holler@Sun.COM 
44799283SBill.Holler@Sun.COM 	cpi = CPU->cpu_m.mcpu_cpi;
44809283SBill.Holler@Sun.COM 
44819283SBill.Holler@Sun.COM 	switch (cpi->cpi_vendor) {
44829283SBill.Holler@Sun.COM 	case X86_VENDOR_Intel:
44839283SBill.Holler@Sun.COM 		/*
44849283SBill.Holler@Sun.COM 		 * Always-running Local APIC Timer is
44859283SBill.Holler@Sun.COM 		 * indicated by CPUID.6.EAX[2].
44869283SBill.Holler@Sun.COM 		 */
44879283SBill.Holler@Sun.COM 		if (cpi->cpi_maxeax >= 6) {
44889283SBill.Holler@Sun.COM 			regs.cp_eax = 6;
44899283SBill.Holler@Sun.COM 			(void) cpuid_insn(NULL, &regs);
44909283SBill.Holler@Sun.COM 			return (regs.cp_eax & CPUID_CSTATE_ARAT);
44919283SBill.Holler@Sun.COM 		} else {
44929283SBill.Holler@Sun.COM 			return (0);
44939283SBill.Holler@Sun.COM 		}
44949283SBill.Holler@Sun.COM 	default:
44959283SBill.Holler@Sun.COM 		return (0);
44969283SBill.Holler@Sun.COM 	}
44979283SBill.Holler@Sun.COM }
44989283SBill.Holler@Sun.COM 
449910992Saubrey.li@intel.com /*
450010992Saubrey.li@intel.com  * Check support for Intel ENERGY_PERF_BIAS feature
450110992Saubrey.li@intel.com  */
450210992Saubrey.li@intel.com int
450310992Saubrey.li@intel.com cpuid_iepb_supported(struct cpu *cp)
450410992Saubrey.li@intel.com {
450510992Saubrey.li@intel.com 	struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
450610992Saubrey.li@intel.com 	struct cpuid_regs regs;
450710992Saubrey.li@intel.com 
450810992Saubrey.li@intel.com 	ASSERT(cpuid_checkpass(cp, 1));
450910992Saubrey.li@intel.com 
451012826Skuriakose.kuruvilla@oracle.com 	if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
451112826Skuriakose.kuruvilla@oracle.com 	    !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
451210992Saubrey.li@intel.com 		return (0);
451310992Saubrey.li@intel.com 	}
451410992Saubrey.li@intel.com 
451510992Saubrey.li@intel.com 	/*
451610992Saubrey.li@intel.com 	 * Intel ENERGY_PERF_BIAS MSR is indicated by
451710992Saubrey.li@intel.com 	 * capability bit CPUID.6.ECX.3
451810992Saubrey.li@intel.com 	 */
451910992Saubrey.li@intel.com 	if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
452010992Saubrey.li@intel.com 		return (0);
452110992Saubrey.li@intel.com 
452210992Saubrey.li@intel.com 	regs.cp_eax = 0x6;
452310992Saubrey.li@intel.com 	(void) cpuid_insn(NULL, &regs);
452410992Saubrey.li@intel.com 	return (regs.cp_ecx & CPUID_EPB_SUPPORT);
452510992Saubrey.li@intel.com }
452610992Saubrey.li@intel.com 
452713029SKrishnendu.Sadhukhan@Sun.COM /*
452813029SKrishnendu.Sadhukhan@Sun.COM  * Check support for TSC deadline timer
452913029SKrishnendu.Sadhukhan@Sun.COM  *
453013029SKrishnendu.Sadhukhan@Sun.COM  * TSC deadline timer provides a superior software programming
453113029SKrishnendu.Sadhukhan@Sun.COM  * model over local APIC timer that eliminates "time drifts".
453213029SKrishnendu.Sadhukhan@Sun.COM  * Instead of specifying a relative time, software specifies an
453313029SKrishnendu.Sadhukhan@Sun.COM  * absolute time as the target at which the processor should
453413029SKrishnendu.Sadhukhan@Sun.COM  * generate a timer event.
453513029SKrishnendu.Sadhukhan@Sun.COM  */
453613029SKrishnendu.Sadhukhan@Sun.COM int
453713029SKrishnendu.Sadhukhan@Sun.COM cpuid_deadline_tsc_supported(void)
453813029SKrishnendu.Sadhukhan@Sun.COM {
453913029SKrishnendu.Sadhukhan@Sun.COM 	struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
454013029SKrishnendu.Sadhukhan@Sun.COM 	struct cpuid_regs regs;
454113029SKrishnendu.Sadhukhan@Sun.COM 
454213029SKrishnendu.Sadhukhan@Sun.COM 	ASSERT(cpuid_checkpass(CPU, 1));
454313029SKrishnendu.Sadhukhan@Sun.COM 	ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
454413029SKrishnendu.Sadhukhan@Sun.COM 
454513029SKrishnendu.Sadhukhan@Sun.COM 	switch (cpi->cpi_vendor) {
454613029SKrishnendu.Sadhukhan@Sun.COM 	case X86_VENDOR_Intel:
454713029SKrishnendu.Sadhukhan@Sun.COM 		if (cpi->cpi_maxeax >= 1) {
454813029SKrishnendu.Sadhukhan@Sun.COM 			regs.cp_eax = 1;
454913029SKrishnendu.Sadhukhan@Sun.COM 			(void) cpuid_insn(NULL, &regs);
455013029SKrishnendu.Sadhukhan@Sun.COM 			return (regs.cp_ecx & CPUID_DEADLINE_TSC);
455113029SKrishnendu.Sadhukhan@Sun.COM 		} else {
455213029SKrishnendu.Sadhukhan@Sun.COM 			return (0);
455313029SKrishnendu.Sadhukhan@Sun.COM 		}
455413029SKrishnendu.Sadhukhan@Sun.COM 	default:
455513029SKrishnendu.Sadhukhan@Sun.COM 		return (0);
455613029SKrishnendu.Sadhukhan@Sun.COM 	}
455713029SKrishnendu.Sadhukhan@Sun.COM }
455813029SKrishnendu.Sadhukhan@Sun.COM 
45598377SBill.Holler@Sun.COM #if defined(__amd64) && !defined(__xpv)
45608377SBill.Holler@Sun.COM /*
45618377SBill.Holler@Sun.COM  * Patch in versions of bcopy for high performance Intel Nhm processors
45628377SBill.Holler@Sun.COM  * and later...
45638377SBill.Holler@Sun.COM  */
45648377SBill.Holler@Sun.COM void
45658377SBill.Holler@Sun.COM patch_memops(uint_t vendor)
45668377SBill.Holler@Sun.COM {
45678377SBill.Holler@Sun.COM 	size_t cnt, i;
45688377SBill.Holler@Sun.COM 	caddr_t to, from;
45698377SBill.Holler@Sun.COM 
457012826Skuriakose.kuruvilla@oracle.com 	if ((vendor == X86_VENDOR_Intel) &&
457112826Skuriakose.kuruvilla@oracle.com 	    is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
45728377SBill.Holler@Sun.COM 		cnt = &bcopy_patch_end - &bcopy_patch_start;
45738377SBill.Holler@Sun.COM 		to = &bcopy_ck_size;
45748377SBill.Holler@Sun.COM 		from = &bcopy_patch_start;
45758377SBill.Holler@Sun.COM 		for (i = 0; i < cnt; i++) {
45768377SBill.Holler@Sun.COM 			*to++ = *from++;
45778377SBill.Holler@Sun.COM 		}
45788377SBill.Holler@Sun.COM 	}
45798377SBill.Holler@Sun.COM }
45808377SBill.Holler@Sun.COM #endif  /* __amd64 && !__xpv */
458112261SVuong.Nguyen@Sun.COM 
458212261SVuong.Nguyen@Sun.COM /*
458312261SVuong.Nguyen@Sun.COM  * This function finds the number of bits to represent the number of cores per
458412261SVuong.Nguyen@Sun.COM  * chip and the number of strands per core for the Intel platforms.
458512261SVuong.Nguyen@Sun.COM  * It re-uses the x2APIC cpuid code of the cpuid_pass2().
458612261SVuong.Nguyen@Sun.COM  */
458712261SVuong.Nguyen@Sun.COM void
458812261SVuong.Nguyen@Sun.COM cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
458912261SVuong.Nguyen@Sun.COM {
459012261SVuong.Nguyen@Sun.COM 	struct cpuid_regs regs;
459112261SVuong.Nguyen@Sun.COM 	struct cpuid_regs *cp = &regs;
459212261SVuong.Nguyen@Sun.COM 
459312261SVuong.Nguyen@Sun.COM 	if (vendor != X86_VENDOR_Intel) {
459412261SVuong.Nguyen@Sun.COM 		return;
459512261SVuong.Nguyen@Sun.COM 	}
459612261SVuong.Nguyen@Sun.COM 
459712261SVuong.Nguyen@Sun.COM 	/* if the cpuid level is 0xB, extended topo is available. */
459812261SVuong.Nguyen@Sun.COM 	cp->cp_eax = 0;
459912261SVuong.Nguyen@Sun.COM 	if (__cpuid_insn(cp) >= 0xB) {
460012261SVuong.Nguyen@Sun.COM 
460112261SVuong.Nguyen@Sun.COM 		cp->cp_eax = 0xB;
460212261SVuong.Nguyen@Sun.COM 		cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
460312261SVuong.Nguyen@Sun.COM 		(void) __cpuid_insn(cp);
460412261SVuong.Nguyen@Sun.COM 
460512261SVuong.Nguyen@Sun.COM 		/*
460612261SVuong.Nguyen@Sun.COM 		 * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
460712261SVuong.Nguyen@Sun.COM 		 * indicates that the extended topology enumeration leaf is
460812261SVuong.Nguyen@Sun.COM 		 * available.
460912261SVuong.Nguyen@Sun.COM 		 */
461012261SVuong.Nguyen@Sun.COM 		if (cp->cp_ebx) {
461112261SVuong.Nguyen@Sun.COM 			uint_t coreid_shift = 0;
461212261SVuong.Nguyen@Sun.COM 			uint_t chipid_shift = 0;
461312261SVuong.Nguyen@Sun.COM 			uint_t i;
461412261SVuong.Nguyen@Sun.COM 			uint_t level;
461512261SVuong.Nguyen@Sun.COM 
461612261SVuong.Nguyen@Sun.COM 			for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
461712261SVuong.Nguyen@Sun.COM 				cp->cp_eax = 0xB;
461812261SVuong.Nguyen@Sun.COM 				cp->cp_ecx = i;
461912261SVuong.Nguyen@Sun.COM 
462012261SVuong.Nguyen@Sun.COM 				(void) __cpuid_insn(cp);
462112261SVuong.Nguyen@Sun.COM 				level = CPI_CPU_LEVEL_TYPE(cp);
462212261SVuong.Nguyen@Sun.COM 
462312261SVuong.Nguyen@Sun.COM 				if (level == 1) {
462412261SVuong.Nguyen@Sun.COM 					/*
462512261SVuong.Nguyen@Sun.COM 					 * Thread level processor topology
462612261SVuong.Nguyen@Sun.COM 					 * Number of bits shift right APIC ID
462712261SVuong.Nguyen@Sun.COM 					 * to get the coreid.
462812261SVuong.Nguyen@Sun.COM 					 */
462912261SVuong.Nguyen@Sun.COM 					coreid_shift = BITX(cp->cp_eax, 4, 0);
463012261SVuong.Nguyen@Sun.COM 				} else if (level == 2) {
463112261SVuong.Nguyen@Sun.COM 					/*
463212261SVuong.Nguyen@Sun.COM 					 * Core level processor topology
463312261SVuong.Nguyen@Sun.COM 					 * Number of bits shift right APIC ID
463412261SVuong.Nguyen@Sun.COM 					 * to get the chipid.
463512261SVuong.Nguyen@Sun.COM 					 */
463612261SVuong.Nguyen@Sun.COM 					chipid_shift = BITX(cp->cp_eax, 4, 0);
463712261SVuong.Nguyen@Sun.COM 				}
463812261SVuong.Nguyen@Sun.COM 			}
463912261SVuong.Nguyen@Sun.COM 
464012261SVuong.Nguyen@Sun.COM 			if (coreid_shift > 0 && chipid_shift > coreid_shift) {
464112261SVuong.Nguyen@Sun.COM 				*strand_nbits = coreid_shift;
464212261SVuong.Nguyen@Sun.COM 				*core_nbits = chipid_shift - coreid_shift;
464312261SVuong.Nguyen@Sun.COM 			}
464412261SVuong.Nguyen@Sun.COM 		}
464512261SVuong.Nguyen@Sun.COM 	}
464612261SVuong.Nguyen@Sun.COM }
4647