10Sstevel@tonic-gate /*
20Sstevel@tonic-gate * CDDL HEADER START
30Sstevel@tonic-gate *
40Sstevel@tonic-gate * The contents of this file are subject to the terms of the
51582Skchow * Common Development and Distribution License (the "License").
61582Skchow * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate *
80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate * See the License for the specific language governing permissions
110Sstevel@tonic-gate * and limitations under the License.
120Sstevel@tonic-gate *
130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate *
190Sstevel@tonic-gate * CDDL HEADER END
200Sstevel@tonic-gate */
210Sstevel@tonic-gate /*
2212090SFrank.Vanderlinden@Sun.COM * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
230Sstevel@tonic-gate */
249283SBill.Holler@Sun.COM /*
2513029SKrishnendu.Sadhukhan@Sun.COM * Copyright (c) 2010, Intel Corporation.
269283SBill.Holler@Sun.COM * All rights reserved.
279283SBill.Holler@Sun.COM */
2810947SSrihari.Venkatesan@Sun.COM /*
2910947SSrihari.Venkatesan@Sun.COM * Portions Copyright 2009 Advanced Micro Devices, Inc.
3010947SSrihari.Venkatesan@Sun.COM */
310Sstevel@tonic-gate
320Sstevel@tonic-gate /*
330Sstevel@tonic-gate * Various routines to handle identification
340Sstevel@tonic-gate * and classification of x86 processors.
350Sstevel@tonic-gate */
360Sstevel@tonic-gate
370Sstevel@tonic-gate #include <sys/types.h>
380Sstevel@tonic-gate #include <sys/archsystm.h>
390Sstevel@tonic-gate #include <sys/x86_archext.h>
400Sstevel@tonic-gate #include <sys/kmem.h>
410Sstevel@tonic-gate #include <sys/systm.h>
420Sstevel@tonic-gate #include <sys/cmn_err.h>
430Sstevel@tonic-gate #include <sys/sunddi.h>
440Sstevel@tonic-gate #include <sys/sunndi.h>
450Sstevel@tonic-gate #include <sys/cpuvar.h>
460Sstevel@tonic-gate #include <sys/processor.h>
475045Sbholler #include <sys/sysmacros.h>
483434Sesaxe #include <sys/pg.h>
490Sstevel@tonic-gate #include <sys/fp.h>
500Sstevel@tonic-gate #include <sys/controlregs.h>
51*13136Skuriakose.kuruvilla@oracle.com #include <sys/bitmap.h>
520Sstevel@tonic-gate #include <sys/auxv_386.h>
530Sstevel@tonic-gate #include <sys/memnode.h>
5410947SSrihari.Venkatesan@Sun.COM #include <sys/pci_cfgspace.h>
550Sstevel@tonic-gate
567532SSean.Ye@Sun.COM #ifdef __xpv
577532SSean.Ye@Sun.COM #include <sys/hypervisor.h>
588930SBill.Holler@Sun.COM #else
598930SBill.Holler@Sun.COM #include <sys/ontrap.h>
607532SSean.Ye@Sun.COM #endif
617532SSean.Ye@Sun.COM
620Sstevel@tonic-gate /*
630Sstevel@tonic-gate * Pass 0 of cpuid feature analysis happens in locore. It contains special code
640Sstevel@tonic-gate * to recognize Cyrix processors that are not cpuid-compliant, and to deal with
650Sstevel@tonic-gate * them accordingly. For most modern processors, feature detection occurs here
660Sstevel@tonic-gate * in pass 1.
670Sstevel@tonic-gate *
680Sstevel@tonic-gate * Pass 1 of cpuid feature analysis happens just at the beginning of mlsetup()
690Sstevel@tonic-gate * for the boot CPU and does the basic analysis that the early kernel needs.
7012826Skuriakose.kuruvilla@oracle.com * x86_featureset is set based on the return value of cpuid_pass1() of the boot
710Sstevel@tonic-gate * CPU.
720Sstevel@tonic-gate *
730Sstevel@tonic-gate * Pass 1 includes:
740Sstevel@tonic-gate *
750Sstevel@tonic-gate * o Determining vendor/model/family/stepping and setting x86_type and
760Sstevel@tonic-gate * x86_vendor accordingly.
770Sstevel@tonic-gate * o Processing the feature flags returned by the cpuid instruction while
780Sstevel@tonic-gate * applying any workarounds or tricks for the specific processor.
790Sstevel@tonic-gate * o Mapping the feature flags into Solaris feature bits (X86_*).
800Sstevel@tonic-gate * o Processing extended feature flags if supported by the processor,
810Sstevel@tonic-gate * again while applying specific processor knowledge.
820Sstevel@tonic-gate * o Determining the CMT characteristics of the system.
830Sstevel@tonic-gate *
840Sstevel@tonic-gate * Pass 1 is done on non-boot CPUs during their initialization and the results
850Sstevel@tonic-gate * are used only as a meager attempt at ensuring that all processors within the
860Sstevel@tonic-gate * system support the same features.
870Sstevel@tonic-gate *
880Sstevel@tonic-gate * Pass 2 of cpuid feature analysis happens just at the beginning
890Sstevel@tonic-gate * of startup(). It just copies in and corrects the remainder
900Sstevel@tonic-gate * of the cpuid data we depend on: standard cpuid functions that we didn't
910Sstevel@tonic-gate * need for pass1 feature analysis, and extended cpuid functions beyond the
920Sstevel@tonic-gate * simple feature processing done in pass1.
930Sstevel@tonic-gate *
940Sstevel@tonic-gate * Pass 3 of cpuid analysis is invoked after basic kernel services; in
950Sstevel@tonic-gate * particular kernel memory allocation has been made available. It creates a
960Sstevel@tonic-gate * readable brand string based on the data collected in the first two passes.
970Sstevel@tonic-gate *
980Sstevel@tonic-gate * Pass 4 of cpuid analysis is invoked after post_startup() when all
990Sstevel@tonic-gate * the support infrastructure for various hardware features has been
1000Sstevel@tonic-gate * initialized. It determines which processor features will be reported
1010Sstevel@tonic-gate * to userland via the aux vector.
1020Sstevel@tonic-gate *
1030Sstevel@tonic-gate * All passes are executed on all CPUs, but only the boot CPU determines what
1040Sstevel@tonic-gate * features the kernel will use.
1050Sstevel@tonic-gate *
1060Sstevel@tonic-gate * Much of the worst junk in this file is for the support of processors
1070Sstevel@tonic-gate * that didn't really implement the cpuid instruction properly.
1080Sstevel@tonic-gate *
1090Sstevel@tonic-gate * NOTE: The accessor functions (cpuid_get*) are aware of, and ASSERT upon,
1100Sstevel@tonic-gate * the pass numbers. Accordingly, changes to the pass code may require changes
1110Sstevel@tonic-gate * to the accessor code.
1120Sstevel@tonic-gate */
1130Sstevel@tonic-gate
uint_t x86_vendor = X86_VENDOR_IntelClone;	/* refined during cpuid pass 1 */
uint_t x86_type = X86_TYPE_OTHER;
uint_t x86_clflush_size = 0;			/* clflush line size, in bytes */

uint_t pentiumpro_bug4046376;
uint_t pentiumpro_bug4064495;

/*
 * Bitmap of features supported by the boot CPU; bit indices correspond to
 * the entries of x86_feature_names[] below.  Manipulated only through
 * is/add/remove_x86_feature().
 */
uchar_t x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
122*13136Skuriakose.kuruvilla@oracle.com
/*
 * Printable names for the feature bits, used by print_x86_featureset().
 * NOTE: entry order must match the feature bit numbering used with
 * x86_featureset -- do not reorder or insert in the middle.
 */
static char *x86_feature_names[NUM_X86_FEATURES] = {
	"lgpg",
	"tsc",
	"msr",
	"mtrr",
	"pge",
	"de",
	"cmov",
	"mmx",
	"mca",
	"pae",
	"cv8",
	"pat",
	"sep",
	"sse",
	"sse2",
	"htt",
	"asysc",
	"nx",
	"sse3",
	"cx16",
	"cmp",
	"tscp",
	"mwait",
	"sse4a",
	"cpuid",
	"ssse3",
	"sse4_1",
	"sse4_2",
	"1gpg",
	"clfsh",
	"64",
	"aes",
	"pclmulqdq",
	"xsave",
	"avx" };
15912826Skuriakose.kuruvilla@oracle.com
16012826Skuriakose.kuruvilla@oracle.com boolean_t
is_x86_feature(void * featureset,uint_t feature)16112826Skuriakose.kuruvilla@oracle.com is_x86_feature(void *featureset, uint_t feature)
16212826Skuriakose.kuruvilla@oracle.com {
16312826Skuriakose.kuruvilla@oracle.com ASSERT(feature < NUM_X86_FEATURES);
16412826Skuriakose.kuruvilla@oracle.com return (BT_TEST((ulong_t *)featureset, feature));
16512826Skuriakose.kuruvilla@oracle.com }
16612826Skuriakose.kuruvilla@oracle.com
16712826Skuriakose.kuruvilla@oracle.com void
add_x86_feature(void * featureset,uint_t feature)16812826Skuriakose.kuruvilla@oracle.com add_x86_feature(void *featureset, uint_t feature)
16912826Skuriakose.kuruvilla@oracle.com {
17012826Skuriakose.kuruvilla@oracle.com ASSERT(feature < NUM_X86_FEATURES);
17112826Skuriakose.kuruvilla@oracle.com BT_SET((ulong_t *)featureset, feature);
17212826Skuriakose.kuruvilla@oracle.com }
17312826Skuriakose.kuruvilla@oracle.com
17412826Skuriakose.kuruvilla@oracle.com void
remove_x86_feature(void * featureset,uint_t feature)17512826Skuriakose.kuruvilla@oracle.com remove_x86_feature(void *featureset, uint_t feature)
17612826Skuriakose.kuruvilla@oracle.com {
17712826Skuriakose.kuruvilla@oracle.com ASSERT(feature < NUM_X86_FEATURES);
17812826Skuriakose.kuruvilla@oracle.com BT_CLEAR((ulong_t *)featureset, feature);
17912826Skuriakose.kuruvilla@oracle.com }
18012826Skuriakose.kuruvilla@oracle.com
18112826Skuriakose.kuruvilla@oracle.com boolean_t
compare_x86_featureset(void * setA,void * setB)18212826Skuriakose.kuruvilla@oracle.com compare_x86_featureset(void *setA, void *setB)
18312826Skuriakose.kuruvilla@oracle.com {
18412826Skuriakose.kuruvilla@oracle.com /*
18512826Skuriakose.kuruvilla@oracle.com * We assume that the unused bits of the bitmap are always zero.
18612826Skuriakose.kuruvilla@oracle.com */
18712826Skuriakose.kuruvilla@oracle.com if (memcmp(setA, setB, BT_SIZEOFMAP(NUM_X86_FEATURES)) == 0) {
18812826Skuriakose.kuruvilla@oracle.com return (B_TRUE);
18912826Skuriakose.kuruvilla@oracle.com } else {
19012826Skuriakose.kuruvilla@oracle.com return (B_FALSE);
19112826Skuriakose.kuruvilla@oracle.com }
19212826Skuriakose.kuruvilla@oracle.com }
19312826Skuriakose.kuruvilla@oracle.com
19412826Skuriakose.kuruvilla@oracle.com void
print_x86_featureset(void * featureset)19512826Skuriakose.kuruvilla@oracle.com print_x86_featureset(void *featureset)
19612826Skuriakose.kuruvilla@oracle.com {
19712826Skuriakose.kuruvilla@oracle.com uint_t i;
19812826Skuriakose.kuruvilla@oracle.com
19912826Skuriakose.kuruvilla@oracle.com for (i = 0; i < NUM_X86_FEATURES; i++) {
20012826Skuriakose.kuruvilla@oracle.com if (is_x86_feature(featureset, i)) {
20112826Skuriakose.kuruvilla@oracle.com cmn_err(CE_CONT, "?x86_feature: %s\n",
20212826Skuriakose.kuruvilla@oracle.com x86_feature_names[i]);
20312826Skuriakose.kuruvilla@oracle.com }
20412826Skuriakose.kuruvilla@oracle.com }
20512826Skuriakose.kuruvilla@oracle.com }
20612826Skuriakose.kuruvilla@oracle.com
uint_t enable486;

static size_t xsave_state_size = 0;	/* size of the xsave save area */
/* State components always enabled when xsave is in use: x87 and SSE. */
uint64_t xsave_bv_all = (XFEATURE_LEGACY_FP | XFEATURE_SSE);
/* Patchable: set to B_TRUE to disable xsave support entirely. */
boolean_t xsave_force_disable = B_FALSE;

/*
 * This is set to platform type Solaris is running on.
 */
static int platform_type = -1;		/* -1: not yet determined */

#if !defined(__xpv)
/*
 * Variable to patch if hypervisor platform detection needs to be
 * disabled (e.g. platform_type will always be HW_NATIVE if this is 0).
 */
int enable_platform_detection = 1;
#endif
2250Sstevel@tonic-gate
2260Sstevel@tonic-gate /*
2274481Sbholler * monitor/mwait info.
2285045Sbholler *
 * size_actual and buf_actual are the real address and size allocated to get
 * proper mwait_buf alignment.  buf_actual and size_actual should be passed
 * to kmem_free().  Currently kmem_alloc() and mwait happen to both use
 * processor cache-line alignment, but this is not guaranteed in the future.
2334481Sbholler */
/*
 * Per-CPU monitor/mwait capability data (cpuid fn 5); see the MWAIT_*
 * macros below for how each field is derived.
 */
struct mwait_info {
	size_t		mon_min;	/* min size to avoid missed wakeups */
	size_t		mon_max;	/* size to avoid false wakeups */
	size_t		size_actual;	/* size actually allocated */
	void		*buf_actual;	/* memory actually allocated */
	uint32_t	support;	/* processor support of monitor/mwait */
};
2414481Sbholler
2424481Sbholler /*
24313134Skuriakose.kuruvilla@oracle.com * xsave/xrestor info.
24413134Skuriakose.kuruvilla@oracle.com *
24513134Skuriakose.kuruvilla@oracle.com * This structure contains HW feature bits and size of the xsave save area.
24613134Skuriakose.kuruvilla@oracle.com * Note: the kernel will use the maximum size required for all hardware
24713134Skuriakose.kuruvilla@oracle.com * features. It is not optimize for potential memory savings if features at
24813134Skuriakose.kuruvilla@oracle.com * the end of the save area are not enabled.
24913134Skuriakose.kuruvilla@oracle.com */
/*
 * Per-CPU xsave/xrestor capability data (cpuid leaf 0xD).
 */
struct xsave_info {
	uint32_t	xsav_hw_features_low;   /* Supported HW features */
	uint32_t	xsav_hw_features_high;  /* Supported HW features */
	size_t		xsav_max_size;  /* max size save area for HW features */
	size_t		ymm_size;	/* AVX: size of ymm save area */
	size_t		ymm_offset;	/* AVX: offset for ymm save area */
};
25713134Skuriakose.kuruvilla@oracle.com
25813134Skuriakose.kuruvilla@oracle.com
25913134Skuriakose.kuruvilla@oracle.com /*
2600Sstevel@tonic-gate * These constants determine how many of the elements of the
2610Sstevel@tonic-gate * cpuid we cache in the cpuid_info data structure; the
2620Sstevel@tonic-gate * remaining elements are accessible via the cpuid instruction.
2630Sstevel@tonic-gate */
2640Sstevel@tonic-gate
/* How many standard/extended cpuid leaves are cached in struct cpuid_info. */
#define	NMAX_CPI_STD	6		/* eax = 0 .. 5 */
#define	NMAX_CPI_EXTD	0x1c		/* eax = 0x80000000 .. 0x8000001b */
26710947SSrihari.Venkatesan@Sun.COM
26810947SSrihari.Venkatesan@Sun.COM /*
26910947SSrihari.Venkatesan@Sun.COM * Some terminology needs to be explained:
27010947SSrihari.Venkatesan@Sun.COM * - Socket: Something that can be plugged into a motherboard.
27110947SSrihari.Venkatesan@Sun.COM * - Package: Same as socket
27210947SSrihari.Venkatesan@Sun.COM * - Chip: Same as socket. Note that AMD's documentation uses term "chip"
27310947SSrihari.Venkatesan@Sun.COM * differently: there, chip is the same as processor node (below)
27410947SSrihari.Venkatesan@Sun.COM * - Processor node: Some AMD processors have more than one
27510947SSrihari.Venkatesan@Sun.COM * "subprocessor" embedded in a package. These subprocessors (nodes)
27610947SSrihari.Venkatesan@Sun.COM * are fully-functional processors themselves with cores, caches,
27710947SSrihari.Venkatesan@Sun.COM * memory controllers, PCI configuration spaces. They are connected
27810947SSrihari.Venkatesan@Sun.COM * inside the package with Hypertransport links. On single-node
27910947SSrihari.Venkatesan@Sun.COM * processors, processor node is equivalent to chip/socket/package.
28010947SSrihari.Venkatesan@Sun.COM */
2810Sstevel@tonic-gate
/*
 * Per-CPU cache of cpuid results, filled in incrementally by the
 * cpuid_pass*() routines.  One instance per cpu; the boot CPU uses the
 * static cpuid_info0, all others are allocated in cpuid_alloc_space().
 */
struct cpuid_info {
	uint_t cpi_pass;		/* last pass completed */
	/*
	 * standard function information
	 */
	uint_t cpi_maxeax;		/* fn 0: %eax */
	char cpi_vendorstr[13];		/* fn 0: %ebx:%ecx:%edx */
	uint_t cpi_vendor;		/* enum of cpi_vendorstr */

	uint_t cpi_family;		/* fn 1: extended family */
	uint_t cpi_model;		/* fn 1: extended model */
	uint_t cpi_step;		/* fn 1: stepping */
	chipid_t cpi_chipid;		/* fn 1: %ebx:  Intel: chip # */
					/*		AMD: package/socket # */
	uint_t cpi_brandid;		/* fn 1: %ebx: brand ID */
	int cpi_clogid;			/* fn 1: %ebx: thread # */
	uint_t cpi_ncpu_per_chip;	/* fn 1: %ebx: logical cpu count */
	uint8_t cpi_cacheinfo[16];	/* fn 2: intel-style cache desc */
	uint_t cpi_ncache;		/* fn 2: number of elements */
	uint_t cpi_ncpu_shr_last_cache;	/* fn 4: %eax: ncpus sharing cache */
	id_t cpi_last_lvl_cacheid;	/* fn 4: %eax: derived cache id */
	uint_t cpi_std_4_size;		/* fn 4: number of fn 4 elements */
	struct cpuid_regs **cpi_std_4;	/* fn 4: %ecx == 0 .. fn4_size */
	struct cpuid_regs cpi_std[NMAX_CPI_STD];	/* 0 .. 5 */
	/*
	 * extended function information
	 */
	uint_t cpi_xmaxeax;		/* fn 0x80000000: %eax */
	char cpi_brandstr[49];		/* fn 0x8000000[234] */
	uint8_t cpi_pabits;		/* fn 0x80000006: %eax */
	uint8_t cpi_vabits;		/* fn 0x80000006: %eax */
	struct cpuid_regs cpi_extd[NMAX_CPI_EXTD];	/* 0x800000XX */

	id_t cpi_coreid;		/* same coreid => strands share core */
	int cpi_pkgcoreid;		/* core number within single package */
	uint_t cpi_ncore_per_chip;	/* AMD: fn 0x80000008: %ecx[7-0] */
					/* Intel: fn 4: %eax[31-26] */
	/*
	 * supported feature information
	 */
	uint32_t cpi_support[5];
#define	STD_EDX_FEATURES	0
#define	AMD_EDX_FEATURES	1
#define	TM_EDX_FEATURES		2
#define	STD_ECX_FEATURES	3
#define	AMD_ECX_FEATURES	4
	/*
	 * Synthesized information, where known.
	 */
	uint32_t cpi_chiprev;		/* See X86_CHIPREV_* in x86_archext.h */
	const char *cpi_chiprevstr;	/* May be NULL if chiprev unknown */
	uint32_t cpi_socket;		/* Chip package/socket type */

	struct mwait_info cpi_mwait;	/* fn 5: monitor/mwait info */
	uint32_t cpi_apicid;		/* local APIC id */
	uint_t cpi_procnodeid;		/* AMD: nodeID on HT, Intel: chipid */
	uint_t cpi_procnodes_per_pkg;	/* AMD: # of nodes in the package */
					/* Intel: 1 */

	struct xsave_info cpi_xsave;	/* fn D: xsave/xrestor info */
};
3430Sstevel@tonic-gate
3440Sstevel@tonic-gate
3450Sstevel@tonic-gate static struct cpuid_info cpuid_info0;
3460Sstevel@tonic-gate
3470Sstevel@tonic-gate /*
3480Sstevel@tonic-gate * These bit fields are defined by the Intel Application Note AP-485
3490Sstevel@tonic-gate * "Intel Processor Identification and the CPUID Instruction"
3500Sstevel@tonic-gate */
3510Sstevel@tonic-gate #define CPI_FAMILY_XTD(cpi) BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
3520Sstevel@tonic-gate #define CPI_MODEL_XTD(cpi) BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
3530Sstevel@tonic-gate #define CPI_TYPE(cpi) BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
3540Sstevel@tonic-gate #define CPI_FAMILY(cpi) BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
3550Sstevel@tonic-gate #define CPI_STEP(cpi) BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
3560Sstevel@tonic-gate #define CPI_MODEL(cpi) BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
3570Sstevel@tonic-gate
3580Sstevel@tonic-gate #define CPI_FEATURES_EDX(cpi) ((cpi)->cpi_std[1].cp_edx)
3590Sstevel@tonic-gate #define CPI_FEATURES_ECX(cpi) ((cpi)->cpi_std[1].cp_ecx)
3600Sstevel@tonic-gate #define CPI_FEATURES_XTD_EDX(cpi) ((cpi)->cpi_extd[1].cp_edx)
3610Sstevel@tonic-gate #define CPI_FEATURES_XTD_ECX(cpi) ((cpi)->cpi_extd[1].cp_ecx)
3620Sstevel@tonic-gate
3630Sstevel@tonic-gate #define CPI_BRANDID(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
3640Sstevel@tonic-gate #define CPI_CHUNKS(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 15, 7)
3650Sstevel@tonic-gate #define CPI_CPU_COUNT(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
3660Sstevel@tonic-gate #define CPI_APIC_ID(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
3670Sstevel@tonic-gate
3680Sstevel@tonic-gate #define CPI_MAXEAX_MAX 0x100 /* sanity control */
3690Sstevel@tonic-gate #define CPI_XMAXEAX_MAX 0x80000100
3704606Sesaxe #define CPI_FN4_ECX_MAX 0x20 /* sanity: max fn 4 levels */
3717282Smishra #define CPI_FNB_ECX_MAX 0x20 /* sanity: max fn B levels */
3724606Sesaxe
3734606Sesaxe /*
3744606Sesaxe * Function 4 (Deterministic Cache Parameters) macros
3754606Sesaxe * Defined by Intel Application Note AP-485
3764606Sesaxe */
3774606Sesaxe #define CPI_NUM_CORES(regs) BITX((regs)->cp_eax, 31, 26)
3784606Sesaxe #define CPI_NTHR_SHR_CACHE(regs) BITX((regs)->cp_eax, 25, 14)
3794606Sesaxe #define CPI_FULL_ASSOC_CACHE(regs) BITX((regs)->cp_eax, 9, 9)
3804606Sesaxe #define CPI_SELF_INIT_CACHE(regs) BITX((regs)->cp_eax, 8, 8)
3814606Sesaxe #define CPI_CACHE_LVL(regs) BITX((regs)->cp_eax, 7, 5)
3824606Sesaxe #define CPI_CACHE_TYPE(regs) BITX((regs)->cp_eax, 4, 0)
3837282Smishra #define CPI_CPU_LEVEL_TYPE(regs) BITX((regs)->cp_ecx, 15, 8)
3844606Sesaxe
3854606Sesaxe #define CPI_CACHE_WAYS(regs) BITX((regs)->cp_ebx, 31, 22)
3864606Sesaxe #define CPI_CACHE_PARTS(regs) BITX((regs)->cp_ebx, 21, 12)
3874606Sesaxe #define CPI_CACHE_COH_LN_SZ(regs) BITX((regs)->cp_ebx, 11, 0)
3884606Sesaxe
3894606Sesaxe #define CPI_CACHE_SETS(regs) BITX((regs)->cp_ecx, 31, 0)
3904606Sesaxe
3914606Sesaxe #define CPI_PREFCH_STRIDE(regs) BITX((regs)->cp_edx, 9, 0)
3924606Sesaxe
3930Sstevel@tonic-gate
3940Sstevel@tonic-gate /*
3951975Sdmick * A couple of shorthand macros to identify "later" P6-family chips
3961975Sdmick * like the Pentium M and Core. First, the "older" P6-based stuff
3971975Sdmick * (loosely defined as "pre-Pentium-4"):
3981975Sdmick * P6, PII, Mobile PII, PII Xeon, PIII, Mobile PIII, PIII Xeon
3991975Sdmick */
4001975Sdmick
4011975Sdmick #define IS_LEGACY_P6(cpi) ( \
4021975Sdmick cpi->cpi_family == 6 && \
4031975Sdmick (cpi->cpi_model == 1 || \
4041975Sdmick cpi->cpi_model == 3 || \
4051975Sdmick cpi->cpi_model == 5 || \
4061975Sdmick cpi->cpi_model == 6 || \
4071975Sdmick cpi->cpi_model == 7 || \
4081975Sdmick cpi->cpi_model == 8 || \
4091975Sdmick cpi->cpi_model == 0xA || \
4101975Sdmick cpi->cpi_model == 0xB) \
4111975Sdmick )
4121975Sdmick
4131975Sdmick /* A "new F6" is everything with family 6 that's not the above */
4141975Sdmick #define IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))
4151975Sdmick
4164855Sksadhukh /* Extended family/model support */
4174855Sksadhukh #define IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
4184855Sksadhukh cpi->cpi_family >= 0xf)
4194855Sksadhukh
4201975Sdmick /*
4214481Sbholler * Info for monitor/mwait idle loop.
4224481Sbholler *
4234481Sbholler * See cpuid section of "Intel 64 and IA-32 Architectures Software Developer's
4244481Sbholler * Manual Volume 2A: Instruction Set Reference, A-M" #25366-022US, November
4254481Sbholler * 2006.
4264481Sbholler * See MONITOR/MWAIT section of "AMD64 Architecture Programmer's Manual
4274481Sbholler * Documentation Updates" #33633, Rev 2.05, December 2006.
4284481Sbholler */
#define	MWAIT_SUPPORT		(0x00000001)	/* mwait supported */
#define	MWAIT_EXTENSIONS	(0x00000002)	/* extension supported */
#define	MWAIT_ECX_INT_ENABLE	(0x00000004)	/* ecx 1 extension supported */
#define	MWAIT_SUPPORTED(cpi)	((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
#define	MWAIT_INT_ENABLE(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x2)
#define	MWAIT_EXTENSION(cpi)	((cpi)->cpi_std[5].cp_ecx & 0x1)
#define	MWAIT_SIZE_MIN(cpi)	BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
#define	MWAIT_SIZE_MAX(cpi)	BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)
/*
 * Number of sub-cstates for a given c-state.
 */
#define	MWAIT_NUM_SUBC_STATES(cpi, c_state)			\
	BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
4424481Sbholler
4437532SSean.Ye@Sun.COM /*
44413134Skuriakose.kuruvilla@oracle.com * XSAVE leaf 0xD enumeration
44513134Skuriakose.kuruvilla@oracle.com */
44613134Skuriakose.kuruvilla@oracle.com #define CPUID_LEAFD_2_YMM_OFFSET 576
44713134Skuriakose.kuruvilla@oracle.com #define CPUID_LEAFD_2_YMM_SIZE 256
44813134Skuriakose.kuruvilla@oracle.com
44913134Skuriakose.kuruvilla@oracle.com /*
 * Functions we consume from cpuid_subr.c; don't publish these in a header
4517532SSean.Ye@Sun.COM * file to try and keep people using the expected cpuid_* interfaces.
4527532SSean.Ye@Sun.COM */
4537532SSean.Ye@Sun.COM extern uint32_t _cpuid_skt(uint_t, uint_t, uint_t, uint_t);
4549482SKuriakose.Kuruvilla@Sun.COM extern const char *_cpuid_sktstr(uint_t, uint_t, uint_t, uint_t);
4557532SSean.Ye@Sun.COM extern uint32_t _cpuid_chiprev(uint_t, uint_t, uint_t, uint_t);
4567532SSean.Ye@Sun.COM extern const char *_cpuid_chiprevstr(uint_t, uint_t, uint_t, uint_t);
4577532SSean.Ye@Sun.COM extern uint_t _cpuid_vendorstr_to_vendorcode(char *);
4582869Sgavinm
4592869Sgavinm /*
4603446Smrj * Apply up various platform-dependent restrictions where the
4613446Smrj * underlying platform restrictions mean the CPU can be marked
4623446Smrj * as less capable than its cpuid instruction would imply.
4633446Smrj */
#if defined(__xpv)
/*
 * Filter cpuid results for a Xen domain: mask out feature bits that the
 * hypervisor manages on the guest's behalf (paging-related bits such as
 * PSE/PGE/PAT/MTRR, SEP/SYSC, HTT, ...) so the guest does not try to use
 * them directly.  MCA is hidden from everything except the control domain.
 * Outside __xpv this compiles to nothing (see the #else below).
 */
static void
platform_cpuid_mangle(uint_t vendor, uint32_t eax, struct cpuid_regs *cp)
{
	switch (eax) {
	case 1: {
		/* Only the initial (control) domain gets to see MCA. */
		uint32_t mcamask = DOMAIN_IS_INITDOMAIN(xen_info) ?
		    0 : CPUID_INTC_EDX_MCA;
		cp->cp_edx &=
		    ~(mcamask |
		    CPUID_INTC_EDX_PSE |
		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
		    CPUID_INTC_EDX_SEP | CPUID_INTC_EDX_MTRR |
		    CPUID_INTC_EDX_PGE | CPUID_INTC_EDX_PAT |
		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
		    CPUID_INTC_EDX_PSE36 | CPUID_INTC_EDX_HTT);
		break;
	}

	case 0x80000001:
		cp->cp_edx &=
		    ~(CPUID_AMD_EDX_PSE |
		    CPUID_INTC_EDX_VME | CPUID_INTC_EDX_DE |
		    CPUID_AMD_EDX_MTRR | CPUID_AMD_EDX_PGE |
		    CPUID_AMD_EDX_PAT | CPUID_AMD_EDX_PSE36 |
		    CPUID_AMD_EDX_SYSC | CPUID_INTC_EDX_SEP |
		    CPUID_AMD_EDX_TSCP);
		cp->cp_ecx &= ~CPUID_AMD_ECX_CMP_LGCY;
		break;
	default:
		break;
	}

	switch (vendor) {
	case X86_VENDOR_Intel:
		switch (eax) {
		case 4:
			/*
			 * Zero out the (ncores-per-chip - 1) field
			 * NOTE(review): 0x03fffffff evaluates to 0x3fffffff,
			 * which clears only %eax[31:30]; if the whole fn-4
			 * core-count field %eax[31:26] is meant, the mask
			 * would be 0x03ffffff -- confirm intent.
			 */
			cp->cp_eax &= 0x03fffffff;
			break;
		default:
			break;
		}
		break;
	case X86_VENDOR_AMD:
		switch (eax) {

		case 0x80000001:
			/* Hide CR8D (cr8 in legacy mode) from the guest. */
			cp->cp_ecx &= ~CPUID_AMD_ECX_CR8D;
			break;

		case 0x80000008:
			/*
			 * Zero out the (ncores-per-chip - 1) field
			 */
			cp->cp_ecx &= 0xffffff00;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
}
#else
#define	platform_cpuid_mangle(vendor, eax, cp)	/* nothing */
#endif
5343446Smrj
5353446Smrj /*
5360Sstevel@tonic-gate * Some undocumented ways of patching the results of the cpuid
5370Sstevel@tonic-gate * instruction to permit running Solaris 10 on future cpus that
5380Sstevel@tonic-gate * we don't currently support. Could be set to non-zero values
5390Sstevel@tonic-gate * via settings in eeprom.
5400Sstevel@tonic-gate */
5410Sstevel@tonic-gate
uint32_t cpuid_feature_ecx_include;	/* fn 1 %ecx bits to force on */
uint32_t cpuid_feature_ecx_exclude;	/* fn 1 %ecx bits to force off */
uint32_t cpuid_feature_edx_include;	/* fn 1 %edx bits to force on */
uint32_t cpuid_feature_edx_exclude;	/* fn 1 %edx bits to force off */
5460Sstevel@tonic-gate
54712004Sjiang.liu@intel.com /*
54812004Sjiang.liu@intel.com * Allocate space for mcpu_cpi in the machcpu structure for all non-boot CPUs.
54912004Sjiang.liu@intel.com */
5503446Smrj void
cpuid_alloc_space(cpu_t * cpu)5513446Smrj cpuid_alloc_space(cpu_t *cpu)
5523446Smrj {
5533446Smrj /*
5543446Smrj * By convention, cpu0 is the boot cpu, which is set up
5553446Smrj * before memory allocation is available. All other cpus get
5563446Smrj * their cpuid_info struct allocated here.
5573446Smrj */
5583446Smrj ASSERT(cpu->cpu_id != 0);
55912004Sjiang.liu@intel.com ASSERT(cpu->cpu_m.mcpu_cpi == NULL);
5603446Smrj cpu->cpu_m.mcpu_cpi =
5613446Smrj kmem_zalloc(sizeof (*cpu->cpu_m.mcpu_cpi), KM_SLEEP);
5623446Smrj }
5633446Smrj
/*
 * Release the cpuid_info storage for a non-boot CPU; undoes
 * cpuid_alloc_space().  Must not be called for the boot CPU, whose
 * cpuid_info (cpuid_info0) is statically allocated.
 */
void
cpuid_free_space(cpu_t *cpu)
{
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
	int i;

	ASSERT(cpi != NULL);
	ASSERT(cpi != &cpuid_info0);

	/*
	 * Free up any function 4 related dynamic storage.
	 * NOTE(review): the loop deliberately starts at index 1 --
	 * presumably cpi_std_4[0] points at statically allocated storage
	 * (not a kmem allocation); confirm against the code that builds
	 * cpi_std_4 before changing this.
	 */
	for (i = 1; i < cpi->cpi_std_4_size; i++)
		kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs));
	if (cpi->cpi_std_4_size > 0)
		kmem_free(cpi->cpi_std_4,
		    cpi->cpi_std_4_size * sizeof (struct cpuid_regs *));

	kmem_free(cpi, sizeof (*cpi));
	cpu->cpu_m.mcpu_cpi = NULL;
}
5853446Smrj
5865741Smrj #if !defined(__xpv)
5875741Smrj
5885741Smrj static void
determine_platform()5899000SStuart.Maybee@Sun.COM determine_platform()
5905741Smrj {
5915741Smrj struct cpuid_regs cp;
5925741Smrj char *xen_str;
59312090SFrank.Vanderlinden@Sun.COM uint32_t xen_signature[4], base;
5945741Smrj
59510175SStuart.Maybee@Sun.COM platform_type = HW_NATIVE;
59610175SStuart.Maybee@Sun.COM
59710175SStuart.Maybee@Sun.COM if (!enable_platform_detection)
59810175SStuart.Maybee@Sun.COM return;
59910175SStuart.Maybee@Sun.COM
6005741Smrj /*
6015741Smrj * In a fully virtualized domain, Xen's pseudo-cpuid function
60212090SFrank.Vanderlinden@Sun.COM * returns a string representing the Xen signature in %ebx, %ecx,
60312090SFrank.Vanderlinden@Sun.COM * and %edx. %eax contains the maximum supported cpuid function.
60412090SFrank.Vanderlinden@Sun.COM * We need at least a (base + 2) leaf value to do what we want
60512090SFrank.Vanderlinden@Sun.COM * to do. Try different base values, since the hypervisor might
60612090SFrank.Vanderlinden@Sun.COM * use a different one depending on whether hyper-v emulation
60712090SFrank.Vanderlinden@Sun.COM * is switched on by default or not.
6085741Smrj */
60912090SFrank.Vanderlinden@Sun.COM for (base = 0x40000000; base < 0x40010000; base += 0x100) {
61012090SFrank.Vanderlinden@Sun.COM cp.cp_eax = base;
61112090SFrank.Vanderlinden@Sun.COM (void) __cpuid_insn(&cp);
61212090SFrank.Vanderlinden@Sun.COM xen_signature[0] = cp.cp_ebx;
61312090SFrank.Vanderlinden@Sun.COM xen_signature[1] = cp.cp_ecx;
61412090SFrank.Vanderlinden@Sun.COM xen_signature[2] = cp.cp_edx;
61512090SFrank.Vanderlinden@Sun.COM xen_signature[3] = 0;
61612090SFrank.Vanderlinden@Sun.COM xen_str = (char *)xen_signature;
61712090SFrank.Vanderlinden@Sun.COM if (strcmp("XenVMMXenVMM", xen_str) == 0 &&
61812090SFrank.Vanderlinden@Sun.COM cp.cp_eax >= (base + 2)) {
61912090SFrank.Vanderlinden@Sun.COM platform_type = HW_XEN_HVM;
62012090SFrank.Vanderlinden@Sun.COM return;
62112090SFrank.Vanderlinden@Sun.COM }
62212090SFrank.Vanderlinden@Sun.COM }
62312090SFrank.Vanderlinden@Sun.COM
62412090SFrank.Vanderlinden@Sun.COM if (vmware_platform()) /* running under vmware hypervisor? */
6259000SStuart.Maybee@Sun.COM platform_type = HW_VMWARE;
6269000SStuart.Maybee@Sun.COM }
6279000SStuart.Maybee@Sun.COM
6289000SStuart.Maybee@Sun.COM int
get_hwenv(void)6299000SStuart.Maybee@Sun.COM get_hwenv(void)
6309000SStuart.Maybee@Sun.COM {
63110175SStuart.Maybee@Sun.COM if (platform_type == -1)
63210175SStuart.Maybee@Sun.COM determine_platform();
63310175SStuart.Maybee@Sun.COM
6349000SStuart.Maybee@Sun.COM return (platform_type);
6355741Smrj }
6369000SStuart.Maybee@Sun.COM
/*
 * A non-paravirtualized kernel is never the Xen control domain (dom0).
 */
int
is_controldom(void)
{
	return (0);
}
6429000SStuart.Maybee@Sun.COM
6439000SStuart.Maybee@Sun.COM #else
6449000SStuart.Maybee@Sun.COM
/*
 * A paravirtualized (__xpv) kernel always runs under Xen.
 */
int
get_hwenv(void)
{
	return (HW_XEN_PV);
}
6509000SStuart.Maybee@Sun.COM
/*
 * True iff this domain is the initial (control) domain, i.e. dom0.
 */
int
is_controldom(void)
{
	return (DOMAIN_IS_INITDOMAIN(xen_info));
}
6569000SStuart.Maybee@Sun.COM
6575741Smrj #endif /* __xpv */
6585741Smrj
65910947SSrihari.Venkatesan@Sun.COM static void
cpuid_intel_getids(cpu_t * cpu,void * feature)66012826Skuriakose.kuruvilla@oracle.com cpuid_intel_getids(cpu_t *cpu, void *feature)
66110947SSrihari.Venkatesan@Sun.COM {
66210947SSrihari.Venkatesan@Sun.COM uint_t i;
66310947SSrihari.Venkatesan@Sun.COM uint_t chipid_shift = 0;
66410947SSrihari.Venkatesan@Sun.COM uint_t coreid_shift = 0;
66510947SSrihari.Venkatesan@Sun.COM struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
66610947SSrihari.Venkatesan@Sun.COM
66710947SSrihari.Venkatesan@Sun.COM for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1)
66810947SSrihari.Venkatesan@Sun.COM chipid_shift++;
66910947SSrihari.Venkatesan@Sun.COM
67010947SSrihari.Venkatesan@Sun.COM cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift;
67110947SSrihari.Venkatesan@Sun.COM cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1);
67210947SSrihari.Venkatesan@Sun.COM
67312826Skuriakose.kuruvilla@oracle.com if (is_x86_feature(feature, X86FSET_CMP)) {
67410947SSrihari.Venkatesan@Sun.COM /*
67510947SSrihari.Venkatesan@Sun.COM * Multi-core (and possibly multi-threaded)
67610947SSrihari.Venkatesan@Sun.COM * processors.
67710947SSrihari.Venkatesan@Sun.COM */
67810947SSrihari.Venkatesan@Sun.COM uint_t ncpu_per_core;
67910947SSrihari.Venkatesan@Sun.COM if (cpi->cpi_ncore_per_chip == 1)
68010947SSrihari.Venkatesan@Sun.COM ncpu_per_core = cpi->cpi_ncpu_per_chip;
68110947SSrihari.Venkatesan@Sun.COM else if (cpi->cpi_ncore_per_chip > 1)
68210947SSrihari.Venkatesan@Sun.COM ncpu_per_core = cpi->cpi_ncpu_per_chip /
68310947SSrihari.Venkatesan@Sun.COM cpi->cpi_ncore_per_chip;
68410947SSrihari.Venkatesan@Sun.COM /*
68510947SSrihari.Venkatesan@Sun.COM * 8bit APIC IDs on dual core Pentiums
68610947SSrihari.Venkatesan@Sun.COM * look like this:
68710947SSrihari.Venkatesan@Sun.COM *
68810947SSrihari.Venkatesan@Sun.COM * +-----------------------+------+------+
68910947SSrihari.Venkatesan@Sun.COM * | Physical Package ID | MC | HT |
69010947SSrihari.Venkatesan@Sun.COM * +-----------------------+------+------+
69110947SSrihari.Venkatesan@Sun.COM * <------- chipid -------->
69210947SSrihari.Venkatesan@Sun.COM * <------- coreid --------------->
69310947SSrihari.Venkatesan@Sun.COM * <--- clogid -->
69410947SSrihari.Venkatesan@Sun.COM * <------>
69510947SSrihari.Venkatesan@Sun.COM * pkgcoreid
69610947SSrihari.Venkatesan@Sun.COM *
69710947SSrihari.Venkatesan@Sun.COM * Where the number of bits necessary to
69810947SSrihari.Venkatesan@Sun.COM * represent MC and HT fields together equals
69910947SSrihari.Venkatesan@Sun.COM * to the minimum number of bits necessary to
70010947SSrihari.Venkatesan@Sun.COM * store the value of cpi->cpi_ncpu_per_chip.
70110947SSrihari.Venkatesan@Sun.COM * Of those bits, the MC part uses the number
70210947SSrihari.Venkatesan@Sun.COM * of bits necessary to store the value of
70310947SSrihari.Venkatesan@Sun.COM * cpi->cpi_ncore_per_chip.
70410947SSrihari.Venkatesan@Sun.COM */
70510947SSrihari.Venkatesan@Sun.COM for (i = 1; i < ncpu_per_core; i <<= 1)
70610947SSrihari.Venkatesan@Sun.COM coreid_shift++;
70710947SSrihari.Venkatesan@Sun.COM cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift;
70810947SSrihari.Venkatesan@Sun.COM cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
70912826Skuriakose.kuruvilla@oracle.com } else if (is_x86_feature(feature, X86FSET_HTT)) {
71010947SSrihari.Venkatesan@Sun.COM /*
71110947SSrihari.Venkatesan@Sun.COM * Single-core multi-threaded processors.
71210947SSrihari.Venkatesan@Sun.COM */
71310947SSrihari.Venkatesan@Sun.COM cpi->cpi_coreid = cpi->cpi_chipid;
71410947SSrihari.Venkatesan@Sun.COM cpi->cpi_pkgcoreid = 0;
71510947SSrihari.Venkatesan@Sun.COM }
71610947SSrihari.Venkatesan@Sun.COM cpi->cpi_procnodeid = cpi->cpi_chipid;
71710947SSrihari.Venkatesan@Sun.COM }
71810947SSrihari.Venkatesan@Sun.COM
/*
 * Derive the chip/core/node identifiers for an AMD processor from its
 * APIC id and the extended cpuid leaf 0x80000008 topology information,
 * consulting the northbridge capabilities register (F3xE8) via PCI
 * config space for family 0x10 multi-node parts.
 */
static void
cpuid_amd_getids(cpu_t *cpu)
{
	int i, first_half, coreidsz;
	uint32_t nb_caps_reg;
	uint_t node2_1;
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	/*
	 * AMD CMP chips currently have a single thread per core.
	 *
	 * Since no two cpus share a core we must assign a distinct coreid
	 * per cpu, and we do this by using the cpu_id.  This scheme does not,
	 * however, guarantee that sibling cores of a chip will have sequential
	 * coreids starting at a multiple of the number of cores per chip -
	 * that is usually the case, but if the ACPI MADT table is presented
	 * in a different order then we need to perform a few more gymnastics
	 * for the pkgcoreid.
	 *
	 * All processors in the system have the same number of enabled
	 * cores. Cores within a processor are always numbered sequentially
	 * from 0 regardless of how many or which are disabled, and there
	 * is no way for operating system to discover the real core id when some
	 * are disabled.
	 */

	cpi->cpi_coreid = cpu->cpu_id;

	if (cpi->cpi_xmaxeax >= 0x80000008) {

		/* ApicIdCoreIdSize: bits of APIC id that encode the core. */
		coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12);

		/*
		 * In AMD parlance chip is really a node while Solaris
		 * sees chip as equivalent to socket/package.
		 */
		cpi->cpi_ncore_per_chip =
		    BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
		if (coreidsz == 0) {
			/* Use legacy method: log2(cores per chip), min 1 */
			for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1)
				coreidsz++;
			if (coreidsz == 0)
				coreidsz = 1;
		}
	} else {
		/* Assume single-core part */
		cpi->cpi_ncore_per_chip = 1;
		coreidsz = 1;
	}

	/* Low coreidsz bits of the APIC id identify the core in package. */
	cpi->cpi_clogid = cpi->cpi_pkgcoreid =
	    cpi->cpi_apicid & ((1<<coreidsz) - 1);
	cpi->cpi_ncpu_per_chip = cpi->cpi_ncore_per_chip;

	/* Get nodeID */
	if (cpi->cpi_family == 0xf) {
		/* Family 0xf: node id is the 3 bits above the core bits. */
		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
		cpi->cpi_chipid = cpi->cpi_procnodeid;
	} else if (cpi->cpi_family == 0x10) {
		/*
		 * See if we are a multi-node processor.
		 * All processors in the system have the same number of nodes
		 */
		nb_caps_reg = pci_getl_func(0, 24, 3, 0xe8);
		if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) {
			/* Single-node */
			cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5,
			    coreidsz);
			cpi->cpi_chipid = cpi->cpi_procnodeid;
		} else {

			/*
			 * Multi-node revision D (2 nodes per package
			 * are supported)
			 */
			cpi->cpi_procnodes_per_pkg = 2;

			/* Is this core in the first half of the package? */
			first_half = (cpi->cpi_pkgcoreid <=
			    (cpi->cpi_ncore_per_chip/2 - 1));

			if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) {
				/* We are BSP */
				cpi->cpi_procnodeid = (first_half ? 0 : 1);
				cpi->cpi_chipid = cpi->cpi_procnodeid >> 1;
			} else {

				/* We are AP */
				/* NodeId[2:1] bits to use for reading F3xe8 */
				node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1;

				nb_caps_reg =
				    pci_getl_func(0, 24 + node2_1, 3, 0xe8);

				/*
				 * Check IntNodeNum bit (31:30, but bit 31 is
				 * always 0 on dual-node processors)
				 */
				if (BITX(nb_caps_reg, 30, 30) == 0)
					cpi->cpi_procnodeid = node2_1 +
					    !first_half;
				else
					cpi->cpi_procnodeid = node2_1 +
					    first_half;

				cpi->cpi_chipid = cpi->cpi_procnodeid >> 1;
			}
		}
	} else if (cpi->cpi_family >= 0x11) {
		/* Family 0x11 and later: same layout as family 0xf. */
		cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
		cpi->cpi_chipid = cpi->cpi_procnodeid;
	} else {
		/* Pre-0xf families: single node, single chip. */
		cpi->cpi_procnodeid = 0;
		cpi->cpi_chipid = cpi->cpi_procnodeid;
	}
}
83510947SSrihari.Venkatesan@Sun.COM
83613134Skuriakose.kuruvilla@oracle.com /*
83713134Skuriakose.kuruvilla@oracle.com * Setup XFeature_Enabled_Mask register. Required by xsave feature.
83813134Skuriakose.kuruvilla@oracle.com */
83913134Skuriakose.kuruvilla@oracle.com void
setup_xfem(void)84013134Skuriakose.kuruvilla@oracle.com setup_xfem(void)
84113134Skuriakose.kuruvilla@oracle.com {
84213134Skuriakose.kuruvilla@oracle.com uint64_t flags = XFEATURE_LEGACY_FP;
84313134Skuriakose.kuruvilla@oracle.com
84413134Skuriakose.kuruvilla@oracle.com ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
84513134Skuriakose.kuruvilla@oracle.com
84613134Skuriakose.kuruvilla@oracle.com if (is_x86_feature(x86_featureset, X86FSET_SSE))
84713134Skuriakose.kuruvilla@oracle.com flags |= XFEATURE_SSE;
84813134Skuriakose.kuruvilla@oracle.com
84913134Skuriakose.kuruvilla@oracle.com if (is_x86_feature(x86_featureset, X86FSET_AVX))
85013134Skuriakose.kuruvilla@oracle.com flags |= XFEATURE_AVX;
85113134Skuriakose.kuruvilla@oracle.com
85213134Skuriakose.kuruvilla@oracle.com set_xcr(XFEATURE_ENABLED_MASK, flags);
85313134Skuriakose.kuruvilla@oracle.com
85413134Skuriakose.kuruvilla@oracle.com xsave_bv_all = flags;
85513134Skuriakose.kuruvilla@oracle.com }
85613134Skuriakose.kuruvilla@oracle.com
857*13136Skuriakose.kuruvilla@oracle.com void
cpuid_pass1(cpu_t * cpu,uchar_t * featureset)858*13136Skuriakose.kuruvilla@oracle.com cpuid_pass1(cpu_t *cpu, uchar_t *featureset)
8590Sstevel@tonic-gate {
8600Sstevel@tonic-gate uint32_t mask_ecx, mask_edx;
8610Sstevel@tonic-gate struct cpuid_info *cpi;
8621228Sandrei struct cpuid_regs *cp;
8630Sstevel@tonic-gate int xcpuid;
8645084Sjohnlev #if !defined(__xpv)
8655045Sbholler extern int idle_cpu_prefer_mwait;
8665084Sjohnlev #endif
8673446Smrj
8689482SKuriakose.Kuruvilla@Sun.COM #if !defined(__xpv)
8699482SKuriakose.Kuruvilla@Sun.COM determine_platform();
8709482SKuriakose.Kuruvilla@Sun.COM #endif
8710Sstevel@tonic-gate /*
87212004Sjiang.liu@intel.com * Space statically allocated for BSP, ensure pointer is set
8730Sstevel@tonic-gate */
87412826Skuriakose.kuruvilla@oracle.com if (cpu->cpu_id == 0) {
87512826Skuriakose.kuruvilla@oracle.com if (cpu->cpu_m.mcpu_cpi == NULL)
87612826Skuriakose.kuruvilla@oracle.com cpu->cpu_m.mcpu_cpi = &cpuid_info0;
87712826Skuriakose.kuruvilla@oracle.com }
87812826Skuriakose.kuruvilla@oracle.com
87912826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_CPUID);
88012826Skuriakose.kuruvilla@oracle.com
8813446Smrj cpi = cpu->cpu_m.mcpu_cpi;
8823446Smrj ASSERT(cpi != NULL);
8830Sstevel@tonic-gate cp = &cpi->cpi_std[0];
8841228Sandrei cp->cp_eax = 0;
8851228Sandrei cpi->cpi_maxeax = __cpuid_insn(cp);
8860Sstevel@tonic-gate {
8870Sstevel@tonic-gate uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr;
8880Sstevel@tonic-gate *iptr++ = cp->cp_ebx;
8890Sstevel@tonic-gate *iptr++ = cp->cp_edx;
8900Sstevel@tonic-gate *iptr++ = cp->cp_ecx;
8910Sstevel@tonic-gate *(char *)&cpi->cpi_vendorstr[12] = '\0';
8920Sstevel@tonic-gate }
8930Sstevel@tonic-gate
8947532SSean.Ye@Sun.COM cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr);
8950Sstevel@tonic-gate x86_vendor = cpi->cpi_vendor; /* for compatibility */
8960Sstevel@tonic-gate
8970Sstevel@tonic-gate /*
8980Sstevel@tonic-gate * Limit the range in case of weird hardware
8990Sstevel@tonic-gate */
9000Sstevel@tonic-gate if (cpi->cpi_maxeax > CPI_MAXEAX_MAX)
9010Sstevel@tonic-gate cpi->cpi_maxeax = CPI_MAXEAX_MAX;
9020Sstevel@tonic-gate if (cpi->cpi_maxeax < 1)
9030Sstevel@tonic-gate goto pass1_done;
9040Sstevel@tonic-gate
9050Sstevel@tonic-gate cp = &cpi->cpi_std[1];
9061228Sandrei cp->cp_eax = 1;
9071228Sandrei (void) __cpuid_insn(cp);
9080Sstevel@tonic-gate
9090Sstevel@tonic-gate /*
9100Sstevel@tonic-gate * Extract identifying constants for easy access.
9110Sstevel@tonic-gate */
9120Sstevel@tonic-gate cpi->cpi_model = CPI_MODEL(cpi);
9130Sstevel@tonic-gate cpi->cpi_family = CPI_FAMILY(cpi);
9140Sstevel@tonic-gate
9151975Sdmick if (cpi->cpi_family == 0xf)
9160Sstevel@tonic-gate cpi->cpi_family += CPI_FAMILY_XTD(cpi);
9171975Sdmick
9182001Sdmick /*
9194265Skchow * Beware: AMD uses "extended model" iff base *FAMILY* == 0xf.
9202001Sdmick * Intel, and presumably everyone else, uses model == 0xf, as
9212001Sdmick * one would expect (max value means possible overflow). Sigh.
9222001Sdmick */
9232001Sdmick
9242001Sdmick switch (cpi->cpi_vendor) {
9254855Sksadhukh case X86_VENDOR_Intel:
9264855Sksadhukh if (IS_EXTENDED_MODEL_INTEL(cpi))
9274855Sksadhukh cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
9284858Sksadhukh break;
9292001Sdmick case X86_VENDOR_AMD:
9304265Skchow if (CPI_FAMILY(cpi) == 0xf)
9312001Sdmick cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
9322001Sdmick break;
9332001Sdmick default:
9342001Sdmick if (cpi->cpi_model == 0xf)
9352001Sdmick cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
9362001Sdmick break;
9372001Sdmick }
9380Sstevel@tonic-gate
9390Sstevel@tonic-gate cpi->cpi_step = CPI_STEP(cpi);
9400Sstevel@tonic-gate cpi->cpi_brandid = CPI_BRANDID(cpi);
9410Sstevel@tonic-gate
9420Sstevel@tonic-gate /*
9430Sstevel@tonic-gate * *default* assumptions:
9440Sstevel@tonic-gate * - believe %edx feature word
9450Sstevel@tonic-gate * - ignore %ecx feature word
9460Sstevel@tonic-gate * - 32-bit virtual and physical addressing
9470Sstevel@tonic-gate */
9480Sstevel@tonic-gate mask_edx = 0xffffffff;
9490Sstevel@tonic-gate mask_ecx = 0;
9500Sstevel@tonic-gate
9510Sstevel@tonic-gate cpi->cpi_pabits = cpi->cpi_vabits = 32;
9520Sstevel@tonic-gate
9530Sstevel@tonic-gate switch (cpi->cpi_vendor) {
9540Sstevel@tonic-gate case X86_VENDOR_Intel:
9550Sstevel@tonic-gate if (cpi->cpi_family == 5)
9560Sstevel@tonic-gate x86_type = X86_TYPE_P5;
9571975Sdmick else if (IS_LEGACY_P6(cpi)) {
9580Sstevel@tonic-gate x86_type = X86_TYPE_P6;
9590Sstevel@tonic-gate pentiumpro_bug4046376 = 1;
9600Sstevel@tonic-gate pentiumpro_bug4064495 = 1;
9610Sstevel@tonic-gate /*
9620Sstevel@tonic-gate * Clear the SEP bit when it was set erroneously
9630Sstevel@tonic-gate */
9640Sstevel@tonic-gate if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
9650Sstevel@tonic-gate cp->cp_edx &= ~CPUID_INTC_EDX_SEP;
9661975Sdmick } else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) {
9670Sstevel@tonic-gate x86_type = X86_TYPE_P4;
9680Sstevel@tonic-gate /*
9690Sstevel@tonic-gate * We don't currently depend on any of the %ecx
9700Sstevel@tonic-gate * features until Prescott, so we'll only check
9710Sstevel@tonic-gate * this from P4 onwards. We might want to revisit
9720Sstevel@tonic-gate * that idea later.
9730Sstevel@tonic-gate */
9740Sstevel@tonic-gate mask_ecx = 0xffffffff;
9750Sstevel@tonic-gate } else if (cpi->cpi_family > 0xf)
9760Sstevel@tonic-gate mask_ecx = 0xffffffff;
9774636Sbholler /*
9784636Sbholler * We don't support MONITOR/MWAIT if leaf 5 is not available
9794636Sbholler * to obtain the monitor linesize.
9804636Sbholler */
9814636Sbholler if (cpi->cpi_maxeax < 5)
9824636Sbholler mask_ecx &= ~CPUID_INTC_ECX_MON;
9830Sstevel@tonic-gate break;
9840Sstevel@tonic-gate case X86_VENDOR_IntelClone:
9850Sstevel@tonic-gate default:
9860Sstevel@tonic-gate break;
9870Sstevel@tonic-gate case X86_VENDOR_AMD:
9880Sstevel@tonic-gate #if defined(OPTERON_ERRATUM_108)
9890Sstevel@tonic-gate if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) {
9900Sstevel@tonic-gate cp->cp_eax = (0xf0f & cp->cp_eax) | 0xc0;
9910Sstevel@tonic-gate cpi->cpi_model = 0xc;
9920Sstevel@tonic-gate } else
9930Sstevel@tonic-gate #endif
9940Sstevel@tonic-gate if (cpi->cpi_family == 5) {
9950Sstevel@tonic-gate /*
9960Sstevel@tonic-gate * AMD K5 and K6
9970Sstevel@tonic-gate *
9980Sstevel@tonic-gate * These CPUs have an incomplete implementation
9990Sstevel@tonic-gate * of MCA/MCE which we mask away.
10000Sstevel@tonic-gate */
10011228Sandrei mask_edx &= ~(CPUID_INTC_EDX_MCE | CPUID_INTC_EDX_MCA);
10021228Sandrei
10031228Sandrei /*
10041228Sandrei * Model 0 uses the wrong (APIC) bit
10051228Sandrei * to indicate PGE. Fix it here.
10061228Sandrei */
10070Sstevel@tonic-gate if (cpi->cpi_model == 0) {
10080Sstevel@tonic-gate if (cp->cp_edx & 0x200) {
10090Sstevel@tonic-gate cp->cp_edx &= ~0x200;
10100Sstevel@tonic-gate cp->cp_edx |= CPUID_INTC_EDX_PGE;
10110Sstevel@tonic-gate }
10121228Sandrei }
10131228Sandrei
10141228Sandrei /*
10151228Sandrei * Early models had problems w/ MMX; disable.
10161228Sandrei */
10171228Sandrei if (cpi->cpi_model < 6)
10181228Sandrei mask_edx &= ~CPUID_INTC_EDX_MMX;
10191228Sandrei }
10201228Sandrei
10211228Sandrei /*
10221228Sandrei * For newer families, SSE3 and CX16, at least, are valid;
10231228Sandrei * enable all
10241228Sandrei */
10251228Sandrei if (cpi->cpi_family >= 0xf)
1026771Sdmick mask_ecx = 0xffffffff;
10274636Sbholler /*
10284636Sbholler * We don't support MONITOR/MWAIT if leaf 5 is not available
10294636Sbholler * to obtain the monitor linesize.
10304636Sbholler */
10314636Sbholler if (cpi->cpi_maxeax < 5)
10324636Sbholler mask_ecx &= ~CPUID_INTC_ECX_MON;
10335045Sbholler
10345084Sjohnlev #if !defined(__xpv)
10355045Sbholler /*
10365045Sbholler * Do not use MONITOR/MWAIT to halt in the idle loop on any AMD
10375045Sbholler * processors. AMD does not intend MWAIT to be used in the cpu
10385045Sbholler * idle loop on current and future processors. 10h and future
10395045Sbholler * AMD processors use more power in MWAIT than HLT.
10405045Sbholler * Pre-family-10h Opterons do not have the MWAIT instruction.
10415045Sbholler */
10425045Sbholler idle_cpu_prefer_mwait = 0;
10435084Sjohnlev #endif
10445045Sbholler
10450Sstevel@tonic-gate break;
10460Sstevel@tonic-gate case X86_VENDOR_TM:
10470Sstevel@tonic-gate /*
10480Sstevel@tonic-gate * workaround the NT workaround in CMS 4.1
10490Sstevel@tonic-gate */
10500Sstevel@tonic-gate if (cpi->cpi_family == 5 && cpi->cpi_model == 4 &&
10510Sstevel@tonic-gate (cpi->cpi_step == 2 || cpi->cpi_step == 3))
10520Sstevel@tonic-gate cp->cp_edx |= CPUID_INTC_EDX_CX8;
10530Sstevel@tonic-gate break;
10540Sstevel@tonic-gate case X86_VENDOR_Centaur:
10550Sstevel@tonic-gate /*
10560Sstevel@tonic-gate * workaround the NT workarounds again
10570Sstevel@tonic-gate */
10580Sstevel@tonic-gate if (cpi->cpi_family == 6)
10590Sstevel@tonic-gate cp->cp_edx |= CPUID_INTC_EDX_CX8;
10600Sstevel@tonic-gate break;
10610Sstevel@tonic-gate case X86_VENDOR_Cyrix:
10620Sstevel@tonic-gate /*
10630Sstevel@tonic-gate * We rely heavily on the probing in locore
10640Sstevel@tonic-gate * to actually figure out what parts, if any,
10650Sstevel@tonic-gate * of the Cyrix cpuid instruction to believe.
10660Sstevel@tonic-gate */
10670Sstevel@tonic-gate switch (x86_type) {
10680Sstevel@tonic-gate case X86_TYPE_CYRIX_486:
10690Sstevel@tonic-gate mask_edx = 0;
10700Sstevel@tonic-gate break;
10710Sstevel@tonic-gate case X86_TYPE_CYRIX_6x86:
10720Sstevel@tonic-gate mask_edx = 0;
10730Sstevel@tonic-gate break;
10740Sstevel@tonic-gate case X86_TYPE_CYRIX_6x86L:
10750Sstevel@tonic-gate mask_edx =
10760Sstevel@tonic-gate CPUID_INTC_EDX_DE |
10770Sstevel@tonic-gate CPUID_INTC_EDX_CX8;
10780Sstevel@tonic-gate break;
10790Sstevel@tonic-gate case X86_TYPE_CYRIX_6x86MX:
10800Sstevel@tonic-gate mask_edx =
10810Sstevel@tonic-gate CPUID_INTC_EDX_DE |
10820Sstevel@tonic-gate CPUID_INTC_EDX_MSR |
10830Sstevel@tonic-gate CPUID_INTC_EDX_CX8 |
10840Sstevel@tonic-gate CPUID_INTC_EDX_PGE |
10850Sstevel@tonic-gate CPUID_INTC_EDX_CMOV |
10860Sstevel@tonic-gate CPUID_INTC_EDX_MMX;
10870Sstevel@tonic-gate break;
10880Sstevel@tonic-gate case X86_TYPE_CYRIX_GXm:
10890Sstevel@tonic-gate mask_edx =
10900Sstevel@tonic-gate CPUID_INTC_EDX_MSR |
10910Sstevel@tonic-gate CPUID_INTC_EDX_CX8 |
10920Sstevel@tonic-gate CPUID_INTC_EDX_CMOV |
10930Sstevel@tonic-gate CPUID_INTC_EDX_MMX;
10940Sstevel@tonic-gate break;
10950Sstevel@tonic-gate case X86_TYPE_CYRIX_MediaGX:
10960Sstevel@tonic-gate break;
10970Sstevel@tonic-gate case X86_TYPE_CYRIX_MII:
10980Sstevel@tonic-gate case X86_TYPE_VIA_CYRIX_III:
10990Sstevel@tonic-gate mask_edx =
11000Sstevel@tonic-gate CPUID_INTC_EDX_DE |
11010Sstevel@tonic-gate CPUID_INTC_EDX_TSC |
11020Sstevel@tonic-gate CPUID_INTC_EDX_MSR |
11030Sstevel@tonic-gate CPUID_INTC_EDX_CX8 |
11040Sstevel@tonic-gate CPUID_INTC_EDX_PGE |
11050Sstevel@tonic-gate CPUID_INTC_EDX_CMOV |
11060Sstevel@tonic-gate CPUID_INTC_EDX_MMX;
11070Sstevel@tonic-gate break;
11080Sstevel@tonic-gate default:
11090Sstevel@tonic-gate break;
11100Sstevel@tonic-gate }
11110Sstevel@tonic-gate break;
11120Sstevel@tonic-gate }
11130Sstevel@tonic-gate
11145084Sjohnlev #if defined(__xpv)
11155084Sjohnlev /*
11165084Sjohnlev * Do not support MONITOR/MWAIT under a hypervisor
11175084Sjohnlev */
11185084Sjohnlev mask_ecx &= ~CPUID_INTC_ECX_MON;
111913134Skuriakose.kuruvilla@oracle.com /*
112013134Skuriakose.kuruvilla@oracle.com * Do not support XSAVE under a hypervisor for now
112113134Skuriakose.kuruvilla@oracle.com */
112213134Skuriakose.kuruvilla@oracle.com xsave_force_disable = B_TRUE;
112313134Skuriakose.kuruvilla@oracle.com
11245084Sjohnlev #endif /* __xpv */
11255084Sjohnlev
112613134Skuriakose.kuruvilla@oracle.com if (xsave_force_disable) {
112713134Skuriakose.kuruvilla@oracle.com mask_ecx &= ~CPUID_INTC_ECX_XSAVE;
112813134Skuriakose.kuruvilla@oracle.com mask_ecx &= ~CPUID_INTC_ECX_AVX;
112913134Skuriakose.kuruvilla@oracle.com }
113013134Skuriakose.kuruvilla@oracle.com
11310Sstevel@tonic-gate /*
11320Sstevel@tonic-gate * Now we've figured out the masks that determine
11330Sstevel@tonic-gate * which bits we choose to believe, apply the masks
11340Sstevel@tonic-gate * to the feature words, then map the kernel's view
11350Sstevel@tonic-gate * of these feature words into its feature word.
11360Sstevel@tonic-gate */
11370Sstevel@tonic-gate cp->cp_edx &= mask_edx;
11380Sstevel@tonic-gate cp->cp_ecx &= mask_ecx;
11390Sstevel@tonic-gate
11400Sstevel@tonic-gate /*
11413446Smrj * apply any platform restrictions (we don't call this
11423446Smrj * immediately after __cpuid_insn here, because we need the
11433446Smrj * workarounds applied above first)
11440Sstevel@tonic-gate */
11453446Smrj platform_cpuid_mangle(cpi->cpi_vendor, 1, cp);
11460Sstevel@tonic-gate
11473446Smrj /*
11483446Smrj * fold in overrides from the "eeprom" mechanism
11493446Smrj */
11500Sstevel@tonic-gate cp->cp_edx |= cpuid_feature_edx_include;
11510Sstevel@tonic-gate cp->cp_edx &= ~cpuid_feature_edx_exclude;
11520Sstevel@tonic-gate
11530Sstevel@tonic-gate cp->cp_ecx |= cpuid_feature_ecx_include;
11540Sstevel@tonic-gate cp->cp_ecx &= ~cpuid_feature_ecx_exclude;
11550Sstevel@tonic-gate
115612826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_PSE) {
115712826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_LARGEPAGE);
115812826Skuriakose.kuruvilla@oracle.com }
115912826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_TSC) {
116012826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_TSC);
116112826Skuriakose.kuruvilla@oracle.com }
116212826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_MSR) {
116312826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_MSR);
116412826Skuriakose.kuruvilla@oracle.com }
116512826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_MTRR) {
116612826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_MTRR);
116712826Skuriakose.kuruvilla@oracle.com }
116812826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_PGE) {
116912826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_PGE);
117012826Skuriakose.kuruvilla@oracle.com }
117112826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_CMOV) {
117212826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_CMOV);
117312826Skuriakose.kuruvilla@oracle.com }
117412826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_MMX) {
117512826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_MMX);
117612826Skuriakose.kuruvilla@oracle.com }
11770Sstevel@tonic-gate if ((cp->cp_edx & CPUID_INTC_EDX_MCE) != 0 &&
117812826Skuriakose.kuruvilla@oracle.com (cp->cp_edx & CPUID_INTC_EDX_MCA) != 0) {
117912826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_MCA);
118012826Skuriakose.kuruvilla@oracle.com }
118112826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_PAE) {
118212826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_PAE);
118312826Skuriakose.kuruvilla@oracle.com }
118412826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_CX8) {
118512826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_CX8);
118612826Skuriakose.kuruvilla@oracle.com }
118712826Skuriakose.kuruvilla@oracle.com if (cp->cp_ecx & CPUID_INTC_ECX_CX16) {
118812826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_CX16);
118912826Skuriakose.kuruvilla@oracle.com }
119012826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_PAT) {
119112826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_PAT);
119212826Skuriakose.kuruvilla@oracle.com }
119312826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_SEP) {
119412826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_SEP);
119512826Skuriakose.kuruvilla@oracle.com }
11960Sstevel@tonic-gate if (cp->cp_edx & CPUID_INTC_EDX_FXSR) {
11970Sstevel@tonic-gate /*
11980Sstevel@tonic-gate * In our implementation, fxsave/fxrstor
11990Sstevel@tonic-gate * are prerequisites before we'll even
12000Sstevel@tonic-gate * try and do SSE things.
12010Sstevel@tonic-gate */
120212826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_SSE) {
120312826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_SSE);
120412826Skuriakose.kuruvilla@oracle.com }
120512826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_SSE2) {
120612826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_SSE2);
120712826Skuriakose.kuruvilla@oracle.com }
120812826Skuriakose.kuruvilla@oracle.com if (cp->cp_ecx & CPUID_INTC_ECX_SSE3) {
120912826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_SSE3);
121012826Skuriakose.kuruvilla@oracle.com }
12115269Skk208521 if (cpi->cpi_vendor == X86_VENDOR_Intel) {
121212826Skuriakose.kuruvilla@oracle.com if (cp->cp_ecx & CPUID_INTC_ECX_SSSE3) {
121312826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_SSSE3);
121412826Skuriakose.kuruvilla@oracle.com }
121512826Skuriakose.kuruvilla@oracle.com if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_1) {
121612826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_SSE4_1);
121712826Skuriakose.kuruvilla@oracle.com }
121812826Skuriakose.kuruvilla@oracle.com if (cp->cp_ecx & CPUID_INTC_ECX_SSE4_2) {
121912826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_SSE4_2);
122012826Skuriakose.kuruvilla@oracle.com }
122112826Skuriakose.kuruvilla@oracle.com if (cp->cp_ecx & CPUID_INTC_ECX_AES) {
122212826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_AES);
122312826Skuriakose.kuruvilla@oracle.com }
122412826Skuriakose.kuruvilla@oracle.com if (cp->cp_ecx & CPUID_INTC_ECX_PCLMULQDQ) {
122512826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_PCLMULQDQ);
122612826Skuriakose.kuruvilla@oracle.com }
122713134Skuriakose.kuruvilla@oracle.com
122813134Skuriakose.kuruvilla@oracle.com if (cp->cp_ecx & CPUID_INTC_ECX_XSAVE) {
122913134Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_XSAVE);
123013134Skuriakose.kuruvilla@oracle.com /* We only test AVX when there is XSAVE */
123113134Skuriakose.kuruvilla@oracle.com if (cp->cp_ecx & CPUID_INTC_ECX_AVX) {
123213134Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset,
123313134Skuriakose.kuruvilla@oracle.com X86FSET_AVX);
123413134Skuriakose.kuruvilla@oracle.com }
123513134Skuriakose.kuruvilla@oracle.com }
12365269Skk208521 }
12370Sstevel@tonic-gate }
123812826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_INTC_EDX_DE) {
123912826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_DE);
124012826Skuriakose.kuruvilla@oracle.com }
12417716SBill.Holler@Sun.COM #if !defined(__xpv)
12424481Sbholler if (cp->cp_ecx & CPUID_INTC_ECX_MON) {
12437716SBill.Holler@Sun.COM
12447716SBill.Holler@Sun.COM /*
12457716SBill.Holler@Sun.COM * We require the CLFLUSH instruction for erratum workaround
12467716SBill.Holler@Sun.COM * to use MONITOR/MWAIT.
12477716SBill.Holler@Sun.COM */
12487716SBill.Holler@Sun.COM if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
12497716SBill.Holler@Sun.COM cpi->cpi_mwait.support |= MWAIT_SUPPORT;
125012826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_MWAIT);
12517716SBill.Holler@Sun.COM } else {
12527716SBill.Holler@Sun.COM extern int idle_cpu_assert_cflush_monitor;
12537716SBill.Holler@Sun.COM
12547716SBill.Holler@Sun.COM /*
12557716SBill.Holler@Sun.COM * All processors we are aware of which have
12567716SBill.Holler@Sun.COM * MONITOR/MWAIT also have CLFLUSH.
12577716SBill.Holler@Sun.COM */
12587716SBill.Holler@Sun.COM if (idle_cpu_assert_cflush_monitor) {
12597716SBill.Holler@Sun.COM ASSERT((cp->cp_ecx & CPUID_INTC_ECX_MON) &&
12607716SBill.Holler@Sun.COM (cp->cp_edx & CPUID_INTC_EDX_CLFSH));
12617716SBill.Holler@Sun.COM }
12627716SBill.Holler@Sun.COM }
12634481Sbholler }
12647716SBill.Holler@Sun.COM #endif /* __xpv */
12650Sstevel@tonic-gate
12667589SVikram.Hegde@Sun.COM /*
12677589SVikram.Hegde@Sun.COM * Only needed the first time; the rest of the cpus follow suit.
12687589SVikram.Hegde@Sun.COM * We only capture this for the boot cpu.
12697589SVikram.Hegde@Sun.COM */
12707589SVikram.Hegde@Sun.COM if (cp->cp_edx & CPUID_INTC_EDX_CLFSH) {
127112826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_CLFSH);
12727589SVikram.Hegde@Sun.COM x86_clflush_size = (BITX(cp->cp_ebx, 15, 8) * 8);
12737589SVikram.Hegde@Sun.COM }
127412826Skuriakose.kuruvilla@oracle.com if (is_x86_feature(featureset, X86FSET_PAE))
12750Sstevel@tonic-gate cpi->cpi_pabits = 36;
12760Sstevel@tonic-gate
12770Sstevel@tonic-gate /*
12780Sstevel@tonic-gate * Hyperthreading configuration is slightly tricky on Intel
12790Sstevel@tonic-gate * and pure clones, and even trickier on AMD.
12800Sstevel@tonic-gate *
12810Sstevel@tonic-gate * (AMD chose to set the HTT bit on their CMP processors,
12820Sstevel@tonic-gate * even though they're not actually hyperthreaded. Thus it
12830Sstevel@tonic-gate * takes a bit more work to figure out what's really going
12843446Smrj * on ... see the handling of the CMP_LGCY bit below)
12850Sstevel@tonic-gate */
12860Sstevel@tonic-gate if (cp->cp_edx & CPUID_INTC_EDX_HTT) {
12870Sstevel@tonic-gate cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
12880Sstevel@tonic-gate if (cpi->cpi_ncpu_per_chip > 1)
128912826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_HTT);
12901228Sandrei } else {
12911228Sandrei cpi->cpi_ncpu_per_chip = 1;
12920Sstevel@tonic-gate }
12930Sstevel@tonic-gate
12940Sstevel@tonic-gate /*
12950Sstevel@tonic-gate * Work on the "extended" feature information, doing
12960Sstevel@tonic-gate * some basic initialization for cpuid_pass2()
12970Sstevel@tonic-gate */
12980Sstevel@tonic-gate xcpuid = 0;
12990Sstevel@tonic-gate switch (cpi->cpi_vendor) {
13000Sstevel@tonic-gate case X86_VENDOR_Intel:
13011975Sdmick if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf)
13020Sstevel@tonic-gate xcpuid++;
13030Sstevel@tonic-gate break;
13040Sstevel@tonic-gate case X86_VENDOR_AMD:
13050Sstevel@tonic-gate if (cpi->cpi_family > 5 ||
13060Sstevel@tonic-gate (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
13070Sstevel@tonic-gate xcpuid++;
13080Sstevel@tonic-gate break;
13090Sstevel@tonic-gate case X86_VENDOR_Cyrix:
13100Sstevel@tonic-gate /*
13110Sstevel@tonic-gate * Only these Cyrix CPUs are -known- to support
13120Sstevel@tonic-gate * extended cpuid operations.
13130Sstevel@tonic-gate */
13140Sstevel@tonic-gate if (x86_type == X86_TYPE_VIA_CYRIX_III ||
13150Sstevel@tonic-gate x86_type == X86_TYPE_CYRIX_GXm)
13160Sstevel@tonic-gate xcpuid++;
13170Sstevel@tonic-gate break;
13180Sstevel@tonic-gate case X86_VENDOR_Centaur:
13190Sstevel@tonic-gate case X86_VENDOR_TM:
13200Sstevel@tonic-gate default:
13210Sstevel@tonic-gate xcpuid++;
13220Sstevel@tonic-gate break;
13230Sstevel@tonic-gate }
13240Sstevel@tonic-gate
13250Sstevel@tonic-gate if (xcpuid) {
13260Sstevel@tonic-gate cp = &cpi->cpi_extd[0];
13271228Sandrei cp->cp_eax = 0x80000000;
13281228Sandrei cpi->cpi_xmaxeax = __cpuid_insn(cp);
13290Sstevel@tonic-gate }
13300Sstevel@tonic-gate
13310Sstevel@tonic-gate if (cpi->cpi_xmaxeax & 0x80000000) {
13320Sstevel@tonic-gate
13330Sstevel@tonic-gate if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX)
13340Sstevel@tonic-gate cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX;
13350Sstevel@tonic-gate
13360Sstevel@tonic-gate switch (cpi->cpi_vendor) {
13370Sstevel@tonic-gate case X86_VENDOR_Intel:
13380Sstevel@tonic-gate case X86_VENDOR_AMD:
13390Sstevel@tonic-gate if (cpi->cpi_xmaxeax < 0x80000001)
13400Sstevel@tonic-gate break;
13410Sstevel@tonic-gate cp = &cpi->cpi_extd[1];
13421228Sandrei cp->cp_eax = 0x80000001;
13431228Sandrei (void) __cpuid_insn(cp);
13443446Smrj
13450Sstevel@tonic-gate if (cpi->cpi_vendor == X86_VENDOR_AMD &&
13460Sstevel@tonic-gate cpi->cpi_family == 5 &&
13470Sstevel@tonic-gate cpi->cpi_model == 6 &&
13480Sstevel@tonic-gate cpi->cpi_step == 6) {
13490Sstevel@tonic-gate /*
13500Sstevel@tonic-gate * K6 model 6 uses bit 10 to indicate SYSC
13510Sstevel@tonic-gate * Later models use bit 11. Fix it here.
13520Sstevel@tonic-gate */
13530Sstevel@tonic-gate if (cp->cp_edx & 0x400) {
13540Sstevel@tonic-gate cp->cp_edx &= ~0x400;
13550Sstevel@tonic-gate cp->cp_edx |= CPUID_AMD_EDX_SYSC;
13560Sstevel@tonic-gate }
13570Sstevel@tonic-gate }
13580Sstevel@tonic-gate
13593446Smrj platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp);
13603446Smrj
13610Sstevel@tonic-gate /*
13620Sstevel@tonic-gate * Compute the additions to the kernel's feature word.
13630Sstevel@tonic-gate */
136412826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_AMD_EDX_NX) {
136512826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_NX);
136612826Skuriakose.kuruvilla@oracle.com }
13670Sstevel@tonic-gate
13687656SSherry.Moore@Sun.COM /*
13697656SSherry.Moore@Sun.COM * Regardless of whether or not we boot 64-bit,
13707656SSherry.Moore@Sun.COM * we should have a way to identify whether
13717656SSherry.Moore@Sun.COM * the CPU is capable of running 64-bit.
13727656SSherry.Moore@Sun.COM */
137312826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_AMD_EDX_LM) {
137412826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_64);
137512826Skuriakose.kuruvilla@oracle.com }
13767656SSherry.Moore@Sun.COM
13775349Skchow #if defined(__amd64)
13785349Skchow /* 1 GB large page - enable only for 64 bit kernel */
137912826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_AMD_EDX_1GPG) {
138012826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_1GPG);
138112826Skuriakose.kuruvilla@oracle.com }
13825349Skchow #endif
13835349Skchow
13844628Skk208521 if ((cpi->cpi_vendor == X86_VENDOR_AMD) &&
13854628Skk208521 (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) &&
138612826Skuriakose.kuruvilla@oracle.com (cp->cp_ecx & CPUID_AMD_ECX_SSE4A)) {
138712826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_SSE4A);
138812826Skuriakose.kuruvilla@oracle.com }
13894628Skk208521
13900Sstevel@tonic-gate /*
13913446Smrj * If both the HTT and CMP_LGCY bits are set,
13921228Sandrei * then we're not actually HyperThreaded. Read
13931228Sandrei * "AMD CPUID Specification" for more details.
13940Sstevel@tonic-gate */
13950Sstevel@tonic-gate if (cpi->cpi_vendor == X86_VENDOR_AMD &&
139612826Skuriakose.kuruvilla@oracle.com is_x86_feature(featureset, X86FSET_HTT) &&
13973446Smrj (cp->cp_ecx & CPUID_AMD_ECX_CMP_LGCY)) {
139812826Skuriakose.kuruvilla@oracle.com remove_x86_feature(featureset, X86FSET_HTT);
139912826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_CMP);
14001228Sandrei }
14013446Smrj #if defined(__amd64)
14020Sstevel@tonic-gate /*
14030Sstevel@tonic-gate * It's really tricky to support syscall/sysret in
14040Sstevel@tonic-gate * the i386 kernel; we rely on sysenter/sysexit
14050Sstevel@tonic-gate * instead. In the amd64 kernel, things are -way-
14060Sstevel@tonic-gate * better.
14070Sstevel@tonic-gate */
140812826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_AMD_EDX_SYSC) {
140912826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_ASYSC);
141012826Skuriakose.kuruvilla@oracle.com }
14110Sstevel@tonic-gate
14120Sstevel@tonic-gate /*
14130Sstevel@tonic-gate * While we're thinking about system calls, note
14140Sstevel@tonic-gate * that AMD processors don't support sysenter
14150Sstevel@tonic-gate * in long mode at all, so don't try to program them.
14160Sstevel@tonic-gate */
141712826Skuriakose.kuruvilla@oracle.com if (x86_vendor == X86_VENDOR_AMD) {
141812826Skuriakose.kuruvilla@oracle.com remove_x86_feature(featureset, X86FSET_SEP);
141912826Skuriakose.kuruvilla@oracle.com }
14200Sstevel@tonic-gate #endif
142112826Skuriakose.kuruvilla@oracle.com if (cp->cp_edx & CPUID_AMD_EDX_TSCP) {
142212826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_TSCP);
142312826Skuriakose.kuruvilla@oracle.com }
14240Sstevel@tonic-gate break;
14250Sstevel@tonic-gate default:
14260Sstevel@tonic-gate break;
14270Sstevel@tonic-gate }
14280Sstevel@tonic-gate
14291228Sandrei /*
14301228Sandrei * Get CPUID data about processor cores and hyperthreads.
14311228Sandrei */
14320Sstevel@tonic-gate switch (cpi->cpi_vendor) {
14330Sstevel@tonic-gate case X86_VENDOR_Intel:
14341228Sandrei if (cpi->cpi_maxeax >= 4) {
14351228Sandrei cp = &cpi->cpi_std[4];
14361228Sandrei cp->cp_eax = 4;
14371228Sandrei cp->cp_ecx = 0;
14381228Sandrei (void) __cpuid_insn(cp);
14393446Smrj platform_cpuid_mangle(cpi->cpi_vendor, 4, cp);
14401228Sandrei }
14411228Sandrei /*FALLTHROUGH*/
14420Sstevel@tonic-gate case X86_VENDOR_AMD:
14430Sstevel@tonic-gate if (cpi->cpi_xmaxeax < 0x80000008)
14440Sstevel@tonic-gate break;
14450Sstevel@tonic-gate cp = &cpi->cpi_extd[8];
14461228Sandrei cp->cp_eax = 0x80000008;
14471228Sandrei (void) __cpuid_insn(cp);
14483446Smrj platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp);
14493446Smrj
14500Sstevel@tonic-gate /*
14510Sstevel@tonic-gate * Virtual and physical address limits from
14520Sstevel@tonic-gate * cpuid override previously guessed values.
14530Sstevel@tonic-gate */
14540Sstevel@tonic-gate cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0);
14550Sstevel@tonic-gate cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8);
14560Sstevel@tonic-gate break;
14570Sstevel@tonic-gate default:
14580Sstevel@tonic-gate break;
14590Sstevel@tonic-gate }
14601228Sandrei
14614606Sesaxe /*
14624606Sesaxe * Derive the number of cores per chip
14634606Sesaxe */
14641228Sandrei switch (cpi->cpi_vendor) {
14651228Sandrei case X86_VENDOR_Intel:
14661228Sandrei if (cpi->cpi_maxeax < 4) {
14671228Sandrei cpi->cpi_ncore_per_chip = 1;
14681228Sandrei break;
14691228Sandrei } else {
14701228Sandrei cpi->cpi_ncore_per_chip =
14711228Sandrei BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1;
14721228Sandrei }
14731228Sandrei break;
14741228Sandrei case X86_VENDOR_AMD:
14751228Sandrei if (cpi->cpi_xmaxeax < 0x80000008) {
14761228Sandrei cpi->cpi_ncore_per_chip = 1;
14771228Sandrei break;
14781228Sandrei } else {
14795870Sgavinm /*
14805870Sgavinm * On family 0xf cpuid fn 2 ECX[7:0] "NC" is
14815870Sgavinm * 1 less than the number of physical cores on
14825870Sgavinm * the chip. In family 0x10 this value can
14835870Sgavinm * be affected by "downcoring" - it reflects
14845870Sgavinm * 1 less than the number of cores actually
14855870Sgavinm * enabled on this node.
14865870Sgavinm */
14871228Sandrei cpi->cpi_ncore_per_chip =
14881228Sandrei BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
14891228Sandrei }
14901228Sandrei break;
14911228Sandrei default:
14921228Sandrei cpi->cpi_ncore_per_chip = 1;
14931228Sandrei break;
14941228Sandrei }
14958906SEric.Saxe@Sun.COM
14968906SEric.Saxe@Sun.COM /*
14978906SEric.Saxe@Sun.COM * Get CPUID data about TSC Invariance in Deep C-State.
14988906SEric.Saxe@Sun.COM */
14998906SEric.Saxe@Sun.COM switch (cpi->cpi_vendor) {
15008906SEric.Saxe@Sun.COM case X86_VENDOR_Intel:
15018906SEric.Saxe@Sun.COM if (cpi->cpi_maxeax >= 7) {
15028906SEric.Saxe@Sun.COM cp = &cpi->cpi_extd[7];
15038906SEric.Saxe@Sun.COM cp->cp_eax = 0x80000007;
15048906SEric.Saxe@Sun.COM cp->cp_ecx = 0;
15058906SEric.Saxe@Sun.COM (void) __cpuid_insn(cp);
15068906SEric.Saxe@Sun.COM }
15078906SEric.Saxe@Sun.COM break;
15088906SEric.Saxe@Sun.COM default:
15098906SEric.Saxe@Sun.COM break;
15108906SEric.Saxe@Sun.COM }
15115284Sgavinm } else {
15125284Sgavinm cpi->cpi_ncore_per_chip = 1;
15130Sstevel@tonic-gate }
15140Sstevel@tonic-gate
15151228Sandrei /*
15161228Sandrei * If more than one core, then this processor is CMP.
15171228Sandrei */
151812826Skuriakose.kuruvilla@oracle.com if (cpi->cpi_ncore_per_chip > 1) {
151912826Skuriakose.kuruvilla@oracle.com add_x86_feature(featureset, X86FSET_CMP);
152012826Skuriakose.kuruvilla@oracle.com }
15213446Smrj
15221228Sandrei /*
15231228Sandrei * If the number of cores is the same as the number
15241228Sandrei * of CPUs, then we cannot have HyperThreading.
15251228Sandrei */
152612826Skuriakose.kuruvilla@oracle.com if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) {
152712826Skuriakose.kuruvilla@oracle.com remove_x86_feature(featureset, X86FSET_HTT);
152812826Skuriakose.kuruvilla@oracle.com }
15291228Sandrei
153010947SSrihari.Venkatesan@Sun.COM cpi->cpi_apicid = CPI_APIC_ID(cpi);
153110947SSrihari.Venkatesan@Sun.COM cpi->cpi_procnodes_per_pkg = 1;
153212826Skuriakose.kuruvilla@oracle.com if (is_x86_feature(featureset, X86FSET_HTT) == B_FALSE &&
153312826Skuriakose.kuruvilla@oracle.com is_x86_feature(featureset, X86FSET_CMP) == B_FALSE) {
15341228Sandrei /*
15351228Sandrei * Single-core single-threaded processors.
15361228Sandrei */
15370Sstevel@tonic-gate cpi->cpi_chipid = -1;
15380Sstevel@tonic-gate cpi->cpi_clogid = 0;
15391228Sandrei cpi->cpi_coreid = cpu->cpu_id;
15405870Sgavinm cpi->cpi_pkgcoreid = 0;
154110947SSrihari.Venkatesan@Sun.COM if (cpi->cpi_vendor == X86_VENDOR_AMD)
154210947SSrihari.Venkatesan@Sun.COM cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0);
154310947SSrihari.Venkatesan@Sun.COM else
154410947SSrihari.Venkatesan@Sun.COM cpi->cpi_procnodeid = cpi->cpi_chipid;
15450Sstevel@tonic-gate } else if (cpi->cpi_ncpu_per_chip > 1) {
154610947SSrihari.Venkatesan@Sun.COM if (cpi->cpi_vendor == X86_VENDOR_Intel)
154712826Skuriakose.kuruvilla@oracle.com cpuid_intel_getids(cpu, featureset);
154810947SSrihari.Venkatesan@Sun.COM else if (cpi->cpi_vendor == X86_VENDOR_AMD)
154910947SSrihari.Venkatesan@Sun.COM cpuid_amd_getids(cpu);
155010947SSrihari.Venkatesan@Sun.COM else {
15511228Sandrei /*
15521228Sandrei * All other processors are currently
15531228Sandrei * assumed to have single cores.
15541228Sandrei */
15551228Sandrei cpi->cpi_coreid = cpi->cpi_chipid;
15565870Sgavinm cpi->cpi_pkgcoreid = 0;
155710947SSrihari.Venkatesan@Sun.COM cpi->cpi_procnodeid = cpi->cpi_chipid;
15581228Sandrei }
15590Sstevel@tonic-gate }
15600Sstevel@tonic-gate
15612869Sgavinm /*
15622869Sgavinm * Synthesize chip "revision" and socket type
15632869Sgavinm */
15647532SSean.Ye@Sun.COM cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family,
15657532SSean.Ye@Sun.COM cpi->cpi_model, cpi->cpi_step);
15667532SSean.Ye@Sun.COM cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor,
15677532SSean.Ye@Sun.COM cpi->cpi_family, cpi->cpi_model, cpi->cpi_step);
15687532SSean.Ye@Sun.COM cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family,
15697532SSean.Ye@Sun.COM cpi->cpi_model, cpi->cpi_step);
15702869Sgavinm
15710Sstevel@tonic-gate pass1_done:
15720Sstevel@tonic-gate cpi->cpi_pass = 1;
15730Sstevel@tonic-gate }
15740Sstevel@tonic-gate
15750Sstevel@tonic-gate /*
15760Sstevel@tonic-gate * Make copies of the cpuid table entries we depend on, in
15770Sstevel@tonic-gate * part for ease of parsing now, in part so that we have only
15780Sstevel@tonic-gate * one place to correct any of it, in part for ease of
15790Sstevel@tonic-gate * later export to userland, and in part so we can look at
15800Sstevel@tonic-gate * this stuff in a crash dump.
15810Sstevel@tonic-gate */
15820Sstevel@tonic-gate
15830Sstevel@tonic-gate /*ARGSUSED*/
15840Sstevel@tonic-gate void
cpuid_pass2(cpu_t * cpu)15850Sstevel@tonic-gate cpuid_pass2(cpu_t *cpu)
15860Sstevel@tonic-gate {
15870Sstevel@tonic-gate uint_t n, nmax;
15880Sstevel@tonic-gate int i;
15891228Sandrei struct cpuid_regs *cp;
15900Sstevel@tonic-gate uint8_t *dp;
15910Sstevel@tonic-gate uint32_t *iptr;
15920Sstevel@tonic-gate struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
15930Sstevel@tonic-gate
15940Sstevel@tonic-gate ASSERT(cpi->cpi_pass == 1);
15950Sstevel@tonic-gate
15960Sstevel@tonic-gate if (cpi->cpi_maxeax < 1)
15970Sstevel@tonic-gate goto pass2_done;
15980Sstevel@tonic-gate
15990Sstevel@tonic-gate if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD)
16000Sstevel@tonic-gate nmax = NMAX_CPI_STD;
16010Sstevel@tonic-gate /*
16020Sstevel@tonic-gate * (We already handled n == 0 and n == 1 in pass 1)
16030Sstevel@tonic-gate */
16040Sstevel@tonic-gate for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) {
16051228Sandrei cp->cp_eax = n;
16064606Sesaxe
16074606Sesaxe /*
16084606Sesaxe * CPUID function 4 expects %ecx to be initialized
16094606Sesaxe * with an index which indicates which cache to return
16104606Sesaxe * information about. The OS is expected to call function 4
16114606Sesaxe * with %ecx set to 0, 1, 2, ... until it returns with
16124606Sesaxe * EAX[4:0] set to 0, which indicates there are no more
16134606Sesaxe * caches.
16144606Sesaxe *
16154606Sesaxe * Here, populate cpi_std[4] with the information returned by
16164606Sesaxe * function 4 when %ecx == 0, and do the rest in cpuid_pass3()
16174606Sesaxe * when dynamic memory allocation becomes available.
16184606Sesaxe *
16194606Sesaxe * Note: we need to explicitly initialize %ecx here, since
16204606Sesaxe * function 4 may have been previously invoked.
16214606Sesaxe */
16224606Sesaxe if (n == 4)
16234606Sesaxe cp->cp_ecx = 0;
16244606Sesaxe
16251228Sandrei (void) __cpuid_insn(cp);
16263446Smrj platform_cpuid_mangle(cpi->cpi_vendor, n, cp);
16270Sstevel@tonic-gate switch (n) {
16280Sstevel@tonic-gate case 2:
16290Sstevel@tonic-gate /*
16300Sstevel@tonic-gate * "the lower 8 bits of the %eax register
16310Sstevel@tonic-gate * contain a value that identifies the number
16320Sstevel@tonic-gate * of times the cpuid [instruction] has to be
16330Sstevel@tonic-gate * executed to obtain a complete image of the
16340Sstevel@tonic-gate * processor's caching systems."
16350Sstevel@tonic-gate *
16360Sstevel@tonic-gate * How *do* they make this stuff up?
16370Sstevel@tonic-gate */
16380Sstevel@tonic-gate cpi->cpi_ncache = sizeof (*cp) *
16390Sstevel@tonic-gate BITX(cp->cp_eax, 7, 0);
16400Sstevel@tonic-gate if (cpi->cpi_ncache == 0)
16410Sstevel@tonic-gate break;
16420Sstevel@tonic-gate cpi->cpi_ncache--; /* skip count byte */
16430Sstevel@tonic-gate
16440Sstevel@tonic-gate /*
16450Sstevel@tonic-gate * Well, for now, rather than attempt to implement
16460Sstevel@tonic-gate * this slightly dubious algorithm, we just look
16470Sstevel@tonic-gate * at the first 15 ..
16480Sstevel@tonic-gate */
16490Sstevel@tonic-gate if (cpi->cpi_ncache > (sizeof (*cp) - 1))
16500Sstevel@tonic-gate cpi->cpi_ncache = sizeof (*cp) - 1;
16510Sstevel@tonic-gate
16520Sstevel@tonic-gate dp = cpi->cpi_cacheinfo;
16530Sstevel@tonic-gate if (BITX(cp->cp_eax, 31, 31) == 0) {
16540Sstevel@tonic-gate uint8_t *p = (void *)&cp->cp_eax;
16556317Skk208521 for (i = 1; i < 4; i++)
16560Sstevel@tonic-gate if (p[i] != 0)
16570Sstevel@tonic-gate *dp++ = p[i];
16580Sstevel@tonic-gate }
16590Sstevel@tonic-gate if (BITX(cp->cp_ebx, 31, 31) == 0) {
16600Sstevel@tonic-gate uint8_t *p = (void *)&cp->cp_ebx;
16610Sstevel@tonic-gate for (i = 0; i < 4; i++)
16620Sstevel@tonic-gate if (p[i] != 0)
16630Sstevel@tonic-gate *dp++ = p[i];
16640Sstevel@tonic-gate }
16650Sstevel@tonic-gate if (BITX(cp->cp_ecx, 31, 31) == 0) {
16660Sstevel@tonic-gate uint8_t *p = (void *)&cp->cp_ecx;
16670Sstevel@tonic-gate for (i = 0; i < 4; i++)
16680Sstevel@tonic-gate if (p[i] != 0)
16690Sstevel@tonic-gate *dp++ = p[i];
16700Sstevel@tonic-gate }
16710Sstevel@tonic-gate if (BITX(cp->cp_edx, 31, 31) == 0) {
16720Sstevel@tonic-gate uint8_t *p = (void *)&cp->cp_edx;
16730Sstevel@tonic-gate for (i = 0; i < 4; i++)
16740Sstevel@tonic-gate if (p[i] != 0)
16750Sstevel@tonic-gate *dp++ = p[i];
16760Sstevel@tonic-gate }
16770Sstevel@tonic-gate break;
16784481Sbholler
16790Sstevel@tonic-gate case 3: /* Processor serial number, if PSN supported */
16804481Sbholler break;
16814481Sbholler
16820Sstevel@tonic-gate case 4: /* Deterministic cache parameters */
16834481Sbholler break;
16844481Sbholler
16850Sstevel@tonic-gate case 5: /* Monitor/Mwait parameters */
16865045Sbholler {
16875045Sbholler size_t mwait_size;
16884481Sbholler
16894481Sbholler /*
16904481Sbholler * check cpi_mwait.support which was set in cpuid_pass1
16914481Sbholler */
16924481Sbholler if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT))
16934481Sbholler break;
16944481Sbholler
16955045Sbholler /*
16965045Sbholler * Protect ourselves from an insane mwait line size.
16975045Sbholler * Workaround for incomplete hardware emulator(s).
16985045Sbholler */
16995045Sbholler mwait_size = (size_t)MWAIT_SIZE_MAX(cpi);
17005045Sbholler if (mwait_size < sizeof (uint32_t) ||
17015045Sbholler !ISP2(mwait_size)) {
17025045Sbholler #if DEBUG
17035045Sbholler cmn_err(CE_NOTE, "Cannot handle cpu %d mwait "
17047798SSaurabh.Mishra@Sun.COM "size %ld", cpu->cpu_id, (long)mwait_size);
17055045Sbholler #endif
17065045Sbholler break;
17075045Sbholler }
17085045Sbholler
17094481Sbholler cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi);
17105045Sbholler cpi->cpi_mwait.mon_max = mwait_size;
17114481Sbholler if (MWAIT_EXTENSION(cpi)) {
17124481Sbholler cpi->cpi_mwait.support |= MWAIT_EXTENSIONS;
17134481Sbholler if (MWAIT_INT_ENABLE(cpi))
17144481Sbholler cpi->cpi_mwait.support |=
17154481Sbholler MWAIT_ECX_INT_ENABLE;
17164481Sbholler }
17174481Sbholler break;
17185045Sbholler }
17190Sstevel@tonic-gate default:
17200Sstevel@tonic-gate break;
17210Sstevel@tonic-gate }
17220Sstevel@tonic-gate }
17230Sstevel@tonic-gate
17247282Smishra if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) {
17257798SSaurabh.Mishra@Sun.COM struct cpuid_regs regs;
17267798SSaurabh.Mishra@Sun.COM
17277798SSaurabh.Mishra@Sun.COM cp = &regs;
17287282Smishra cp->cp_eax = 0xB;
17297798SSaurabh.Mishra@Sun.COM cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
17307282Smishra
17317282Smishra (void) __cpuid_insn(cp);
17327282Smishra
17337282Smishra /*
17347282Smishra * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
17357282Smishra * indicates that the extended topology enumeration leaf is
17367282Smishra * available.
17377282Smishra */
17387282Smishra if (cp->cp_ebx) {
17397282Smishra uint32_t x2apic_id;
17407282Smishra uint_t coreid_shift = 0;
17417282Smishra uint_t ncpu_per_core = 1;
17427282Smishra uint_t chipid_shift = 0;
17437282Smishra uint_t ncpu_per_chip = 1;
17447282Smishra uint_t i;
17457282Smishra uint_t level;
17467282Smishra
17477282Smishra for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
17487282Smishra cp->cp_eax = 0xB;
17497282Smishra cp->cp_ecx = i;
17507282Smishra
17517282Smishra (void) __cpuid_insn(cp);
17527282Smishra level = CPI_CPU_LEVEL_TYPE(cp);
17537282Smishra
17547282Smishra if (level == 1) {
17557282Smishra x2apic_id = cp->cp_edx;
17567282Smishra coreid_shift = BITX(cp->cp_eax, 4, 0);
17577282Smishra ncpu_per_core = BITX(cp->cp_ebx, 15, 0);
17587282Smishra } else if (level == 2) {
17597282Smishra x2apic_id = cp->cp_edx;
17607282Smishra chipid_shift = BITX(cp->cp_eax, 4, 0);
17617282Smishra ncpu_per_chip = BITX(cp->cp_ebx, 15, 0);
17627282Smishra }
17637282Smishra }
17647282Smishra
17657282Smishra cpi->cpi_apicid = x2apic_id;
17667282Smishra cpi->cpi_ncpu_per_chip = ncpu_per_chip;
17677282Smishra cpi->cpi_ncore_per_chip = ncpu_per_chip /
17687282Smishra ncpu_per_core;
17697282Smishra cpi->cpi_chipid = x2apic_id >> chipid_shift;
17707282Smishra cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1);
17717282Smishra cpi->cpi_coreid = x2apic_id >> coreid_shift;
17727282Smishra cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
17737282Smishra }
17747798SSaurabh.Mishra@Sun.COM
17757798SSaurabh.Mishra@Sun.COM /* Make cp NULL so that we don't stumble on others */
17767798SSaurabh.Mishra@Sun.COM cp = NULL;
17777282Smishra }
17787282Smishra
177913134Skuriakose.kuruvilla@oracle.com /*
178013134Skuriakose.kuruvilla@oracle.com * XSAVE enumeration
178113134Skuriakose.kuruvilla@oracle.com */
178213134Skuriakose.kuruvilla@oracle.com if (cpi->cpi_maxeax >= 0xD && cpi->cpi_vendor == X86_VENDOR_Intel) {
178313134Skuriakose.kuruvilla@oracle.com struct cpuid_regs regs;
178413134Skuriakose.kuruvilla@oracle.com boolean_t cpuid_d_valid = B_TRUE;
178513134Skuriakose.kuruvilla@oracle.com
178613134Skuriakose.kuruvilla@oracle.com cp = &regs;
178713134Skuriakose.kuruvilla@oracle.com cp->cp_eax = 0xD;
178813134Skuriakose.kuruvilla@oracle.com cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
178913134Skuriakose.kuruvilla@oracle.com
179013134Skuriakose.kuruvilla@oracle.com (void) __cpuid_insn(cp);
179113134Skuriakose.kuruvilla@oracle.com
179213134Skuriakose.kuruvilla@oracle.com /*
179313134Skuriakose.kuruvilla@oracle.com * Sanity checks for debug
179413134Skuriakose.kuruvilla@oracle.com */
179513134Skuriakose.kuruvilla@oracle.com if ((cp->cp_eax & XFEATURE_LEGACY_FP) == 0 ||
179613134Skuriakose.kuruvilla@oracle.com (cp->cp_eax & XFEATURE_SSE) == 0) {
179713134Skuriakose.kuruvilla@oracle.com cpuid_d_valid = B_FALSE;
179813134Skuriakose.kuruvilla@oracle.com }
179913134Skuriakose.kuruvilla@oracle.com
180013134Skuriakose.kuruvilla@oracle.com cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
180113134Skuriakose.kuruvilla@oracle.com cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
180213134Skuriakose.kuruvilla@oracle.com cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;
180313134Skuriakose.kuruvilla@oracle.com
180413134Skuriakose.kuruvilla@oracle.com /*
180513134Skuriakose.kuruvilla@oracle.com * If the hw supports AVX, get the size and offset in the save
180613134Skuriakose.kuruvilla@oracle.com * area for the ymm state.
180713134Skuriakose.kuruvilla@oracle.com */
180813134Skuriakose.kuruvilla@oracle.com if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) {
180913134Skuriakose.kuruvilla@oracle.com cp->cp_eax = 0xD;
181013134Skuriakose.kuruvilla@oracle.com cp->cp_ecx = 2;
181113134Skuriakose.kuruvilla@oracle.com cp->cp_edx = cp->cp_ebx = 0;
181213134Skuriakose.kuruvilla@oracle.com
181313134Skuriakose.kuruvilla@oracle.com (void) __cpuid_insn(cp);
181413134Skuriakose.kuruvilla@oracle.com
181513134Skuriakose.kuruvilla@oracle.com if (cp->cp_ebx != CPUID_LEAFD_2_YMM_OFFSET ||
181613134Skuriakose.kuruvilla@oracle.com cp->cp_eax != CPUID_LEAFD_2_YMM_SIZE) {
181713134Skuriakose.kuruvilla@oracle.com cpuid_d_valid = B_FALSE;
181813134Skuriakose.kuruvilla@oracle.com }
181913134Skuriakose.kuruvilla@oracle.com
182013134Skuriakose.kuruvilla@oracle.com cpi->cpi_xsave.ymm_size = cp->cp_eax;
182113134Skuriakose.kuruvilla@oracle.com cpi->cpi_xsave.ymm_offset = cp->cp_ebx;
182213134Skuriakose.kuruvilla@oracle.com }
182313134Skuriakose.kuruvilla@oracle.com
182413134Skuriakose.kuruvilla@oracle.com if (is_x86_feature(x86_featureset, X86FSET_XSAVE)) {
182513134Skuriakose.kuruvilla@oracle.com xsave_state_size = 0;
182613134Skuriakose.kuruvilla@oracle.com } else if (cpuid_d_valid) {
182713134Skuriakose.kuruvilla@oracle.com xsave_state_size = cpi->cpi_xsave.xsav_max_size;
182813134Skuriakose.kuruvilla@oracle.com } else {
182913134Skuriakose.kuruvilla@oracle.com /* Broken CPUID 0xD, probably in HVM */
183013134Skuriakose.kuruvilla@oracle.com cmn_err(CE_WARN, "cpu%d: CPUID.0xD returns invalid "
183113134Skuriakose.kuruvilla@oracle.com "value: hw_low = %d, hw_high = %d, xsave_size = %d"
183213134Skuriakose.kuruvilla@oracle.com ", ymm_size = %d, ymm_offset = %d\n",
183313134Skuriakose.kuruvilla@oracle.com cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low,
183413134Skuriakose.kuruvilla@oracle.com cpi->cpi_xsave.xsav_hw_features_high,
183513134Skuriakose.kuruvilla@oracle.com (int)cpi->cpi_xsave.xsav_max_size,
183613134Skuriakose.kuruvilla@oracle.com (int)cpi->cpi_xsave.ymm_size,
183713134Skuriakose.kuruvilla@oracle.com (int)cpi->cpi_xsave.ymm_offset);
183813134Skuriakose.kuruvilla@oracle.com
183913134Skuriakose.kuruvilla@oracle.com if (xsave_state_size != 0) {
184013134Skuriakose.kuruvilla@oracle.com /*
184113134Skuriakose.kuruvilla@oracle.com * This must be a non-boot CPU. We cannot
184213134Skuriakose.kuruvilla@oracle.com * continue, because boot cpu has already
184313134Skuriakose.kuruvilla@oracle.com * enabled XSAVE.
184413134Skuriakose.kuruvilla@oracle.com */
184513134Skuriakose.kuruvilla@oracle.com ASSERT(cpu->cpu_id != 0);
184613134Skuriakose.kuruvilla@oracle.com cmn_err(CE_PANIC, "cpu%d: we have already "
184713134Skuriakose.kuruvilla@oracle.com "enabled XSAVE on boot cpu, cannot "
184813134Skuriakose.kuruvilla@oracle.com "continue.", cpu->cpu_id);
184913134Skuriakose.kuruvilla@oracle.com } else {
185013134Skuriakose.kuruvilla@oracle.com /*
185113134Skuriakose.kuruvilla@oracle.com * Must be from boot CPU, OK to disable XSAVE.
185213134Skuriakose.kuruvilla@oracle.com */
185313134Skuriakose.kuruvilla@oracle.com ASSERT(cpu->cpu_id == 0);
185413134Skuriakose.kuruvilla@oracle.com remove_x86_feature(x86_featureset,
185513134Skuriakose.kuruvilla@oracle.com X86FSET_XSAVE);
185613134Skuriakose.kuruvilla@oracle.com remove_x86_feature(x86_featureset, X86FSET_AVX);
185713134Skuriakose.kuruvilla@oracle.com CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_XSAVE;
185813134Skuriakose.kuruvilla@oracle.com CPI_FEATURES_ECX(cpi) &= ~CPUID_INTC_ECX_AVX;
185913134Skuriakose.kuruvilla@oracle.com xsave_force_disable = B_TRUE;
186013134Skuriakose.kuruvilla@oracle.com }
186113134Skuriakose.kuruvilla@oracle.com }
186213134Skuriakose.kuruvilla@oracle.com }
186313134Skuriakose.kuruvilla@oracle.com
186413134Skuriakose.kuruvilla@oracle.com
18650Sstevel@tonic-gate if ((cpi->cpi_xmaxeax & 0x80000000) == 0)
18660Sstevel@tonic-gate goto pass2_done;
18670Sstevel@tonic-gate
18680Sstevel@tonic-gate if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD)
18690Sstevel@tonic-gate nmax = NMAX_CPI_EXTD;
18700Sstevel@tonic-gate /*
18710Sstevel@tonic-gate * Copy the extended properties, fixing them as we go.
18720Sstevel@tonic-gate * (We already handled n == 0 and n == 1 in pass 1)
18730Sstevel@tonic-gate */
18740Sstevel@tonic-gate iptr = (void *)cpi->cpi_brandstr;
18750Sstevel@tonic-gate for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
18761228Sandrei cp->cp_eax = 0x80000000 + n;
18771228Sandrei (void) __cpuid_insn(cp);
18783446Smrj platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp);
18790Sstevel@tonic-gate switch (n) {
18800Sstevel@tonic-gate case 2:
18810Sstevel@tonic-gate case 3:
18820Sstevel@tonic-gate case 4:
18830Sstevel@tonic-gate /*
18840Sstevel@tonic-gate * Extract the brand string
18850Sstevel@tonic-gate */
18860Sstevel@tonic-gate *iptr++ = cp->cp_eax;
18870Sstevel@tonic-gate *iptr++ = cp->cp_ebx;
18880Sstevel@tonic-gate *iptr++ = cp->cp_ecx;
18890Sstevel@tonic-gate *iptr++ = cp->cp_edx;
18900Sstevel@tonic-gate break;
18910Sstevel@tonic-gate case 5:
18920Sstevel@tonic-gate switch (cpi->cpi_vendor) {
18930Sstevel@tonic-gate case X86_VENDOR_AMD:
18940Sstevel@tonic-gate /*
18950Sstevel@tonic-gate * The Athlon and Duron were the first
18960Sstevel@tonic-gate * parts to report the sizes of the
18970Sstevel@tonic-gate * TLB for large pages. Before then,
18980Sstevel@tonic-gate * we don't trust the data.
18990Sstevel@tonic-gate */
19000Sstevel@tonic-gate if (cpi->cpi_family < 6 ||
19010Sstevel@tonic-gate (cpi->cpi_family == 6 &&
19020Sstevel@tonic-gate cpi->cpi_model < 1))
19030Sstevel@tonic-gate cp->cp_eax = 0;
19040Sstevel@tonic-gate break;
19050Sstevel@tonic-gate default:
19060Sstevel@tonic-gate break;
19070Sstevel@tonic-gate }
19080Sstevel@tonic-gate break;
19090Sstevel@tonic-gate case 6:
19100Sstevel@tonic-gate switch (cpi->cpi_vendor) {
19110Sstevel@tonic-gate case X86_VENDOR_AMD:
19120Sstevel@tonic-gate /*
19130Sstevel@tonic-gate * The Athlon and Duron were the first
19140Sstevel@tonic-gate * AMD parts with L2 TLB's.
19150Sstevel@tonic-gate * Before then, don't trust the data.
19160Sstevel@tonic-gate */
19170Sstevel@tonic-gate if (cpi->cpi_family < 6 ||
19180Sstevel@tonic-gate cpi->cpi_family == 6 &&
19190Sstevel@tonic-gate cpi->cpi_model < 1)
19200Sstevel@tonic-gate cp->cp_eax = cp->cp_ebx = 0;
19210Sstevel@tonic-gate /*
19220Sstevel@tonic-gate * AMD Duron rev A0 reports L2
19230Sstevel@tonic-gate * cache size incorrectly as 1K
19240Sstevel@tonic-gate * when it is really 64K
19250Sstevel@tonic-gate */
19260Sstevel@tonic-gate if (cpi->cpi_family == 6 &&
19270Sstevel@tonic-gate cpi->cpi_model == 3 &&
19280Sstevel@tonic-gate cpi->cpi_step == 0) {
19290Sstevel@tonic-gate cp->cp_ecx &= 0xffff;
19300Sstevel@tonic-gate cp->cp_ecx |= 0x400000;
19310Sstevel@tonic-gate }
19320Sstevel@tonic-gate break;
19330Sstevel@tonic-gate case X86_VENDOR_Cyrix: /* VIA C3 */
19340Sstevel@tonic-gate /*
19350Sstevel@tonic-gate * VIA C3 processors are a bit messed
19360Sstevel@tonic-gate * up w.r.t. encoding cache sizes in %ecx
19370Sstevel@tonic-gate */
19380Sstevel@tonic-gate if (cpi->cpi_family != 6)
19390Sstevel@tonic-gate break;
19400Sstevel@tonic-gate /*
19410Sstevel@tonic-gate * model 7 and 8 were incorrectly encoded
19420Sstevel@tonic-gate *
19430Sstevel@tonic-gate * xxx is model 8 really broken?
19440Sstevel@tonic-gate */
19450Sstevel@tonic-gate if (cpi->cpi_model == 7 ||
19460Sstevel@tonic-gate cpi->cpi_model == 8)
19470Sstevel@tonic-gate cp->cp_ecx =
19480Sstevel@tonic-gate BITX(cp->cp_ecx, 31, 24) << 16 |
19490Sstevel@tonic-gate BITX(cp->cp_ecx, 23, 16) << 12 |
19500Sstevel@tonic-gate BITX(cp->cp_ecx, 15, 8) << 8 |
19510Sstevel@tonic-gate BITX(cp->cp_ecx, 7, 0);
19520Sstevel@tonic-gate /*
19530Sstevel@tonic-gate * model 9 stepping 1 has wrong associativity
19540Sstevel@tonic-gate */
19550Sstevel@tonic-gate if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
19560Sstevel@tonic-gate cp->cp_ecx |= 8 << 12;
19570Sstevel@tonic-gate break;
19580Sstevel@tonic-gate case X86_VENDOR_Intel:
19590Sstevel@tonic-gate /*
19600Sstevel@tonic-gate * Extended L2 Cache features function.
19610Sstevel@tonic-gate * First appeared on Prescott.
19620Sstevel@tonic-gate */
19630Sstevel@tonic-gate default:
19640Sstevel@tonic-gate break;
19650Sstevel@tonic-gate }
19660Sstevel@tonic-gate break;
19670Sstevel@tonic-gate default:
19680Sstevel@tonic-gate break;
19690Sstevel@tonic-gate }
19700Sstevel@tonic-gate }
19710Sstevel@tonic-gate
19720Sstevel@tonic-gate pass2_done:
19730Sstevel@tonic-gate cpi->cpi_pass = 2;
19740Sstevel@tonic-gate }
19750Sstevel@tonic-gate
19760Sstevel@tonic-gate static const char *
intel_cpubrand(const struct cpuid_info * cpi)19770Sstevel@tonic-gate intel_cpubrand(const struct cpuid_info *cpi)
19780Sstevel@tonic-gate {
19790Sstevel@tonic-gate int i;
19800Sstevel@tonic-gate
198112826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
19820Sstevel@tonic-gate cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
19830Sstevel@tonic-gate return ("i486");
19840Sstevel@tonic-gate
19850Sstevel@tonic-gate switch (cpi->cpi_family) {
19860Sstevel@tonic-gate case 5:
19870Sstevel@tonic-gate return ("Intel Pentium(r)");
19880Sstevel@tonic-gate case 6:
19890Sstevel@tonic-gate switch (cpi->cpi_model) {
19900Sstevel@tonic-gate uint_t celeron, xeon;
19911228Sandrei const struct cpuid_regs *cp;
19920Sstevel@tonic-gate case 0:
19930Sstevel@tonic-gate case 1:
19940Sstevel@tonic-gate case 2:
19950Sstevel@tonic-gate return ("Intel Pentium(r) Pro");
19960Sstevel@tonic-gate case 3:
19970Sstevel@tonic-gate case 4:
19980Sstevel@tonic-gate return ("Intel Pentium(r) II");
19990Sstevel@tonic-gate case 6:
20000Sstevel@tonic-gate return ("Intel Celeron(r)");
20010Sstevel@tonic-gate case 5:
20020Sstevel@tonic-gate case 7:
20030Sstevel@tonic-gate celeron = xeon = 0;
20040Sstevel@tonic-gate cp = &cpi->cpi_std[2]; /* cache info */
20050Sstevel@tonic-gate
20066317Skk208521 for (i = 1; i < 4; i++) {
20070Sstevel@tonic-gate uint_t tmp;
20080Sstevel@tonic-gate
20090Sstevel@tonic-gate tmp = (cp->cp_eax >> (8 * i)) & 0xff;
20100Sstevel@tonic-gate if (tmp == 0x40)
20110Sstevel@tonic-gate celeron++;
20120Sstevel@tonic-gate if (tmp >= 0x44 && tmp <= 0x45)
20130Sstevel@tonic-gate xeon++;
20140Sstevel@tonic-gate }
20150Sstevel@tonic-gate
20160Sstevel@tonic-gate for (i = 0; i < 2; i++) {
20170Sstevel@tonic-gate uint_t tmp;
20180Sstevel@tonic-gate
20190Sstevel@tonic-gate tmp = (cp->cp_ebx >> (8 * i)) & 0xff;
20200Sstevel@tonic-gate if (tmp == 0x40)
20210Sstevel@tonic-gate celeron++;
20220Sstevel@tonic-gate else if (tmp >= 0x44 && tmp <= 0x45)
20230Sstevel@tonic-gate xeon++;
20240Sstevel@tonic-gate }
20250Sstevel@tonic-gate
20260Sstevel@tonic-gate for (i = 0; i < 4; i++) {
20270Sstevel@tonic-gate uint_t tmp;
20280Sstevel@tonic-gate
20290Sstevel@tonic-gate tmp = (cp->cp_ecx >> (8 * i)) & 0xff;
20300Sstevel@tonic-gate if (tmp == 0x40)
20310Sstevel@tonic-gate celeron++;
20320Sstevel@tonic-gate else if (tmp >= 0x44 && tmp <= 0x45)
20330Sstevel@tonic-gate xeon++;
20340Sstevel@tonic-gate }
20350Sstevel@tonic-gate
20360Sstevel@tonic-gate for (i = 0; i < 4; i++) {
20370Sstevel@tonic-gate uint_t tmp;
20380Sstevel@tonic-gate
20390Sstevel@tonic-gate tmp = (cp->cp_edx >> (8 * i)) & 0xff;
20400Sstevel@tonic-gate if (tmp == 0x40)
20410Sstevel@tonic-gate celeron++;
20420Sstevel@tonic-gate else if (tmp >= 0x44 && tmp <= 0x45)
20430Sstevel@tonic-gate xeon++;
20440Sstevel@tonic-gate }
20450Sstevel@tonic-gate
20460Sstevel@tonic-gate if (celeron)
20470Sstevel@tonic-gate return ("Intel Celeron(r)");
20480Sstevel@tonic-gate if (xeon)
20490Sstevel@tonic-gate return (cpi->cpi_model == 5 ?
20500Sstevel@tonic-gate "Intel Pentium(r) II Xeon(tm)" :
20510Sstevel@tonic-gate "Intel Pentium(r) III Xeon(tm)");
20520Sstevel@tonic-gate return (cpi->cpi_model == 5 ?
20530Sstevel@tonic-gate "Intel Pentium(r) II or Pentium(r) II Xeon(tm)" :
20540Sstevel@tonic-gate "Intel Pentium(r) III or Pentium(r) III Xeon(tm)");
20550Sstevel@tonic-gate default:
20560Sstevel@tonic-gate break;
20570Sstevel@tonic-gate }
20580Sstevel@tonic-gate default:
20590Sstevel@tonic-gate break;
20600Sstevel@tonic-gate }
20610Sstevel@tonic-gate
20621975Sdmick /* BrandID is present if the field is nonzero */
20631975Sdmick if (cpi->cpi_brandid != 0) {
20640Sstevel@tonic-gate static const struct {
20650Sstevel@tonic-gate uint_t bt_bid;
20660Sstevel@tonic-gate const char *bt_str;
20670Sstevel@tonic-gate } brand_tbl[] = {
20680Sstevel@tonic-gate { 0x1, "Intel(r) Celeron(r)" },
20690Sstevel@tonic-gate { 0x2, "Intel(r) Pentium(r) III" },
20700Sstevel@tonic-gate { 0x3, "Intel(r) Pentium(r) III Xeon(tm)" },
20710Sstevel@tonic-gate { 0x4, "Intel(r) Pentium(r) III" },
20720Sstevel@tonic-gate { 0x6, "Mobile Intel(r) Pentium(r) III" },
20730Sstevel@tonic-gate { 0x7, "Mobile Intel(r) Celeron(r)" },
20740Sstevel@tonic-gate { 0x8, "Intel(r) Pentium(r) 4" },
20750Sstevel@tonic-gate { 0x9, "Intel(r) Pentium(r) 4" },
20760Sstevel@tonic-gate { 0xa, "Intel(r) Celeron(r)" },
20770Sstevel@tonic-gate { 0xb, "Intel(r) Xeon(tm)" },
20780Sstevel@tonic-gate { 0xc, "Intel(r) Xeon(tm) MP" },
20790Sstevel@tonic-gate { 0xe, "Mobile Intel(r) Pentium(r) 4" },
20801975Sdmick { 0xf, "Mobile Intel(r) Celeron(r)" },
20811975Sdmick { 0x11, "Mobile Genuine Intel(r)" },
20821975Sdmick { 0x12, "Intel(r) Celeron(r) M" },
20831975Sdmick { 0x13, "Mobile Intel(r) Celeron(r)" },
20841975Sdmick { 0x14, "Intel(r) Celeron(r)" },
20851975Sdmick { 0x15, "Mobile Genuine Intel(r)" },
20861975Sdmick { 0x16, "Intel(r) Pentium(r) M" },
20871975Sdmick { 0x17, "Mobile Intel(r) Celeron(r)" }
20880Sstevel@tonic-gate };
20890Sstevel@tonic-gate uint_t btblmax = sizeof (brand_tbl) / sizeof (brand_tbl[0]);
20900Sstevel@tonic-gate uint_t sgn;
20910Sstevel@tonic-gate
20920Sstevel@tonic-gate sgn = (cpi->cpi_family << 8) |
20930Sstevel@tonic-gate (cpi->cpi_model << 4) | cpi->cpi_step;
20940Sstevel@tonic-gate
20950Sstevel@tonic-gate for (i = 0; i < btblmax; i++)
20960Sstevel@tonic-gate if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
20970Sstevel@tonic-gate break;
20980Sstevel@tonic-gate if (i < btblmax) {
20990Sstevel@tonic-gate if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
21000Sstevel@tonic-gate return ("Intel(r) Celeron(r)");
21010Sstevel@tonic-gate if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
21020Sstevel@tonic-gate return ("Intel(r) Xeon(tm) MP");
21030Sstevel@tonic-gate if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
21040Sstevel@tonic-gate return ("Intel(r) Xeon(tm)");
21050Sstevel@tonic-gate return (brand_tbl[i].bt_str);
21060Sstevel@tonic-gate }
21070Sstevel@tonic-gate }
21080Sstevel@tonic-gate
21090Sstevel@tonic-gate return (NULL);
21100Sstevel@tonic-gate }
21110Sstevel@tonic-gate
21120Sstevel@tonic-gate static const char *
amd_cpubrand(const struct cpuid_info * cpi)21130Sstevel@tonic-gate amd_cpubrand(const struct cpuid_info *cpi)
21140Sstevel@tonic-gate {
211512826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
21160Sstevel@tonic-gate cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
21170Sstevel@tonic-gate return ("i486 compatible");
21180Sstevel@tonic-gate
21190Sstevel@tonic-gate switch (cpi->cpi_family) {
21200Sstevel@tonic-gate case 5:
21210Sstevel@tonic-gate switch (cpi->cpi_model) {
21220Sstevel@tonic-gate case 0:
21230Sstevel@tonic-gate case 1:
21240Sstevel@tonic-gate case 2:
21250Sstevel@tonic-gate case 3:
21260Sstevel@tonic-gate case 4:
21270Sstevel@tonic-gate case 5:
21280Sstevel@tonic-gate return ("AMD-K5(r)");
21290Sstevel@tonic-gate case 6:
21300Sstevel@tonic-gate case 7:
21310Sstevel@tonic-gate return ("AMD-K6(r)");
21320Sstevel@tonic-gate case 8:
21330Sstevel@tonic-gate return ("AMD-K6(r)-2");
21340Sstevel@tonic-gate case 9:
21350Sstevel@tonic-gate return ("AMD-K6(r)-III");
21360Sstevel@tonic-gate default:
21370Sstevel@tonic-gate return ("AMD (family 5)");
21380Sstevel@tonic-gate }
21390Sstevel@tonic-gate case 6:
21400Sstevel@tonic-gate switch (cpi->cpi_model) {
21410Sstevel@tonic-gate case 1:
21420Sstevel@tonic-gate return ("AMD-K7(tm)");
21430Sstevel@tonic-gate case 0:
21440Sstevel@tonic-gate case 2:
21450Sstevel@tonic-gate case 4:
21460Sstevel@tonic-gate return ("AMD Athlon(tm)");
21470Sstevel@tonic-gate case 3:
21480Sstevel@tonic-gate case 7:
21490Sstevel@tonic-gate return ("AMD Duron(tm)");
21500Sstevel@tonic-gate case 6:
21510Sstevel@tonic-gate case 8:
21520Sstevel@tonic-gate case 10:
21530Sstevel@tonic-gate /*
21540Sstevel@tonic-gate * Use the L2 cache size to distinguish
21550Sstevel@tonic-gate */
21560Sstevel@tonic-gate return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
21570Sstevel@tonic-gate "AMD Athlon(tm)" : "AMD Duron(tm)");
21580Sstevel@tonic-gate default:
21590Sstevel@tonic-gate return ("AMD (family 6)");
21600Sstevel@tonic-gate }
21610Sstevel@tonic-gate default:
21620Sstevel@tonic-gate break;
21630Sstevel@tonic-gate }
21640Sstevel@tonic-gate
21650Sstevel@tonic-gate if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
21660Sstevel@tonic-gate cpi->cpi_brandid != 0) {
21670Sstevel@tonic-gate switch (BITX(cpi->cpi_brandid, 7, 5)) {
21680Sstevel@tonic-gate case 3:
21690Sstevel@tonic-gate return ("AMD Opteron(tm) UP 1xx");
21700Sstevel@tonic-gate case 4:
21710Sstevel@tonic-gate return ("AMD Opteron(tm) DP 2xx");
21720Sstevel@tonic-gate case 5:
21730Sstevel@tonic-gate return ("AMD Opteron(tm) MP 8xx");
21740Sstevel@tonic-gate default:
21750Sstevel@tonic-gate return ("AMD Opteron(tm)");
21760Sstevel@tonic-gate }
21770Sstevel@tonic-gate }
21780Sstevel@tonic-gate
21790Sstevel@tonic-gate return (NULL);
21800Sstevel@tonic-gate }
21810Sstevel@tonic-gate
21820Sstevel@tonic-gate static const char *
cyrix_cpubrand(struct cpuid_info * cpi,uint_t type)21830Sstevel@tonic-gate cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
21840Sstevel@tonic-gate {
218512826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
21860Sstevel@tonic-gate cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
21870Sstevel@tonic-gate type == X86_TYPE_CYRIX_486)
21880Sstevel@tonic-gate return ("i486 compatible");
21890Sstevel@tonic-gate
21900Sstevel@tonic-gate switch (type) {
21910Sstevel@tonic-gate case X86_TYPE_CYRIX_6x86:
21920Sstevel@tonic-gate return ("Cyrix 6x86");
21930Sstevel@tonic-gate case X86_TYPE_CYRIX_6x86L:
21940Sstevel@tonic-gate return ("Cyrix 6x86L");
21950Sstevel@tonic-gate case X86_TYPE_CYRIX_6x86MX:
21960Sstevel@tonic-gate return ("Cyrix 6x86MX");
21970Sstevel@tonic-gate case X86_TYPE_CYRIX_GXm:
21980Sstevel@tonic-gate return ("Cyrix GXm");
21990Sstevel@tonic-gate case X86_TYPE_CYRIX_MediaGX:
22000Sstevel@tonic-gate return ("Cyrix MediaGX");
22010Sstevel@tonic-gate case X86_TYPE_CYRIX_MII:
22020Sstevel@tonic-gate return ("Cyrix M2");
22030Sstevel@tonic-gate case X86_TYPE_VIA_CYRIX_III:
22040Sstevel@tonic-gate return ("VIA Cyrix M3");
22050Sstevel@tonic-gate default:
22060Sstevel@tonic-gate /*
22070Sstevel@tonic-gate * Have another wild guess ..
22080Sstevel@tonic-gate */
22090Sstevel@tonic-gate if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
22100Sstevel@tonic-gate return ("Cyrix 5x86");
22110Sstevel@tonic-gate else if (cpi->cpi_family == 5) {
22120Sstevel@tonic-gate switch (cpi->cpi_model) {
22130Sstevel@tonic-gate case 2:
22140Sstevel@tonic-gate return ("Cyrix 6x86"); /* Cyrix M1 */
22150Sstevel@tonic-gate case 4:
22160Sstevel@tonic-gate return ("Cyrix MediaGX");
22170Sstevel@tonic-gate default:
22180Sstevel@tonic-gate break;
22190Sstevel@tonic-gate }
22200Sstevel@tonic-gate } else if (cpi->cpi_family == 6) {
22210Sstevel@tonic-gate switch (cpi->cpi_model) {
22220Sstevel@tonic-gate case 0:
22230Sstevel@tonic-gate return ("Cyrix 6x86MX"); /* Cyrix M2? */
22240Sstevel@tonic-gate case 5:
22250Sstevel@tonic-gate case 6:
22260Sstevel@tonic-gate case 7:
22270Sstevel@tonic-gate case 8:
22280Sstevel@tonic-gate case 9:
22290Sstevel@tonic-gate return ("VIA C3");
22300Sstevel@tonic-gate default:
22310Sstevel@tonic-gate break;
22320Sstevel@tonic-gate }
22330Sstevel@tonic-gate }
22340Sstevel@tonic-gate break;
22350Sstevel@tonic-gate }
22360Sstevel@tonic-gate return (NULL);
22370Sstevel@tonic-gate }
22380Sstevel@tonic-gate
22390Sstevel@tonic-gate /*
22400Sstevel@tonic-gate * This only gets called in the case that the CPU extended
22410Sstevel@tonic-gate * feature brand string (0x80000002, 0x80000003, 0x80000004)
22420Sstevel@tonic-gate * aren't available, or contain null bytes for some reason.
22430Sstevel@tonic-gate */
22440Sstevel@tonic-gate static void
fabricate_brandstr(struct cpuid_info * cpi)22450Sstevel@tonic-gate fabricate_brandstr(struct cpuid_info *cpi)
22460Sstevel@tonic-gate {
22470Sstevel@tonic-gate const char *brand = NULL;
22480Sstevel@tonic-gate
22490Sstevel@tonic-gate switch (cpi->cpi_vendor) {
22500Sstevel@tonic-gate case X86_VENDOR_Intel:
22510Sstevel@tonic-gate brand = intel_cpubrand(cpi);
22520Sstevel@tonic-gate break;
22530Sstevel@tonic-gate case X86_VENDOR_AMD:
22540Sstevel@tonic-gate brand = amd_cpubrand(cpi);
22550Sstevel@tonic-gate break;
22560Sstevel@tonic-gate case X86_VENDOR_Cyrix:
22570Sstevel@tonic-gate brand = cyrix_cpubrand(cpi, x86_type);
22580Sstevel@tonic-gate break;
22590Sstevel@tonic-gate case X86_VENDOR_NexGen:
22600Sstevel@tonic-gate if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
22610Sstevel@tonic-gate brand = "NexGen Nx586";
22620Sstevel@tonic-gate break;
22630Sstevel@tonic-gate case X86_VENDOR_Centaur:
22640Sstevel@tonic-gate if (cpi->cpi_family == 5)
22650Sstevel@tonic-gate switch (cpi->cpi_model) {
22660Sstevel@tonic-gate case 4:
22670Sstevel@tonic-gate brand = "Centaur C6";
22680Sstevel@tonic-gate break;
22690Sstevel@tonic-gate case 8:
22700Sstevel@tonic-gate brand = "Centaur C2";
22710Sstevel@tonic-gate break;
22720Sstevel@tonic-gate case 9:
22730Sstevel@tonic-gate brand = "Centaur C3";
22740Sstevel@tonic-gate break;
22750Sstevel@tonic-gate default:
22760Sstevel@tonic-gate break;
22770Sstevel@tonic-gate }
22780Sstevel@tonic-gate break;
22790Sstevel@tonic-gate case X86_VENDOR_Rise:
22800Sstevel@tonic-gate if (cpi->cpi_family == 5 &&
22810Sstevel@tonic-gate (cpi->cpi_model == 0 || cpi->cpi_model == 2))
22820Sstevel@tonic-gate brand = "Rise mP6";
22830Sstevel@tonic-gate break;
22840Sstevel@tonic-gate case X86_VENDOR_SiS:
22850Sstevel@tonic-gate if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
22860Sstevel@tonic-gate brand = "SiS 55x";
22870Sstevel@tonic-gate break;
22880Sstevel@tonic-gate case X86_VENDOR_TM:
22890Sstevel@tonic-gate if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
22900Sstevel@tonic-gate brand = "Transmeta Crusoe TM3x00 or TM5x00";
22910Sstevel@tonic-gate break;
22920Sstevel@tonic-gate case X86_VENDOR_NSC:
22930Sstevel@tonic-gate case X86_VENDOR_UMC:
22940Sstevel@tonic-gate default:
22950Sstevel@tonic-gate break;
22960Sstevel@tonic-gate }
22970Sstevel@tonic-gate if (brand) {
22980Sstevel@tonic-gate (void) strcpy((char *)cpi->cpi_brandstr, brand);
22990Sstevel@tonic-gate return;
23000Sstevel@tonic-gate }
23010Sstevel@tonic-gate
23020Sstevel@tonic-gate /*
23030Sstevel@tonic-gate * If all else fails ...
23040Sstevel@tonic-gate */
23050Sstevel@tonic-gate (void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
23060Sstevel@tonic-gate "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
23070Sstevel@tonic-gate cpi->cpi_model, cpi->cpi_step);
23080Sstevel@tonic-gate }
23090Sstevel@tonic-gate
/*
 * This routine is called just after kernel memory allocation
 * becomes available on cpu0, and as part of mp_startup() on
 * the other cpus.
 *
 * Fixup the brand string, and collect any information from cpuid
 * that requires dynamically allocated storage to represent.
 */
/*ARGSUSED*/
void
cpuid_pass3(cpu_t *cpu)
{
	int	i, max, shft, level, size;
	struct cpuid_regs regs;
	struct cpuid_regs *cp;
	struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;

	/* The passes are strictly ordered; pass 2 must have run. */
	ASSERT(cpi->cpi_pass == 2);

	/*
	 * Function 4: Deterministic cache parameters
	 *
	 * Take this opportunity to detect the number of threads
	 * sharing the last level cache, and construct a corresponding
	 * cache id. The respective cpuid_info members are initialized
	 * to the default case of "no last level cache sharing".
	 */
	cpi->cpi_ncpu_shr_last_cache = 1;
	cpi->cpi_last_lvl_cacheid = cpu->cpu_id;

	if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) {

		/*
		 * Find the # of elements (size) returned by fn 4, and along
		 * the way detect last level cache sharing details.
		 * Each %ecx index describes one cache; a cache type of 0
		 * marks the end of the list.
		 */
		bzero(&regs, sizeof (regs));
		cp = &regs;
		for (i = 0, max = 0; i < CPI_FN4_ECX_MAX; i++) {
			cp->cp_eax = 4;
			cp->cp_ecx = i;

			(void) __cpuid_insn(cp);

			if (CPI_CACHE_TYPE(cp) == 0)
				break;
			level = CPI_CACHE_LVL(cp);
			if (level > max) {
				/* deepest level seen so far == last level */
				max = level;
				cpi->cpi_ncpu_shr_last_cache =
				    CPI_NTHR_SHR_CACHE(cp) + 1;
			}
		}
		cpi->cpi_std_4_size = size = i;

		/*
		 * Allocate the cpi_std_4 array. The first element
		 * references the regs for fn 4, %ecx == 0, which
		 * cpuid_pass2() stashed in cpi->cpi_std[4].
		 */
		if (size > 0) {
			/* note: sizeof (cp) here is pointer size */
			cpi->cpi_std_4 =
			    kmem_alloc(size * sizeof (cp), KM_SLEEP);
			cpi->cpi_std_4[0] = &cpi->cpi_std[4];

			/*
			 * Allocate storage to hold the additional regs
			 * for function 4, %ecx == 1 .. cpi_std_4_size.
			 *
			 * The regs for fn 4, %ecx == 0 has already
			 * been allocated as indicated above.
			 */
			for (i = 1; i < size; i++) {
				cp = cpi->cpi_std_4[i] =
				    kmem_zalloc(sizeof (regs), KM_SLEEP);
				cp->cp_eax = 4;
				cp->cp_ecx = i;

				(void) __cpuid_insn(cp);
			}
		}
		/*
		 * Determine the number of bits needed to represent
		 * the number of CPUs sharing the last level cache.
		 *
		 * Shift off that number of bits from the APIC id to
		 * derive the cache id.
		 */
		shft = 0;
		for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
			shft++;
		cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
	}

	/*
	 * Now fixup the brand string
	 */
	if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
		/* no extended cpuid leaves at all; synthesize a name */
		fabricate_brandstr(cpi);
	} else {

		/*
		 * If we successfully extracted a brand string from the cpuid
		 * instruction, clean it up by removing leading spaces and
		 * similar junk.
		 */
		if (cpi->cpi_brandstr[0]) {
			size_t maxlen = sizeof (cpi->cpi_brandstr);
			char *src, *dst;

			dst = src = (char *)cpi->cpi_brandstr;
			src[maxlen - 1] = '\0';	/* force NUL termination */
			/*
			 * strip leading spaces
			 */
			while (*src == ' ')
				src++;
			/*
			 * Remove any 'Genuine' or "Authentic" prefixes
			 */
			if (strncmp(src, "Genuine ", 8) == 0)
				src += 8;
			if (strncmp(src, "Authentic ", 10) == 0)
				src += 10;

			/*
			 * Now do an in-place copy.
			 * Map (R) to (r) and (TM) to (tm).
			 * The era of teletypes is long gone, and there's
			 * -really- no need to shout.
			 */
			while (*src != '\0') {
				if (src[0] == '(') {
					if (strncmp(src + 1, "R)", 2) == 0) {
						/* no NUL needed: copy resumes */
						(void) strncpy(dst, "(r)", 3);
						src += 3;
						dst += 3;
						continue;
					}
					if (strncmp(src + 1, "TM)", 3) == 0) {
						(void) strncpy(dst, "(tm)", 4);
						src += 4;
						dst += 4;
						continue;
					}
				}
				*dst++ = *src++;
			}
			*dst = '\0';

			/*
			 * Finally, remove any trailing spaces
			 */
			while (--dst > cpi->cpi_brandstr)
				if (*dst == ' ')
					*dst = '\0';
				else
					break;
		} else
			fabricate_brandstr(cpi);
	}
	cpi->cpi_pass = 3;
}
24730Sstevel@tonic-gate
24740Sstevel@tonic-gate /*
24750Sstevel@tonic-gate * This routine is called out of bind_hwcap() much later in the life
24760Sstevel@tonic-gate * of the kernel (post_startup()). The job of this routine is to resolve
24770Sstevel@tonic-gate * the hardware feature support and kernel support for those features into
24780Sstevel@tonic-gate * what we're actually going to tell applications via the aux vector.
24790Sstevel@tonic-gate */
24800Sstevel@tonic-gate uint_t
cpuid_pass4(cpu_t * cpu)24810Sstevel@tonic-gate cpuid_pass4(cpu_t *cpu)
24820Sstevel@tonic-gate {
24830Sstevel@tonic-gate struct cpuid_info *cpi;
24840Sstevel@tonic-gate uint_t hwcap_flags = 0;
24850Sstevel@tonic-gate
24860Sstevel@tonic-gate if (cpu == NULL)
24870Sstevel@tonic-gate cpu = CPU;
24880Sstevel@tonic-gate cpi = cpu->cpu_m.mcpu_cpi;
24890Sstevel@tonic-gate
24900Sstevel@tonic-gate ASSERT(cpi->cpi_pass == 3);
24910Sstevel@tonic-gate
24920Sstevel@tonic-gate if (cpi->cpi_maxeax >= 1) {
24930Sstevel@tonic-gate uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
24940Sstevel@tonic-gate uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];
24950Sstevel@tonic-gate
24960Sstevel@tonic-gate *edx = CPI_FEATURES_EDX(cpi);
24970Sstevel@tonic-gate *ecx = CPI_FEATURES_ECX(cpi);
24980Sstevel@tonic-gate
24990Sstevel@tonic-gate /*
25000Sstevel@tonic-gate * [these require explicit kernel support]
25010Sstevel@tonic-gate */
250212826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_SEP))
25030Sstevel@tonic-gate *edx &= ~CPUID_INTC_EDX_SEP;
25040Sstevel@tonic-gate
250512826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_SSE))
25060Sstevel@tonic-gate *edx &= ~(CPUID_INTC_EDX_FXSR|CPUID_INTC_EDX_SSE);
250712826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
25080Sstevel@tonic-gate *edx &= ~CPUID_INTC_EDX_SSE2;
25090Sstevel@tonic-gate
251012826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_HTT))
25110Sstevel@tonic-gate *edx &= ~CPUID_INTC_EDX_HTT;
25120Sstevel@tonic-gate
251312826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_SSE3))
25140Sstevel@tonic-gate *ecx &= ~CPUID_INTC_ECX_SSE3;
25150Sstevel@tonic-gate
25165269Skk208521 if (cpi->cpi_vendor == X86_VENDOR_Intel) {
251712826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_SSSE3))
25185269Skk208521 *ecx &= ~CPUID_INTC_ECX_SSSE3;
251912826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_SSE4_1))
25205269Skk208521 *ecx &= ~CPUID_INTC_ECX_SSE4_1;
252112826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_SSE4_2))
25225269Skk208521 *ecx &= ~CPUID_INTC_ECX_SSE4_2;
252312826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_AES))
25249370SKuriakose.Kuruvilla@Sun.COM *ecx &= ~CPUID_INTC_ECX_AES;
252512826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_PCLMULQDQ))
252612826Skuriakose.kuruvilla@oracle.com *ecx &= ~CPUID_INTC_ECX_PCLMULQDQ;
252713134Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_XSAVE))
252813134Skuriakose.kuruvilla@oracle.com *ecx &= ~(CPUID_INTC_ECX_XSAVE |
252913134Skuriakose.kuruvilla@oracle.com CPUID_INTC_ECX_OSXSAVE);
253013134Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_AVX))
253113134Skuriakose.kuruvilla@oracle.com *ecx &= ~CPUID_INTC_ECX_AVX;
25325269Skk208521 }
25335269Skk208521
25340Sstevel@tonic-gate /*
25350Sstevel@tonic-gate * [no explicit support required beyond x87 fp context]
25360Sstevel@tonic-gate */
25370Sstevel@tonic-gate if (!fpu_exists)
25380Sstevel@tonic-gate *edx &= ~(CPUID_INTC_EDX_FPU | CPUID_INTC_EDX_MMX);
25390Sstevel@tonic-gate
25400Sstevel@tonic-gate /*
25410Sstevel@tonic-gate * Now map the supported feature vector to things that we
25420Sstevel@tonic-gate * think userland will care about.
25430Sstevel@tonic-gate */
25440Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_SEP)
25450Sstevel@tonic-gate hwcap_flags |= AV_386_SEP;
25460Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_SSE)
25470Sstevel@tonic-gate hwcap_flags |= AV_386_FXSR | AV_386_SSE;
25480Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_SSE2)
25490Sstevel@tonic-gate hwcap_flags |= AV_386_SSE2;
25500Sstevel@tonic-gate if (*ecx & CPUID_INTC_ECX_SSE3)
25510Sstevel@tonic-gate hwcap_flags |= AV_386_SSE3;
25525269Skk208521 if (cpi->cpi_vendor == X86_VENDOR_Intel) {
25535269Skk208521 if (*ecx & CPUID_INTC_ECX_SSSE3)
25545269Skk208521 hwcap_flags |= AV_386_SSSE3;
25555269Skk208521 if (*ecx & CPUID_INTC_ECX_SSE4_1)
25565269Skk208521 hwcap_flags |= AV_386_SSE4_1;
25575269Skk208521 if (*ecx & CPUID_INTC_ECX_SSE4_2)
25585269Skk208521 hwcap_flags |= AV_386_SSE4_2;
25598418SKrishnendu.Sadhukhan@Sun.COM if (*ecx & CPUID_INTC_ECX_MOVBE)
25608418SKrishnendu.Sadhukhan@Sun.COM hwcap_flags |= AV_386_MOVBE;
25619370SKuriakose.Kuruvilla@Sun.COM if (*ecx & CPUID_INTC_ECX_AES)
25629370SKuriakose.Kuruvilla@Sun.COM hwcap_flags |= AV_386_AES;
25639370SKuriakose.Kuruvilla@Sun.COM if (*ecx & CPUID_INTC_ECX_PCLMULQDQ)
25649370SKuriakose.Kuruvilla@Sun.COM hwcap_flags |= AV_386_PCLMULQDQ;
256513134Skuriakose.kuruvilla@oracle.com if ((*ecx & CPUID_INTC_ECX_XSAVE) &&
256613134Skuriakose.kuruvilla@oracle.com (*ecx & CPUID_INTC_ECX_OSXSAVE))
256713134Skuriakose.kuruvilla@oracle.com hwcap_flags |= AV_386_XSAVE;
25685269Skk208521 }
25694628Skk208521 if (*ecx & CPUID_INTC_ECX_POPCNT)
25704628Skk208521 hwcap_flags |= AV_386_POPCNT;
25710Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_FPU)
25720Sstevel@tonic-gate hwcap_flags |= AV_386_FPU;
25730Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_MMX)
25740Sstevel@tonic-gate hwcap_flags |= AV_386_MMX;
25750Sstevel@tonic-gate
25760Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_TSC)
25770Sstevel@tonic-gate hwcap_flags |= AV_386_TSC;
25780Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_CX8)
25790Sstevel@tonic-gate hwcap_flags |= AV_386_CX8;
25800Sstevel@tonic-gate if (*edx & CPUID_INTC_EDX_CMOV)
25810Sstevel@tonic-gate hwcap_flags |= AV_386_CMOV;
25820Sstevel@tonic-gate if (*ecx & CPUID_INTC_ECX_CX16)
25830Sstevel@tonic-gate hwcap_flags |= AV_386_CX16;
25840Sstevel@tonic-gate }
25850Sstevel@tonic-gate
25860Sstevel@tonic-gate if (cpi->cpi_xmaxeax < 0x80000001)
25870Sstevel@tonic-gate goto pass4_done;
25880Sstevel@tonic-gate
25890Sstevel@tonic-gate switch (cpi->cpi_vendor) {
25901228Sandrei struct cpuid_regs cp;
25913446Smrj uint32_t *edx, *ecx;
25920Sstevel@tonic-gate
25933446Smrj case X86_VENDOR_Intel:
25943446Smrj 	/*
25953446Smrj 	 * Seems like Intel duplicated what was necessary
25963446Smrj 	 * here to make the initial crop of 64-bit OS's work.
25973446Smrj 	 * Hopefully, those are the only "extended" bits
25983446Smrj 	 * they'll add.
25993446Smrj 	 */
26003446Smrj /*FALLTHROUGH*/
26013446Smrj
26020Sstevel@tonic-gate case X86_VENDOR_AMD:
26030Sstevel@tonic-gate edx = &cpi->cpi_support[AMD_EDX_FEATURES];
26043446Smrj ecx = &cpi->cpi_support[AMD_ECX_FEATURES];
26050Sstevel@tonic-gate
26060Sstevel@tonic-gate *edx = CPI_FEATURES_XTD_EDX(cpi);
26073446Smrj *ecx = CPI_FEATURES_XTD_ECX(cpi);
26083446Smrj
26093446Smrj /*
26103446Smrj * [these features require explicit kernel support]
26113446Smrj */
26123446Smrj switch (cpi->cpi_vendor) {
26133446Smrj case X86_VENDOR_Intel:
261412826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
26156657Ssudheer *edx &= ~CPUID_AMD_EDX_TSCP;
26163446Smrj break;
26173446Smrj
26183446Smrj case X86_VENDOR_AMD:
261912826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_TSCP))
26203446Smrj *edx &= ~CPUID_AMD_EDX_TSCP;
262112826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_SSE4A))
26224628Skk208521 *ecx &= ~CPUID_AMD_ECX_SSE4A;
26233446Smrj break;
26243446Smrj
26253446Smrj default:
26263446Smrj break;
26273446Smrj }
26280Sstevel@tonic-gate
26290Sstevel@tonic-gate /*
26300Sstevel@tonic-gate * [no explicit support required beyond
26310Sstevel@tonic-gate * x87 fp context and exception handlers]
26320Sstevel@tonic-gate */
26330Sstevel@tonic-gate if (!fpu_exists)
26340Sstevel@tonic-gate *edx &= ~(CPUID_AMD_EDX_MMXamd |
26350Sstevel@tonic-gate CPUID_AMD_EDX_3DNow | CPUID_AMD_EDX_3DNowx);
26360Sstevel@tonic-gate
263712826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_NX))
26380Sstevel@tonic-gate *edx &= ~CPUID_AMD_EDX_NX;
26393446Smrj #if !defined(__amd64)
26400Sstevel@tonic-gate *edx &= ~CPUID_AMD_EDX_LM;
26410Sstevel@tonic-gate #endif
26420Sstevel@tonic-gate /*
26430Sstevel@tonic-gate * Now map the supported feature vector to
26440Sstevel@tonic-gate * things that we think userland will care about.
26450Sstevel@tonic-gate */
26463446Smrj #if defined(__amd64)
26470Sstevel@tonic-gate if (*edx & CPUID_AMD_EDX_SYSC)
26480Sstevel@tonic-gate hwcap_flags |= AV_386_AMD_SYSC;
26493446Smrj #endif
26500Sstevel@tonic-gate if (*edx & CPUID_AMD_EDX_MMXamd)
26510Sstevel@tonic-gate hwcap_flags |= AV_386_AMD_MMX;
26520Sstevel@tonic-gate if (*edx & CPUID_AMD_EDX_3DNow)
26530Sstevel@tonic-gate hwcap_flags |= AV_386_AMD_3DNow;
26540Sstevel@tonic-gate if (*edx & CPUID_AMD_EDX_3DNowx)
26550Sstevel@tonic-gate hwcap_flags |= AV_386_AMD_3DNowx;
26563446Smrj
26573446Smrj switch (cpi->cpi_vendor) {
26583446Smrj case X86_VENDOR_AMD:
26593446Smrj if (*edx & CPUID_AMD_EDX_TSCP)
26603446Smrj hwcap_flags |= AV_386_TSCP;
26613446Smrj if (*ecx & CPUID_AMD_ECX_AHF64)
26623446Smrj hwcap_flags |= AV_386_AHF;
26634628Skk208521 if (*ecx & CPUID_AMD_ECX_SSE4A)
26644628Skk208521 hwcap_flags |= AV_386_AMD_SSE4A;
26654628Skk208521 if (*ecx & CPUID_AMD_ECX_LZCNT)
26664628Skk208521 hwcap_flags |= AV_386_AMD_LZCNT;
26673446Smrj break;
26683446Smrj
26693446Smrj case X86_VENDOR_Intel:
26706657Ssudheer if (*edx & CPUID_AMD_EDX_TSCP)
26716657Ssudheer hwcap_flags |= AV_386_TSCP;
26723446Smrj /*
26733446Smrj * Aarrgh.
26743446Smrj * Intel uses a different bit in the same word.
26753446Smrj */
26763446Smrj if (*ecx & CPUID_INTC_ECX_AHF64)
26773446Smrj hwcap_flags |= AV_386_AHF;
26783446Smrj break;
26793446Smrj
26803446Smrj default:
26813446Smrj break;
26823446Smrj }
26830Sstevel@tonic-gate break;
26840Sstevel@tonic-gate
26850Sstevel@tonic-gate case X86_VENDOR_TM:
26861228Sandrei cp.cp_eax = 0x80860001;
26871228Sandrei (void) __cpuid_insn(&cp);
26881228Sandrei cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
26890Sstevel@tonic-gate break;
26900Sstevel@tonic-gate
26910Sstevel@tonic-gate default:
26920Sstevel@tonic-gate break;
26930Sstevel@tonic-gate }
26940Sstevel@tonic-gate
26950Sstevel@tonic-gate pass4_done:
26960Sstevel@tonic-gate cpi->cpi_pass = 4;
26970Sstevel@tonic-gate return (hwcap_flags);
26980Sstevel@tonic-gate }
26990Sstevel@tonic-gate
27000Sstevel@tonic-gate
27010Sstevel@tonic-gate /*
27020Sstevel@tonic-gate * Simulate the cpuid instruction using the data we previously
27030Sstevel@tonic-gate * captured about this CPU. We try our best to return the truth
27040Sstevel@tonic-gate * about the hardware, independently of kernel support.
27050Sstevel@tonic-gate */
27060Sstevel@tonic-gate uint32_t
cpuid_insn(cpu_t * cpu,struct cpuid_regs * cp)27071228Sandrei cpuid_insn(cpu_t *cpu, struct cpuid_regs *cp)
27080Sstevel@tonic-gate {
27090Sstevel@tonic-gate struct cpuid_info *cpi;
27101228Sandrei struct cpuid_regs *xcp;
27110Sstevel@tonic-gate
27120Sstevel@tonic-gate if (cpu == NULL)
27130Sstevel@tonic-gate cpu = CPU;
27140Sstevel@tonic-gate cpi = cpu->cpu_m.mcpu_cpi;
27150Sstevel@tonic-gate
27160Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 3));
27170Sstevel@tonic-gate
27180Sstevel@tonic-gate /*
27190Sstevel@tonic-gate * CPUID data is cached in two separate places: cpi_std for standard
27200Sstevel@tonic-gate * CPUID functions, and cpi_extd for extended CPUID functions.
27210Sstevel@tonic-gate */
27221228Sandrei if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
27231228Sandrei xcp = &cpi->cpi_std[cp->cp_eax];
27241228Sandrei else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
27251228Sandrei cp->cp_eax < 0x80000000 + NMAX_CPI_EXTD)
27261228Sandrei xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
27270Sstevel@tonic-gate else
27280Sstevel@tonic-gate /*
27290Sstevel@tonic-gate * The caller is asking for data from an input parameter which
27300Sstevel@tonic-gate * the kernel has not cached. In this case we go fetch from
27310Sstevel@tonic-gate * the hardware and return the data directly to the user.
27320Sstevel@tonic-gate */
27331228Sandrei return (__cpuid_insn(cp));
27341228Sandrei
27351228Sandrei cp->cp_eax = xcp->cp_eax;
27361228Sandrei cp->cp_ebx = xcp->cp_ebx;
27371228Sandrei cp->cp_ecx = xcp->cp_ecx;
27381228Sandrei cp->cp_edx = xcp->cp_edx;
27390Sstevel@tonic-gate return (cp->cp_eax);
27400Sstevel@tonic-gate }
27410Sstevel@tonic-gate
27420Sstevel@tonic-gate int
cpuid_checkpass(cpu_t * cpu,int pass)27430Sstevel@tonic-gate cpuid_checkpass(cpu_t *cpu, int pass)
27440Sstevel@tonic-gate {
27450Sstevel@tonic-gate return (cpu != NULL && cpu->cpu_m.mcpu_cpi != NULL &&
27460Sstevel@tonic-gate cpu->cpu_m.mcpu_cpi->cpi_pass >= pass);
27470Sstevel@tonic-gate }
27480Sstevel@tonic-gate
27490Sstevel@tonic-gate int
cpuid_getbrandstr(cpu_t * cpu,char * s,size_t n)27500Sstevel@tonic-gate cpuid_getbrandstr(cpu_t *cpu, char *s, size_t n)
27510Sstevel@tonic-gate {
27520Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 3));
27530Sstevel@tonic-gate
27540Sstevel@tonic-gate return (snprintf(s, n, "%s", cpu->cpu_m.mcpu_cpi->cpi_brandstr));
27550Sstevel@tonic-gate }
27560Sstevel@tonic-gate
27570Sstevel@tonic-gate int
cpuid_is_cmt(cpu_t * cpu)27581228Sandrei cpuid_is_cmt(cpu_t *cpu)
27590Sstevel@tonic-gate {
27600Sstevel@tonic-gate if (cpu == NULL)
27610Sstevel@tonic-gate cpu = CPU;
27620Sstevel@tonic-gate
27630Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1));
27640Sstevel@tonic-gate
27650Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_chipid >= 0);
27660Sstevel@tonic-gate }
27670Sstevel@tonic-gate
27680Sstevel@tonic-gate /*
27690Sstevel@tonic-gate * AMD and Intel both implement the 64-bit variant of the syscall
27700Sstevel@tonic-gate * instruction (syscallq), so if there's -any- support for syscall,
27710Sstevel@tonic-gate * cpuid currently says "yes, we support this".
27720Sstevel@tonic-gate *
27730Sstevel@tonic-gate * However, Intel decided to -not- implement the 32-bit variant of the
27740Sstevel@tonic-gate * syscall instruction, so we provide a predicate to allow our caller
27750Sstevel@tonic-gate * to test that subtlety here.
27765084Sjohnlev *
27775084Sjohnlev * XXPV Currently, 32-bit syscall instructions don't work via the hypervisor,
27785084Sjohnlev * even in the case where the hardware would in fact support it.
27790Sstevel@tonic-gate */
27800Sstevel@tonic-gate /*ARGSUSED*/
27810Sstevel@tonic-gate int
cpuid_syscall32_insn(cpu_t * cpu)27820Sstevel@tonic-gate cpuid_syscall32_insn(cpu_t *cpu)
27830Sstevel@tonic-gate {
27840Sstevel@tonic-gate ASSERT(cpuid_checkpass((cpu == NULL ? CPU : cpu), 1));
27850Sstevel@tonic-gate
27865084Sjohnlev #if !defined(__xpv)
27873446Smrj if (cpu == NULL)
27883446Smrj cpu = CPU;
27893446Smrj
27903446Smrj /*CSTYLED*/
27913446Smrj {
27923446Smrj struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
27933446Smrj
27943446Smrj if (cpi->cpi_vendor == X86_VENDOR_AMD &&
27953446Smrj cpi->cpi_xmaxeax >= 0x80000001 &&
27963446Smrj (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
27973446Smrj return (1);
27983446Smrj }
27995084Sjohnlev #endif
28000Sstevel@tonic-gate return (0);
28010Sstevel@tonic-gate }
28020Sstevel@tonic-gate
28030Sstevel@tonic-gate int
cpuid_getidstr(cpu_t * cpu,char * s,size_t n)28040Sstevel@tonic-gate cpuid_getidstr(cpu_t *cpu, char *s, size_t n)
28050Sstevel@tonic-gate {
28060Sstevel@tonic-gate struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
28070Sstevel@tonic-gate
28080Sstevel@tonic-gate static const char fmt[] =
28093779Sdmick "x86 (%s %X family %d model %d step %d clock %d MHz)";
28100Sstevel@tonic-gate static const char fmt_ht[] =
28113779Sdmick "x86 (chipid 0x%x %s %X family %d model %d step %d clock %d MHz)";
28120Sstevel@tonic-gate
28130Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1));
28140Sstevel@tonic-gate
28151228Sandrei if (cpuid_is_cmt(cpu))
28160Sstevel@tonic-gate return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
28173779Sdmick cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
28183779Sdmick cpi->cpi_family, cpi->cpi_model,
28190Sstevel@tonic-gate cpi->cpi_step, cpu->cpu_type_info.pi_clock));
28200Sstevel@tonic-gate return (snprintf(s, n, fmt,
28213779Sdmick cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
28223779Sdmick cpi->cpi_family, cpi->cpi_model,
28230Sstevel@tonic-gate cpi->cpi_step, cpu->cpu_type_info.pi_clock));
28240Sstevel@tonic-gate }
28250Sstevel@tonic-gate
28260Sstevel@tonic-gate const char *
cpuid_getvendorstr(cpu_t * cpu)28270Sstevel@tonic-gate cpuid_getvendorstr(cpu_t *cpu)
28280Sstevel@tonic-gate {
28290Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1));
28300Sstevel@tonic-gate return ((const char *)cpu->cpu_m.mcpu_cpi->cpi_vendorstr);
28310Sstevel@tonic-gate }
28320Sstevel@tonic-gate
28330Sstevel@tonic-gate uint_t
cpuid_getvendor(cpu_t * cpu)28340Sstevel@tonic-gate cpuid_getvendor(cpu_t *cpu)
28350Sstevel@tonic-gate {
28360Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1));
28370Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_vendor);
28380Sstevel@tonic-gate }
28390Sstevel@tonic-gate
28400Sstevel@tonic-gate uint_t
cpuid_getfamily(cpu_t * cpu)28410Sstevel@tonic-gate cpuid_getfamily(cpu_t *cpu)
28420Sstevel@tonic-gate {
28430Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1));
28440Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_family);
28450Sstevel@tonic-gate }
28460Sstevel@tonic-gate
28470Sstevel@tonic-gate uint_t
cpuid_getmodel(cpu_t * cpu)28480Sstevel@tonic-gate cpuid_getmodel(cpu_t *cpu)
28490Sstevel@tonic-gate {
28500Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1));
28510Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_model);
28520Sstevel@tonic-gate }
28530Sstevel@tonic-gate
28540Sstevel@tonic-gate uint_t
cpuid_get_ncpu_per_chip(cpu_t * cpu)28550Sstevel@tonic-gate cpuid_get_ncpu_per_chip(cpu_t *cpu)
28560Sstevel@tonic-gate {
28570Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1));
28580Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_per_chip);
28590Sstevel@tonic-gate }
28600Sstevel@tonic-gate
28610Sstevel@tonic-gate uint_t
cpuid_get_ncore_per_chip(cpu_t * cpu)28621228Sandrei cpuid_get_ncore_per_chip(cpu_t *cpu)
28631228Sandrei {
28641228Sandrei ASSERT(cpuid_checkpass(cpu, 1));
28651228Sandrei return (cpu->cpu_m.mcpu_cpi->cpi_ncore_per_chip);
28661228Sandrei }
28671228Sandrei
28681228Sandrei uint_t
cpuid_get_ncpu_sharing_last_cache(cpu_t * cpu)28694606Sesaxe cpuid_get_ncpu_sharing_last_cache(cpu_t *cpu)
28704606Sesaxe {
28714606Sesaxe ASSERT(cpuid_checkpass(cpu, 2));
28724606Sesaxe return (cpu->cpu_m.mcpu_cpi->cpi_ncpu_shr_last_cache);
28734606Sesaxe }
28744606Sesaxe
28754606Sesaxe id_t
cpuid_get_last_lvl_cacheid(cpu_t * cpu)28764606Sesaxe cpuid_get_last_lvl_cacheid(cpu_t *cpu)
28774606Sesaxe {
28784606Sesaxe ASSERT(cpuid_checkpass(cpu, 2));
28794606Sesaxe return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
28804606Sesaxe }
28814606Sesaxe
28824606Sesaxe uint_t
cpuid_getstep(cpu_t * cpu)28830Sstevel@tonic-gate cpuid_getstep(cpu_t *cpu)
28840Sstevel@tonic-gate {
28850Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1));
28860Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_step);
28870Sstevel@tonic-gate }
28880Sstevel@tonic-gate
28894581Ssherrym uint_t
cpuid_getsig(struct cpu * cpu)28904581Ssherrym cpuid_getsig(struct cpu *cpu)
28914581Ssherrym {
28924581Ssherrym ASSERT(cpuid_checkpass(cpu, 1));
28934581Ssherrym return (cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_eax);
28944581Ssherrym }
28954581Ssherrym
28962869Sgavinm uint32_t
cpuid_getchiprev(struct cpu * cpu)28972869Sgavinm cpuid_getchiprev(struct cpu *cpu)
28982869Sgavinm {
28992869Sgavinm ASSERT(cpuid_checkpass(cpu, 1));
29002869Sgavinm return (cpu->cpu_m.mcpu_cpi->cpi_chiprev);
29012869Sgavinm }
29022869Sgavinm
29032869Sgavinm const char *
cpuid_getchiprevstr(struct cpu * cpu)29042869Sgavinm cpuid_getchiprevstr(struct cpu *cpu)
29052869Sgavinm {
29062869Sgavinm ASSERT(cpuid_checkpass(cpu, 1));
29072869Sgavinm return (cpu->cpu_m.mcpu_cpi->cpi_chiprevstr);
29082869Sgavinm }
29092869Sgavinm
29102869Sgavinm uint32_t
cpuid_getsockettype(struct cpu * cpu)29112869Sgavinm cpuid_getsockettype(struct cpu *cpu)
29122869Sgavinm {
29132869Sgavinm ASSERT(cpuid_checkpass(cpu, 1));
29142869Sgavinm return (cpu->cpu_m.mcpu_cpi->cpi_socket);
29152869Sgavinm }
29162869Sgavinm
29179482SKuriakose.Kuruvilla@Sun.COM const char *
cpuid_getsocketstr(cpu_t * cpu)29189482SKuriakose.Kuruvilla@Sun.COM cpuid_getsocketstr(cpu_t *cpu)
29199482SKuriakose.Kuruvilla@Sun.COM {
29209482SKuriakose.Kuruvilla@Sun.COM static const char *socketstr = NULL;
29219482SKuriakose.Kuruvilla@Sun.COM struct cpuid_info *cpi;
29229482SKuriakose.Kuruvilla@Sun.COM
29239482SKuriakose.Kuruvilla@Sun.COM ASSERT(cpuid_checkpass(cpu, 1));
29249482SKuriakose.Kuruvilla@Sun.COM cpi = cpu->cpu_m.mcpu_cpi;
29259482SKuriakose.Kuruvilla@Sun.COM
29269482SKuriakose.Kuruvilla@Sun.COM /* Assume that socket types are the same across the system */
29279482SKuriakose.Kuruvilla@Sun.COM if (socketstr == NULL)
29289482SKuriakose.Kuruvilla@Sun.COM socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
29299482SKuriakose.Kuruvilla@Sun.COM cpi->cpi_model, cpi->cpi_step);
29309482SKuriakose.Kuruvilla@Sun.COM
29319482SKuriakose.Kuruvilla@Sun.COM
29329482SKuriakose.Kuruvilla@Sun.COM return (socketstr);
29339482SKuriakose.Kuruvilla@Sun.COM }
29349482SKuriakose.Kuruvilla@Sun.COM
29353434Sesaxe int
cpuid_get_chipid(cpu_t * cpu)29363434Sesaxe cpuid_get_chipid(cpu_t *cpu)
29370Sstevel@tonic-gate {
29380Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1));
29390Sstevel@tonic-gate
29401228Sandrei if (cpuid_is_cmt(cpu))
29410Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_chipid);
29420Sstevel@tonic-gate return (cpu->cpu_id);
29430Sstevel@tonic-gate }
29440Sstevel@tonic-gate
29451228Sandrei id_t
cpuid_get_coreid(cpu_t * cpu)29463434Sesaxe cpuid_get_coreid(cpu_t *cpu)
29471228Sandrei {
29481228Sandrei ASSERT(cpuid_checkpass(cpu, 1));
29491228Sandrei return (cpu->cpu_m.mcpu_cpi->cpi_coreid);
29501228Sandrei }
29511228Sandrei
29520Sstevel@tonic-gate int
cpuid_get_pkgcoreid(cpu_t * cpu)29535870Sgavinm cpuid_get_pkgcoreid(cpu_t *cpu)
29545870Sgavinm {
29555870Sgavinm ASSERT(cpuid_checkpass(cpu, 1));
29565870Sgavinm return (cpu->cpu_m.mcpu_cpi->cpi_pkgcoreid);
29575870Sgavinm }
29585870Sgavinm
29595870Sgavinm int
cpuid_get_clogid(cpu_t * cpu)29603434Sesaxe cpuid_get_clogid(cpu_t *cpu)
29610Sstevel@tonic-gate {
29620Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1));
29630Sstevel@tonic-gate return (cpu->cpu_m.mcpu_cpi->cpi_clogid);
29640Sstevel@tonic-gate }
29650Sstevel@tonic-gate
296611389SAlexander.Kolbasov@Sun.COM int
cpuid_get_cacheid(cpu_t * cpu)296711389SAlexander.Kolbasov@Sun.COM cpuid_get_cacheid(cpu_t *cpu)
296811389SAlexander.Kolbasov@Sun.COM {
296911389SAlexander.Kolbasov@Sun.COM ASSERT(cpuid_checkpass(cpu, 1));
297011389SAlexander.Kolbasov@Sun.COM return (cpu->cpu_m.mcpu_cpi->cpi_last_lvl_cacheid);
297111389SAlexander.Kolbasov@Sun.COM }
297211389SAlexander.Kolbasov@Sun.COM
297310947SSrihari.Venkatesan@Sun.COM uint_t
cpuid_get_procnodeid(cpu_t * cpu)297410947SSrihari.Venkatesan@Sun.COM cpuid_get_procnodeid(cpu_t *cpu)
297510947SSrihari.Venkatesan@Sun.COM {
297610947SSrihari.Venkatesan@Sun.COM ASSERT(cpuid_checkpass(cpu, 1));
297710947SSrihari.Venkatesan@Sun.COM return (cpu->cpu_m.mcpu_cpi->cpi_procnodeid);
297810947SSrihari.Venkatesan@Sun.COM }
297910947SSrihari.Venkatesan@Sun.COM
298010947SSrihari.Venkatesan@Sun.COM uint_t
cpuid_get_procnodes_per_pkg(cpu_t * cpu)298110947SSrihari.Venkatesan@Sun.COM cpuid_get_procnodes_per_pkg(cpu_t *cpu)
298210947SSrihari.Venkatesan@Sun.COM {
298310947SSrihari.Venkatesan@Sun.COM ASSERT(cpuid_checkpass(cpu, 1));
298410947SSrihari.Venkatesan@Sun.COM return (cpu->cpu_m.mcpu_cpi->cpi_procnodes_per_pkg);
298510947SSrihari.Venkatesan@Sun.COM }
298610947SSrihari.Venkatesan@Sun.COM
298710080SJoe.Bonasera@sun.com /*ARGSUSED*/
298810080SJoe.Bonasera@sun.com int
cpuid_have_cr8access(cpu_t * cpu)298910080SJoe.Bonasera@sun.com cpuid_have_cr8access(cpu_t *cpu)
299010080SJoe.Bonasera@sun.com {
299110080SJoe.Bonasera@sun.com #if defined(__amd64)
299210080SJoe.Bonasera@sun.com return (1);
299310080SJoe.Bonasera@sun.com #else
299410080SJoe.Bonasera@sun.com struct cpuid_info *cpi;
299510080SJoe.Bonasera@sun.com
299610080SJoe.Bonasera@sun.com ASSERT(cpu != NULL);
299710080SJoe.Bonasera@sun.com cpi = cpu->cpu_m.mcpu_cpi;
299810080SJoe.Bonasera@sun.com if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 &&
299910080SJoe.Bonasera@sun.com (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0)
300010080SJoe.Bonasera@sun.com return (1);
300110080SJoe.Bonasera@sun.com return (0);
300210080SJoe.Bonasera@sun.com #endif
300310080SJoe.Bonasera@sun.com }
300410080SJoe.Bonasera@sun.com
30059652SMichael.Corcoran@Sun.COM uint32_t
cpuid_get_apicid(cpu_t * cpu)30069652SMichael.Corcoran@Sun.COM cpuid_get_apicid(cpu_t *cpu)
30079652SMichael.Corcoran@Sun.COM {
30089652SMichael.Corcoran@Sun.COM ASSERT(cpuid_checkpass(cpu, 1));
30099652SMichael.Corcoran@Sun.COM if (cpu->cpu_m.mcpu_cpi->cpi_maxeax < 1) {
30109652SMichael.Corcoran@Sun.COM return (UINT32_MAX);
30119652SMichael.Corcoran@Sun.COM } else {
30129652SMichael.Corcoran@Sun.COM return (cpu->cpu_m.mcpu_cpi->cpi_apicid);
30139652SMichael.Corcoran@Sun.COM }
30149652SMichael.Corcoran@Sun.COM }
30159652SMichael.Corcoran@Sun.COM
30160Sstevel@tonic-gate void
cpuid_get_addrsize(cpu_t * cpu,uint_t * pabits,uint_t * vabits)30170Sstevel@tonic-gate cpuid_get_addrsize(cpu_t *cpu, uint_t *pabits, uint_t *vabits)
30180Sstevel@tonic-gate {
30190Sstevel@tonic-gate struct cpuid_info *cpi;
30200Sstevel@tonic-gate
30210Sstevel@tonic-gate if (cpu == NULL)
30220Sstevel@tonic-gate cpu = CPU;
30230Sstevel@tonic-gate cpi = cpu->cpu_m.mcpu_cpi;
30240Sstevel@tonic-gate
30250Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1));
30260Sstevel@tonic-gate
30270Sstevel@tonic-gate if (pabits)
30280Sstevel@tonic-gate *pabits = cpi->cpi_pabits;
30290Sstevel@tonic-gate if (vabits)
30300Sstevel@tonic-gate *vabits = cpi->cpi_vabits;
30310Sstevel@tonic-gate }
30320Sstevel@tonic-gate
30330Sstevel@tonic-gate /*
30340Sstevel@tonic-gate * Returns the number of data TLB entries for a corresponding
30350Sstevel@tonic-gate * pagesize. If it can't be computed, or isn't known, the
30360Sstevel@tonic-gate * routine returns zero. If you ask about an architecturally
30370Sstevel@tonic-gate * impossible pagesize, the routine will panic (so that the
30380Sstevel@tonic-gate * hat implementor knows that things are inconsistent.)
30390Sstevel@tonic-gate */
30400Sstevel@tonic-gate uint_t
cpuid_get_dtlb_nent(cpu_t * cpu,size_t pagesize)30410Sstevel@tonic-gate cpuid_get_dtlb_nent(cpu_t *cpu, size_t pagesize)
30420Sstevel@tonic-gate {
30430Sstevel@tonic-gate struct cpuid_info *cpi;
30440Sstevel@tonic-gate uint_t dtlb_nent = 0;
30450Sstevel@tonic-gate
30460Sstevel@tonic-gate if (cpu == NULL)
30470Sstevel@tonic-gate cpu = CPU;
30480Sstevel@tonic-gate cpi = cpu->cpu_m.mcpu_cpi;
30490Sstevel@tonic-gate
30500Sstevel@tonic-gate ASSERT(cpuid_checkpass(cpu, 1));
30510Sstevel@tonic-gate
30520Sstevel@tonic-gate /*
30530Sstevel@tonic-gate * Check the L2 TLB info
30540Sstevel@tonic-gate */
30550Sstevel@tonic-gate if (cpi->cpi_xmaxeax >= 0x80000006) {
30561228Sandrei struct cpuid_regs *cp = &cpi->cpi_extd[6];
30570Sstevel@tonic-gate
30580Sstevel@tonic-gate switch (pagesize) {
30590Sstevel@tonic-gate
30600Sstevel@tonic-gate case 4 * 1024:
30610Sstevel@tonic-gate /*
30620Sstevel@tonic-gate * All zero in the top 16 bits of the register
30630Sstevel@tonic-gate * indicates a unified TLB. Size is in low 16 bits.
30640Sstevel@tonic-gate */
30650Sstevel@tonic-gate if ((cp->cp_ebx & 0xffff0000) == 0)
30660Sstevel@tonic-gate dtlb_nent = cp->cp_ebx & 0x0000ffff;
30670Sstevel@tonic-gate else
30680Sstevel@tonic-gate dtlb_nent = BITX(cp->cp_ebx, 27, 16);
30690Sstevel@tonic-gate break;
30700Sstevel@tonic-gate
30710Sstevel@tonic-gate case 2 * 1024 * 1024:
30720Sstevel@tonic-gate if ((cp->cp_eax & 0xffff0000) == 0)
30730Sstevel@tonic-gate dtlb_nent = cp->cp_eax & 0x0000ffff;
30740Sstevel@tonic-gate else
30750Sstevel@tonic-gate dtlb_nent = BITX(cp->cp_eax, 27, 16);
30760Sstevel@tonic-gate break;
30770Sstevel@tonic-gate
30780Sstevel@tonic-gate default:
30790Sstevel@tonic-gate panic("unknown L2 pagesize");
30800Sstevel@tonic-gate /*NOTREACHED*/
30810Sstevel@tonic-gate }
30820Sstevel@tonic-gate }
30830Sstevel@tonic-gate
30840Sstevel@tonic-gate if (dtlb_nent != 0)
30850Sstevel@tonic-gate return (dtlb_nent);
30860Sstevel@tonic-gate
30870Sstevel@tonic-gate /*
30880Sstevel@tonic-gate * No L2 TLB support for this size, try L1.
30890Sstevel@tonic-gate */
30900Sstevel@tonic-gate if (cpi->cpi_xmaxeax >= 0x80000005) {
30911228Sandrei struct cpuid_regs *cp = &cpi->cpi_extd[5];
30920Sstevel@tonic-gate
30930Sstevel@tonic-gate switch (pagesize) {
30940Sstevel@tonic-gate case 4 * 1024:
30950Sstevel@tonic-gate dtlb_nent = BITX(cp->cp_ebx, 23, 16);
30960Sstevel@tonic-gate break;
30970Sstevel@tonic-gate case 2 * 1024 * 1024:
30980Sstevel@tonic-gate dtlb_nent = BITX(cp->cp_eax, 23, 16);
30990Sstevel@tonic-gate break;
31000Sstevel@tonic-gate default:
31010Sstevel@tonic-gate panic("unknown L1 d-TLB pagesize");
31020Sstevel@tonic-gate /*NOTREACHED*/
31030Sstevel@tonic-gate }
31040Sstevel@tonic-gate }
31050Sstevel@tonic-gate
31060Sstevel@tonic-gate return (dtlb_nent);
31070Sstevel@tonic-gate }
31080Sstevel@tonic-gate
31090Sstevel@tonic-gate /*
31100Sstevel@tonic-gate * Return 0 if the erratum is not present or not applicable, positive
31110Sstevel@tonic-gate * if it is, and negative if the status of the erratum is unknown.
31120Sstevel@tonic-gate *
31130Sstevel@tonic-gate * See "Revision Guide for AMD Athlon(tm) 64 and AMD Opteron(tm)
3114359Skucharsk * Processors" #25759, Rev 3.57, August 2005
31150Sstevel@tonic-gate */
31160Sstevel@tonic-gate int
cpuid_opteron_erratum(cpu_t * cpu,uint_t erratum)31170Sstevel@tonic-gate cpuid_opteron_erratum(cpu_t *cpu, uint_t erratum)
31180Sstevel@tonic-gate {
31190Sstevel@tonic-gate struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
31201228Sandrei uint_t eax;
31210Sstevel@tonic-gate
31222584Ssethg /*
31232584Ssethg * Bail out if this CPU isn't an AMD CPU, or if it's
31242584Ssethg * a legacy (32-bit) AMD CPU.
31252584Ssethg */
31262584Ssethg if (cpi->cpi_vendor != X86_VENDOR_AMD ||
31274265Skchow cpi->cpi_family == 4 || cpi->cpi_family == 5 ||
31284265Skchow cpi->cpi_family == 6)
31292869Sgavinm
31300Sstevel@tonic-gate return (0);
31310Sstevel@tonic-gate
31320Sstevel@tonic-gate eax = cpi->cpi_std[1].cp_eax;
31330Sstevel@tonic-gate
31340Sstevel@tonic-gate #define SH_B0(eax) (eax == 0xf40 || eax == 0xf50)
31350Sstevel@tonic-gate #define SH_B3(eax) (eax == 0xf51)
31361582Skchow #define B(eax) (SH_B0(eax) || SH_B3(eax))
31370Sstevel@tonic-gate
31380Sstevel@tonic-gate #define SH_C0(eax) (eax == 0xf48 || eax == 0xf58)
31390Sstevel@tonic-gate
31400Sstevel@tonic-gate #define SH_CG(eax) (eax == 0xf4a || eax == 0xf5a || eax == 0xf7a)
31410Sstevel@tonic-gate #define DH_CG(eax) (eax == 0xfc0 || eax == 0xfe0 || eax == 0xff0)
31420Sstevel@tonic-gate #define CH_CG(eax) (eax == 0xf82 || eax == 0xfb2)
31431582Skchow #define CG(eax) (SH_CG(eax) || DH_CG(eax) || CH_CG(eax))
31440Sstevel@tonic-gate
31450Sstevel@tonic-gate #define SH_D0(eax) (eax == 0x10f40 || eax == 0x10f50 || eax == 0x10f70)
31460Sstevel@tonic-gate #define DH_D0(eax) (eax == 0x10fc0 || eax == 0x10ff0)
31470Sstevel@tonic-gate #define CH_D0(eax) (eax == 0x10f80 || eax == 0x10fb0)
31481582Skchow #define D0(eax) (SH_D0(eax) || DH_D0(eax) || CH_D0(eax))
31490Sstevel@tonic-gate
31500Sstevel@tonic-gate #define SH_E0(eax) (eax == 0x20f50 || eax == 0x20f40 || eax == 0x20f70)
31510Sstevel@tonic-gate #define JH_E1(eax) (eax == 0x20f10) /* JH8_E0 had 0x20f30 */
31520Sstevel@tonic-gate #define DH_E3(eax) (eax == 0x20fc0 || eax == 0x20ff0)
31530Sstevel@tonic-gate #define SH_E4(eax) (eax == 0x20f51 || eax == 0x20f71)
31540Sstevel@tonic-gate #define BH_E4(eax) (eax == 0x20fb1)
31550Sstevel@tonic-gate #define SH_E5(eax) (eax == 0x20f42)
31560Sstevel@tonic-gate #define DH_E6(eax) (eax == 0x20ff2 || eax == 0x20fc2)
31570Sstevel@tonic-gate #define JH_E6(eax) (eax == 0x20f12 || eax == 0x20f32)
31581582Skchow #define EX(eax) (SH_E0(eax) || JH_E1(eax) || DH_E3(eax) || \
31591582Skchow SH_E4(eax) || BH_E4(eax) || SH_E5(eax) || \
31601582Skchow DH_E6(eax) || JH_E6(eax))
31610Sstevel@tonic-gate
31626691Skchow #define DR_AX(eax) (eax == 0x100f00 || eax == 0x100f01 || eax == 0x100f02)
31636691Skchow #define DR_B0(eax) (eax == 0x100f20)
31646691Skchow #define DR_B1(eax) (eax == 0x100f21)
31656691Skchow #define DR_BA(eax) (eax == 0x100f2a)
31666691Skchow #define DR_B2(eax) (eax == 0x100f22)
31676691Skchow #define DR_B3(eax) (eax == 0x100f23)
31686691Skchow #define RB_C0(eax) (eax == 0x100f40)
31696691Skchow
31700Sstevel@tonic-gate switch (erratum) {
31710Sstevel@tonic-gate case 1:
31724265Skchow return (cpi->cpi_family < 0x10);
31730Sstevel@tonic-gate case 51: /* what does the asterisk mean? */
31740Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax));
31750Sstevel@tonic-gate case 52:
31760Sstevel@tonic-gate return (B(eax));
31770Sstevel@tonic-gate case 57:
31786691Skchow return (cpi->cpi_family <= 0x11);
31790Sstevel@tonic-gate case 58:
31800Sstevel@tonic-gate return (B(eax));
31810Sstevel@tonic-gate case 60:
31826691Skchow return (cpi->cpi_family <= 0x11);
31830Sstevel@tonic-gate case 61:
31840Sstevel@tonic-gate case 62:
31850Sstevel@tonic-gate case 63:
31860Sstevel@tonic-gate case 64:
31870Sstevel@tonic-gate case 65:
31880Sstevel@tonic-gate case 66:
31890Sstevel@tonic-gate case 68:
31900Sstevel@tonic-gate case 69:
31910Sstevel@tonic-gate case 70:
31920Sstevel@tonic-gate case 71:
31930Sstevel@tonic-gate return (B(eax));
31940Sstevel@tonic-gate case 72:
31950Sstevel@tonic-gate return (SH_B0(eax));
31960Sstevel@tonic-gate case 74:
31970Sstevel@tonic-gate return (B(eax));
31980Sstevel@tonic-gate case 75:
31994265Skchow return (cpi->cpi_family < 0x10);
32000Sstevel@tonic-gate case 76:
32010Sstevel@tonic-gate return (B(eax));
32020Sstevel@tonic-gate case 77:
32036691Skchow return (cpi->cpi_family <= 0x11);
32040Sstevel@tonic-gate case 78:
32050Sstevel@tonic-gate return (B(eax) || SH_C0(eax));
32060Sstevel@tonic-gate case 79:
32070Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
32080Sstevel@tonic-gate case 80:
32090Sstevel@tonic-gate case 81:
32100Sstevel@tonic-gate case 82:
32110Sstevel@tonic-gate return (B(eax));
32120Sstevel@tonic-gate case 83:
32130Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax));
32140Sstevel@tonic-gate case 85:
32154265Skchow return (cpi->cpi_family < 0x10);
32160Sstevel@tonic-gate case 86:
32170Sstevel@tonic-gate return (SH_C0(eax) || CG(eax));
32180Sstevel@tonic-gate case 88:
32190Sstevel@tonic-gate #if !defined(__amd64)
32200Sstevel@tonic-gate return (0);
32210Sstevel@tonic-gate #else
32220Sstevel@tonic-gate return (B(eax) || SH_C0(eax));
32230Sstevel@tonic-gate #endif
32240Sstevel@tonic-gate case 89:
32254265Skchow return (cpi->cpi_family < 0x10);
32260Sstevel@tonic-gate case 90:
32270Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax));
32280Sstevel@tonic-gate case 91:
32290Sstevel@tonic-gate case 92:
32300Sstevel@tonic-gate return (B(eax) || SH_C0(eax));
32310Sstevel@tonic-gate case 93:
32320Sstevel@tonic-gate return (SH_C0(eax));
32330Sstevel@tonic-gate case 94:
32340Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax));
32350Sstevel@tonic-gate case 95:
32360Sstevel@tonic-gate #if !defined(__amd64)
32370Sstevel@tonic-gate return (0);
32380Sstevel@tonic-gate #else
32390Sstevel@tonic-gate return (B(eax) || SH_C0(eax));
32400Sstevel@tonic-gate #endif
32410Sstevel@tonic-gate case 96:
32420Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax));
32430Sstevel@tonic-gate case 97:
32440Sstevel@tonic-gate case 98:
32450Sstevel@tonic-gate return (SH_C0(eax) || CG(eax));
32460Sstevel@tonic-gate case 99:
32470Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
32480Sstevel@tonic-gate case 100:
32490Sstevel@tonic-gate return (B(eax) || SH_C0(eax));
32500Sstevel@tonic-gate case 101:
32510Sstevel@tonic-gate case 103:
32520Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
32530Sstevel@tonic-gate case 104:
32540Sstevel@tonic-gate return (SH_C0(eax) || CG(eax) || D0(eax));
32550Sstevel@tonic-gate case 105:
32560Sstevel@tonic-gate case 106:
32570Sstevel@tonic-gate case 107:
32580Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
32590Sstevel@tonic-gate case 108:
32600Sstevel@tonic-gate return (DH_CG(eax));
32610Sstevel@tonic-gate case 109:
32620Sstevel@tonic-gate return (SH_C0(eax) || CG(eax) || D0(eax));
32630Sstevel@tonic-gate case 110:
32640Sstevel@tonic-gate return (D0(eax) || EX(eax));
32650Sstevel@tonic-gate case 111:
32660Sstevel@tonic-gate return (CG(eax));
32670Sstevel@tonic-gate case 112:
32680Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
32690Sstevel@tonic-gate case 113:
32700Sstevel@tonic-gate return (eax == 0x20fc0);
32710Sstevel@tonic-gate case 114:
32720Sstevel@tonic-gate return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
32730Sstevel@tonic-gate case 115:
32740Sstevel@tonic-gate return (SH_E0(eax) || JH_E1(eax));
32750Sstevel@tonic-gate case 116:
32760Sstevel@tonic-gate return (SH_E0(eax) || JH_E1(eax) || DH_E3(eax));
32770Sstevel@tonic-gate case 117:
32780Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax));
32790Sstevel@tonic-gate case 118:
32800Sstevel@tonic-gate return (SH_E0(eax) || JH_E1(eax) || SH_E4(eax) || BH_E4(eax) ||
32810Sstevel@tonic-gate JH_E6(eax));
32820Sstevel@tonic-gate case 121:
32830Sstevel@tonic-gate return (B(eax) || SH_C0(eax) || CG(eax) || D0(eax) || EX(eax));
32840Sstevel@tonic-gate case 122:
32856691Skchow return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11);
32860Sstevel@tonic-gate case 123:
32870Sstevel@tonic-gate return (JH_E1(eax) || BH_E4(eax) || JH_E6(eax));
3288359Skucharsk case 131:
32894265Skchow return (cpi->cpi_family < 0x10);
3290938Sesaxe case 6336786:
3291938Sesaxe /*
3292938Sesaxe * Test for AdvPowerMgmtInfo.TscPStateInvariant
32934265Skchow * if this is a K8 family or newer processor
3294938Sesaxe */
3295938Sesaxe if (CPI_FAMILY(cpi) == 0xf) {
32961228Sandrei struct cpuid_regs regs;
32971228Sandrei regs.cp_eax = 0x80000007;
32981228Sandrei (void) __cpuid_insn(®s);
32991228Sandrei return (!(regs.cp_edx & 0x100));
3300938Sesaxe }
3301938Sesaxe return (0);
33021582Skchow case 6323525:
33031582Skchow return (((((eax >> 12) & 0xff00) + (eax & 0xf00)) |
33041582Skchow (((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0))) < 0xf40);
33051582Skchow
33066691Skchow case 6671130:
33076691Skchow /*
33086691Skchow * check for processors (pre-Shanghai) that do not provide
33096691Skchow * optimal management of 1gb ptes in its tlb.
33106691Skchow */
33116691Skchow return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4);
33126691Skchow
33136691Skchow case 298:
33146691Skchow return (DR_AX(eax) || DR_B0(eax) || DR_B1(eax) || DR_BA(eax) ||
33156691Skchow DR_B2(eax) || RB_C0(eax));
33166691Skchow
33176691Skchow default:
33186691Skchow return (-1);
33196691Skchow
33206691Skchow }
33216691Skchow }
33226691Skchow
33236691Skchow /*
33246691Skchow * Determine if specified erratum is present via OSVW (OS Visible Workaround).
33256691Skchow * Return 1 if erratum is present, 0 if not present and -1 if indeterminate.
33266691Skchow */
int
osvw_opteron_erratum(cpu_t *cpu, uint_t erratum)
{
	struct cpuid_info *cpi;
	uint_t osvwid;
	/* Cached OSVW capability, shared across cpus; -1 means not yet probed */
	static int osvwfeature = -1;
	uint64_t osvwlength;


	cpi = cpu->cpu_m.mcpu_cpi;

	/* confirm OSVW supported */
	if (osvwfeature == -1) {
		/* First caller probes the OSVW cpuid bit (extd fn 1, %ecx) */
		osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
	} else {
		/* assert that osvw feature setting is consistent on all cpus */
		ASSERT(osvwfeature ==
		    (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
	}
	if (!osvwfeature)
		return (-1);

	/* OSVW_ID_LEN is the number of valid status bits in the OSVW MSRs */
	osvwlength = rdmsr(MSR_AMD_OSVW_ID_LEN) & OSVW_ID_LEN_MASK;

	switch (erratum) {
	case 298:	/* osvwid is 0 */
		osvwid = 0;
		if (osvwlength <= (uint64_t)osvwid) {
			/* osvwid 0 is unknown */
			return (-1);
		}

		/*
		 * Check the OSVW STATUS MSR to determine the state
		 * of the erratum where:
		 *   0 - fixed by HW
		 *   1 - BIOS has applied the workaround when BIOS
		 *   workaround is available. (Or for other errata,
		 *   OS workaround is required.)
		 * For a value of 1, caller will confirm that the
		 * erratum 298 workaround has indeed been applied by BIOS.
		 *
		 * A 1 may be set in cpus that have a HW fix
		 * in a mixed cpu system. Regarding erratum 298:
		 *   In a multiprocessor platform, the workaround above
		 *   should be applied to all processors regardless of
		 *   silicon revision when an affected processor is
		 *   present.
		 */

		return (rdmsr(MSR_AMD_OSVW_STATUS +
		    (osvwid / OSVW_ID_CNT_PER_MSR)) &
		    (1ULL << (osvwid % OSVW_ID_CNT_PER_MSR)));

	default:
		return (-1);
	}
}
33850Sstevel@tonic-gate
/* Property-name suffixes used by add_cache_prop() below */
static const char assoc_str[] = "associativity";
static const char line_str[] = "line-size";
static const char size_str[] = "size";
33890Sstevel@tonic-gate
33900Sstevel@tonic-gate static void
add_cache_prop(dev_info_t * devi,const char * label,const char * type,uint32_t val)33910Sstevel@tonic-gate add_cache_prop(dev_info_t *devi, const char *label, const char *type,
33920Sstevel@tonic-gate uint32_t val)
33930Sstevel@tonic-gate {
33940Sstevel@tonic-gate char buf[128];
33950Sstevel@tonic-gate
33960Sstevel@tonic-gate /*
33970Sstevel@tonic-gate * ndi_prop_update_int() is used because it is desirable for
33980Sstevel@tonic-gate * DDI_PROP_HW_DEF and DDI_PROP_DONTSLEEP to be set.
33990Sstevel@tonic-gate */
34000Sstevel@tonic-gate if (snprintf(buf, sizeof (buf), "%s-%s", label, type) < sizeof (buf))
34010Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, devi, buf, val);
34020Sstevel@tonic-gate }
34030Sstevel@tonic-gate
34040Sstevel@tonic-gate /*
34050Sstevel@tonic-gate * Intel-style cache/tlb description
34060Sstevel@tonic-gate *
34070Sstevel@tonic-gate * Standard cpuid level 2 gives a randomly ordered
34080Sstevel@tonic-gate * selection of tags that index into a table that describes
34090Sstevel@tonic-gate * cache and tlb properties.
34100Sstevel@tonic-gate */
34110Sstevel@tonic-gate
/*
 * Property-label prefixes for the caches and TLBs described by the
 * Intel cpuid level 2 descriptor table (intel_ctab) below.
 */
static const char l1_icache_str[] = "l1-icache";
static const char l1_dcache_str[] = "l1-dcache";
static const char l2_cache_str[] = "l2-cache";
static const char l3_cache_str[] = "l3-cache";
static const char itlb4k_str[] = "itlb-4K";
static const char dtlb4k_str[] = "dtlb-4K";
static const char itlb2M_str[] = "itlb-2M";
static const char itlb4M_str[] = "itlb-4M";
static const char dtlb4M_str[] = "dtlb-4M";
static const char dtlb24_str[] = "dtlb0-2M-4M";
static const char itlb424_str[] = "itlb-4K-2M-4M";
static const char itlb24_str[] = "itlb-2M-4M";
static const char dtlb44_str[] = "dtlb-4K-4M";
static const char sl1_dcache_str[] = "sectored-l1-dcache";
static const char sl2_cache_str[] = "sectored-l2-cache";
static const char itrace_str[] = "itrace-cache";
static const char sl3_cache_str[] = "sectored-l3-cache";
static const char sh_l2_tlb4k_str[] = "shared-l2-tlb-4k";
34300Sstevel@tonic-gate
static const struct cachetab {
	uint8_t ct_code;		/* cpuid level 2 descriptor byte */
	uint8_t ct_assoc;		/* ways of associativity (0 = unspecified) */
	uint16_t ct_line_size;		/* line size in bytes (0 for TLB entries) */
	size_t ct_size;			/* cache size in bytes, or TLB entry count */
	const char *ct_label;		/* property-name prefix (see strings above) */
} intel_ctab[] = {
	/*
	 * maintain descending order!
	 * (find_cacheent() relies on it for its linear search.)
	 *
	 * Codes ignored - Reason
	 * ----------------------
	 * 40H - intel_cpuid_4_cache_info() disambiguates l2/l3 cache
	 * f0H/f1H - Currently we do not interpret prefetch size by design
	 */
	{ 0xe4, 16, 64, 8*1024*1024, l3_cache_str},
	{ 0xe3, 16, 64, 4*1024*1024, l3_cache_str},
	{ 0xe2, 16, 64, 2*1024*1024, l3_cache_str},
	{ 0xde, 12, 64, 6*1024*1024, l3_cache_str},
	{ 0xdd, 12, 64, 3*1024*1024, l3_cache_str},
	{ 0xdc, 12, 64, ((1*1024*1024)+(512*1024)), l3_cache_str},
	{ 0xd8, 8, 64, 4*1024*1024, l3_cache_str},
	{ 0xd7, 8, 64, 2*1024*1024, l3_cache_str},
	{ 0xd6, 8, 64, 1*1024*1024, l3_cache_str},
	{ 0xd2, 4, 64, 2*1024*1024, l3_cache_str},
	{ 0xd1, 4, 64, 1*1024*1024, l3_cache_str},
	{ 0xd0, 4, 64, 512*1024, l3_cache_str},
	{ 0xca, 4, 0, 512, sh_l2_tlb4k_str},
	{ 0xc0, 4, 0, 8, dtlb44_str },
	{ 0xba, 4, 0, 64, dtlb4k_str },
	{ 0xb4, 4, 0, 256, dtlb4k_str },
	{ 0xb3, 4, 0, 128, dtlb4k_str },
	{ 0xb2, 4, 0, 64, itlb4k_str },
	{ 0xb0, 4, 0, 128, itlb4k_str },
	{ 0x87, 8, 64, 1024*1024, l2_cache_str},
	{ 0x86, 4, 64, 512*1024, l2_cache_str},
	{ 0x85, 8, 32, 2*1024*1024, l2_cache_str},
	{ 0x84, 8, 32, 1024*1024, l2_cache_str},
	{ 0x83, 8, 32, 512*1024, l2_cache_str},
	{ 0x82, 8, 32, 256*1024, l2_cache_str},
	{ 0x80, 8, 64, 512*1024, l2_cache_str},
	{ 0x7f, 2, 64, 512*1024, l2_cache_str},
	{ 0x7d, 8, 64, 2*1024*1024, sl2_cache_str},
	{ 0x7c, 8, 64, 1024*1024, sl2_cache_str},
	{ 0x7b, 8, 64, 512*1024, sl2_cache_str},
	{ 0x7a, 8, 64, 256*1024, sl2_cache_str},
	{ 0x79, 8, 64, 128*1024, sl2_cache_str},
	{ 0x78, 8, 64, 1024*1024, l2_cache_str},
	{ 0x73, 8, 0, 64*1024, itrace_str},
	{ 0x72, 8, 0, 32*1024, itrace_str},
	{ 0x71, 8, 0, 16*1024, itrace_str},
	{ 0x70, 8, 0, 12*1024, itrace_str},
	{ 0x68, 4, 64, 32*1024, sl1_dcache_str},
	{ 0x67, 4, 64, 16*1024, sl1_dcache_str},
	{ 0x66, 4, 64, 8*1024, sl1_dcache_str},
	{ 0x60, 8, 64, 16*1024, sl1_dcache_str},
	{ 0x5d, 0, 0, 256, dtlb44_str},
	{ 0x5c, 0, 0, 128, dtlb44_str},
	{ 0x5b, 0, 0, 64, dtlb44_str},
	{ 0x5a, 4, 0, 32, dtlb24_str},
	{ 0x59, 0, 0, 16, dtlb4k_str},
	{ 0x57, 4, 0, 16, dtlb4k_str},
	{ 0x56, 4, 0, 16, dtlb4M_str},
	{ 0x55, 0, 0, 7, itlb24_str},
	{ 0x52, 0, 0, 256, itlb424_str},
	{ 0x51, 0, 0, 128, itlb424_str},
	{ 0x50, 0, 0, 64, itlb424_str},
	{ 0x4f, 0, 0, 32, itlb4k_str},
	{ 0x4e, 24, 64, 6*1024*1024, l2_cache_str},
	{ 0x4d, 16, 64, 16*1024*1024, l3_cache_str},
	{ 0x4c, 12, 64, 12*1024*1024, l3_cache_str},
	{ 0x4b, 16, 64, 8*1024*1024, l3_cache_str},
	{ 0x4a, 12, 64, 6*1024*1024, l3_cache_str},
	{ 0x49, 16, 64, 4*1024*1024, l3_cache_str},
	{ 0x48, 12, 64, 3*1024*1024, l2_cache_str},
	{ 0x47, 8, 64, 8*1024*1024, l3_cache_str},
	{ 0x46, 4, 64, 4*1024*1024, l3_cache_str},
	{ 0x45, 4, 32, 2*1024*1024, l2_cache_str},
	{ 0x44, 4, 32, 1024*1024, l2_cache_str},
	{ 0x43, 4, 32, 512*1024, l2_cache_str},
	{ 0x42, 4, 32, 256*1024, l2_cache_str},
	{ 0x41, 4, 32, 128*1024, l2_cache_str},
	{ 0x3e, 4, 64, 512*1024, sl2_cache_str},
	{ 0x3d, 6, 64, 384*1024, sl2_cache_str},
	{ 0x3c, 4, 64, 256*1024, sl2_cache_str},
	{ 0x3b, 2, 64, 128*1024, sl2_cache_str},
	{ 0x3a, 6, 64, 192*1024, sl2_cache_str},
	{ 0x39, 4, 64, 128*1024, sl2_cache_str},
	{ 0x30, 8, 64, 32*1024, l1_icache_str},
	{ 0x2c, 8, 64, 32*1024, l1_dcache_str},
	{ 0x29, 8, 64, 4096*1024, sl3_cache_str},
	{ 0x25, 8, 64, 2048*1024, sl3_cache_str},
	{ 0x23, 8, 64, 1024*1024, sl3_cache_str},
	{ 0x22, 4, 64, 512*1024, sl3_cache_str},
	{ 0x0e, 6, 64, 24*1024, l1_dcache_str},
	{ 0x0d, 4, 32, 16*1024, l1_dcache_str},
	{ 0x0c, 4, 32, 16*1024, l1_dcache_str},
	{ 0x0b, 4, 0, 4, itlb4M_str},
	{ 0x0a, 2, 32, 8*1024, l1_dcache_str},
	{ 0x08, 4, 32, 16*1024, l1_icache_str},
	{ 0x06, 4, 32, 8*1024, l1_icache_str},
	{ 0x05, 4, 0, 32, dtlb4M_str},
	{ 0x04, 4, 0, 8, dtlb4M_str},
	{ 0x03, 4, 0, 64, dtlb4k_str},
	{ 0x02, 4, 0, 2, itlb4M_str},
	{ 0x01, 4, 0, 32, itlb4k_str},
	{ 0 }
};
35390Sstevel@tonic-gate
/*
 * Cyrix-specific descriptors; searched before the Intel table by
 * cyrix_walk_cacheinfo().
 */
static const struct cachetab cyrix_ctab[] = {
	{ 0x70, 4, 0, 32, "tlb-4K" },
	{ 0x80, 4, 16, 16*1024, "l1-cache" },
	{ 0 }
};
35450Sstevel@tonic-gate
35460Sstevel@tonic-gate /*
35470Sstevel@tonic-gate * Search a cache table for a matching entry
35480Sstevel@tonic-gate */
35490Sstevel@tonic-gate static const struct cachetab *
find_cacheent(const struct cachetab * ct,uint_t code)35500Sstevel@tonic-gate find_cacheent(const struct cachetab *ct, uint_t code)
35510Sstevel@tonic-gate {
35520Sstevel@tonic-gate if (code != 0) {
35530Sstevel@tonic-gate for (; ct->ct_code != 0; ct++)
35540Sstevel@tonic-gate if (ct->ct_code <= code)
35550Sstevel@tonic-gate break;
35560Sstevel@tonic-gate if (ct->ct_code == code)
35570Sstevel@tonic-gate return (ct);
35580Sstevel@tonic-gate }
35590Sstevel@tonic-gate return (NULL);
35600Sstevel@tonic-gate }
35610Sstevel@tonic-gate
35620Sstevel@tonic-gate /*
35635438Sksadhukh * Populate cachetab entry with L2 or L3 cache-information using
35645438Sksadhukh * cpuid function 4. This function is called from intel_walk_cacheinfo()
35655438Sksadhukh * when descriptor 0x49 is encountered. It returns 0 if no such cache
35665438Sksadhukh * information is found.
35675438Sksadhukh */
35685438Sksadhukh static int
intel_cpuid_4_cache_info(struct cachetab * ct,struct cpuid_info * cpi)35695438Sksadhukh intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi)
35705438Sksadhukh {
35715438Sksadhukh uint32_t level, i;
35725438Sksadhukh int ret = 0;
35735438Sksadhukh
35745438Sksadhukh for (i = 0; i < cpi->cpi_std_4_size; i++) {
35755438Sksadhukh level = CPI_CACHE_LVL(cpi->cpi_std_4[i]);
35765438Sksadhukh
35775438Sksadhukh if (level == 2 || level == 3) {
35785438Sksadhukh ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1;
35795438Sksadhukh ct->ct_line_size =
35805438Sksadhukh CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1;
35815438Sksadhukh ct->ct_size = ct->ct_assoc *
35825438Sksadhukh (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) *
35835438Sksadhukh ct->ct_line_size *
35845438Sksadhukh (cpi->cpi_std_4[i]->cp_ecx + 1);
35855438Sksadhukh
35865438Sksadhukh if (level == 2) {
35875438Sksadhukh ct->ct_label = l2_cache_str;
35885438Sksadhukh } else if (level == 3) {
35895438Sksadhukh ct->ct_label = l3_cache_str;
35905438Sksadhukh }
35915438Sksadhukh ret = 1;
35925438Sksadhukh }
35935438Sksadhukh }
35945438Sksadhukh
35955438Sksadhukh return (ret);
35965438Sksadhukh }
35975438Sksadhukh
35985438Sksadhukh /*
35990Sstevel@tonic-gate * Walk the cacheinfo descriptor, applying 'func' to every valid element
36000Sstevel@tonic-gate * The walk is terminated if the walker returns non-zero.
36010Sstevel@tonic-gate */
36020Sstevel@tonic-gate static void
intel_walk_cacheinfo(struct cpuid_info * cpi,void * arg,int (* func)(void *,const struct cachetab *))36030Sstevel@tonic-gate intel_walk_cacheinfo(struct cpuid_info *cpi,
36040Sstevel@tonic-gate void *arg, int (*func)(void *, const struct cachetab *))
36050Sstevel@tonic-gate {
36060Sstevel@tonic-gate const struct cachetab *ct;
36076964Svd224797 struct cachetab des_49_ct, des_b1_ct;
36080Sstevel@tonic-gate uint8_t *dp;
36090Sstevel@tonic-gate int i;
36100Sstevel@tonic-gate
36110Sstevel@tonic-gate if ((dp = cpi->cpi_cacheinfo) == NULL)
36120Sstevel@tonic-gate return;
36134797Sksadhukh for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
36144797Sksadhukh /*
36154797Sksadhukh * For overloaded descriptor 0x49 we use cpuid function 4
36165438Sksadhukh * if supported by the current processor, to create
36174797Sksadhukh * cache information.
36186964Svd224797 * For overloaded descriptor 0xb1 we use X86_PAE flag
36196964Svd224797 * to disambiguate the cache information.
36204797Sksadhukh */
36215438Sksadhukh if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 &&
36225438Sksadhukh intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) {
36235438Sksadhukh ct = &des_49_ct;
36246964Svd224797 } else if (*dp == 0xb1) {
36256964Svd224797 des_b1_ct.ct_code = 0xb1;
36266964Svd224797 des_b1_ct.ct_assoc = 4;
36276964Svd224797 des_b1_ct.ct_line_size = 0;
362812826Skuriakose.kuruvilla@oracle.com if (is_x86_feature(x86_featureset, X86FSET_PAE)) {
36296964Svd224797 des_b1_ct.ct_size = 8;
36306964Svd224797 des_b1_ct.ct_label = itlb2M_str;
36316964Svd224797 } else {
36326964Svd224797 des_b1_ct.ct_size = 4;
36336964Svd224797 des_b1_ct.ct_label = itlb4M_str;
36346964Svd224797 }
36356964Svd224797 ct = &des_b1_ct;
36365438Sksadhukh } else {
36375438Sksadhukh if ((ct = find_cacheent(intel_ctab, *dp)) == NULL) {
36385438Sksadhukh continue;
36395438Sksadhukh }
36404797Sksadhukh }
36414797Sksadhukh
36425438Sksadhukh if (func(arg, ct) != 0) {
36435438Sksadhukh break;
36440Sstevel@tonic-gate }
36454797Sksadhukh }
36460Sstevel@tonic-gate }
36470Sstevel@tonic-gate
36480Sstevel@tonic-gate /*
36490Sstevel@tonic-gate * (Like the Intel one, except for Cyrix CPUs)
36500Sstevel@tonic-gate */
36510Sstevel@tonic-gate static void
cyrix_walk_cacheinfo(struct cpuid_info * cpi,void * arg,int (* func)(void *,const struct cachetab *))36520Sstevel@tonic-gate cyrix_walk_cacheinfo(struct cpuid_info *cpi,
36530Sstevel@tonic-gate void *arg, int (*func)(void *, const struct cachetab *))
36540Sstevel@tonic-gate {
36550Sstevel@tonic-gate const struct cachetab *ct;
36560Sstevel@tonic-gate uint8_t *dp;
36570Sstevel@tonic-gate int i;
36580Sstevel@tonic-gate
36590Sstevel@tonic-gate if ((dp = cpi->cpi_cacheinfo) == NULL)
36600Sstevel@tonic-gate return;
36610Sstevel@tonic-gate for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
36620Sstevel@tonic-gate /*
36630Sstevel@tonic-gate * Search Cyrix-specific descriptor table first ..
36640Sstevel@tonic-gate */
36650Sstevel@tonic-gate if ((ct = find_cacheent(cyrix_ctab, *dp)) != NULL) {
36660Sstevel@tonic-gate if (func(arg, ct) != 0)
36670Sstevel@tonic-gate break;
36680Sstevel@tonic-gate continue;
36690Sstevel@tonic-gate }
36700Sstevel@tonic-gate /*
36710Sstevel@tonic-gate * .. else fall back to the Intel one
36720Sstevel@tonic-gate */
36730Sstevel@tonic-gate if ((ct = find_cacheent(intel_ctab, *dp)) != NULL) {
36740Sstevel@tonic-gate if (func(arg, ct) != 0)
36750Sstevel@tonic-gate break;
36760Sstevel@tonic-gate continue;
36770Sstevel@tonic-gate }
36780Sstevel@tonic-gate }
36790Sstevel@tonic-gate }
36800Sstevel@tonic-gate
36810Sstevel@tonic-gate /*
36820Sstevel@tonic-gate * A cacheinfo walker that adds associativity, line-size, and size properties
36830Sstevel@tonic-gate * to the devinfo node it is passed as an argument.
36840Sstevel@tonic-gate */
36850Sstevel@tonic-gate static int
add_cacheent_props(void * arg,const struct cachetab * ct)36860Sstevel@tonic-gate add_cacheent_props(void *arg, const struct cachetab *ct)
36870Sstevel@tonic-gate {
36880Sstevel@tonic-gate dev_info_t *devi = arg;
36890Sstevel@tonic-gate
36900Sstevel@tonic-gate add_cache_prop(devi, ct->ct_label, assoc_str, ct->ct_assoc);
36910Sstevel@tonic-gate if (ct->ct_line_size != 0)
36920Sstevel@tonic-gate add_cache_prop(devi, ct->ct_label, line_str,
36930Sstevel@tonic-gate ct->ct_line_size);
36940Sstevel@tonic-gate add_cache_prop(devi, ct->ct_label, size_str, ct->ct_size);
36950Sstevel@tonic-gate return (0);
36960Sstevel@tonic-gate }
36970Sstevel@tonic-gate
36984797Sksadhukh
/* Boolean property used instead of "associativity" for fully-associative units */
static const char fully_assoc[] = "fully-associative?";
37000Sstevel@tonic-gate
37010Sstevel@tonic-gate /*
37020Sstevel@tonic-gate * AMD style cache/tlb description
37030Sstevel@tonic-gate *
37040Sstevel@tonic-gate * Extended functions 5 and 6 directly describe properties of
37050Sstevel@tonic-gate * tlbs and various cache levels.
37060Sstevel@tonic-gate */
37070Sstevel@tonic-gate static void
add_amd_assoc(dev_info_t * devi,const char * label,uint_t assoc)37080Sstevel@tonic-gate add_amd_assoc(dev_info_t *devi, const char *label, uint_t assoc)
37090Sstevel@tonic-gate {
37100Sstevel@tonic-gate switch (assoc) {
37110Sstevel@tonic-gate case 0: /* reserved; ignore */
37120Sstevel@tonic-gate break;
37130Sstevel@tonic-gate default:
37140Sstevel@tonic-gate add_cache_prop(devi, label, assoc_str, assoc);
37150Sstevel@tonic-gate break;
37160Sstevel@tonic-gate case 0xff:
37170Sstevel@tonic-gate add_cache_prop(devi, label, fully_assoc, 1);
37180Sstevel@tonic-gate break;
37190Sstevel@tonic-gate }
37200Sstevel@tonic-gate }
37210Sstevel@tonic-gate
37220Sstevel@tonic-gate static void
add_amd_tlb(dev_info_t * devi,const char * label,uint_t assoc,uint_t size)37230Sstevel@tonic-gate add_amd_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
37240Sstevel@tonic-gate {
37250Sstevel@tonic-gate if (size == 0)
37260Sstevel@tonic-gate return;
37270Sstevel@tonic-gate add_cache_prop(devi, label, size_str, size);
37280Sstevel@tonic-gate add_amd_assoc(devi, label, assoc);
37290Sstevel@tonic-gate }
37300Sstevel@tonic-gate
37310Sstevel@tonic-gate static void
add_amd_cache(dev_info_t * devi,const char * label,uint_t size,uint_t assoc,uint_t lines_per_tag,uint_t line_size)37320Sstevel@tonic-gate add_amd_cache(dev_info_t *devi, const char *label,
37330Sstevel@tonic-gate uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
37340Sstevel@tonic-gate {
37350Sstevel@tonic-gate if (size == 0 || line_size == 0)
37360Sstevel@tonic-gate return;
37370Sstevel@tonic-gate add_amd_assoc(devi, label, assoc);
37380Sstevel@tonic-gate /*
37390Sstevel@tonic-gate * Most AMD parts have a sectored cache. Multiple cache lines are
37400Sstevel@tonic-gate * associated with each tag. A sector consists of all cache lines
37410Sstevel@tonic-gate * associated with a tag. For example, the AMD K6-III has a sector
37420Sstevel@tonic-gate * size of 2 cache lines per tag.
37430Sstevel@tonic-gate */
37440Sstevel@tonic-gate if (lines_per_tag != 0)
37450Sstevel@tonic-gate add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
37460Sstevel@tonic-gate add_cache_prop(devi, label, line_str, line_size);
37470Sstevel@tonic-gate add_cache_prop(devi, label, size_str, size * 1024);
37480Sstevel@tonic-gate }
37490Sstevel@tonic-gate
37500Sstevel@tonic-gate static void
add_amd_l2_assoc(dev_info_t * devi,const char * label,uint_t assoc)37510Sstevel@tonic-gate add_amd_l2_assoc(dev_info_t *devi, const char *label, uint_t assoc)
37520Sstevel@tonic-gate {
37530Sstevel@tonic-gate switch (assoc) {
37540Sstevel@tonic-gate case 0: /* off */
37550Sstevel@tonic-gate break;
37560Sstevel@tonic-gate case 1:
37570Sstevel@tonic-gate case 2:
37580Sstevel@tonic-gate case 4:
37590Sstevel@tonic-gate add_cache_prop(devi, label, assoc_str, assoc);
37600Sstevel@tonic-gate break;
37610Sstevel@tonic-gate case 6:
37620Sstevel@tonic-gate add_cache_prop(devi, label, assoc_str, 8);
37630Sstevel@tonic-gate break;
37640Sstevel@tonic-gate case 8:
37650Sstevel@tonic-gate add_cache_prop(devi, label, assoc_str, 16);
37660Sstevel@tonic-gate break;
37670Sstevel@tonic-gate case 0xf:
37680Sstevel@tonic-gate add_cache_prop(devi, label, fully_assoc, 1);
37690Sstevel@tonic-gate break;
37700Sstevel@tonic-gate default: /* reserved; ignore */
37710Sstevel@tonic-gate break;
37720Sstevel@tonic-gate }
37730Sstevel@tonic-gate }
37740Sstevel@tonic-gate
37750Sstevel@tonic-gate static void
add_amd_l2_tlb(dev_info_t * devi,const char * label,uint_t assoc,uint_t size)37760Sstevel@tonic-gate add_amd_l2_tlb(dev_info_t *devi, const char *label, uint_t assoc, uint_t size)
37770Sstevel@tonic-gate {
37780Sstevel@tonic-gate if (size == 0 || assoc == 0)
37790Sstevel@tonic-gate return;
37800Sstevel@tonic-gate add_amd_l2_assoc(devi, label, assoc);
37810Sstevel@tonic-gate add_cache_prop(devi, label, size_str, size);
37820Sstevel@tonic-gate }
37830Sstevel@tonic-gate
37840Sstevel@tonic-gate static void
add_amd_l2_cache(dev_info_t * devi,const char * label,uint_t size,uint_t assoc,uint_t lines_per_tag,uint_t line_size)37850Sstevel@tonic-gate add_amd_l2_cache(dev_info_t *devi, const char *label,
37860Sstevel@tonic-gate uint_t size, uint_t assoc, uint_t lines_per_tag, uint_t line_size)
37870Sstevel@tonic-gate {
37880Sstevel@tonic-gate if (size == 0 || assoc == 0 || line_size == 0)
37890Sstevel@tonic-gate return;
37900Sstevel@tonic-gate add_amd_l2_assoc(devi, label, assoc);
37910Sstevel@tonic-gate if (lines_per_tag != 0)
37920Sstevel@tonic-gate add_cache_prop(devi, label, "lines-per-tag", lines_per_tag);
37930Sstevel@tonic-gate add_cache_prop(devi, label, line_str, line_size);
37940Sstevel@tonic-gate add_cache_prop(devi, label, size_str, size * 1024);
37950Sstevel@tonic-gate }
37960Sstevel@tonic-gate
37970Sstevel@tonic-gate static void
amd_cache_info(struct cpuid_info * cpi,dev_info_t * devi)37980Sstevel@tonic-gate amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
37990Sstevel@tonic-gate {
38001228Sandrei struct cpuid_regs *cp;
38010Sstevel@tonic-gate
38020Sstevel@tonic-gate if (cpi->cpi_xmaxeax < 0x80000005)
38030Sstevel@tonic-gate return;
38040Sstevel@tonic-gate cp = &cpi->cpi_extd[5];
38050Sstevel@tonic-gate
38060Sstevel@tonic-gate /*
38070Sstevel@tonic-gate * 4M/2M L1 TLB configuration
38080Sstevel@tonic-gate *
38090Sstevel@tonic-gate * We report the size for 2M pages because AMD uses two
38100Sstevel@tonic-gate * TLB entries for one 4M page.
38110Sstevel@tonic-gate */
38120Sstevel@tonic-gate add_amd_tlb(devi, "dtlb-2M",
38130Sstevel@tonic-gate BITX(cp->cp_eax, 31, 24), BITX(cp->cp_eax, 23, 16));
38140Sstevel@tonic-gate add_amd_tlb(devi, "itlb-2M",
38150Sstevel@tonic-gate BITX(cp->cp_eax, 15, 8), BITX(cp->cp_eax, 7, 0));
38160Sstevel@tonic-gate
38170Sstevel@tonic-gate /*
38180Sstevel@tonic-gate * 4K L1 TLB configuration
38190Sstevel@tonic-gate */
38200Sstevel@tonic-gate
38210Sstevel@tonic-gate switch (cpi->cpi_vendor) {
38220Sstevel@tonic-gate uint_t nentries;
38230Sstevel@tonic-gate case X86_VENDOR_TM:
38240Sstevel@tonic-gate if (cpi->cpi_family >= 5) {
38250Sstevel@tonic-gate /*
38260Sstevel@tonic-gate * Crusoe processors have 256 TLB entries, but
38270Sstevel@tonic-gate * cpuid data format constrains them to only
38280Sstevel@tonic-gate * reporting 255 of them.
38290Sstevel@tonic-gate */
38300Sstevel@tonic-gate if ((nentries = BITX(cp->cp_ebx, 23, 16)) == 255)
38310Sstevel@tonic-gate nentries = 256;
38320Sstevel@tonic-gate /*
38330Sstevel@tonic-gate * Crusoe processors also have a unified TLB
38340Sstevel@tonic-gate */
38350Sstevel@tonic-gate add_amd_tlb(devi, "tlb-4K", BITX(cp->cp_ebx, 31, 24),
38360Sstevel@tonic-gate nentries);
38370Sstevel@tonic-gate break;
38380Sstevel@tonic-gate }
38390Sstevel@tonic-gate /*FALLTHROUGH*/
38400Sstevel@tonic-gate default:
38410Sstevel@tonic-gate add_amd_tlb(devi, itlb4k_str,
38420Sstevel@tonic-gate BITX(cp->cp_ebx, 31, 24), BITX(cp->cp_ebx, 23, 16));
38430Sstevel@tonic-gate add_amd_tlb(devi, dtlb4k_str,
38440Sstevel@tonic-gate BITX(cp->cp_ebx, 15, 8), BITX(cp->cp_ebx, 7, 0));
38450Sstevel@tonic-gate break;
38460Sstevel@tonic-gate }
38470Sstevel@tonic-gate
38480Sstevel@tonic-gate /*
38490Sstevel@tonic-gate * data L1 cache configuration
38500Sstevel@tonic-gate */
38510Sstevel@tonic-gate
38520Sstevel@tonic-gate add_amd_cache(devi, l1_dcache_str,
38530Sstevel@tonic-gate BITX(cp->cp_ecx, 31, 24), BITX(cp->cp_ecx, 23, 16),
38540Sstevel@tonic-gate BITX(cp->cp_ecx, 15, 8), BITX(cp->cp_ecx, 7, 0));
38550Sstevel@tonic-gate
38560Sstevel@tonic-gate /*
38570Sstevel@tonic-gate * code L1 cache configuration
38580Sstevel@tonic-gate */
38590Sstevel@tonic-gate
38600Sstevel@tonic-gate add_amd_cache(devi, l1_icache_str,
38610Sstevel@tonic-gate BITX(cp->cp_edx, 31, 24), BITX(cp->cp_edx, 23, 16),
38620Sstevel@tonic-gate BITX(cp->cp_edx, 15, 8), BITX(cp->cp_edx, 7, 0));
38630Sstevel@tonic-gate
38640Sstevel@tonic-gate if (cpi->cpi_xmaxeax < 0x80000006)
38650Sstevel@tonic-gate return;
38660Sstevel@tonic-gate cp = &cpi->cpi_extd[6];
38670Sstevel@tonic-gate
38680Sstevel@tonic-gate /* Check for a unified L2 TLB for large pages */
38690Sstevel@tonic-gate
38700Sstevel@tonic-gate if (BITX(cp->cp_eax, 31, 16) == 0)
38710Sstevel@tonic-gate add_amd_l2_tlb(devi, "l2-tlb-2M",
38720Sstevel@tonic-gate BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
38730Sstevel@tonic-gate else {
38740Sstevel@tonic-gate add_amd_l2_tlb(devi, "l2-dtlb-2M",
38750Sstevel@tonic-gate BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
38760Sstevel@tonic-gate add_amd_l2_tlb(devi, "l2-itlb-2M",
38770Sstevel@tonic-gate BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
38780Sstevel@tonic-gate }
38790Sstevel@tonic-gate
38800Sstevel@tonic-gate /* Check for a unified L2 TLB for 4K pages */
38810Sstevel@tonic-gate
38820Sstevel@tonic-gate if (BITX(cp->cp_ebx, 31, 16) == 0) {
38830Sstevel@tonic-gate add_amd_l2_tlb(devi, "l2-tlb-4K",
38840Sstevel@tonic-gate BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
38850Sstevel@tonic-gate } else {
38860Sstevel@tonic-gate add_amd_l2_tlb(devi, "l2-dtlb-4K",
38870Sstevel@tonic-gate BITX(cp->cp_eax, 31, 28), BITX(cp->cp_eax, 27, 16));
38880Sstevel@tonic-gate add_amd_l2_tlb(devi, "l2-itlb-4K",
38890Sstevel@tonic-gate BITX(cp->cp_eax, 15, 12), BITX(cp->cp_eax, 11, 0));
38900Sstevel@tonic-gate }
38910Sstevel@tonic-gate
38920Sstevel@tonic-gate add_amd_l2_cache(devi, l2_cache_str,
38930Sstevel@tonic-gate BITX(cp->cp_ecx, 31, 16), BITX(cp->cp_ecx, 15, 12),
38940Sstevel@tonic-gate BITX(cp->cp_ecx, 11, 8), BITX(cp->cp_ecx, 7, 0));
38950Sstevel@tonic-gate }
38960Sstevel@tonic-gate
38970Sstevel@tonic-gate /*
38980Sstevel@tonic-gate * There are two basic ways that the x86 world describes it cache
38990Sstevel@tonic-gate * and tlb architecture - Intel's way and AMD's way.
39000Sstevel@tonic-gate *
39010Sstevel@tonic-gate * Return which flavor of cache architecture we should use
39020Sstevel@tonic-gate */
39030Sstevel@tonic-gate static int
x86_which_cacheinfo(struct cpuid_info * cpi)39040Sstevel@tonic-gate x86_which_cacheinfo(struct cpuid_info *cpi)
39050Sstevel@tonic-gate {
39060Sstevel@tonic-gate switch (cpi->cpi_vendor) {
39070Sstevel@tonic-gate case X86_VENDOR_Intel:
39080Sstevel@tonic-gate if (cpi->cpi_maxeax >= 2)
39090Sstevel@tonic-gate return (X86_VENDOR_Intel);
39100Sstevel@tonic-gate break;
39110Sstevel@tonic-gate case X86_VENDOR_AMD:
39120Sstevel@tonic-gate /*
39130Sstevel@tonic-gate * The K5 model 1 was the first part from AMD that reported
39140Sstevel@tonic-gate * cache sizes via extended cpuid functions.
39150Sstevel@tonic-gate */
39160Sstevel@tonic-gate if (cpi->cpi_family > 5 ||
39170Sstevel@tonic-gate (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
39180Sstevel@tonic-gate return (X86_VENDOR_AMD);
39190Sstevel@tonic-gate break;
39200Sstevel@tonic-gate case X86_VENDOR_TM:
39210Sstevel@tonic-gate if (cpi->cpi_family >= 5)
39220Sstevel@tonic-gate return (X86_VENDOR_AMD);
39230Sstevel@tonic-gate /*FALLTHROUGH*/
39240Sstevel@tonic-gate default:
39250Sstevel@tonic-gate /*
39260Sstevel@tonic-gate * If they have extended CPU data for 0x80000005
39270Sstevel@tonic-gate * then we assume they have AMD-format cache
39280Sstevel@tonic-gate * information.
39290Sstevel@tonic-gate *
39300Sstevel@tonic-gate * If not, and the vendor happens to be Cyrix,
39310Sstevel@tonic-gate * then try our-Cyrix specific handler.
39320Sstevel@tonic-gate *
39330Sstevel@tonic-gate * If we're not Cyrix, then assume we're using Intel's
39340Sstevel@tonic-gate * table-driven format instead.
39350Sstevel@tonic-gate */
39360Sstevel@tonic-gate if (cpi->cpi_xmaxeax >= 0x80000005)
39370Sstevel@tonic-gate return (X86_VENDOR_AMD);
39380Sstevel@tonic-gate else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
39390Sstevel@tonic-gate return (X86_VENDOR_Cyrix);
39400Sstevel@tonic-gate else if (cpi->cpi_maxeax >= 2)
39410Sstevel@tonic-gate return (X86_VENDOR_Intel);
39420Sstevel@tonic-gate break;
39430Sstevel@tonic-gate }
39440Sstevel@tonic-gate return (-1);
39450Sstevel@tonic-gate }
39460Sstevel@tonic-gate
39470Sstevel@tonic-gate void
cpuid_set_cpu_properties(void * dip,processorid_t cpu_id,struct cpuid_info * cpi)39489652SMichael.Corcoran@Sun.COM cpuid_set_cpu_properties(void *dip, processorid_t cpu_id,
39499652SMichael.Corcoran@Sun.COM struct cpuid_info *cpi)
39500Sstevel@tonic-gate {
39510Sstevel@tonic-gate dev_info_t *cpu_devi;
39520Sstevel@tonic-gate int create;
39530Sstevel@tonic-gate
39549652SMichael.Corcoran@Sun.COM cpu_devi = (dev_info_t *)dip;
39550Sstevel@tonic-gate
39560Sstevel@tonic-gate /* device_type */
39570Sstevel@tonic-gate (void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
39580Sstevel@tonic-gate "device_type", "cpu");
39590Sstevel@tonic-gate
39600Sstevel@tonic-gate /* reg */
39610Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
39620Sstevel@tonic-gate "reg", cpu_id);
39630Sstevel@tonic-gate
39640Sstevel@tonic-gate /* cpu-mhz, and clock-frequency */
39650Sstevel@tonic-gate if (cpu_freq > 0) {
39660Sstevel@tonic-gate long long mul;
39670Sstevel@tonic-gate
39680Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
39690Sstevel@tonic-gate "cpu-mhz", cpu_freq);
39700Sstevel@tonic-gate if ((mul = cpu_freq * 1000000LL) <= INT_MAX)
39710Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
39720Sstevel@tonic-gate "clock-frequency", (int)mul);
39730Sstevel@tonic-gate }
39740Sstevel@tonic-gate
397512826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_CPUID)) {
39760Sstevel@tonic-gate return;
39770Sstevel@tonic-gate }
39780Sstevel@tonic-gate
39790Sstevel@tonic-gate /* vendor-id */
39800Sstevel@tonic-gate (void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
39814481Sbholler "vendor-id", cpi->cpi_vendorstr);
39820Sstevel@tonic-gate
39830Sstevel@tonic-gate if (cpi->cpi_maxeax == 0) {
39840Sstevel@tonic-gate return;
39850Sstevel@tonic-gate }
39860Sstevel@tonic-gate
39870Sstevel@tonic-gate /*
39880Sstevel@tonic-gate * family, model, and step
39890Sstevel@tonic-gate */
39900Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
39914481Sbholler "family", CPI_FAMILY(cpi));
39920Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
39934481Sbholler "cpu-model", CPI_MODEL(cpi));
39940Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
39954481Sbholler "stepping-id", CPI_STEP(cpi));
39960Sstevel@tonic-gate
39970Sstevel@tonic-gate /* type */
39980Sstevel@tonic-gate switch (cpi->cpi_vendor) {
39990Sstevel@tonic-gate case X86_VENDOR_Intel:
40000Sstevel@tonic-gate create = 1;
40010Sstevel@tonic-gate break;
40020Sstevel@tonic-gate default:
40030Sstevel@tonic-gate create = 0;
40040Sstevel@tonic-gate break;
40050Sstevel@tonic-gate }
40060Sstevel@tonic-gate if (create)
40070Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40084481Sbholler "type", CPI_TYPE(cpi));
40090Sstevel@tonic-gate
40100Sstevel@tonic-gate /* ext-family */
40110Sstevel@tonic-gate switch (cpi->cpi_vendor) {
40120Sstevel@tonic-gate case X86_VENDOR_Intel:
40130Sstevel@tonic-gate case X86_VENDOR_AMD:
40140Sstevel@tonic-gate create = cpi->cpi_family >= 0xf;
40150Sstevel@tonic-gate break;
40160Sstevel@tonic-gate default:
40170Sstevel@tonic-gate create = 0;
40180Sstevel@tonic-gate break;
40190Sstevel@tonic-gate }
40200Sstevel@tonic-gate if (create)
40210Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40220Sstevel@tonic-gate "ext-family", CPI_FAMILY_XTD(cpi));
40230Sstevel@tonic-gate
40240Sstevel@tonic-gate /* ext-model */
40250Sstevel@tonic-gate switch (cpi->cpi_vendor) {
40260Sstevel@tonic-gate case X86_VENDOR_Intel:
40276317Skk208521 create = IS_EXTENDED_MODEL_INTEL(cpi);
40282001Sdmick break;
40290Sstevel@tonic-gate case X86_VENDOR_AMD:
40301582Skchow create = CPI_FAMILY(cpi) == 0xf;
40310Sstevel@tonic-gate break;
40320Sstevel@tonic-gate default:
40330Sstevel@tonic-gate create = 0;
40340Sstevel@tonic-gate break;
40350Sstevel@tonic-gate }
40360Sstevel@tonic-gate if (create)
40370Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40384481Sbholler "ext-model", CPI_MODEL_XTD(cpi));
40390Sstevel@tonic-gate
40400Sstevel@tonic-gate /* generation */
40410Sstevel@tonic-gate switch (cpi->cpi_vendor) {
40420Sstevel@tonic-gate case X86_VENDOR_AMD:
40430Sstevel@tonic-gate /*
40440Sstevel@tonic-gate * AMD K5 model 1 was the first part to support this
40450Sstevel@tonic-gate */
40460Sstevel@tonic-gate create = cpi->cpi_xmaxeax >= 0x80000001;
40470Sstevel@tonic-gate break;
40480Sstevel@tonic-gate default:
40490Sstevel@tonic-gate create = 0;
40500Sstevel@tonic-gate break;
40510Sstevel@tonic-gate }
40520Sstevel@tonic-gate if (create)
40530Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40540Sstevel@tonic-gate "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));
40550Sstevel@tonic-gate
40560Sstevel@tonic-gate /* brand-id */
40570Sstevel@tonic-gate switch (cpi->cpi_vendor) {
40580Sstevel@tonic-gate case X86_VENDOR_Intel:
40590Sstevel@tonic-gate /*
40600Sstevel@tonic-gate * brand id first appeared on Pentium III Xeon model 8,
40610Sstevel@tonic-gate * and Celeron model 8 processors and Opteron
40620Sstevel@tonic-gate */
40630Sstevel@tonic-gate create = cpi->cpi_family > 6 ||
40640Sstevel@tonic-gate (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
40650Sstevel@tonic-gate break;
40660Sstevel@tonic-gate case X86_VENDOR_AMD:
40670Sstevel@tonic-gate create = cpi->cpi_family >= 0xf;
40680Sstevel@tonic-gate break;
40690Sstevel@tonic-gate default:
40700Sstevel@tonic-gate create = 0;
40710Sstevel@tonic-gate break;
40720Sstevel@tonic-gate }
40730Sstevel@tonic-gate if (create && cpi->cpi_brandid != 0) {
40740Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40750Sstevel@tonic-gate "brand-id", cpi->cpi_brandid);
40760Sstevel@tonic-gate }
40770Sstevel@tonic-gate
40780Sstevel@tonic-gate /* chunks, and apic-id */
40790Sstevel@tonic-gate switch (cpi->cpi_vendor) {
40800Sstevel@tonic-gate /*
40810Sstevel@tonic-gate * first available on Pentium IV and Opteron (K8)
40820Sstevel@tonic-gate */
40831975Sdmick case X86_VENDOR_Intel:
40841975Sdmick create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
40851975Sdmick break;
40861975Sdmick case X86_VENDOR_AMD:
40870Sstevel@tonic-gate create = cpi->cpi_family >= 0xf;
40880Sstevel@tonic-gate break;
40890Sstevel@tonic-gate default:
40900Sstevel@tonic-gate create = 0;
40910Sstevel@tonic-gate break;
40920Sstevel@tonic-gate }
40930Sstevel@tonic-gate if (create) {
40940Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40954481Sbholler "chunks", CPI_CHUNKS(cpi));
40960Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
40977282Smishra "apic-id", cpi->cpi_apicid);
40981414Scindi if (cpi->cpi_chipid >= 0) {
40990Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41000Sstevel@tonic-gate "chip#", cpi->cpi_chipid);
41011414Scindi (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41021414Scindi "clog#", cpi->cpi_clogid);
41031414Scindi }
41040Sstevel@tonic-gate }
41050Sstevel@tonic-gate
41060Sstevel@tonic-gate /* cpuid-features */
41070Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41080Sstevel@tonic-gate "cpuid-features", CPI_FEATURES_EDX(cpi));
41090Sstevel@tonic-gate
41100Sstevel@tonic-gate
41110Sstevel@tonic-gate /* cpuid-features-ecx */
41120Sstevel@tonic-gate switch (cpi->cpi_vendor) {
41130Sstevel@tonic-gate case X86_VENDOR_Intel:
41141975Sdmick create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
41150Sstevel@tonic-gate break;
41160Sstevel@tonic-gate default:
41170Sstevel@tonic-gate create = 0;
41180Sstevel@tonic-gate break;
41190Sstevel@tonic-gate }
41200Sstevel@tonic-gate if (create)
41210Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41220Sstevel@tonic-gate "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));
41230Sstevel@tonic-gate
41240Sstevel@tonic-gate /* ext-cpuid-features */
41250Sstevel@tonic-gate switch (cpi->cpi_vendor) {
41261975Sdmick case X86_VENDOR_Intel:
41270Sstevel@tonic-gate case X86_VENDOR_AMD:
41280Sstevel@tonic-gate case X86_VENDOR_Cyrix:
41290Sstevel@tonic-gate case X86_VENDOR_TM:
41300Sstevel@tonic-gate case X86_VENDOR_Centaur:
41310Sstevel@tonic-gate create = cpi->cpi_xmaxeax >= 0x80000001;
41320Sstevel@tonic-gate break;
41330Sstevel@tonic-gate default:
41340Sstevel@tonic-gate create = 0;
41350Sstevel@tonic-gate break;
41360Sstevel@tonic-gate }
41371975Sdmick if (create) {
41380Sstevel@tonic-gate (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41394481Sbholler "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
41401975Sdmick (void) ndi_prop_update_int(DDI_DEV_T_NONE, cpu_devi,
41414481Sbholler "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
41421975Sdmick }
41430Sstevel@tonic-gate
41440Sstevel@tonic-gate /*
41450Sstevel@tonic-gate * Brand String first appeared in Intel Pentium IV, AMD K5
41460Sstevel@tonic-gate * model 1, and Cyrix GXm. On earlier models we try and
41470Sstevel@tonic-gate * simulate something similar .. so this string should always
41480Sstevel@tonic-gate * same -something- about the processor, however lame.
41490Sstevel@tonic-gate */
41500Sstevel@tonic-gate (void) ndi_prop_update_string(DDI_DEV_T_NONE, cpu_devi,
41510Sstevel@tonic-gate "brand-string", cpi->cpi_brandstr);
41520Sstevel@tonic-gate
41530Sstevel@tonic-gate /*
41540Sstevel@tonic-gate * Finally, cache and tlb information
41550Sstevel@tonic-gate */
41560Sstevel@tonic-gate switch (x86_which_cacheinfo(cpi)) {
41570Sstevel@tonic-gate case X86_VENDOR_Intel:
41580Sstevel@tonic-gate intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
41590Sstevel@tonic-gate break;
41600Sstevel@tonic-gate case X86_VENDOR_Cyrix:
41610Sstevel@tonic-gate cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
41620Sstevel@tonic-gate break;
41630Sstevel@tonic-gate case X86_VENDOR_AMD:
41640Sstevel@tonic-gate amd_cache_info(cpi, cpu_devi);
41650Sstevel@tonic-gate break;
41660Sstevel@tonic-gate default:
41670Sstevel@tonic-gate break;
41680Sstevel@tonic-gate }
41690Sstevel@tonic-gate }
41700Sstevel@tonic-gate
/*
 * Result holder for the L2 cacheinfo walkers below: optional output
 * pointers for size, line size and associativity, plus the walk's
 * return value (the L2 cache size, or -1 if no L2 was found).
 */
struct l2info {
	int *l2i_csz;		/* out: cache size, if non-NULL */
	int *l2i_lsz;		/* out: line size, if non-NULL */
	int *l2i_assoc;		/* out: associativity, if non-NULL */
	int l2i_ret;		/* result of the walk */
};
41770Sstevel@tonic-gate
41780Sstevel@tonic-gate /*
41790Sstevel@tonic-gate * A cacheinfo walker that fetches the size, line-size and associativity
41800Sstevel@tonic-gate * of the L2 cache
41810Sstevel@tonic-gate */
41820Sstevel@tonic-gate static int
intel_l2cinfo(void * arg,const struct cachetab * ct)41830Sstevel@tonic-gate intel_l2cinfo(void *arg, const struct cachetab *ct)
41840Sstevel@tonic-gate {
41850Sstevel@tonic-gate struct l2info *l2i = arg;
41860Sstevel@tonic-gate int *ip;
41870Sstevel@tonic-gate
41880Sstevel@tonic-gate if (ct->ct_label != l2_cache_str &&
41890Sstevel@tonic-gate ct->ct_label != sl2_cache_str)
41900Sstevel@tonic-gate return (0); /* not an L2 -- keep walking */
41910Sstevel@tonic-gate
41920Sstevel@tonic-gate if ((ip = l2i->l2i_csz) != NULL)
41930Sstevel@tonic-gate *ip = ct->ct_size;
41940Sstevel@tonic-gate if ((ip = l2i->l2i_lsz) != NULL)
41950Sstevel@tonic-gate *ip = ct->ct_line_size;
41960Sstevel@tonic-gate if ((ip = l2i->l2i_assoc) != NULL)
41970Sstevel@tonic-gate *ip = ct->ct_assoc;
41980Sstevel@tonic-gate l2i->l2i_ret = ct->ct_size;
41990Sstevel@tonic-gate return (1); /* was an L2 -- terminate walk */
42000Sstevel@tonic-gate }
42010Sstevel@tonic-gate
/*
 * AMD L2/L3 Cache and TLB Associativity Field Definition:
 *
 * Unlike the associativity for the L1 cache and tlb where the 8 bit
 * value is the associativity, the associativity for the L2 cache and
 * tlb is encoded in the following table. The 4 bit L2 value serves as
 * an index into the amd_afd[] array to determine the associativity.
 * -1 is undefined. 0 is fully associative.
 */
static int amd_afd[] = {
	-1, 1, 2, -1, 4, -1, 8, -1, 16, -1, 32, 48, 64, 96, 128, 0
};
42145070Skchow
42150Sstevel@tonic-gate static void
amd_l2cacheinfo(struct cpuid_info * cpi,struct l2info * l2i)42160Sstevel@tonic-gate amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
42170Sstevel@tonic-gate {
42181228Sandrei struct cpuid_regs *cp;
42190Sstevel@tonic-gate uint_t size, assoc;
42205070Skchow int i;
42210Sstevel@tonic-gate int *ip;
42220Sstevel@tonic-gate
42230Sstevel@tonic-gate if (cpi->cpi_xmaxeax < 0x80000006)
42240Sstevel@tonic-gate return;
42250Sstevel@tonic-gate cp = &cpi->cpi_extd[6];
42260Sstevel@tonic-gate
42275070Skchow if ((i = BITX(cp->cp_ecx, 15, 12)) != 0 &&
42280Sstevel@tonic-gate (size = BITX(cp->cp_ecx, 31, 16)) != 0) {
42290Sstevel@tonic-gate uint_t cachesz = size * 1024;
42305070Skchow assoc = amd_afd[i];
42315070Skchow
42325070Skchow ASSERT(assoc != -1);
42330Sstevel@tonic-gate
42340Sstevel@tonic-gate if ((ip = l2i->l2i_csz) != NULL)
42350Sstevel@tonic-gate *ip = cachesz;
42360Sstevel@tonic-gate if ((ip = l2i->l2i_lsz) != NULL)
42370Sstevel@tonic-gate *ip = BITX(cp->cp_ecx, 7, 0);
42380Sstevel@tonic-gate if ((ip = l2i->l2i_assoc) != NULL)
42390Sstevel@tonic-gate *ip = assoc;
42400Sstevel@tonic-gate l2i->l2i_ret = cachesz;
42410Sstevel@tonic-gate }
42420Sstevel@tonic-gate }
42430Sstevel@tonic-gate
42440Sstevel@tonic-gate int
getl2cacheinfo(cpu_t * cpu,int * csz,int * lsz,int * assoc)42450Sstevel@tonic-gate getl2cacheinfo(cpu_t *cpu, int *csz, int *lsz, int *assoc)
42460Sstevel@tonic-gate {
42470Sstevel@tonic-gate struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
42480Sstevel@tonic-gate struct l2info __l2info, *l2i = &__l2info;
42490Sstevel@tonic-gate
42500Sstevel@tonic-gate l2i->l2i_csz = csz;
42510Sstevel@tonic-gate l2i->l2i_lsz = lsz;
42520Sstevel@tonic-gate l2i->l2i_assoc = assoc;
42530Sstevel@tonic-gate l2i->l2i_ret = -1;
42540Sstevel@tonic-gate
42550Sstevel@tonic-gate switch (x86_which_cacheinfo(cpi)) {
42560Sstevel@tonic-gate case X86_VENDOR_Intel:
42570Sstevel@tonic-gate intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
42580Sstevel@tonic-gate break;
42590Sstevel@tonic-gate case X86_VENDOR_Cyrix:
42600Sstevel@tonic-gate cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
42610Sstevel@tonic-gate break;
42620Sstevel@tonic-gate case X86_VENDOR_AMD:
42630Sstevel@tonic-gate amd_l2cacheinfo(cpi, l2i);
42640Sstevel@tonic-gate break;
42650Sstevel@tonic-gate default:
42660Sstevel@tonic-gate break;
42670Sstevel@tonic-gate }
42680Sstevel@tonic-gate return (l2i->l2i_ret);
42690Sstevel@tonic-gate }
42704481Sbholler
42715084Sjohnlev #if !defined(__xpv)
42725084Sjohnlev
42735045Sbholler uint32_t *
cpuid_mwait_alloc(cpu_t * cpu)42745045Sbholler cpuid_mwait_alloc(cpu_t *cpu)
42755045Sbholler {
42765045Sbholler uint32_t *ret;
42775045Sbholler size_t mwait_size;
42785045Sbholler
427912004Sjiang.liu@intel.com ASSERT(cpuid_checkpass(CPU, 2));
428012004Sjiang.liu@intel.com
428112004Sjiang.liu@intel.com mwait_size = CPU->cpu_m.mcpu_cpi->cpi_mwait.mon_max;
42825045Sbholler if (mwait_size == 0)
42835045Sbholler return (NULL);
42845045Sbholler
42855045Sbholler /*
42865045Sbholler * kmem_alloc() returns cache line size aligned data for mwait_size
42875045Sbholler * allocations. mwait_size is currently cache line sized. Neither
42885045Sbholler * of these implementation details are guarantied to be true in the
42895045Sbholler * future.
42905045Sbholler *
42915045Sbholler * First try allocating mwait_size as kmem_alloc() currently returns
42925045Sbholler * correctly aligned memory. If kmem_alloc() does not return
42935045Sbholler * mwait_size aligned memory, then use mwait_size ROUNDUP.
42945045Sbholler *
42955045Sbholler * Set cpi_mwait.buf_actual and cpi_mwait.size_actual in case we
42965045Sbholler * decide to free this memory.
42975045Sbholler */
42985045Sbholler ret = kmem_zalloc(mwait_size, KM_SLEEP);
42995045Sbholler if (ret == (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size)) {
43005045Sbholler cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
43015045Sbholler cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size;
43025045Sbholler *ret = MWAIT_RUNNING;
43035045Sbholler return (ret);
43045045Sbholler } else {
43055045Sbholler kmem_free(ret, mwait_size);
43065045Sbholler ret = kmem_zalloc(mwait_size * 2, KM_SLEEP);
43075045Sbholler cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = ret;
43085045Sbholler cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = mwait_size * 2;
43095045Sbholler ret = (uint32_t *)P2ROUNDUP((uintptr_t)ret, mwait_size);
43105045Sbholler *ret = MWAIT_RUNNING;
43115045Sbholler return (ret);
43125045Sbholler }
43135045Sbholler }
43145045Sbholler
43155045Sbholler void
cpuid_mwait_free(cpu_t * cpu)43165045Sbholler cpuid_mwait_free(cpu_t *cpu)
43174481Sbholler {
431812004Sjiang.liu@intel.com if (cpu->cpu_m.mcpu_cpi == NULL) {
431912004Sjiang.liu@intel.com return;
432012004Sjiang.liu@intel.com }
43215045Sbholler
43225045Sbholler if (cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual != NULL &&
43235045Sbholler cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual > 0) {
43245045Sbholler kmem_free(cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual,
43255045Sbholler cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual);
43265045Sbholler }
43275045Sbholler
43285045Sbholler cpu->cpu_m.mcpu_cpi->cpi_mwait.buf_actual = NULL;
43295045Sbholler cpu->cpu_m.mcpu_cpi->cpi_mwait.size_actual = 0;
43304481Sbholler }
43315084Sjohnlev
43325322Ssudheer void
patch_tsc_read(int flag)43335322Ssudheer patch_tsc_read(int flag)
43345322Ssudheer {
43355322Ssudheer size_t cnt;
43367532SSean.Ye@Sun.COM
43375322Ssudheer switch (flag) {
43385322Ssudheer case X86_NO_TSC:
43395322Ssudheer cnt = &_no_rdtsc_end - &_no_rdtsc_start;
43405338Ssudheer (void) memcpy((void *)tsc_read, (void *)&_no_rdtsc_start, cnt);
43415322Ssudheer break;
43425322Ssudheer case X86_HAVE_TSCP:
43435322Ssudheer cnt = &_tscp_end - &_tscp_start;
43445338Ssudheer (void) memcpy((void *)tsc_read, (void *)&_tscp_start, cnt);
43455322Ssudheer break;
43465322Ssudheer case X86_TSC_MFENCE:
43475322Ssudheer cnt = &_tsc_mfence_end - &_tsc_mfence_start;
43485338Ssudheer (void) memcpy((void *)tsc_read,
43495338Ssudheer (void *)&_tsc_mfence_start, cnt);
43505322Ssudheer break;
43516642Ssudheer case X86_TSC_LFENCE:
43526642Ssudheer cnt = &_tsc_lfence_end - &_tsc_lfence_start;
43536642Ssudheer (void) memcpy((void *)tsc_read,
43546642Ssudheer (void *)&_tsc_lfence_start, cnt);
43556642Ssudheer break;
43565322Ssudheer default:
43575322Ssudheer break;
43585322Ssudheer }
43595322Ssudheer }
43605322Ssudheer
43618906SEric.Saxe@Sun.COM int
cpuid_deep_cstates_supported(void)43628906SEric.Saxe@Sun.COM cpuid_deep_cstates_supported(void)
43638906SEric.Saxe@Sun.COM {
43648906SEric.Saxe@Sun.COM struct cpuid_info *cpi;
43658906SEric.Saxe@Sun.COM struct cpuid_regs regs;
43668906SEric.Saxe@Sun.COM
43678906SEric.Saxe@Sun.COM ASSERT(cpuid_checkpass(CPU, 1));
43688906SEric.Saxe@Sun.COM
43698906SEric.Saxe@Sun.COM cpi = CPU->cpu_m.mcpu_cpi;
43708906SEric.Saxe@Sun.COM
437112826Skuriakose.kuruvilla@oracle.com if (!is_x86_feature(x86_featureset, X86FSET_CPUID))
43728906SEric.Saxe@Sun.COM return (0);
43738906SEric.Saxe@Sun.COM
43748906SEric.Saxe@Sun.COM switch (cpi->cpi_vendor) {
43758906SEric.Saxe@Sun.COM case X86_VENDOR_Intel:
43768906SEric.Saxe@Sun.COM if (cpi->cpi_xmaxeax < 0x80000007)
43778906SEric.Saxe@Sun.COM return (0);
43788906SEric.Saxe@Sun.COM
43798906SEric.Saxe@Sun.COM /*
43808906SEric.Saxe@Sun.COM * TSC run at a constant rate in all ACPI C-states?
43818906SEric.Saxe@Sun.COM */
43828906SEric.Saxe@Sun.COM regs.cp_eax = 0x80000007;
43838906SEric.Saxe@Sun.COM (void) __cpuid_insn(®s);
43848906SEric.Saxe@Sun.COM return (regs.cp_edx & CPUID_TSC_CSTATE_INVARIANCE);
43858906SEric.Saxe@Sun.COM
43868906SEric.Saxe@Sun.COM default:
43878906SEric.Saxe@Sun.COM return (0);
43888906SEric.Saxe@Sun.COM }
43898906SEric.Saxe@Sun.COM }
43908906SEric.Saxe@Sun.COM
43918930SBill.Holler@Sun.COM #endif /* !__xpv */
43928930SBill.Holler@Sun.COM
43938930SBill.Holler@Sun.COM void
post_startup_cpu_fixups(void)43948930SBill.Holler@Sun.COM post_startup_cpu_fixups(void)
43958930SBill.Holler@Sun.COM {
43968930SBill.Holler@Sun.COM #ifndef __xpv
43978930SBill.Holler@Sun.COM /*
43988930SBill.Holler@Sun.COM * Some AMD processors support C1E state. Entering this state will
43998930SBill.Holler@Sun.COM * cause the local APIC timer to stop, which we can't deal with at
44008930SBill.Holler@Sun.COM * this time.
44018930SBill.Holler@Sun.COM */
44028930SBill.Holler@Sun.COM if (cpuid_getvendor(CPU) == X86_VENDOR_AMD) {
44038930SBill.Holler@Sun.COM on_trap_data_t otd;
44048930SBill.Holler@Sun.COM uint64_t reg;
44058930SBill.Holler@Sun.COM
44068930SBill.Holler@Sun.COM if (!on_trap(&otd, OT_DATA_ACCESS)) {
44078930SBill.Holler@Sun.COM reg = rdmsr(MSR_AMD_INT_PENDING_CMP_HALT);
44088930SBill.Holler@Sun.COM /* Disable C1E state if it is enabled by BIOS */
44098930SBill.Holler@Sun.COM if ((reg >> AMD_ACTONCMPHALT_SHIFT) &
44108930SBill.Holler@Sun.COM AMD_ACTONCMPHALT_MASK) {
44118930SBill.Holler@Sun.COM reg &= ~(AMD_ACTONCMPHALT_MASK <<
44128930SBill.Holler@Sun.COM AMD_ACTONCMPHALT_SHIFT);
44138930SBill.Holler@Sun.COM wrmsr(MSR_AMD_INT_PENDING_CMP_HALT, reg);
44148930SBill.Holler@Sun.COM }
44158930SBill.Holler@Sun.COM }
44168930SBill.Holler@Sun.COM no_trap();
44178930SBill.Holler@Sun.COM }
44188930SBill.Holler@Sun.COM #endif /* !__xpv */
44198930SBill.Holler@Sun.COM }
44208930SBill.Holler@Sun.COM
44219283SBill.Holler@Sun.COM /*
442213134Skuriakose.kuruvilla@oracle.com * Setup necessary registers to enable XSAVE feature on this processor.
442313134Skuriakose.kuruvilla@oracle.com * This function needs to be called early enough, so that no xsave/xrstor
442413134Skuriakose.kuruvilla@oracle.com * ops will execute on the processor before the MSRs are properly set up.
442513134Skuriakose.kuruvilla@oracle.com *
442613134Skuriakose.kuruvilla@oracle.com * Current implementation has the following assumption:
442713134Skuriakose.kuruvilla@oracle.com * - cpuid_pass1() is done, so that X86 features are known.
442813134Skuriakose.kuruvilla@oracle.com * - fpu_probe() is done, so that fp_save_mech is chosen.
442913134Skuriakose.kuruvilla@oracle.com */
443013134Skuriakose.kuruvilla@oracle.com void
xsave_setup_msr(cpu_t * cpu)443113134Skuriakose.kuruvilla@oracle.com xsave_setup_msr(cpu_t *cpu)
443213134Skuriakose.kuruvilla@oracle.com {
443313134Skuriakose.kuruvilla@oracle.com ASSERT(fp_save_mech == FP_XSAVE);
443413134Skuriakose.kuruvilla@oracle.com ASSERT(is_x86_feature(x86_featureset, X86FSET_XSAVE));
443513134Skuriakose.kuruvilla@oracle.com
443613134Skuriakose.kuruvilla@oracle.com /* Enable OSXSAVE in CR4. */
443713134Skuriakose.kuruvilla@oracle.com setcr4(getcr4() | CR4_OSXSAVE);
443813134Skuriakose.kuruvilla@oracle.com /*
443913134Skuriakose.kuruvilla@oracle.com * Update SW copy of ECX, so that /dev/cpu/self/cpuid will report
444013134Skuriakose.kuruvilla@oracle.com * correct value.
444113134Skuriakose.kuruvilla@oracle.com */
444213134Skuriakose.kuruvilla@oracle.com cpu->cpu_m.mcpu_cpi->cpi_std[1].cp_ecx |= CPUID_INTC_ECX_OSXSAVE;
444313134Skuriakose.kuruvilla@oracle.com setup_xfem();
444413134Skuriakose.kuruvilla@oracle.com }
444513134Skuriakose.kuruvilla@oracle.com
444613134Skuriakose.kuruvilla@oracle.com /*
44479283SBill.Holler@Sun.COM * Starting with the Westmere processor the local
44489283SBill.Holler@Sun.COM * APIC timer will continue running in all C-states,
44499283SBill.Holler@Sun.COM * including the deepest C-states.
44509283SBill.Holler@Sun.COM */
44519283SBill.Holler@Sun.COM int
cpuid_arat_supported(void)44529283SBill.Holler@Sun.COM cpuid_arat_supported(void)
44539283SBill.Holler@Sun.COM {
44549283SBill.Holler@Sun.COM struct cpuid_info *cpi;
44559283SBill.Holler@Sun.COM struct cpuid_regs regs;
44569283SBill.Holler@Sun.COM
44579283SBill.Holler@Sun.COM ASSERT(cpuid_checkpass(CPU, 1));
445812826Skuriakose.kuruvilla@oracle.com ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
44599283SBill.Holler@Sun.COM
44609283SBill.Holler@Sun.COM cpi = CPU->cpu_m.mcpu_cpi;
44619283SBill.Holler@Sun.COM
44629283SBill.Holler@Sun.COM switch (cpi->cpi_vendor) {
44639283SBill.Holler@Sun.COM case X86_VENDOR_Intel:
44649283SBill.Holler@Sun.COM /*
44659283SBill.Holler@Sun.COM * Always-running Local APIC Timer is
44669283SBill.Holler@Sun.COM * indicated by CPUID.6.EAX[2].
44679283SBill.Holler@Sun.COM */
44689283SBill.Holler@Sun.COM if (cpi->cpi_maxeax >= 6) {
44699283SBill.Holler@Sun.COM regs.cp_eax = 6;
44709283SBill.Holler@Sun.COM (void) cpuid_insn(NULL, ®s);
44719283SBill.Holler@Sun.COM return (regs.cp_eax & CPUID_CSTATE_ARAT);
44729283SBill.Holler@Sun.COM } else {
44739283SBill.Holler@Sun.COM return (0);
44749283SBill.Holler@Sun.COM }
44759283SBill.Holler@Sun.COM default:
44769283SBill.Holler@Sun.COM return (0);
44779283SBill.Holler@Sun.COM }
44789283SBill.Holler@Sun.COM }
44799283SBill.Holler@Sun.COM
448010992Saubrey.li@intel.com /*
448110992Saubrey.li@intel.com * Check support for Intel ENERGY_PERF_BIAS feature
448210992Saubrey.li@intel.com */
448310992Saubrey.li@intel.com int
cpuid_iepb_supported(struct cpu * cp)448410992Saubrey.li@intel.com cpuid_iepb_supported(struct cpu *cp)
448510992Saubrey.li@intel.com {
448610992Saubrey.li@intel.com struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
448710992Saubrey.li@intel.com struct cpuid_regs regs;
448810992Saubrey.li@intel.com
448910992Saubrey.li@intel.com ASSERT(cpuid_checkpass(cp, 1));
449010992Saubrey.li@intel.com
449112826Skuriakose.kuruvilla@oracle.com if (!(is_x86_feature(x86_featureset, X86FSET_CPUID)) ||
449212826Skuriakose.kuruvilla@oracle.com !(is_x86_feature(x86_featureset, X86FSET_MSR))) {
449310992Saubrey.li@intel.com return (0);
449410992Saubrey.li@intel.com }
449510992Saubrey.li@intel.com
449610992Saubrey.li@intel.com /*
449710992Saubrey.li@intel.com * Intel ENERGY_PERF_BIAS MSR is indicated by
449810992Saubrey.li@intel.com * capability bit CPUID.6.ECX.3
449910992Saubrey.li@intel.com */
450010992Saubrey.li@intel.com if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
450110992Saubrey.li@intel.com return (0);
450210992Saubrey.li@intel.com
450310992Saubrey.li@intel.com regs.cp_eax = 0x6;
450410992Saubrey.li@intel.com (void) cpuid_insn(NULL, ®s);
450510992Saubrey.li@intel.com return (regs.cp_ecx & CPUID_EPB_SUPPORT);
450610992Saubrey.li@intel.com }
450710992Saubrey.li@intel.com
450813029SKrishnendu.Sadhukhan@Sun.COM /*
450913029SKrishnendu.Sadhukhan@Sun.COM * Check support for TSC deadline timer
451013029SKrishnendu.Sadhukhan@Sun.COM *
451113029SKrishnendu.Sadhukhan@Sun.COM * TSC deadline timer provides a superior software programming
451213029SKrishnendu.Sadhukhan@Sun.COM * model over local APIC timer that eliminates "time drifts".
451313029SKrishnendu.Sadhukhan@Sun.COM * Instead of specifying a relative time, software specifies an
451413029SKrishnendu.Sadhukhan@Sun.COM * absolute time as the target at which the processor should
451513029SKrishnendu.Sadhukhan@Sun.COM * generate a timer event.
451613029SKrishnendu.Sadhukhan@Sun.COM */
451713029SKrishnendu.Sadhukhan@Sun.COM int
cpuid_deadline_tsc_supported(void)451813029SKrishnendu.Sadhukhan@Sun.COM cpuid_deadline_tsc_supported(void)
451913029SKrishnendu.Sadhukhan@Sun.COM {
452013029SKrishnendu.Sadhukhan@Sun.COM struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
452113029SKrishnendu.Sadhukhan@Sun.COM struct cpuid_regs regs;
452213029SKrishnendu.Sadhukhan@Sun.COM
452313029SKrishnendu.Sadhukhan@Sun.COM ASSERT(cpuid_checkpass(CPU, 1));
452413029SKrishnendu.Sadhukhan@Sun.COM ASSERT(is_x86_feature(x86_featureset, X86FSET_CPUID));
452513029SKrishnendu.Sadhukhan@Sun.COM
452613029SKrishnendu.Sadhukhan@Sun.COM switch (cpi->cpi_vendor) {
452713029SKrishnendu.Sadhukhan@Sun.COM case X86_VENDOR_Intel:
452813029SKrishnendu.Sadhukhan@Sun.COM if (cpi->cpi_maxeax >= 1) {
452913029SKrishnendu.Sadhukhan@Sun.COM regs.cp_eax = 1;
453013029SKrishnendu.Sadhukhan@Sun.COM (void) cpuid_insn(NULL, ®s);
453113029SKrishnendu.Sadhukhan@Sun.COM return (regs.cp_ecx & CPUID_DEADLINE_TSC);
453213029SKrishnendu.Sadhukhan@Sun.COM } else {
453313029SKrishnendu.Sadhukhan@Sun.COM return (0);
453413029SKrishnendu.Sadhukhan@Sun.COM }
453513029SKrishnendu.Sadhukhan@Sun.COM default:
453613029SKrishnendu.Sadhukhan@Sun.COM return (0);
453713029SKrishnendu.Sadhukhan@Sun.COM }
453813029SKrishnendu.Sadhukhan@Sun.COM }
453913029SKrishnendu.Sadhukhan@Sun.COM
45408377SBill.Holler@Sun.COM #if defined(__amd64) && !defined(__xpv)
45418377SBill.Holler@Sun.COM /*
45428377SBill.Holler@Sun.COM * Patch in versions of bcopy for high performance Intel Nhm processors
45438377SBill.Holler@Sun.COM * and later...
45448377SBill.Holler@Sun.COM */
45458377SBill.Holler@Sun.COM void
patch_memops(uint_t vendor)45468377SBill.Holler@Sun.COM patch_memops(uint_t vendor)
45478377SBill.Holler@Sun.COM {
45488377SBill.Holler@Sun.COM size_t cnt, i;
45498377SBill.Holler@Sun.COM caddr_t to, from;
45508377SBill.Holler@Sun.COM
455112826Skuriakose.kuruvilla@oracle.com if ((vendor == X86_VENDOR_Intel) &&
455212826Skuriakose.kuruvilla@oracle.com is_x86_feature(x86_featureset, X86FSET_SSE4_2)) {
45538377SBill.Holler@Sun.COM cnt = &bcopy_patch_end - &bcopy_patch_start;
45548377SBill.Holler@Sun.COM to = &bcopy_ck_size;
45558377SBill.Holler@Sun.COM from = &bcopy_patch_start;
45568377SBill.Holler@Sun.COM for (i = 0; i < cnt; i++) {
45578377SBill.Holler@Sun.COM *to++ = *from++;
45588377SBill.Holler@Sun.COM }
45598377SBill.Holler@Sun.COM }
45608377SBill.Holler@Sun.COM }
45618377SBill.Holler@Sun.COM #endif /* __amd64 && !__xpv */
456212261SVuong.Nguyen@Sun.COM
456312261SVuong.Nguyen@Sun.COM /*
456412261SVuong.Nguyen@Sun.COM * This function finds the number of bits to represent the number of cores per
456512261SVuong.Nguyen@Sun.COM * chip and the number of strands per core for the Intel platforms.
456612261SVuong.Nguyen@Sun.COM * It re-uses the x2APIC cpuid code of the cpuid_pass2().
456712261SVuong.Nguyen@Sun.COM */
456812261SVuong.Nguyen@Sun.COM void
cpuid_get_ext_topo(uint_t vendor,uint_t * core_nbits,uint_t * strand_nbits)456912261SVuong.Nguyen@Sun.COM cpuid_get_ext_topo(uint_t vendor, uint_t *core_nbits, uint_t *strand_nbits)
457012261SVuong.Nguyen@Sun.COM {
457112261SVuong.Nguyen@Sun.COM struct cpuid_regs regs;
457212261SVuong.Nguyen@Sun.COM struct cpuid_regs *cp = ®s;
457312261SVuong.Nguyen@Sun.COM
457412261SVuong.Nguyen@Sun.COM if (vendor != X86_VENDOR_Intel) {
457512261SVuong.Nguyen@Sun.COM return;
457612261SVuong.Nguyen@Sun.COM }
457712261SVuong.Nguyen@Sun.COM
457812261SVuong.Nguyen@Sun.COM /* if the cpuid level is 0xB, extended topo is available. */
457912261SVuong.Nguyen@Sun.COM cp->cp_eax = 0;
458012261SVuong.Nguyen@Sun.COM if (__cpuid_insn(cp) >= 0xB) {
458112261SVuong.Nguyen@Sun.COM
458212261SVuong.Nguyen@Sun.COM cp->cp_eax = 0xB;
458312261SVuong.Nguyen@Sun.COM cp->cp_edx = cp->cp_ebx = cp->cp_ecx = 0;
458412261SVuong.Nguyen@Sun.COM (void) __cpuid_insn(cp);
458512261SVuong.Nguyen@Sun.COM
458612261SVuong.Nguyen@Sun.COM /*
458712261SVuong.Nguyen@Sun.COM * Check CPUID.EAX=0BH, ECX=0H:EBX is non-zero, which
458812261SVuong.Nguyen@Sun.COM * indicates that the extended topology enumeration leaf is
458912261SVuong.Nguyen@Sun.COM * available.
459012261SVuong.Nguyen@Sun.COM */
459112261SVuong.Nguyen@Sun.COM if (cp->cp_ebx) {
459212261SVuong.Nguyen@Sun.COM uint_t coreid_shift = 0;
459312261SVuong.Nguyen@Sun.COM uint_t chipid_shift = 0;
459412261SVuong.Nguyen@Sun.COM uint_t i;
459512261SVuong.Nguyen@Sun.COM uint_t level;
459612261SVuong.Nguyen@Sun.COM
459712261SVuong.Nguyen@Sun.COM for (i = 0; i < CPI_FNB_ECX_MAX; i++) {
459812261SVuong.Nguyen@Sun.COM cp->cp_eax = 0xB;
459912261SVuong.Nguyen@Sun.COM cp->cp_ecx = i;
460012261SVuong.Nguyen@Sun.COM
460112261SVuong.Nguyen@Sun.COM (void) __cpuid_insn(cp);
460212261SVuong.Nguyen@Sun.COM level = CPI_CPU_LEVEL_TYPE(cp);
460312261SVuong.Nguyen@Sun.COM
460412261SVuong.Nguyen@Sun.COM if (level == 1) {
460512261SVuong.Nguyen@Sun.COM /*
460612261SVuong.Nguyen@Sun.COM * Thread level processor topology
460712261SVuong.Nguyen@Sun.COM * Number of bits shift right APIC ID
460812261SVuong.Nguyen@Sun.COM * to get the coreid.
460912261SVuong.Nguyen@Sun.COM */
461012261SVuong.Nguyen@Sun.COM coreid_shift = BITX(cp->cp_eax, 4, 0);
461112261SVuong.Nguyen@Sun.COM } else if (level == 2) {
461212261SVuong.Nguyen@Sun.COM /*
461312261SVuong.Nguyen@Sun.COM * Core level processor topology
461412261SVuong.Nguyen@Sun.COM * Number of bits shift right APIC ID
461512261SVuong.Nguyen@Sun.COM * to get the chipid.
461612261SVuong.Nguyen@Sun.COM */
461712261SVuong.Nguyen@Sun.COM chipid_shift = BITX(cp->cp_eax, 4, 0);
461812261SVuong.Nguyen@Sun.COM }
461912261SVuong.Nguyen@Sun.COM }
462012261SVuong.Nguyen@Sun.COM
462112261SVuong.Nguyen@Sun.COM if (coreid_shift > 0 && chipid_shift > coreid_shift) {
462212261SVuong.Nguyen@Sun.COM *strand_nbits = coreid_shift;
462312261SVuong.Nguyen@Sun.COM *core_nbits = chipid_shift - coreid_shift;
462412261SVuong.Nguyen@Sun.COM }
462512261SVuong.Nguyen@Sun.COM }
462612261SVuong.Nguyen@Sun.COM }
462712261SVuong.Nguyen@Sun.COM }
4628