/* us3_cheetahplus.c -- onnv-gate usr/src/uts/sun4u/cpu (revision 8906:e559381f1e2b) */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sysmacros.h>
#include <sys/archsystm.h>
#include <sys/vmsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/machthread.h>
#include <sys/cpu.h>
#include <sys/cmp.h>
#include <sys/elf_SPARC.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kmem.h>
#include <sys/cpuvar.h>
#include <sys/cheetahregs.h>
#include <sys/us3_module.h>
#include <sys/async.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dditypes.h>
#include <sys/prom_debug.h>
#include <sys/prom_plat.h>
#include <sys/cpu_module.h>
#include <sys/sysmacros.h>
#include <sys/intreg.h>
#include <sys/clock.h>
#include <sys/platform_module.h>
#include <sys/machtrap.h>
#include <sys/ontrap.h>
#include <sys/panic.h>
#include <sys/memlist.h>
#include <sys/bootconf.h>
#include <sys/ivintr.h>
#include <sys/atomic.h>
#include <sys/fm/protocol.h>
#include <sys/fm/cpu/UltraSPARC-III.h>
#include <sys/fm/util.h>
#include <sys/pghw.h>

#ifdef	CHEETAHPLUS_ERRATUM_25
#include <sys/cyclic.h>
#endif	/* CHEETAHPLUS_ERRATUM_25 */

/*
 * See comment above cpu_scrub_cpu_setup() for description
 */
#define	SCRUBBER_NEITHER_CORE_ONLINE	0x0
#define	SCRUBBER_CORE_0_ONLINE		0x1
#define	SCRUBBER_CORE_1_ONLINE		0x2
#define	SCRUBBER_BOTH_CORES_ONLINE	(SCRUBBER_CORE_0_ONLINE | \
					SCRUBBER_CORE_1_ONLINE)

static int pn_matching_valid_l2_line(uint64_t faddr, ch_ec_data_t *clo_l2_data);
static void cpu_async_log_tlb_parity_err(void *flt);
static cpu_t *cpu_get_sibling_core(cpu_t *cpup);


/*
 * Setup trap handlers.
 */
void
cpu_init_trap(void)
{
	CH_SET_TRAP(pil15_epilogue, ch_pil15_interrupt_instr);

	CH_SET_TRAP(tt0_fecc, fecc_err_instr);
	CH_SET_TRAP(tt1_fecc, fecc_err_tl1_instr);
	CH_SET_TRAP(tt1_swtrap0, fecc_err_tl1_cont_instr);

	CH_SET_TRAP(tt0_dperr, dcache_parity_instr);
	CH_SET_TRAP(tt1_dperr, dcache_parity_tl1_instr);
	CH_SET_TRAP(tt1_swtrap1, dcache_parity_tl1_cont_instr);

	CH_SET_TRAP(tt0_iperr, icache_parity_instr);
	CH_SET_TRAP(tt1_iperr, icache_parity_tl1_instr);
	CH_SET_TRAP(tt1_swtrap2, icache_parity_tl1_cont_instr);
}

/*
 * Set the magic constants of the implementation.
 */
/*ARGSUSED*/
void
cpu_fiximp(pnode_t dnode)
{
	int i, a;
	extern int vac_size, vac_shift;
	extern uint_t vac_mask;

	dcache_size = CH_DCACHE_SIZE;
	dcache_linesize = CH_DCACHE_LSIZE;

	icache_size = CHP_ICACHE_MAX_SIZE;
	icache_linesize = CHP_ICACHE_MIN_LSIZE;

	ecache_size = CH_ECACHE_MAX_SIZE;
	ecache_alignsize = CH_ECACHE_MAX_LSIZE;
	ecache_associativity = CHP_ECACHE_MIN_NWAY;

	/*
	 * ecache_setsize needs to be the maximum of all cpu ecache setsizes
	 */
	ecache_setsize = CHP_ECACHE_MAX_SETSIZE;
	ASSERT(ecache_setsize >= (ecache_size / ecache_associativity));

	vac_size = CH_VAC_SIZE;
	vac_mask = MMU_PAGEMASK & (vac_size - 1);
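	/*
	 * Derive vac_shift as log2(vac_size): the loop below counts how
	 * many times vac_size can be halved before it reaches 1.
	 */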
	i = 0; a = vac_size;
	while (a >>= 1)
		++i;
	vac_shift = i;
	shm_alignment = vac_size;
	vac = 1;
}
1410Sstevel@tonic-gate 
142106Ssusans /*
143106Ssusans  * Use Panther values for Panther-only domains.
144106Ssusans  * See Panther PRM, 1.5.4 Cache Hierarchy
145106Ssusans  */
146106Ssusans void
cpu_fix_allpanther(void)147106Ssusans cpu_fix_allpanther(void)
148106Ssusans {
149106Ssusans 	/* dcache same as Ch+ */
150106Ssusans 	icache_size = PN_ICACHE_SIZE;
151106Ssusans 	icache_linesize = PN_ICACHE_LSIZE;
152106Ssusans 	ecache_size = PN_L3_SIZE;
153106Ssusans 	ecache_alignsize = PN_L3_LINESIZE;
154106Ssusans 	ecache_associativity = PN_L3_NWAYS;
155106Ssusans 	ecache_setsize = PN_L3_SET_SIZE;
156106Ssusans 	ASSERT(ecache_setsize >= (ecache_size / ecache_associativity));
157106Ssusans 	/* vac same as Ch+ */
1581177Sck142721 	/* fix hwcaps for USIV+-only domains */
1591177Sck142721 	cpu_hwcap_flags |= AV_SPARC_POPC;
160106Ssusans }
161106Ssusans 
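/*
 * Send a mondo interrupt to every CPU in 'set'.  Targets are dispatched
 * in groups of up to IDSR_BN_SETS; BUSY targets are polled and NACKed
 * targets are re-shipped until all mondos have been delivered, and we
 * panic if delivery does not complete within xc_tick_limit ticks.
 */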
void
send_mondo_set(cpuset_t set)
{
	int lo, busy, nack, shipped = 0;
	uint16_t i, cpuids[IDSR_BN_SETS];
	uint64_t idsr, nackmask = 0, busymask, curnack, curbusy;
	uint64_t starttick, endtick, tick, lasttick;
#if (NCPU > IDSR_BN_SETS)
	int index = 0;
	int ncpuids = 0;
#endif
#ifdef	CHEETAHPLUS_ERRATUM_25
	int recovered = 0;
	int cpuid;
#endif

	ASSERT(!CPUSET_ISNULL(set));
	starttick = lasttick = gettick();

#if (NCPU <= IDSR_BN_SETS)
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			shipit(i, shipped);
			nackmask |= IDSR_NACK_BIT(shipped);
			cpuids[shipped++] = i;
			CPUSET_DEL(set, i);
			if (CPUSET_ISNULL(set))
				break;
		}
	CPU_STATS_ADDQ(CPU, sys, xcalls, shipped);
#else
	for (i = 0; i < NCPU; i++)
		if (CPU_IN_SET(set, i)) {
			ncpuids++;

			/*
			 * Ship only to the first (IDSR_BN_SETS) CPUs.  If we
			 * find we have shipped to more than (IDSR_BN_SETS)
			 * CPUs, set "index" to the highest numbered CPU in
			 * the set so we can ship to other CPUs a bit later on.
			 */
			if (shipped < IDSR_BN_SETS) {
				shipit(i, shipped);
				nackmask |= IDSR_NACK_BIT(shipped);
				cpuids[shipped++] = i;
				CPUSET_DEL(set, i);
				if (CPUSET_ISNULL(set))
					break;
			} else
				index = (int)i;
		}

	CPU_STATS_ADDQ(CPU, sys, xcalls, ncpuids);
#endif

	busymask = IDSR_NACK_TO_BUSY(nackmask);
	busy = nack = 0;
	endtick = starttick + xc_tick_limit;
	for (;;) {
		idsr = getidsr();
#if (NCPU <= IDSR_BN_SETS)
		if (idsr == 0)
			break;
#else
		if (idsr == 0 && shipped == ncpuids)
			break;
#endif
		tick = gettick();
		/*
		 * If there is a big jump between the current tick
		 * count and lasttick, we have probably hit a breakpoint.
		 * Adjust endtick accordingly to avoid panic.
		 */
		if (tick > (lasttick + xc_tick_jump_limit))
			endtick += (tick - lasttick);
		lasttick = tick;
		if (tick > endtick) {
			if (panic_quiesce)
				return;
#ifdef	CHEETAHPLUS_ERRATUM_25
			cpuid = -1;
			for (i = 0; i < IDSR_BN_SETS; i++) {
				if (idsr & (IDSR_NACK_BIT(i) |
				    IDSR_BUSY_BIT(i))) {
					cpuid = cpuids[i];
					break;
				}
			}
			if (cheetah_sendmondo_recover && cpuid != -1 &&
			    recovered == 0) {
				if (mondo_recover(cpuid, i)) {
					/*
					 * We claimed the whole memory or
					 * full scan is disabled.
					 */
					recovered++;
				}
				tick = gettick();
				endtick = tick + xc_tick_limit;
				lasttick = tick;
				/*
				 * Recheck idsr
				 */
				continue;
			} else
#endif	/* CHEETAHPLUS_ERRATUM_25 */
			{
				cmn_err(CE_CONT, "send mondo timeout "
				    "[%d NACK %d BUSY]\nIDSR 0x%"
				    "" PRIx64 "  cpuids:", nack, busy, idsr);
				for (i = 0; i < IDSR_BN_SETS; i++) {
					if (idsr & (IDSR_NACK_BIT(i) |
					    IDSR_BUSY_BIT(i))) {
						cmn_err(CE_CONT, " 0x%x",
						    cpuids[i]);
					}
				}
				cmn_err(CE_CONT, "\n");
				cmn_err(CE_PANIC, "send_mondo_set: timeout");
			}
		}
		curnack = idsr & nackmask;
		curbusy = idsr & busymask;
#if (NCPU > IDSR_BN_SETS)
		if (shipped < ncpuids) {
			uint64_t cpus_left;
			uint16_t next = (uint16_t)index;

			cpus_left = ~(IDSR_NACK_TO_BUSY(curnack) | curbusy) &
			    busymask;

			if (cpus_left) {
				do {
					/*
					 * Sequence through and ship to the
					 * remainder of the CPUs in the system
					 * (i.e. other than the first
					 * (IDSR_BN_SETS)) in reverse order.
					 */
					lo = lowbit(cpus_left) - 1;
					i = IDSR_BUSY_IDX(lo);
					shipit(next, i);
					shipped++;
					cpuids[i] = next;

					/*
					 * If we've processed all the CPUs,
					 * exit the loop now and save
					 * instructions.
					 */
					if (shipped == ncpuids)
						break;

					for ((index = ((int)next - 1));
					    index >= 0; index--)
						if (CPU_IN_SET(set, index)) {
							next = (uint16_t)index;
							break;
						}

					cpus_left &= ~(1ull << lo);
				} while (cpus_left);
#ifdef	CHEETAHPLUS_ERRATUM_25
				/*
				 * Clear recovered because we are sending to
				 * a new set of targets.
				 */
				recovered = 0;
#endif
				continue;
			}
		}
#endif
		if (curbusy) {
			busy++;
			continue;
		}

#ifdef SEND_MONDO_STATS
		{
			int n = gettick() - starttick;
			if (n < 8192)
				x_nack_stimes[n >> 7]++;
		}
#endif
		while (gettick() < (tick + sys_clock_mhz))
			;
		do {
			lo = lowbit(curnack) - 1;
			i = IDSR_NACK_IDX(lo);
			shipit(cpuids[i], i);
			curnack &= ~(1ull << lo);
		} while (curnack);
		nack++;
		busy = 0;
	}
#ifdef SEND_MONDO_STATS
	{
		int n = gettick() - starttick;
		if (n < 8192)
			x_set_stimes[n >> 7]++;
		else
			x_set_ltimes[(n >> 13) & 0xf]++;
	}
	x_set_cpus[shipped]++;
#endif
}

/*
 * Handles error logging for implementation-specific error types
 */
/*ARGSUSED1*/
int
cpu_impl_async_log_err(void *flt, errorq_elem_t *eqep)
{
	ch_async_flt_t *ch_flt = (ch_async_flt_t *)flt;
	struct async_flt *aflt = (struct async_flt *)flt;

	switch (ch_flt->flt_type) {

	case CPU_IC_PARITY:
		cpu_async_log_ic_parity_err(flt);
		return (CH_ASYNC_LOG_DONE);

	case CPU_DC_PARITY:
		cpu_async_log_dc_parity_err(flt);
		return (CH_ASYNC_LOG_DONE);

	case CPU_DUE:
		cpu_log_err(aflt);
		cpu_page_retire(ch_flt);
		return (CH_ASYNC_LOG_DONE);

	case CPU_ITLB_PARITY:
	case CPU_DTLB_PARITY:
		cpu_async_log_tlb_parity_err(flt);
		return (CH_ASYNC_LOG_DONE);

	/* report the error and continue */
	case CPU_L3_ADDR_PE:
		cpu_log_err(aflt);
		return (CH_ASYNC_LOG_DONE);

	default:
		return (CH_ASYNC_LOG_UNKNOWN);
	}
}

/*
 * Figure out if Ecache is direct-mapped (Cheetah or Cheetah+ with Ecache
 * control ECCR_ASSOC bit off) or 2-way (Cheetah+ with ECCR_ASSOC on).
 * We need to do this on the fly because we may have mixed Cheetah+'s with
 * both direct and 2-way Ecaches. Panther only supports 4-way L3$.
 */
int
cpu_ecache_nway(void)
{
	if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation))
		return (PN_L3_NWAYS);
	return ((get_ecache_ctrl() & ECCR_ASSOC) ? 2 : 1);
}

/*
 * Note that these are entered into the table: Fatal Errors (PERR, IERR, ISAP,
 * EMU, IMU) first, orphaned UCU/UCC, AFAR Overwrite policy, finally IVU, IVC.
 * Afar overwrite policy is:
 *   Class 4:
 *      AFSR     -- UCC, UCU, TUE, TSCE, TUE_SH
 *      AFSR_EXT -- L3_UCC, L3_UCU, L3_TUE, L3_TUE_SH
 *   Class 3:
 *      AFSR     -- UE, DUE, EDU, WDU, CPU
 *      AFSR_EXT -- L3_EDU, L3_WDU, L3_CPU
 *   Class 2:
 *      AFSR     -- CE, EDC, EMC, WDC, CPC, THCE
 *      AFSR_EXT -- L3_EDC, L3_WDC, L3_CPC, L3_THCE
 *   Class 1:
 *      AFSR     -- TO, DTO, BERR, DBERR
 */
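/*
 * Each entry below lists, in order: the AFSR/AFSR_EXT error bit, the error
 * name used in messages, the trap type(s) that can report it, the fault
 * type used for handling, a description string, the ereport payload class,
 * and the ereport class name.
 */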
ecc_type_to_info_t ecc_type_to_info[] = {

	/* Fatal Errors */
	C_AFSR_PERR,		"PERR ",	ECC_ALL_TRAPS,
		CPU_FATAL,	"PERR Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM2,
		FM_EREPORT_CPU_USIII_PERR,
	C_AFSR_IERR,		"IERR ",	ECC_ALL_TRAPS,
		CPU_FATAL,	"IERR Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM2,
		FM_EREPORT_CPU_USIII_IERR,
	C_AFSR_ISAP,		"ISAP ",	ECC_ALL_TRAPS,
		CPU_FATAL,	"ISAP Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_ISAP,
	C_AFSR_L3_TUE_SH,	"L3_TUE_SH ",	ECC_C_TRAP,
		CPU_FATAL,	"L3_TUE_SH Fatal",
		FM_EREPORT_PAYLOAD_L3_TAG_ECC,
		FM_EREPORT_CPU_USIII_L3_TUE_SH,
	C_AFSR_L3_TUE,		"L3_TUE ",	ECC_C_TRAP,
		CPU_FATAL,	"L3_TUE Fatal",
		FM_EREPORT_PAYLOAD_L3_TAG_ECC,
		FM_EREPORT_CPU_USIII_L3_TUE,
	C_AFSR_TUE_SH,		"TUE_SH ",	ECC_C_TRAP,
		CPU_FATAL,	"TUE_SH Fatal",
		FM_EREPORT_PAYLOAD_L2_TAG_ECC,
		FM_EREPORT_CPU_USIII_TUE_SH,
	C_AFSR_TUE,		"TUE ",		ECC_ALL_TRAPS,
		CPU_FATAL,	"TUE Fatal",
		FM_EREPORT_PAYLOAD_L2_TAG_ECC,
		FM_EREPORT_CPU_USIII_TUE,
	C_AFSR_EMU,		"EMU ",		ECC_ASYNC_TRAPS,
		CPU_FATAL,	"EMU Fatal",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_EMU,
	C_AFSR_IMU,		"IMU ",		ECC_C_TRAP,
		CPU_FATAL,	"IMU Fatal",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_IMU,

	/* L3$ Address parity errors are reported via the MECC bit */
	C_AFSR_L3_MECC,		"L3_MECC ",	ECC_MECC_TRAPS,
		CPU_L3_ADDR_PE,	"L3 Address Parity",
		FM_EREPORT_PAYLOAD_L3_DATA,
		FM_EREPORT_CPU_USIII_L3_MECC,

	/* Orphaned UCC/UCU Errors */
	C_AFSR_L3_UCU,		"L3_OUCU ",	ECC_ORPH_TRAPS,
		CPU_ORPH,	"Orphaned L3_UCU",
		FM_EREPORT_PAYLOAD_L3_DATA,
		FM_EREPORT_CPU_USIII_L3_UCU,
	C_AFSR_L3_UCC,		"L3_OUCC ",	ECC_ORPH_TRAPS,
		CPU_ORPH,	"Orphaned L3_UCC",
		FM_EREPORT_PAYLOAD_L3_DATA,
		FM_EREPORT_CPU_USIII_L3_UCC,
	C_AFSR_UCU,		"OUCU ",	ECC_ORPH_TRAPS,
		CPU_ORPH,	"Orphaned UCU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCU,
	C_AFSR_UCC,		"OUCC ",	ECC_ORPH_TRAPS,
		CPU_ORPH,	"Orphaned UCC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCC,

	/* UCU, UCC */
	C_AFSR_L3_UCU,		"L3_UCU ",	ECC_F_TRAP,
		CPU_UE_ECACHE,	"L3_UCU",
		FM_EREPORT_PAYLOAD_L3_DATA,
		FM_EREPORT_CPU_USIII_L3_UCU,
	C_AFSR_L3_UCC,		"L3_UCC ",	ECC_F_TRAP,
		CPU_CE_ECACHE,	"L3_UCC",
		FM_EREPORT_PAYLOAD_L3_DATA,
		FM_EREPORT_CPU_USIII_L3_UCC,
	C_AFSR_UCU,		"UCU ",		ECC_F_TRAP,
		CPU_UE_ECACHE,	"UCU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCU,
	C_AFSR_UCC,		"UCC ",		ECC_F_TRAP,
		CPU_CE_ECACHE,	"UCC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_UCC,
	C_AFSR_TSCE,		"TSCE ",	ECC_F_TRAP,
		CPU_CE_ECACHE,	"TSCE",
		FM_EREPORT_PAYLOAD_L2_TAG_ECC,
		FM_EREPORT_CPU_USIII_TSCE,

	/* UE, EDU:ST, EDU:BLD, WDU, CPU */
	C_AFSR_UE,		"UE ",		ECC_ASYNC_TRAPS,
		CPU_UE,		"Uncorrectable system bus (UE)",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_UE,
	C_AFSR_L3_EDU,		"L3_EDU ",	ECC_C_TRAP,
		CPU_UE_ECACHE_RETIRE,	"L3_EDU:ST",
		FM_EREPORT_PAYLOAD_L3_DATA,
		FM_EREPORT_CPU_USIII_L3_EDUST,
	C_AFSR_L3_EDU,		"L3_EDU ",	ECC_D_TRAP,
		CPU_UE_ECACHE_RETIRE,	"L3_EDU:BLD",
		FM_EREPORT_PAYLOAD_L3_DATA,
		FM_EREPORT_CPU_USIII_L3_EDUBL,
	C_AFSR_L3_WDU,		"L3_WDU ",	ECC_C_TRAP,
		CPU_UE_ECACHE_RETIRE,	"L3_WDU",
		FM_EREPORT_PAYLOAD_L3_DATA,
		FM_EREPORT_CPU_USIII_L3_WDU,
	C_AFSR_L3_CPU,		"L3_CPU ",	ECC_C_TRAP,
		CPU_UE_ECACHE,	"L3_CPU",
		FM_EREPORT_PAYLOAD_L3_DATA,
		FM_EREPORT_CPU_USIII_L3_CPU,
	C_AFSR_EDU,		"EDU ",		ECC_C_TRAP,
		CPU_UE_ECACHE_RETIRE,	"EDU:ST",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_EDUST,
	C_AFSR_EDU,		"EDU ",		ECC_D_TRAP,
		CPU_UE_ECACHE_RETIRE,	"EDU:BLD",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_EDUBL,
	C_AFSR_WDU,		"WDU ",		ECC_C_TRAP,
		CPU_UE_ECACHE_RETIRE,	"WDU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_WDU,
	C_AFSR_CPU,		"CPU ",		ECC_C_TRAP,
		CPU_UE_ECACHE,	"CPU",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_CPU,
	C_AFSR_DUE,		"DUE ",		ECC_C_TRAP,
		CPU_DUE,	"DUE",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_DUE,

	/* CE, EDC, EMC, WDC, CPC */
	C_AFSR_CE,		"CE ",		ECC_C_TRAP,
		CPU_CE,		"Corrected system bus (CE)",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_CE,
	C_AFSR_L3_EDC,		"L3_EDC ",	ECC_C_TRAP,
		CPU_CE_ECACHE,	"L3_EDC",
		FM_EREPORT_PAYLOAD_L3_DATA,
		FM_EREPORT_CPU_USIII_L3_EDC,
	C_AFSR_EDC,		"EDC ",		ECC_C_TRAP,
		CPU_CE_ECACHE,	"EDC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_EDC,
	C_AFSR_EMC,		"EMC ",		ECC_C_TRAP,
		CPU_EMC,	"EMC",
		FM_EREPORT_PAYLOAD_MEMORY,
		FM_EREPORT_CPU_USIII_EMC,
	C_AFSR_L3_WDC,		"L3_WDC ",	ECC_C_TRAP,
		CPU_CE_ECACHE,	"L3_WDC",
		FM_EREPORT_PAYLOAD_L3_DATA,
		FM_EREPORT_CPU_USIII_L3_WDC,
	C_AFSR_L3_CPC,		"L3_CPC ",	ECC_C_TRAP,
		CPU_CE_ECACHE,	"L3_CPC",
		FM_EREPORT_PAYLOAD_L3_DATA,
		FM_EREPORT_CPU_USIII_L3_CPC,
	C_AFSR_L3_THCE,		"L3_THCE ",	ECC_C_TRAP,
		CPU_CE_ECACHE,	"L3_THCE",
		FM_EREPORT_PAYLOAD_L3_TAG_ECC,
		FM_EREPORT_CPU_USIII_L3_THCE,
	C_AFSR_WDC,		"WDC ",		ECC_C_TRAP,
		CPU_CE_ECACHE,	"WDC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_WDC,
	C_AFSR_CPC,		"CPC ",		ECC_C_TRAP,
		CPU_CE_ECACHE,	"CPC",
		FM_EREPORT_PAYLOAD_L2_DATA,
		FM_EREPORT_CPU_USIII_CPC,
	C_AFSR_THCE,		"THCE ",	ECC_C_TRAP,
		CPU_CE_ECACHE,	"THCE",
		FM_EREPORT_PAYLOAD_L2_TAG_ECC,
		FM_EREPORT_CPU_USIII_THCE,

	/* TO, BERR */
	C_AFSR_TO,		"TO ",		ECC_ASYNC_TRAPS,
		CPU_TO,		"Timeout (TO)",
		FM_EREPORT_PAYLOAD_IO,
		FM_EREPORT_CPU_USIII_TO,
	C_AFSR_BERR,		"BERR ",	ECC_ASYNC_TRAPS,
		CPU_BERR,	"Bus Error (BERR)",
		FM_EREPORT_PAYLOAD_IO,
		FM_EREPORT_CPU_USIII_BERR,
	C_AFSR_DTO,		"DTO ",		ECC_C_TRAP,
		CPU_TO,		"Disrupting Timeout (DTO)",
		FM_EREPORT_PAYLOAD_IO,
		FM_EREPORT_CPU_USIII_DTO,
	C_AFSR_DBERR,		"DBERR ",	ECC_C_TRAP,
		CPU_BERR,	"Disrupting Bus Error (DBERR)",
		FM_EREPORT_PAYLOAD_IO,
		FM_EREPORT_CPU_USIII_DBERR,

	/* IVU, IVC, IMC */
	C_AFSR_IVU,		"IVU ",		ECC_C_TRAP,
		CPU_IV,		"IVU",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_IVU,
	C_AFSR_IVC,		"IVC ",		ECC_C_TRAP,
		CPU_IV,		"IVC",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_IVC,
	C_AFSR_IMC,		"IMC ",		ECC_C_TRAP,
		CPU_IV,		"IMC",
		FM_EREPORT_PAYLOAD_SYSTEM1,
		FM_EREPORT_CPU_USIII_IMC,

	0,			NULL,		0,
		0,		NULL,
		FM_EREPORT_PAYLOAD_UNKNOWN,
		FM_EREPORT_CPU_USIII_UNKNOWN,
};

/*
 * See Cheetah+ Delta PRM 10.9 and section P.6.1 of the Panther PRM
 *   Class 4:
 *      AFSR     -- UCC, UCU, TUE, TSCE, TUE_SH
 *      AFSR_EXT -- L3_UCC, L3_UCU, L3_TUE, L3_TUE_SH
 *   Class 3:
 *      AFSR     -- UE, DUE, EDU, EMU, WDU, CPU
 *      AFSR_EXT -- L3_EDU, L3_WDU, L3_CPU
 *   Class 2:
 *      AFSR     -- CE, EDC, EMC, WDC, CPC, THCE
 *      AFSR_EXT -- L3_EDC, L3_WDC, L3_CPC, L3_THCE
 *   Class 1:
 *      AFSR     -- TO, DTO, BERR, DBERR
 *      AFSR_EXT --
 */
uint64_t afar_overwrite[] = {
	/* class 4: */
	C_AFSR_UCC | C_AFSR_UCU | C_AFSR_TUE | C_AFSR_TSCE | C_AFSR_TUE_SH |
	C_AFSR_L3_UCC | C_AFSR_L3_UCU | C_AFSR_L3_TUE | C_AFSR_L3_TUE_SH,
	/* class 3: */
	C_AFSR_UE | C_AFSR_DUE | C_AFSR_EDU | C_AFSR_EMU | C_AFSR_WDU |
	C_AFSR_CPU | C_AFSR_L3_EDU | C_AFSR_L3_WDU | C_AFSR_L3_CPU,
	/* class 2: */
	C_AFSR_CE | C_AFSR_EDC | C_AFSR_EMC | C_AFSR_WDC | C_AFSR_CPC |
	C_AFSR_THCE | C_AFSR_L3_EDC | C_AFSR_L3_WDC | C_AFSR_L3_CPC |
	C_AFSR_L3_THCE,
	/* class 1: */
	C_AFSR_TO | C_AFSR_DTO | C_AFSR_BERR | C_AFSR_DBERR,

	0
};

/*
 * For Cheetah+, the E_SYND and M_SYND overwrite priorities are combined.
 * See Cheetah+ Delta PRM 10.9 and Cheetah+ PRM 11.6.2
 *   Class 2:  UE, DUE, IVU, EDU, EMU, WDU, UCU, CPU
 *   Class 1:  CE, IVC, EDC, EMC, WDC, UCC, CPC
 */
uint64_t esynd_overwrite[] = {
	/* class 2: */
	C_AFSR_UE | C_AFSR_DUE | C_AFSR_IVU | C_AFSR_EDU | C_AFSR_EMU |
	    C_AFSR_WDU | C_AFSR_UCU | C_AFSR_CPU,
	/* class 1: */
	C_AFSR_CE | C_AFSR_IVC | C_AFSR_EDC | C_AFSR_EMC | C_AFSR_WDC |
	    C_AFSR_UCC | C_AFSR_CPC,
	0
};

/*
 * In Panther, the E_SYND overwrite policy changed slightly by adding
 * one more level.
 * See Panther PRM P.6.2
 *   Class 3:
 *      AFSR     -- UCU, UCC
 *      AFSR_EXT -- L3_UCU, L3_UCC
 *   Class 2:
 *      AFSR     -- UE, DUE, IVU, EDU, WDU, CPU
 *      AFSR_EXT -- L3_EDU, L3_WDU, L3_CPU
 *   Class 1:
 *      AFSR     -- CE, IVC, EDC, WDC, CPC
 *      AFSR_EXT -- L3_EDC, L3_WDC, L3_CPC
 */
uint64_t pn_esynd_overwrite[] = {
	/* class 3: */
	C_AFSR_UCU | C_AFSR_UCC |
	C_AFSR_L3_UCU | C_AFSR_L3_UCC,
	/* class 2: */
	C_AFSR_UE | C_AFSR_DUE | C_AFSR_IVU | C_AFSR_EDU | C_AFSR_WDU |
	    C_AFSR_CPU |
	C_AFSR_L3_EDU | C_AFSR_L3_WDU | C_AFSR_L3_CPU,
	/* class 1: */
	C_AFSR_CE | C_AFSR_IVC | C_AFSR_EDC | C_AFSR_WDC | C_AFSR_CPC |
	C_AFSR_L3_EDC | C_AFSR_L3_WDC | C_AFSR_L3_CPC,

	0
};

int
afsr_to_pn_esynd_status(uint64_t afsr, uint64_t afsr_bit)
{
	return (afsr_to_overw_status(afsr, afsr_bit, pn_esynd_overwrite));
}

/*
 * Prioritized list of Error bits for MSYND overwrite.
 * See Panther PRM P.6.2 (For Cheetah+, see esynd_overwrite classes)
 *   Class 2:  EMU, IMU
 *   Class 1:  EMC, IMC
 *
 * Panther adds IMU and IMC.
 */
uint64_t msynd_overwrite[] = {
	/* class 2: */
	C_AFSR_EMU | C_AFSR_IMU,
	/* class 1: */
	C_AFSR_EMC | C_AFSR_IMC,

	0
};

/*
 * Change cpu speed bits -- new speed will be normal-speed/divisor.
 *
 * The Jalapeno memory controllers are required to drain outstanding
 * memory transactions within 32 JBus clocks in order to be ready
 * to enter Estar mode.  In some corner cases however, that time
 * fell short.
 *
 * A safe software solution is to force MCU to act like in Estar mode,
 * then delay 1us (in ppm code) prior to asserting the J_CHNG_L signal.
 * To reverse the effect, upon exiting Estar, software restores the
 * MCU to its original state.
 */
/* ARGSUSED1 */
void
cpu_change_speed(uint64_t divisor, uint64_t arg2)
{
	bus_config_eclk_t	*bceclk;
	uint64_t		reg;
	processor_info_t	*pi = &(CPU->cpu_type_info);

	for (bceclk = bus_config_eclk; bceclk->divisor; bceclk++) {
		if (bceclk->divisor != divisor)
			continue;
		reg = get_safari_config();
		reg &= ~SAFARI_CONFIG_ECLK_MASK;
		reg |= bceclk->mask;
		set_safari_config(reg);
		CPU->cpu_m.divisor = (uchar_t)divisor;
		cpu_set_curr_clock(((uint64_t)pi->pi_clock * 1000000) /
		    divisor);
		return;
	}
	/*
	 * We will reach here only if OBP and kernel don't agree on
	 * the speeds supported by the CPU.
	 */
	cmn_err(CE_WARN, "cpu_change_speed: bad divisor %" PRIu64, divisor);
}

/*
 * Cpu private initialization.  This includes allocating the cpu_private
 * data structure, initializing it, and initializing the scrubber for this
 * cpu.  This function calls cpu_init_ecache_scrub_dr to init the scrubber.
 * We use kmem_cache_create for the cheetah private data structure because
 * it needs to be allocated on a PAGESIZE (8192) byte boundary.
 */
void
cpu_init_private(struct cpu *cp)
{
	cheetah_private_t *chprp;
	int i;

	ASSERT(CPU_PRIVATE(cp) == NULL);

	/* LINTED: E_TRUE_LOGICAL_EXPR */
	ASSERT((offsetof(cheetah_private_t, chpr_tl1_err_data) +
	    sizeof (ch_err_tl1_data_t) * CH_ERR_TL1_TLMAX) <= PAGESIZE);

	/*
	 * Running with Cheetah CPUs in a Cheetah+, Jaguar, Panther or
	 * mixed Cheetah+/Jaguar/Panther machine is not a supported
	 * configuration. Attempting to do so may result in unpredictable
	 * failures (e.g. running Cheetah+ CPUs with Cheetah E$ disp flush)
	 * so don't allow it.
	 *
	 * This is just defensive code since this configuration mismatch
	 * should have been caught prior to OS execution.
	 */
	if (!(IS_CHEETAH_PLUS(cpunodes[cp->cpu_id].implementation) ||
	    IS_JAGUAR(cpunodes[cp->cpu_id].implementation) ||
	    IS_PANTHER(cpunodes[cp->cpu_id].implementation))) {
		cmn_err(CE_PANIC, "CPU%d: UltraSPARC-III not supported"
		    " on UltraSPARC-III+/IV/IV+ code\n", cp->cpu_id);
	}

	/*
	 * If the ch_private_cache has not been created, create it.
	 */
	if (ch_private_cache == NULL) {
		ch_private_cache = kmem_cache_create("ch_private_cache",
		    sizeof (cheetah_private_t), PAGESIZE, NULL, NULL,
		    NULL, NULL, static_arena, 0);
	}

	chprp = CPU_PRIVATE(cp) = kmem_cache_alloc(ch_private_cache, KM_SLEEP);

	bzero(chprp, sizeof (cheetah_private_t));
	chprp->chpr_fecctl0_logout.clo_data.chd_afar = LOGOUT_INVALID;
	chprp->chpr_cecc_logout.clo_data.chd_afar = LOGOUT_INVALID;
	chprp->chpr_async_logout.clo_data.chd_afar = LOGOUT_INVALID;
	chprp->chpr_tlb_logout.tlo_addr = LOGOUT_INVALID;
	for (i = 0; i < CH_ERR_TL1_TLMAX; i++)
		chprp->chpr_tl1_err_data[i].ch_err_tl1_logout.clo_data.chd_afar
		    = LOGOUT_INVALID;

	/* Panther has a larger Icache compared to Cheetah+ or Jaguar */
	if (IS_PANTHER(cpunodes[cp->cpu_id].implementation)) {
		chprp->chpr_icache_size = PN_ICACHE_SIZE;
		chprp->chpr_icache_linesize = PN_ICACHE_LSIZE;
	} else {
		chprp->chpr_icache_size = CH_ICACHE_SIZE;
		chprp->chpr_icache_linesize = CH_ICACHE_LSIZE;
	}

	cpu_init_ecache_scrub_dr(cp);

	/*
	 * Panther's L2$ and E$ are shared between cores, so the scrubber is
	 * only needed on one of the cores.  At this point, we assume all cores
	 * are online, and we only enable the scrubber on core 0.
	 */
	if (IS_PANTHER(cpunodes[cp->cpu_id].implementation)) {
		chprp->chpr_scrub_misc.chsm_core_state =
		    SCRUBBER_BOTH_CORES_ONLINE;
		if (cp->cpu_id != (processorid_t)cmp_cpu_to_chip(cp->cpu_id)) {
			chprp->chpr_scrub_misc.chsm_enable[
			    CACHE_SCRUBBER_INFO_E] = 0;
		}
	}

	chprp->chpr_ec_set_size = cpunodes[cp->cpu_id].ecache_size /
	    cpu_ecache_nway();

	adjust_hw_copy_limits(cpunodes[cp->cpu_id].ecache_size);
	ch_err_tl1_paddrs[cp->cpu_id] = va_to_pa(chprp);
	ASSERT(ch_err_tl1_paddrs[cp->cpu_id] != -1);
}

/*
 * Clear the error state registers for this CPU.
 * For Cheetah+/Jaguar, just clear the AFSR but
 * for Panther we also have to clear the AFSR_EXT.
 */
void
set_cpu_error_state(ch_cpu_errors_t *cpu_error_regs)
{
	set_asyncflt(cpu_error_regs->afsr & ~C_AFSR_FATAL_ERRS);
	if (IS_PANTHER(cpunodes[CPU->cpu_id].implementation)) {
		set_afsr_ext(cpu_error_regs->afsr_ext & ~C_AFSR_EXT_FATAL_ERRS);
	}
}

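/*
 * Mark (via ec_logflag) which ways of the captured Panther L2$ logout
 * data should be included in the error report for this fault.
 */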
void
pn_cpu_log_diag_l2_info(ch_async_flt_t *ch_flt) {
	struct async_flt *aflt = (struct async_flt *)ch_flt;
	ch_ec_data_t *l2_data = &ch_flt->flt_diag_data.chd_l2_data[0];
	uint64_t faddr = aflt->flt_addr;
	uint8_t log_way_mask = 0;
	int i;

	/*
	 * Only Panther CPUs have the additional L2$ data that needs
	 * to be logged here
	 */
	if (!IS_PANTHER(cpunodes[aflt->flt_inst].implementation))
		return;

	/*
	 * We'll use a simple bit mask to keep track of which way(s)
	 * of the stored cache line we want to log. The idea is to
	 * log the entry if it is a valid line and it matches our
	 * fault AFAR. If no match is found, we will simply log all
	 * the ways.
	 */
	for (i = 0; i < PN_L2_NWAYS; i++)
		if (pn_matching_valid_l2_line(faddr, &l2_data[i]))
			log_way_mask |= (1 << i);

	/* If no matching valid lines were found, we log all ways */
	if (log_way_mask == 0)
		log_way_mask = (1 << PN_L2_NWAYS) - 1;

	/* Log the cache lines */
	for (i = 0; i < PN_L2_NWAYS; i++)
		if (log_way_mask & (1 << i))
			l2_data[i].ec_logflag = EC_LOGFLAG_MAGIC;
}

/*
 * For this routine to return true, the L2 tag in question must be valid
 * and the tag PA must match the fault address (faddr) assuming the correct
 * index is being used.
 */
static int
pn_matching_valid_l2_line(uint64_t faddr, ch_ec_data_t *clo_l2_data) {
	if ((!PN_L2_LINE_INVALID(clo_l2_data->ec_tag)) &&
	((faddr & P2ALIGN(C_AFAR_PA, PN_L2_SET_SIZE)) ==
	    PN_L2TAG_TO_PA(clo_l2_data->ec_tag)))
		return (1);
	return (0);
}

/*
 * This array is used to convert the 3 digit PgSz encoding (as used in
 * various MMU registers such as MMU_TAG_ACCESS_EXT) into the corresponding
 * page size.
 */
static uint64_t tlb_pgsz_to_size[] = {
	/* 000 = 8KB: */
	0x2000,
	/* 001 = 64KB: */
	0x10000,
	/* 010 = 512KB: */
	0x80000,
	/* 011 = 4MB: */
	0x400000,
	/* 100 = 32MB: */
	0x2000000,
	/* 101 = 256MB: */
	0x10000000,
	/* undefined for encodings 110 and 111: */
	0, 0
};

/*
 * The itlb_parity_trap and dtlb_parity_trap handlers transfer control here
 * after collecting logout information related to the TLB parity error and
 * flushing the offending TTE entries from the ITLB or DTLB.
 *
 * DTLB traps which occur at TL>0 are not recoverable because we will most
 * likely be corrupting some other trap handler's alternate globals. As
 * such, we simply panic here when that happens. ITLB parity errors are
 * not expected to happen at TL>0.
 */
void
cpu_tlb_parity_error(struct regs *rp, ulong_t trap_va, ulong_t tlb_info) {
	ch_async_flt_t ch_flt;
	struct async_flt *aflt;
	pn_tlb_logout_t *tlop = NULL;
	int immu_parity = (tlb_info & PN_TLO_INFO_IMMU) != 0;
	int tl1_trap = (tlb_info & PN_TLO_INFO_TL1) != 0;
	char *error_class;

	bzero(&ch_flt, sizeof (ch_async_flt_t));

	/*
	 * Get the CPU log out info. If we can't find our CPU private
	 * pointer, or if the logout information does not correspond to
	 * this error, then we will have to make do without detailed
	 * logout information.
	 */
	if (CPU_PRIVATE(CPU)) {
		tlop = CPU_PRIVATE_PTR(CPU, chpr_tlb_logout);
		if ((tlop->tlo_addr != trap_va) ||
		    (tlop->tlo_info != tlb_info))
			tlop = NULL;
	}

	if (tlop) {
		ch_flt.tlb_diag_data = *tlop;

		/* Zero out + invalidate TLB logout. */
		bzero(tlop, sizeof (pn_tlb_logout_t));
		tlop->tlo_addr = LOGOUT_INVALID;
	} else {
		/*
		 * Copy what logout information we have and mark
		 * it incomplete.
		 */
		ch_flt.flt_data_incomplete = 1;
		ch_flt.tlb_diag_data.tlo_info = tlb_info;
		ch_flt.tlb_diag_data.tlo_addr = trap_va;
	}

	/*
	 * Log the error.
	 */
	aflt = (struct async_flt *)&ch_flt;
	aflt->flt_id = gethrtime_waitfree();
	aflt->flt_bus_id = getprocessorid();
	aflt->flt_inst = CPU->cpu_id;
	aflt->flt_pc = (caddr_t)rp->r_pc;
	aflt->flt_addr = trap_va;
	aflt->flt_prot = AFLT_PROT_NONE;
	aflt->flt_class = CPU_FAULT;
	aflt->flt_priv = (rp->r_tstate & TSTATE_PRIV) ?  1 : 0;
	aflt->flt_tl = tl1_trap ? 1 : 0;
	aflt->flt_panic = tl1_trap ? 1 : 0;

	if (immu_parity) {
		aflt->flt_status = ECC_ITLB_TRAP;
		ch_flt.flt_type = CPU_ITLB_PARITY;
		error_class = FM_EREPORT_CPU_USIII_ITLBPE;
		aflt->flt_payload = FM_EREPORT_PAYLOAD_ITLB_PE;
	} else {
		aflt->flt_status = ECC_DTLB_TRAP;
		ch_flt.flt_type = CPU_DTLB_PARITY;
		error_class = FM_EREPORT_CPU_USIII_DTLBPE;
		aflt->flt_payload = FM_EREPORT_PAYLOAD_DTLB_PE;
	}

	/*
	 * The TLB entries have already been flushed by the TL1 trap
	 * handler so at this point the only thing left to do is log
	 * the error message.
	 */
	if (aflt->flt_panic) {
		cpu_errorq_dispatch(error_class, (void *)&ch_flt,
		    sizeof (ch_async_flt_t), ue_queue, aflt->flt_panic);
		/*
		 * Panic here if aflt->flt_panic has been set.  Enqueued
		 * errors will be logged as part of the panic flow.
		 */
		fm_panic("%sError(s)", immu_parity ? "ITLBPE " : "DTLBPE ");
	} else {
		cpu_errorq_dispatch(error_class, (void *)&ch_flt,
		    sizeof (ch_async_flt_t), ce_queue, aflt->flt_panic);
	}
}

/*
 * This routine is called when a TLB parity error event is 'ue_drain'ed
 * or 'ce_drain'ed from the errorq.
 */
void
cpu_async_log_tlb_parity_err(void *flt) {
	ch_async_flt_t *ch_flt = (ch_async_flt_t *)flt;
	struct async_flt *aflt = (struct async_flt *)flt;
#ifdef lint
	aflt = aflt;
#endif

	/*
	 * We only capture TLB information if we encountered
	 * a TLB parity error and Panther is the only CPU which
	 * can detect a TLB parity error.
	 */
	ASSERT(IS_PANTHER(cpunodes[aflt->flt_inst].implementation));
	ASSERT((ch_flt->flt_type == CPU_ITLB_PARITY) ||
	    (ch_flt->flt_type == CPU_DTLB_PARITY));

	if (ch_flt->flt_data_incomplete == 0) {
		if (ch_flt->flt_type == CPU_ITLB_PARITY)
			ch_flt->tlb_diag_data.tlo_logflag = IT_LOGFLAG_MAGIC;
		else /* parity error is in DTLB */
			ch_flt->tlb_diag_data.tlo_logflag = DT_LOGFLAG_MAGIC;
	}
}
10870Sstevel@tonic-gate 
10880Sstevel@tonic-gate /*
10890Sstevel@tonic-gate  * Add L1 Prefetch cache data to the ereport payload.
10900Sstevel@tonic-gate  */
10910Sstevel@tonic-gate void
cpu_payload_add_pcache(struct async_flt * aflt,nvlist_t * nvl)10920Sstevel@tonic-gate cpu_payload_add_pcache(struct async_flt *aflt, nvlist_t *nvl)
10930Sstevel@tonic-gate {
10940Sstevel@tonic-gate 	ch_async_flt_t *ch_flt = (ch_async_flt_t *)aflt;
10950Sstevel@tonic-gate 	ch_pc_data_t *pcp;
10960Sstevel@tonic-gate 	ch_pc_data_t pcdata[CH_PCACHE_NWAY];
10970Sstevel@tonic-gate 	uint_t nelem;
10980Sstevel@tonic-gate 	int i, ways_logged = 0;
10990Sstevel@tonic-gate 
11000Sstevel@tonic-gate 	/*
11010Sstevel@tonic-gate 	 * We only capture P$ information if we encountered
11020Sstevel@tonic-gate 	 * a P$ parity error and Panther is the only CPU which
11030Sstevel@tonic-gate 	 * can detect a P$ parity error.
11040Sstevel@tonic-gate 	 */
11050Sstevel@tonic-gate 	ASSERT(IS_PANTHER(cpunodes[aflt->flt_inst].implementation));
11060Sstevel@tonic-gate 	for (i = 0; i < CH_PCACHE_NWAY; i++) {
11070Sstevel@tonic-gate 		pcp = &ch_flt->parity_data.dpe.cpl_pc[i];
11080Sstevel@tonic-gate 		if (pcp->pc_logflag == PC_LOGFLAG_MAGIC) {
11090Sstevel@tonic-gate 			bcopy(pcp, &pcdata[ways_logged],
11104718Smh27603 			    sizeof (ch_pc_data_t));
11110Sstevel@tonic-gate 			ways_logged++;
11120Sstevel@tonic-gate 		}
11130Sstevel@tonic-gate 	}
11140Sstevel@tonic-gate 
11150Sstevel@tonic-gate 	/*
11160Sstevel@tonic-gate 	 * Add the pcache data to the payload.
11170Sstevel@tonic-gate 	 */
11180Sstevel@tonic-gate 	fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_L1P_WAYS,
11190Sstevel@tonic-gate 	    DATA_TYPE_UINT8, (uint8_t)ways_logged, NULL);
11200Sstevel@tonic-gate 	if (ways_logged != 0) {
11210Sstevel@tonic-gate 		nelem = sizeof (ch_pc_data_t) / sizeof (uint64_t) * ways_logged;
11220Sstevel@tonic-gate 		fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_L1P_DATA,
11230Sstevel@tonic-gate 		    DATA_TYPE_UINT64_ARRAY, nelem, (uint64_t *)pcdata, NULL);
11240Sstevel@tonic-gate 	}
11250Sstevel@tonic-gate }
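/*
 * Editorial sketch (not part of the original source): the payload encoding
 * above flattens each logged P$ way into 64-bit words, so the array member
 * always carries (sizeof (ch_pc_data_t) / sizeof (uint64_t)) words per way.
 * The fragment below shows the same two-part shape -- a UINT8 way count plus
 * a UINT64_ARRAY of flattened data -- using only the fm_payload_set() call
 * forms that appear above.  It assumes an nvlist_t *nvl ereport payload has
 * already been allocated; the payload names and the 3-words-per-way element
 * size are made up for illustration (the real size depends on ch_pc_data_t).
 *
 *	uint64_t	words[3 * CH_PCACHE_NWAY];	logged ways, flattened
 *	uint8_t		ways = 2;			assumed example count
 *	uint_t		nelem = 3 * ways;
 *
 *	fm_payload_set(nvl, "example-l1p-ways",
 *	    DATA_TYPE_UINT8, ways, NULL);
 *	fm_payload_set(nvl, "example-l1p-data",
 *	    DATA_TYPE_UINT64_ARRAY, nelem, (uint64_t *)words, NULL);
 */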
11260Sstevel@tonic-gate 
11270Sstevel@tonic-gate /*
11280Sstevel@tonic-gate  * Add TLB diagnostic data to the ereport payload.
11290Sstevel@tonic-gate  */
11300Sstevel@tonic-gate void
11310Sstevel@tonic-gate cpu_payload_add_tlb(struct async_flt *aflt, nvlist_t *nvl)
11320Sstevel@tonic-gate {
11330Sstevel@tonic-gate 	ch_async_flt_t *ch_flt = (ch_async_flt_t *)aflt;
11340Sstevel@tonic-gate 	uint8_t num_entries, tlb_data_words;
11350Sstevel@tonic-gate 
11360Sstevel@tonic-gate 	/*
11370Sstevel@tonic-gate 	 * We only capture TLB information if we encountered
11380Sstevel@tonic-gate 	 * a TLB parity error, and Panther is the only CPU which
11390Sstevel@tonic-gate 	 * can detect a TLB parity error.
11400Sstevel@tonic-gate 	 */
11410Sstevel@tonic-gate 	ASSERT(IS_PANTHER(cpunodes[aflt->flt_inst].implementation));
11420Sstevel@tonic-gate 	ASSERT((ch_flt->flt_type == CPU_ITLB_PARITY) ||
11430Sstevel@tonic-gate 	    (ch_flt->flt_type == CPU_DTLB_PARITY));
11440Sstevel@tonic-gate 
11450Sstevel@tonic-gate 	if (ch_flt->flt_type == CPU_ITLB_PARITY) {
11460Sstevel@tonic-gate 		num_entries = (uint8_t)(PN_ITLB_NWAYS * PN_NUM_512_ITLBS);
11470Sstevel@tonic-gate 		tlb_data_words = sizeof (ch_tte_entry_t) / sizeof (uint64_t) *
11480Sstevel@tonic-gate 		    num_entries;
11490Sstevel@tonic-gate 
11500Sstevel@tonic-gate 		/*
11510Sstevel@tonic-gate 		 * Add the TLB diagnostic data to the payload
11520Sstevel@tonic-gate 		 * if it was collected.
11530Sstevel@tonic-gate 		 */
11540Sstevel@tonic-gate 		if (ch_flt->tlb_diag_data.tlo_logflag == IT_LOGFLAG_MAGIC) {
11550Sstevel@tonic-gate 			fm_payload_set(nvl,
11560Sstevel@tonic-gate 			    FM_EREPORT_PAYLOAD_NAME_ITLB_ENTRIES,
11570Sstevel@tonic-gate 			    DATA_TYPE_UINT8, num_entries, NULL);
11580Sstevel@tonic-gate 			fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_ITLB_DATA,
11590Sstevel@tonic-gate 			    DATA_TYPE_UINT64_ARRAY, tlb_data_words,
11600Sstevel@tonic-gate 			    (uint64_t *)ch_flt->tlb_diag_data.tlo_itlb_tte,
11610Sstevel@tonic-gate 			    NULL);
11620Sstevel@tonic-gate 		}
11630Sstevel@tonic-gate 	} else {
11640Sstevel@tonic-gate 		num_entries = (uint8_t)(PN_DTLB_NWAYS * PN_NUM_512_DTLBS);
11650Sstevel@tonic-gate 		tlb_data_words = sizeof (ch_tte_entry_t) / sizeof (uint64_t) *
11660Sstevel@tonic-gate 		    num_entries;
11670Sstevel@tonic-gate 
11680Sstevel@tonic-gate 		fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_VA,
11690Sstevel@tonic-gate 		    DATA_TYPE_UINT64, ch_flt->tlb_diag_data.tlo_addr, NULL);
11700Sstevel@tonic-gate 
11710Sstevel@tonic-gate 		/*
11720Sstevel@tonic-gate 		 * Add the TLB diagnostic data to the payload
11730Sstevel@tonic-gate 		 * if it was collected.
11740Sstevel@tonic-gate 		 */
11750Sstevel@tonic-gate 		if (ch_flt->tlb_diag_data.tlo_logflag == DT_LOGFLAG_MAGIC) {
11760Sstevel@tonic-gate 			fm_payload_set(nvl,
11770Sstevel@tonic-gate 			    FM_EREPORT_PAYLOAD_NAME_DTLB_ENTRIES,
11780Sstevel@tonic-gate 			    DATA_TYPE_UINT8, num_entries, NULL);
11790Sstevel@tonic-gate 			fm_payload_set(nvl, FM_EREPORT_PAYLOAD_NAME_DTLB_DATA,
11800Sstevel@tonic-gate 			    DATA_TYPE_UINT64_ARRAY, tlb_data_words,
11810Sstevel@tonic-gate 			    (uint64_t *)ch_flt->tlb_diag_data.tlo_dtlb_tte,
11820Sstevel@tonic-gate 			    NULL);
11830Sstevel@tonic-gate 		}
11840Sstevel@tonic-gate 	}
11850Sstevel@tonic-gate }
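/*
 * Editorial sketch (not part of the original source): a worked example of
 * the sizing arithmetic above, using assumed values for illustration only.
 * If ch_tte_entry_t were two 64-bit words (a tag word and a data word) and a
 * hypothetical TLB had 2 ways of 16 entries each, then:
 *
 *	num_entries    = 2 * 16 = 32
 *	tlb_data_words = (sizeof (ch_tte_entry_t) / sizeof (uint64_t)) * 32
 *	               = 2 * 32 = 64
 *
 * i.e. the UINT64_ARRAY payload member would carry 64 words.  The real
 * values come from ch_tte_entry_t, PN_{I,D}TLB_NWAYS and
 * PN_NUM_512_{I,D}TLBS, which are defined elsewhere and not repeated here.
 */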
11860Sstevel@tonic-gate 
11870Sstevel@tonic-gate /*
11880Sstevel@tonic-gate  * Panther Cache Scrubbing:
11890Sstevel@tonic-gate  *
11900Sstevel@tonic-gate  * In Jaguar, the E$ was split between cores, so the scrubber must run on both
11910Sstevel@tonic-gate  * cores.  For Panther, however, the L2$ and L3$ are shared across cores.
11920Sstevel@tonic-gate  * Therefore, the E$ scrubber only needs to run on one of the two cores.
11930Sstevel@tonic-gate  *
11940Sstevel@tonic-gate  * There are four possible states for the E$ scrubber:
11950Sstevel@tonic-gate  *
11960Sstevel@tonic-gate  * 0. If both cores are offline, add core 0 to cpu_offline_set so that
11970Sstevel@tonic-gate  *    the offline scrubber will run on it.
11980Sstevel@tonic-gate  * 1. If core 0 is online and core 1 off, we run the scrubber on core 0.
11990Sstevel@tonic-gate  * 2. If core 1 is online and core 0 off, we move the scrubber to run
12000Sstevel@tonic-gate  *    on core 1.
12010Sstevel@tonic-gate  * 3. If both cores are online, only run the scrubber on core 0.
12020Sstevel@tonic-gate  *
12030Sstevel@tonic-gate  * These states are enumerated by the SCRUBBER_[BOTH|CORE|NEITHER]_* defines
12040Sstevel@tonic-gate  * above.  One of those values is stored in
12050Sstevel@tonic-gate  * chpr_scrub_misc->chsm_core_state on each core.
12060Sstevel@tonic-gate  *
12070Sstevel@tonic-gate  * Also note that, for Panther, ecache_flush_line() will flush out the L2$
12080Sstevel@tonic-gate  * before the E$, so the L2$ will be scrubbed by the E$ scrubber.  No
12090Sstevel@tonic-gate  * additional code is necessary to scrub the L2$.
12100Sstevel@tonic-gate  *
12110Sstevel@tonic-gate  * For all cpu types, whenever a cpu or core is offlined, add it to
12120Sstevel@tonic-gate  * cpu_offline_set so the necessary scrubbers can still run.  This is still
12130Sstevel@tonic-gate  * necessary on Panther so the D$ scrubber can still run.
12140Sstevel@tonic-gate  */
12150Sstevel@tonic-gate /*ARGSUSED*/
12160Sstevel@tonic-gate int
12170Sstevel@tonic-gate cpu_scrub_cpu_setup(cpu_setup_t what, int cpuid, void *arg)
12180Sstevel@tonic-gate {
12190Sstevel@tonic-gate 	processorid_t core_0_id;
12200Sstevel@tonic-gate 	cpu_t *core_cpus[2];
12210Sstevel@tonic-gate 	ch_scrub_misc_t *core_scrub[2];
12220Sstevel@tonic-gate 	int old_state, i;
12230Sstevel@tonic-gate 	int new_state = SCRUBBER_NEITHER_CORE_ONLINE;
12240Sstevel@tonic-gate 
12250Sstevel@tonic-gate 	switch (what) {
12260Sstevel@tonic-gate 	case CPU_ON:
12270Sstevel@tonic-gate 	case CPU_INIT:
12280Sstevel@tonic-gate 		CPUSET_DEL(cpu_offline_set, cpuid);
12290Sstevel@tonic-gate 		break;
12300Sstevel@tonic-gate 	case CPU_OFF:
12310Sstevel@tonic-gate 		CPUSET_ADD(cpu_offline_set, cpuid);
12320Sstevel@tonic-gate 		break;
12330Sstevel@tonic-gate 	default:
12340Sstevel@tonic-gate 		return (0);
12350Sstevel@tonic-gate 	}
12360Sstevel@tonic-gate 
12370Sstevel@tonic-gate 	if (!IS_PANTHER(cpunodes[cpuid].implementation)) {
12380Sstevel@tonic-gate 		return (0);
12390Sstevel@tonic-gate 	}
12400Sstevel@tonic-gate 
12410Sstevel@tonic-gate 	/*
12420Sstevel@tonic-gate 	 * Update the chsm_enable[CACHE_SCRUBBER_INFO_E] value
12430Sstevel@tonic-gate 	 * if necessary
12440Sstevel@tonic-gate 	 */
12450Sstevel@tonic-gate 	core_0_id = cmp_cpu_to_chip(cpuid);
12460Sstevel@tonic-gate 	core_cpus[0] = cpu_get(core_0_id);
12470Sstevel@tonic-gate 	core_cpus[1] = cpu_get_sibling_core(core_cpus[0]);
12480Sstevel@tonic-gate 
12490Sstevel@tonic-gate 	for (i = 0; i < 2; i++) {
12500Sstevel@tonic-gate 		if (core_cpus[i] == NULL) {
12510Sstevel@tonic-gate 			/*
1252694Srscott 			 * This may happen during DR - one core is offlined
1253694Srscott 			 * and completely unconfigured before the second
1254694Srscott 			 * core is offlined.  Give up and return quietly,
1255694Srscott 			 * since the second core should quickly be removed
1256694Srscott 			 * anyway.
12570Sstevel@tonic-gate 			 */
12580Sstevel@tonic-gate 			return (0);
12590Sstevel@tonic-gate 		}
12600Sstevel@tonic-gate 		core_scrub[i] = CPU_PRIVATE_PTR(core_cpus[i], chpr_scrub_misc);
12610Sstevel@tonic-gate 	}
12620Sstevel@tonic-gate 
12630Sstevel@tonic-gate 	if (cpuid == (processorid_t)cmp_cpu_to_chip(cpuid)) {
12640Sstevel@tonic-gate 		/* cpuid is core 0 */
12650Sstevel@tonic-gate 		if (cpu_is_active(core_cpus[1])) {
12660Sstevel@tonic-gate 			new_state |= SCRUBBER_CORE_1_ONLINE;
12670Sstevel@tonic-gate 		}
12680Sstevel@tonic-gate 		if (what != CPU_OFF) {
12690Sstevel@tonic-gate 			new_state |= SCRUBBER_CORE_0_ONLINE;
12700Sstevel@tonic-gate 		}
12710Sstevel@tonic-gate 	} else {
12720Sstevel@tonic-gate 		/* cpuid is core 1 */
12730Sstevel@tonic-gate 		if (cpu_is_active(core_cpus[0])) {
12740Sstevel@tonic-gate 			new_state |= SCRUBBER_CORE_0_ONLINE;
12750Sstevel@tonic-gate 		}
12760Sstevel@tonic-gate 		if (what != CPU_OFF) {
12770Sstevel@tonic-gate 			new_state |= SCRUBBER_CORE_1_ONLINE;
12780Sstevel@tonic-gate 		}
12790Sstevel@tonic-gate 	}
12800Sstevel@tonic-gate 
12810Sstevel@tonic-gate 	old_state = core_scrub[0]->chsm_core_state;
12820Sstevel@tonic-gate 
12830Sstevel@tonic-gate 	if (old_state == new_state) {
12840Sstevel@tonic-gate 		return (0);
12850Sstevel@tonic-gate 	}
12860Sstevel@tonic-gate 
12870Sstevel@tonic-gate 	if (old_state == SCRUBBER_CORE_1_ONLINE) {
12880Sstevel@tonic-gate 		/*
12890Sstevel@tonic-gate 		 * We need to move the scrubber state from core 1
12900Sstevel@tonic-gate 		 * back to core 0.  This data is not protected by
12910Sstevel@tonic-gate 		 * locks, but the worst that can happen is some
12920Sstevel@tonic-gate 		 * lines are scrubbed multiple times.  chsm_outstanding is
12930Sstevel@tonic-gate 		 * set to 0 to make sure an interrupt is scheduled the
12940Sstevel@tonic-gate 		 * first time through do_scrub().
12950Sstevel@tonic-gate 		 */
12960Sstevel@tonic-gate 		core_scrub[0]->chsm_flush_index[CACHE_SCRUBBER_INFO_E] =
12970Sstevel@tonic-gate 		    core_scrub[1]->chsm_flush_index[CACHE_SCRUBBER_INFO_E];
12980Sstevel@tonic-gate 		core_scrub[0]->chsm_outstanding[CACHE_SCRUBBER_INFO_E] = 0;
12990Sstevel@tonic-gate 	}
13000Sstevel@tonic-gate 
13010Sstevel@tonic-gate 	switch (new_state) {
13020Sstevel@tonic-gate 	case SCRUBBER_NEITHER_CORE_ONLINE:
13030Sstevel@tonic-gate 	case SCRUBBER_BOTH_CORES_ONLINE:
13040Sstevel@tonic-gate 	case SCRUBBER_CORE_0_ONLINE:
13050Sstevel@tonic-gate 		core_scrub[1]->chsm_enable[CACHE_SCRUBBER_INFO_E] = 0;
13060Sstevel@tonic-gate 		core_scrub[0]->chsm_enable[CACHE_SCRUBBER_INFO_E] = 1;
13070Sstevel@tonic-gate 		break;
13080Sstevel@tonic-gate 
13090Sstevel@tonic-gate 	case SCRUBBER_CORE_1_ONLINE:
13100Sstevel@tonic-gate 	default:
13110Sstevel@tonic-gate 		/*
13120Sstevel@tonic-gate 		 * We need to move the scrubber state from core 0
13130Sstevel@tonic-gate 		 * to core 1.
13140Sstevel@tonic-gate 		 */
13150Sstevel@tonic-gate 		core_scrub[1]->chsm_flush_index[CACHE_SCRUBBER_INFO_E] =
13160Sstevel@tonic-gate 		    core_scrub[0]->chsm_flush_index[CACHE_SCRUBBER_INFO_E];
13170Sstevel@tonic-gate 		core_scrub[1]->chsm_outstanding[CACHE_SCRUBBER_INFO_E] = 0;
13180Sstevel@tonic-gate 
13190Sstevel@tonic-gate 		core_scrub[0]->chsm_enable[CACHE_SCRUBBER_INFO_E] = 0;
13200Sstevel@tonic-gate 		core_scrub[1]->chsm_enable[CACHE_SCRUBBER_INFO_E] = 1;
13210Sstevel@tonic-gate 		break;
13220Sstevel@tonic-gate 	}
13230Sstevel@tonic-gate 
13240Sstevel@tonic-gate 	core_scrub[0]->chsm_core_state = new_state;
13250Sstevel@tonic-gate 	core_scrub[1]->chsm_core_state = new_state;
13260Sstevel@tonic-gate 	return (0);
13270Sstevel@tonic-gate }
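/*
 * Editorial sketch (not part of the original source): the four scrubber
 * states described before cpu_scrub_cpu_setup() can be read as a two-bit
 * value, one bit per core.  The SCRUBBER_* defines live earlier in this file
 * and their actual values are not repeated here; the bit assignments below
 * are assumptions made purely for illustration.  Under that encoding, the
 * E$ scrubber is owned by core 1 exactly when core 1 is the only online
 * core, and by core 0 in every other state (including "neither online",
 * where core 0 is also added to cpu_offline_set so the offline scrubber
 * still covers it).
 *
 *	#define	EX_CORE_0_ONLINE	0x1	hypothetical bit assignment
 *	#define	EX_CORE_1_ONLINE	0x2	hypothetical bit assignment
 *
 *	static int
 *	example_ecache_scrub_owner(int core_state)
 *	{
 *		return (core_state == EX_CORE_1_ONLINE ? 1 : 0);
 *	}
 */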
13280Sstevel@tonic-gate 
13290Sstevel@tonic-gate /*
13300Sstevel@tonic-gate  * Returns a pointer to the cpu structure of the argument's sibling core.
13310Sstevel@tonic-gate  * If no sibling core can be found, return NULL.
13320Sstevel@tonic-gate  */
13330Sstevel@tonic-gate static cpu_t *
13340Sstevel@tonic-gate cpu_get_sibling_core(cpu_t *cpup)
13350Sstevel@tonic-gate {
13363434Sesaxe 	cpu_t		*nextp;
13373434Sesaxe 	pg_t		*pg;
13383434Sesaxe 	pg_cpu_itr_t	i;
13390Sstevel@tonic-gate 
1340694Srscott 	if ((cpup == NULL) || (!cmp_cpu_is_cmp(cpup->cpu_id)))
13410Sstevel@tonic-gate 		return (NULL);
13423434Sesaxe 	pg = (pg_t *)pghw_find_pg(cpup, PGHW_CHIP);
13433434Sesaxe 	if (pg == NULL)
13443434Sesaxe 		return (NULL);
13450Sstevel@tonic-gate 
13463434Sesaxe 	/*
13473434Sesaxe 	 * Iterate over the CPUs in the chip PG looking
13483434Sesaxe 	 * for a CPU that isn't cpup.
13493434Sesaxe 	 */
13503434Sesaxe 	PG_CPU_ITR_INIT(pg, i);
13513434Sesaxe 	while ((nextp = pg_cpu_next(&i)) != NULL) {
13523434Sesaxe 		if (nextp != cpup)
13533434Sesaxe 			break;
13543434Sesaxe 	}
13553434Sesaxe 
13563434Sesaxe 	if (nextp == NULL)
13570Sstevel@tonic-gate 		return (NULL);
13580Sstevel@tonic-gate 
13590Sstevel@tonic-gate 	return (nextp);
13600Sstevel@tonic-gate }
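/*
 * Editorial sketch (not part of the original source): cpu_get_sibling_core()
 * above is one instance of the generic PG (processor group) iteration
 * pattern.  The hypothetical helper below counts the CPUs that share a chip
 * PG with "cp", using only the calls already used above (pghw_find_pg(),
 * PG_CPU_ITR_INIT() and pg_cpu_next()); it is a sketch, not driver code.
 *
 *	static int
 *	example_count_chip_cpus(cpu_t *cp)
 *	{
 *		pg_t		*pg;
 *		pg_cpu_itr_t	itr;
 *		int		n = 0;
 *
 *		pg = (pg_t *)pghw_find_pg(cp, PGHW_CHIP);
 *		if (pg == NULL)
 *			return (0);
 *
 *		PG_CPU_ITR_INIT(pg, itr);
 *		while (pg_cpu_next(&itr) != NULL)
 *			n++;
 *
 *		return (n);
 *	}
 */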