xref: /onnv-gate/usr/src/uts/sun4u/io/mem_cache.c (revision 7178:dd8ced585048)
16330Sjc25722 /*
26330Sjc25722  * CDDL HEADER START
36330Sjc25722  *
46330Sjc25722  * The contents of this file are subject to the terms of the
56330Sjc25722  * Common Development and Distribution License (the "License").
66330Sjc25722  * You may not use this file except in compliance with the License.
76330Sjc25722  *
86330Sjc25722  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
96330Sjc25722  * or http://www.opensolaris.org/os/licensing.
106330Sjc25722  * See the License for the specific language governing permissions
116330Sjc25722  * and limitations under the License.
126330Sjc25722  *
136330Sjc25722  * When distributing Covered Code, include this CDDL HEADER in each
146330Sjc25722  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
156330Sjc25722  * If applicable, add the following below this CDDL HEADER, with the
166330Sjc25722  * fields enclosed by brackets "[]" replaced with your own identifying
176330Sjc25722  * information: Portions Copyright [yyyy] [name of copyright owner]
186330Sjc25722  *
196330Sjc25722  * CDDL HEADER END
206330Sjc25722  */
216330Sjc25722 /*
226330Sjc25722  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
236330Sjc25722  * Use is subject to license terms.
246330Sjc25722  */
256330Sjc25722 
266330Sjc25722 #pragma ident	"%Z%%M%	%I%	%E% SMI"
276330Sjc25722 
286330Sjc25722 /*
296330Sjc25722  * Driver to retire/unretire L2/L3 cachelines on panther
306330Sjc25722  */
316330Sjc25722 #include <sys/types.h>
326330Sjc25722 #include <sys/types32.h>
336330Sjc25722 #include <sys/time.h>
346330Sjc25722 #include <sys/errno.h>
356330Sjc25722 #include <sys/cmn_err.h>
366330Sjc25722 #include <sys/param.h>
376330Sjc25722 #include <sys/modctl.h>
386330Sjc25722 #include <sys/conf.h>
396330Sjc25722 #include <sys/open.h>
406330Sjc25722 #include <sys/stat.h>
416330Sjc25722 #include <sys/ddi.h>
426330Sjc25722 #include <sys/sunddi.h>
436330Sjc25722 #include <sys/file.h>
446330Sjc25722 #include <sys/cpuvar.h>
456330Sjc25722 #include <sys/x_call.h>
466330Sjc25722 #include <sys/cheetahregs.h>
476330Sjc25722 #include <sys/mem_cache.h>
486330Sjc25722 #include <sys/mem_cache_ioctl.h>
496330Sjc25722 
/*
 * Retire/unretire primitives (extern; implemented outside this file).
 * Each takes a cache tag address and the tag-data pattern to store.
 * The "_alternate" variants are separate copies of the same routines,
 * used when the primary routine's own cachelines would collide with
 * the line being operated on (see tag_addr_collides()).
 */
extern int	retire_l2(uint64_t, uint64_t);
extern int	retire_l2_alternate(uint64_t, uint64_t);
extern int	unretire_l2(uint64_t, uint64_t);
extern int	unretire_l2_alternate(uint64_t, uint64_t);
extern int	retire_l3(uint64_t, uint64_t);
extern int	retire_l3_alternate(uint64_t, uint64_t);
extern int	unretire_l3(uint64_t, uint64_t);
extern int	unretire_l3_alternate(uint64_t, uint64_t);

/*
 * Start/end markers bounding the text of each retire/unretire routine.
 * tag_addr_collides() converts these to physical addresses to decide
 * whether the primary or the alternate copy must be used.
 */
extern void	retire_l2_start(uint64_t, uint64_t);
extern void	retire_l2_end(uint64_t, uint64_t);
extern void	unretire_l2_start(uint64_t, uint64_t);
extern void	unretire_l2_end(uint64_t, uint64_t);
extern void	retire_l3_start(uint64_t, uint64_t);
extern void	retire_l3_end(uint64_t, uint64_t);
extern void	unretire_l3_start(uint64_t, uint64_t);
extern void	unretire_l3_end(uint64_t, uint64_t);

/* Tag readers invoked via xt_one() cross trap so they run on the target CPU */
extern void	get_ecache_dtags_tl1(uint64_t, ch_cpu_logout_t *);
extern void	get_l2_tag_tl1(uint64_t, uint64_t);
extern void	get_l3_tag_tl1(uint64_t, uint64_t);


/* Macro for putting 64-bit onto stack as two 32-bit ints */
#define	PRTF_64_TO_32(x)	(uint32_t)((x)>>32), (uint32_t)(x)


uint_t l2_flush_retries_done = 0;	/* times retire_l2() returned 2 (flush retry) */
int mem_cache_debug = 0x0;	/* debug bitmask; 0x1 enables collision messages */
uint64_t pattern = 0;		/* tag pattern for the operation in progress */
uint32_t retire_failures = 0;	/* count of failed (un)retire attempts */
/*
 * Record of the most recently injected error (way and bit) per cache
 * array; presumably consumed by the DEBUG-only
 * MEM_CACHE_READ_ERROR_INJECTED_TAGS ioctl — injection code is not
 * visible in this chunk, so verify against the full file.
 */
uint32_t last_error_injected_way = 0;
uint8_t last_error_injected_bit = 0;
uint32_t last_l3tag_error_injected_way = 0;
uint8_t last_l3tag_error_injected_bit = 0;
uint32_t last_l2tag_error_injected_way = 0;
uint8_t last_l2tag_error_injected_bit = 0;
uint32_t last_l3data_error_injected_way = 0;
uint8_t last_l3data_error_injected_bit = 0;
uint32_t last_l2data_error_injected_way = 0;
uint8_t last_l2data_error_injected_bit = 0;
916330Sjc25722 
926330Sjc25722 /* dev_ops and cb_ops entry point function declarations */
936330Sjc25722 static int	mem_cache_attach(dev_info_t *, ddi_attach_cmd_t);
946330Sjc25722 static int	mem_cache_detach(dev_info_t *, ddi_detach_cmd_t);
956330Sjc25722 static int	mem_cache_getinfo(dev_info_t *, ddi_info_cmd_t, void *,
966330Sjc25722 				void **);
976330Sjc25722 static int	mem_cache_open(dev_t *, int, int, cred_t *);
986330Sjc25722 static int	mem_cache_close(dev_t, int, int, cred_t *);
996330Sjc25722 static int	mem_cache_ioctl_ops(int, int, cache_info_t *);
1006330Sjc25722 static int	mem_cache_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
1016330Sjc25722 
/*
 * Character-device entry points.  Only open/close/ioctl are meaningful
 * for this driver; the remaining operations are unsupported (nodev).
 */
struct cb_ops mem_cache_cb_ops = {
	mem_cache_open,
	mem_cache_close,
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mem_cache_ioctl,
	nodev,			/* devmap */
	nodev,			/* mmap */
	ddi_segmap,		/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,
	NULL,			/* for STREAMS drivers */
	D_NEW | D_MP		/* driver compatibility flag */
};
1196330Sjc25722 
/* Device operations; this is a leaf pseudo driver (no bus ops). */
static struct dev_ops mem_cache_dev_ops = {
	DEVO_REV,		/* driver build version */
	0,			/* device reference count */
	mem_cache_getinfo,
	nulldev,		/* identify */
	nulldev,		/* probe */
	mem_cache_attach,
	mem_cache_detach,
	nulldev,		/* reset */
	&mem_cache_cb_ops,
	(struct bus_ops *)NULL,	/* no bus operations */
	nulldev			/* power */
};
1336330Sjc25722 
/*
 * Soft state: one instance of this structure per driver instance,
 * zalloc'ed in attach() and freed in detach().
 */
struct mem_cache_softc {
	dev_info_t	*dip;	/* devinfo node for this instance */
	kmutex_t	mutex;	/* per-instance lock */
};
/* Fetch the soft state for instance 'inst'; NULL if not attached. */
#define	getsoftc(inst)	((struct mem_cache_softc *)ddi_get_soft_state(statep,\
			(inst)))
1436330Sjc25722 
/* module configuration stuff */
static void *statep;		/* soft-state handle from ddi_soft_state_init() */
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,			/* this module is a device driver */
	"mem_cache_driver (08/01/30) ",	/* name/version reported by modinfo */
	&mem_cache_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	0				/* list terminator */
};
1596330Sjc25722 
1606330Sjc25722 int
1616330Sjc25722 _init(void)
1626330Sjc25722 {
1636330Sjc25722 	int e;
1646330Sjc25722 
1656330Sjc25722 	if (e = ddi_soft_state_init(&statep, sizeof (struct mem_cache_softc),
1666330Sjc25722 	    MAX_MEM_CACHE_INSTANCES)) {
1676330Sjc25722 		return (e);
1686330Sjc25722 	}
1696330Sjc25722 
1706330Sjc25722 	if ((e = mod_install(&modlinkage)) != 0)
1716330Sjc25722 		ddi_soft_state_fini(&statep);
1726330Sjc25722 
1736330Sjc25722 	return (e);
1746330Sjc25722 }
1756330Sjc25722 
1766330Sjc25722 int
1776330Sjc25722 _fini(void)
1786330Sjc25722 {
1796330Sjc25722 	int e;
1806330Sjc25722 
1816330Sjc25722 	if ((e = mod_remove(&modlinkage)) != 0)
1826330Sjc25722 		return (e);
1836330Sjc25722 
1846330Sjc25722 	ddi_soft_state_fini(&statep);
1856330Sjc25722 
1866330Sjc25722 	return (DDI_SUCCESS);
1876330Sjc25722 }
1886330Sjc25722 
/*
 * Loadable-module information entry point: report module details via
 * mod_info(9F).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
1946330Sjc25722 
1956330Sjc25722 /*ARGSUSED*/
1966330Sjc25722 static int
1976330Sjc25722 mem_cache_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
1986330Sjc25722 {
1996330Sjc25722 	int	inst;
2006330Sjc25722 	int	retval = DDI_SUCCESS;
2016330Sjc25722 	struct mem_cache_softc *softc;
2026330Sjc25722 
2036330Sjc25722 	inst = getminor((dev_t)arg);
2046330Sjc25722 
2056330Sjc25722 	switch (cmd) {
2066330Sjc25722 	case DDI_INFO_DEVT2DEVINFO:
2076330Sjc25722 		if ((softc = getsoftc(inst)) == NULL) {
2086330Sjc25722 			*result = (void *)NULL;
2096330Sjc25722 			retval = DDI_FAILURE;
2106330Sjc25722 		} else
2116330Sjc25722 			*result = (void *)softc->dip;
2126330Sjc25722 		break;
2136330Sjc25722 
2146330Sjc25722 	case DDI_INFO_DEVT2INSTANCE:
2156330Sjc25722 		*result = (void *)((uintptr_t)inst);
2166330Sjc25722 		break;
2176330Sjc25722 
2186330Sjc25722 	default:
2196330Sjc25722 		retval = DDI_FAILURE;
2206330Sjc25722 	}
2216330Sjc25722 
2226330Sjc25722 	return (retval);
2236330Sjc25722 }
2246330Sjc25722 
/*
 * attach(9E): for DDI_ATTACH, create the privileged minor node and the
 * per-instance soft state; DDI_RESUME requires no work.  On failure
 * after the minor node was created, both the soft state (if allocated)
 * and the minor node are cleaned up before returning.
 */
static int
mem_cache_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int inst;
	struct mem_cache_softc *softc = NULL;
	char name[80];

	switch (cmd) {
	case DDI_ATTACH:
		inst = ddi_get_instance(dip);
		if (inst >= MAX_MEM_CACHE_INSTANCES) {
			cmn_err(CE_WARN, "attach failed, too many instances\n");
			return (DDI_FAILURE);
		}
		(void) sprintf(name, MEM_CACHE_DRIVER_NAME"%d", inst);
		/*
		 * Privileged minor node: write access requires the "all"
		 * privilege; node mode is 0640.
		 */
		if (ddi_create_priv_minor_node(dip, name,
		    S_IFCHR,
		    inst,
		    DDI_PSEUDO,
		    0, NULL, "all", 0640) ==
		    DDI_FAILURE) {
			ddi_remove_minor_node(dip, NULL);
			return (DDI_FAILURE);
		}

		/* Allocate a soft state structure for this instance */
		if (ddi_soft_state_zalloc(statep, inst) != DDI_SUCCESS) {
			cmn_err(CE_WARN, " ddi_soft_state_zalloc() failed "
			    "for inst %d\n", inst);
			goto attach_failed;
		}

		/* Setup soft state */
		softc = getsoftc(inst);
		softc->dip = dip;
		mutex_init(&softc->mutex, NULL, MUTEX_DRIVER, NULL);

		/* Create main environmental node */
		ddi_report_dev(dip);

		return (DDI_SUCCESS);

	case DDI_RESUME:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

attach_failed:

	/* Free soft state, if allocated. remove minor node if added earlier */
	if (softc)
		ddi_soft_state_free(statep, inst);

	ddi_remove_minor_node(dip, NULL);

	return (DDI_FAILURE);
}
2846330Sjc25722 
2856330Sjc25722 static int
2866330Sjc25722 mem_cache_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2876330Sjc25722 {
2886330Sjc25722 	int inst;
2896330Sjc25722 	struct mem_cache_softc *softc;
2906330Sjc25722 
2916330Sjc25722 	switch (cmd) {
2926330Sjc25722 	case DDI_DETACH:
2936330Sjc25722 		inst = ddi_get_instance(dip);
2946330Sjc25722 		if ((softc = getsoftc(inst)) == NULL)
2956330Sjc25722 			return (ENXIO);
2966330Sjc25722 
2976330Sjc25722 		/* Free the soft state and remove minor node added earlier */
2986330Sjc25722 		mutex_destroy(&softc->mutex);
2996330Sjc25722 		ddi_soft_state_free(statep, inst);
3006330Sjc25722 		ddi_remove_minor_node(dip, NULL);
3016330Sjc25722 		return (DDI_SUCCESS);
3026330Sjc25722 
3036330Sjc25722 	case DDI_SUSPEND:
3046330Sjc25722 		return (DDI_SUCCESS);
3056330Sjc25722 
3066330Sjc25722 	default:
3076330Sjc25722 		return (DDI_FAILURE);
3086330Sjc25722 	}
3096330Sjc25722 }
3106330Sjc25722 
3116330Sjc25722 /*ARGSUSED*/
3126330Sjc25722 static int
3136330Sjc25722 mem_cache_open(dev_t *devp, int flag, int otyp, cred_t *credp)
3146330Sjc25722 {
3156330Sjc25722 	int	inst = getminor(*devp);
3166330Sjc25722 
3176330Sjc25722 	return (getsoftc(inst) == NULL ? ENXIO : 0);
3186330Sjc25722 }
3196330Sjc25722 
3206330Sjc25722 /*ARGSUSED*/
3216330Sjc25722 static int
3226330Sjc25722 mem_cache_close(dev_t dev, int flag, int otyp, cred_t *credp)
3236330Sjc25722 {
3246330Sjc25722 	int	inst = getminor(dev);
3256330Sjc25722 
3266330Sjc25722 	return (getsoftc(inst) == NULL ? ENXIO : 0);
3276330Sjc25722 }
3286330Sjc25722 
/*
 * Human-readable descriptions of cacheline tag states, indexed by the
 * low tag-state bits (tag & CH_ECSTATE_MASK).
 */
static char *tstate_to_desc[] = {
	"Invalid",			/* 0 */
	"Shared",			/* 1 */
	"Exclusive",			/* 2 */
	"Owner",			/* 3 */
	"Modified",			/* 4 */
	"NA",				/* 5 */
	"Owner/Shared",			/* 6 */
	"Reserved(7)",			/* 7 */
};
3396330Sjc25722 
3406330Sjc25722 static char *
3416330Sjc25722 tag_state_to_desc(uint8_t tagstate)
3426330Sjc25722 {
3436330Sjc25722 	return (tstate_to_desc[tagstate & CH_ECSTATE_MASK]);
3446330Sjc25722 }
3456330Sjc25722 
3466330Sjc25722 void
3476330Sjc25722 print_l2_tag(uint64_t tag_addr, uint64_t l2_tag)
3486330Sjc25722 {
3496330Sjc25722 	uint64_t l2_subaddr;
3506330Sjc25722 	uint8_t	l2_state;
3516330Sjc25722 
3526330Sjc25722 	l2_subaddr = PN_L2TAG_TO_PA(l2_tag);
3536330Sjc25722 	l2_subaddr |= (tag_addr & PN_L2_INDEX_MASK);
3546330Sjc25722 
3556330Sjc25722 	l2_state = (l2_tag & CH_ECSTATE_MASK);
3566330Sjc25722 	cmn_err(CE_CONT,
3576330Sjc25722 	    "PA=0x%08x.%08x E$tag 0x%08x.%08x E$state %s\n",
3586330Sjc25722 	    PRTF_64_TO_32(l2_subaddr),
3596330Sjc25722 	    PRTF_64_TO_32(l2_tag),
3606330Sjc25722 	    tag_state_to_desc(l2_state));
3616330Sjc25722 }
3626330Sjc25722 
/*
 * Dump, via cmn_err(CE_CONT), the captured L2 tag and data for every
 * way of the cacheline recorded in the logout area 'clop': for each way
 * print index, reconstructed PA, raw tag and decoded state, then each
 * 16-byte data chunk of the subblock with its 9-bit ECC.
 */
void
print_l2cache_line(ch_cpu_logout_t *clop)
{
	uint64_t l2_subaddr;
	int i, offset;
	uint8_t	way, l2_state;
	ch_ec_data_t *ecp;


	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_l2_data[way];
		/* PA = tag's address bits | index bits of the captured index */
		l2_subaddr = PN_L2TAG_TO_PA(ecp->ec_tag);
		l2_subaddr |= (ecp->ec_idx & PN_L2_INDEX_MASK);

		l2_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(l2_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(l2_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			/* each ec_data element holds two 16-byte chunks */
			int l2_data_idx = (i/2);

			offset = i * 16;
			ecdptr = &clop->clo_data.chd_l2_data[way].ec_data
			    [l2_data_idx];
			if ((i & 1) == 0) {
				/* even chunk: ECC in bits <17:9>, data in d8[0..1] */
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low  = ecdptr->ec_d8[1];
			} else {
				/* odd chunk: ECC in bits <8:0>, data in d8[2..3] */
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low  = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	}	/* end of for way loop */
}
4176330Sjc25722 
/*
 * L3/E$ counterpart of print_l2cache_line(): dump the captured L3 tag
 * and data for every way of the cacheline recorded in 'clop', using the
 * L3 tag-to-PA conversion and index mask.
 */
void
print_ecache_line(ch_cpu_logout_t *clop)
{
	uint64_t ec_subaddr;
	int i, offset;
	uint8_t	way, ec_state;
	ch_ec_data_t *ecp;


	for (way = 0; way < PN_CACHE_NWAYS; way++) {
		ecp = &clop->clo_data.chd_ec_data[way];
		/* PA = tag's address bits | index bits of the captured index */
		ec_subaddr = PN_L3TAG_TO_PA(ecp->ec_tag);
		ec_subaddr |= (ecp->ec_idx & PN_L3_TAG_RD_MASK);

		ec_state = (ecp->ec_tag & CH_ECSTATE_MASK);
		cmn_err(CE_CONT,
		    "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
		    "E$tag 0x%08x.%08x E$state %s",
		    way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(ec_subaddr),
		    PRTF_64_TO_32(ecp->ec_tag),
		    tag_state_to_desc(ec_state));
		/*
		 * Dump out Ecache subblock data captured.
		 * For Cheetah, we need to compute the ECC for each 16-byte
		 * chunk and compare it with the captured chunk ECC to figure
		 * out which chunk is bad.
		 */
		for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
			ec_data_elm_t *ecdptr;
			uint64_t d_low, d_high;
			uint32_t ecc;
			/* each ec_data element holds two 16-byte chunks */
			int ec_data_idx = (i/2);

			offset = i * 16;
			ecdptr =
			    &clop->clo_data.chd_ec_data[way].ec_data
			    [ec_data_idx];
			if ((i & 1) == 0) {
				/* even chunk: ECC in bits <17:9>, data in d8[0..1] */
				ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
				d_high = ecdptr->ec_d8[0];
				d_low  = ecdptr->ec_d8[1];
			} else {
				/* odd chunk: ECC in bits <8:0>, data in d8[2..3] */
				ecc = ecdptr->ec_eccd & 0x1ff;
				d_high = ecdptr->ec_d8[2];
				d_low  = ecdptr->ec_d8[3];
			}

			cmn_err(CE_CONT,
			    "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
			    " ECC 0x%03x",
			    offset, PRTF_64_TO_32(d_high),
			    PRTF_64_TO_32(d_low), ecc);
		}
	}
}
4736330Sjc25722 
4746330Sjc25722 static boolean_t
4756330Sjc25722 tag_addr_collides(uint64_t tag_addr, cache_id_t type,
4766330Sjc25722     retire_func_t start_of_func, retire_func_t end_of_func)
4776330Sjc25722 {
4786330Sjc25722 	uint64_t start_paddr, end_paddr;
4796330Sjc25722 	char *type_str;
4806330Sjc25722 
4816330Sjc25722 	start_paddr = va_to_pa((void *)start_of_func);
4826330Sjc25722 	end_paddr = va_to_pa((void *)end_of_func);
4836330Sjc25722 	switch (type) {
4846330Sjc25722 		case L2_CACHE_TAG:
4856330Sjc25722 		case L2_CACHE_DATA:
4866330Sjc25722 			tag_addr &= PN_L2_INDEX_MASK;
4876330Sjc25722 			start_paddr &= PN_L2_INDEX_MASK;
4886330Sjc25722 			end_paddr &= PN_L2_INDEX_MASK;
4896330Sjc25722 			type_str = "L2:";
4906330Sjc25722 			break;
4916330Sjc25722 		case L3_CACHE_TAG:
4926330Sjc25722 		case L3_CACHE_DATA:
4936330Sjc25722 			tag_addr &= PN_L3_TAG_RD_MASK;
4946330Sjc25722 			start_paddr &= PN_L3_TAG_RD_MASK;
4956330Sjc25722 			end_paddr &= PN_L3_TAG_RD_MASK;
4966330Sjc25722 			type_str = "L3:";
4976330Sjc25722 			break;
4986330Sjc25722 		default:
4996330Sjc25722 			/*
5006330Sjc25722 			 * Should never reach here.
5016330Sjc25722 			 */
5026330Sjc25722 			ASSERT(0);
5036330Sjc25722 			return (B_FALSE);
5046330Sjc25722 	}
5056330Sjc25722 	if ((tag_addr > (start_paddr - 0x100)) &&
5066330Sjc25722 	    (tag_addr < (end_paddr + 0x100))) {
5076330Sjc25722 		if (mem_cache_debug & 0x1)
5086330Sjc25722 			cmn_err(CE_CONT,
5096330Sjc25722 			    "%s collision detected tag_addr = 0x%08x"
5106330Sjc25722 			    " start_paddr = 0x%08x end_paddr = 0x%08x\n",
5116330Sjc25722 			    type_str, (uint32_t)tag_addr, (uint32_t)start_paddr,
5126330Sjc25722 			    (uint32_t)end_paddr);
5136330Sjc25722 		return (B_TRUE);
5146330Sjc25722 	}
5156330Sjc25722 	else
5166330Sjc25722 		return (B_FALSE);
5176330Sjc25722 }
5186330Sjc25722 
5196330Sjc25722 static uint64_t
5206330Sjc25722 get_tag_addr(cache_info_t *cache_info)
5216330Sjc25722 {
5226330Sjc25722 	uint64_t tag_addr, scratch;
5236330Sjc25722 
5246330Sjc25722 	switch (cache_info->cache) {
5256330Sjc25722 		case L2_CACHE_TAG:
5266330Sjc25722 		case L2_CACHE_DATA:
5276330Sjc25722 			tag_addr = (uint64_t)(cache_info->index <<
5286330Sjc25722 			    PN_CACHE_LINE_SHIFT);
5296330Sjc25722 			scratch = (uint64_t)(cache_info->way <<
5306330Sjc25722 			    PN_L2_WAY_SHIFT);
5316330Sjc25722 			tag_addr |= scratch;
5326330Sjc25722 			tag_addr |= PN_L2_IDX_HW_ECC_EN;
5336330Sjc25722 			break;
5346330Sjc25722 		case L3_CACHE_TAG:
5356330Sjc25722 		case L3_CACHE_DATA:
5366330Sjc25722 			tag_addr = (uint64_t)(cache_info->index <<
5376330Sjc25722 			    PN_CACHE_LINE_SHIFT);
5386330Sjc25722 			scratch = (uint64_t)(cache_info->way <<
5396330Sjc25722 			    PN_L3_WAY_SHIFT);
5406330Sjc25722 			tag_addr |= scratch;
5416330Sjc25722 			tag_addr |= PN_L3_IDX_HW_ECC_EN;
5426330Sjc25722 			break;
5436330Sjc25722 		default:
5446330Sjc25722 			/*
5456330Sjc25722 			 * Should never reach here.
5466330Sjc25722 			 */
5476330Sjc25722 			ASSERT(0);
5486330Sjc25722 			return (uint64_t)(0);
5496330Sjc25722 	}
5506330Sjc25722 	return (tag_addr);
5516330Sjc25722 }
5526330Sjc25722 
5536330Sjc25722 static int
5546330Sjc25722 mem_cache_ioctl_ops(int cmd, int mode, cache_info_t *cache_info)
5556330Sjc25722 {
5566330Sjc25722 	int	ret_val = 0;
5576330Sjc25722 	uint64_t afar, tag_addr;
5586330Sjc25722 	ch_cpu_logout_t clop;
5596330Sjc25722 	uint64_t Lxcache_tag_data[PN_CACHE_NWAYS];
5606330Sjc25722 	int	i, retire_retry_count;
5616330Sjc25722 	cpu_t	*cpu;
5626330Sjc25722 	uint64_t tag_data;
5636330Sjc25722 	uint8_t state;
5646330Sjc25722 
5656330Sjc25722 	switch (cache_info->cache) {
5666330Sjc25722 		case L2_CACHE_TAG:
5676330Sjc25722 		case L2_CACHE_DATA:
5686330Sjc25722 			if (cache_info->way >= PN_CACHE_NWAYS)
5696330Sjc25722 				return (EINVAL);
5706330Sjc25722 			if (cache_info->index >=
5716330Sjc25722 			    (PN_L2_SET_SIZE/PN_L2_LINESIZE))
5726330Sjc25722 				return (EINVAL);
5736330Sjc25722 			break;
5746330Sjc25722 		case L3_CACHE_TAG:
5756330Sjc25722 		case L3_CACHE_DATA:
5766330Sjc25722 			if (cache_info->way >= PN_CACHE_NWAYS)
5776330Sjc25722 				return (EINVAL);
5786330Sjc25722 			if (cache_info->index >=
5796330Sjc25722 			    (PN_L3_SET_SIZE/PN_L3_LINESIZE))
5806330Sjc25722 				return (EINVAL);
5816330Sjc25722 			break;
5826330Sjc25722 		default:
5836330Sjc25722 			return (ENOTSUP);
5846330Sjc25722 	}
5856330Sjc25722 	/*
5866330Sjc25722 	 * Check if we have a valid cpu ID and that
5876330Sjc25722 	 * CPU is ONLINE.
5886330Sjc25722 	 */
5896330Sjc25722 	mutex_enter(&cpu_lock);
5906330Sjc25722 	cpu = cpu_get(cache_info->cpu_id);
5916330Sjc25722 	if ((cpu == NULL) || (!cpu_is_online(cpu))) {
5926330Sjc25722 		mutex_exit(&cpu_lock);
5936330Sjc25722 		return (EINVAL);
5946330Sjc25722 	}
5956330Sjc25722 	mutex_exit(&cpu_lock);
5966330Sjc25722 	switch (cmd) {
5976330Sjc25722 		case MEM_CACHE_RETIRE:
5986330Sjc25722 			if ((cache_info->bit & MSB_BIT_MASK) ==
5996330Sjc25722 			    MSB_BIT_MASK) {
6006330Sjc25722 				pattern = ((uint64_t)1 <<
6016330Sjc25722 				    (cache_info->bit & TAG_BIT_MASK));
6026330Sjc25722 			} else {
6036330Sjc25722 				pattern = 0;
6046330Sjc25722 			}
6056330Sjc25722 			tag_addr = get_tag_addr(cache_info);
6066330Sjc25722 			pattern |= PN_ECSTATE_NA;
6076330Sjc25722 			retire_retry_count = 0;
6086330Sjc25722 			affinity_set(cache_info->cpu_id);
6096330Sjc25722 			switch (cache_info->cache) {
6106330Sjc25722 				case L2_CACHE_DATA:
6116330Sjc25722 				case L2_CACHE_TAG:
6126330Sjc25722 retry_l2_retire:
6136330Sjc25722 					if (tag_addr_collides(tag_addr,
6146330Sjc25722 					    cache_info->cache,
6156330Sjc25722 					    retire_l2_start, retire_l2_end))
6166330Sjc25722 						ret_val =
6176330Sjc25722 						    retire_l2_alternate(
6186330Sjc25722 						    tag_addr, pattern);
6196330Sjc25722 					else
6206330Sjc25722 						ret_val = retire_l2(tag_addr,
6216330Sjc25722 						    pattern);
6226330Sjc25722 					if (ret_val == 1) {
6236330Sjc25722 						/*
6246330Sjc25722 						 * cacheline was in retired
6256330Sjc25722 						 * STATE already.
6266330Sjc25722 						 * so return success.
6276330Sjc25722 						 */
6286330Sjc25722 						ret_val = 0;
6296330Sjc25722 					}
6306330Sjc25722 					if (ret_val < 0) {
6316330Sjc25722 						cmn_err(CE_WARN,
6326330Sjc25722 		"retire_l2() failed. index = 0x%x way %d. Retrying...\n",
6336330Sjc25722 						    cache_info->index,
6346330Sjc25722 						    cache_info->way);
6356330Sjc25722 						if (retire_retry_count >= 2) {
6366330Sjc25722 							retire_failures++;
6376330Sjc25722 							affinity_clear();
6386330Sjc25722 							return (EIO);
6396330Sjc25722 						}
6406330Sjc25722 						retire_retry_count++;
6416330Sjc25722 						goto retry_l2_retire;
6426330Sjc25722 					}
6436330Sjc25722 					if (ret_val == 2)
6446330Sjc25722 						l2_flush_retries_done++;
6456440Sbala 			/*
6466440Sbala 			 * We bind ourself to a CPU and send cross trap to
6476440Sbala 			 * ourself. On return from xt_one we can rely on the
6486440Sbala 			 * data in tag_data being filled in. Normally one would
6496440Sbala 			 * do a xt_sync to make sure that the CPU has completed
6506440Sbala 			 * the cross trap call xt_one.
6516440Sbala 			 */
6526330Sjc25722 					xt_one(cache_info->cpu_id,
6536330Sjc25722 					    (xcfunc_t *)(get_l2_tag_tl1),
6546330Sjc25722 					    tag_addr, (uint64_t)(&tag_data));
6556330Sjc25722 					state = tag_data & CH_ECSTATE_MASK;
6566330Sjc25722 					if (state != PN_ECSTATE_NA) {
6576330Sjc25722 						retire_failures++;
6586330Sjc25722 						print_l2_tag(tag_addr,
6596330Sjc25722 						    tag_data);
6606330Sjc25722 						cmn_err(CE_WARN,
6616330Sjc25722 		"L2 RETIRE:failed for index 0x%x way %d. Retrying...\n",
6626330Sjc25722 						    cache_info->index,
6636330Sjc25722 						    cache_info->way);
6646330Sjc25722 						if (retire_retry_count >= 2) {
6656330Sjc25722 							retire_failures++;
6666330Sjc25722 							affinity_clear();
6676330Sjc25722 							return (EIO);
6686330Sjc25722 						}
6696330Sjc25722 						retire_retry_count++;
6706330Sjc25722 						goto retry_l2_retire;
6716330Sjc25722 					}
6726330Sjc25722 					break;
6736330Sjc25722 				case L3_CACHE_TAG:
6746330Sjc25722 				case L3_CACHE_DATA:
6756330Sjc25722 					if (tag_addr_collides(tag_addr,
6766330Sjc25722 					    cache_info->cache,
6776330Sjc25722 					    retire_l3_start, retire_l3_end))
6786330Sjc25722 						ret_val =
6796330Sjc25722 						    retire_l3_alternate(
6806330Sjc25722 						    tag_addr, pattern);
6816330Sjc25722 					else
6826330Sjc25722 						ret_val = retire_l3(tag_addr,
6836330Sjc25722 						    pattern);
6846330Sjc25722 					if (ret_val == 1) {
6856330Sjc25722 						/*
6866330Sjc25722 						 * cacheline was in retired
6876330Sjc25722 						 * STATE already.
6886330Sjc25722 						 * so return success.
6896330Sjc25722 						 */
6906330Sjc25722 						ret_val = 0;
6916330Sjc25722 					}
6926330Sjc25722 					if (ret_val < 0) {
6936330Sjc25722 						cmn_err(CE_WARN,
6946330Sjc25722 			"retire_l3() failed. ret_val = %d index = 0x%x\n",
6956330Sjc25722 						    ret_val,
6966330Sjc25722 						    cache_info->index);
6976330Sjc25722 						retire_failures++;
6986330Sjc25722 						affinity_clear();
6996330Sjc25722 						return (EIO);
7006330Sjc25722 					}
7016440Sbala 			/*
7026440Sbala 			 * We bind ourself to a CPU and send cross trap to
7036440Sbala 			 * ourself. On return from xt_one we can rely on the
7046440Sbala 			 * data in tag_data being filled in. Normally one would
7056440Sbala 			 * do a xt_sync to make sure that the CPU has completed
7066440Sbala 			 * the cross trap call xt_one.
7076440Sbala 			 */
7086330Sjc25722 					xt_one(cache_info->cpu_id,
7096330Sjc25722 					    (xcfunc_t *)(get_l3_tag_tl1),
7106330Sjc25722 					    tag_addr, (uint64_t)(&tag_data));
7116330Sjc25722 					state = tag_data & CH_ECSTATE_MASK;
7126330Sjc25722 					if (state != PN_ECSTATE_NA) {
7136330Sjc25722 						cmn_err(CE_WARN,
7146330Sjc25722 					"L3 RETIRE failed for index 0x%x\n",
7156330Sjc25722 						    cache_info->index);
7166330Sjc25722 						retire_failures++;
7176330Sjc25722 						affinity_clear();
7186330Sjc25722 						return (EIO);
7196330Sjc25722 					}
7206330Sjc25722 
7216330Sjc25722 					break;
7226330Sjc25722 			}
7236330Sjc25722 			affinity_clear();
7246330Sjc25722 			break;
7256330Sjc25722 		case MEM_CACHE_UNRETIRE:
7266330Sjc25722 			tag_addr = get_tag_addr(cache_info);
7276330Sjc25722 			pattern = PN_ECSTATE_INV;
7286330Sjc25722 			affinity_set(cache_info->cpu_id);
7296330Sjc25722 			switch (cache_info->cache) {
7306330Sjc25722 				case L2_CACHE_DATA:
7316330Sjc25722 				case L2_CACHE_TAG:
7326330Sjc25722 					/*
7336330Sjc25722 					 * Check if the index/way is in NA state
7346330Sjc25722 					 */
7356440Sbala 			/*
7366440Sbala 			 * We bind ourself to a CPU and send cross trap to
7376440Sbala 			 * ourself. On return from xt_one we can rely on the
7386440Sbala 			 * data in tag_data being filled in. Normally one would
7396440Sbala 			 * do a xt_sync to make sure that the CPU has completed
7406440Sbala 			 * the cross trap call xt_one.
7416440Sbala 			 */
7426330Sjc25722 					xt_one(cache_info->cpu_id,
7436330Sjc25722 					    (xcfunc_t *)(get_l2_tag_tl1),
7446330Sjc25722 					    tag_addr, (uint64_t)(&tag_data));
7456330Sjc25722 					state = tag_data & CH_ECSTATE_MASK;
7466330Sjc25722 					if (state != PN_ECSTATE_NA) {
7476330Sjc25722 						affinity_clear();
7486330Sjc25722 						return (EINVAL);
7496330Sjc25722 					}
7506330Sjc25722 					if (tag_addr_collides(tag_addr,
7516330Sjc25722 					    cache_info->cache,
7526330Sjc25722 					    unretire_l2_start, unretire_l2_end))
7536330Sjc25722 						ret_val =
7546330Sjc25722 						    unretire_l2_alternate(
7556330Sjc25722 						    tag_addr, pattern);
7566330Sjc25722 					else
7576330Sjc25722 						ret_val =
7586330Sjc25722 						    unretire_l2(tag_addr,
7596330Sjc25722 						    pattern);
7606330Sjc25722 					if (ret_val != 0) {
7616330Sjc25722 						cmn_err(CE_WARN,
7626330Sjc25722 			"unretire_l2() failed. ret_val = %d index = 0x%x\n",
7636330Sjc25722 						    ret_val,
7646330Sjc25722 						    cache_info->index);
7656330Sjc25722 						retire_failures++;
7666330Sjc25722 						affinity_clear();
7676330Sjc25722 						return (EIO);
7686330Sjc25722 					}
7696330Sjc25722 					break;
7706330Sjc25722 				case L3_CACHE_TAG:
7716330Sjc25722 				case L3_CACHE_DATA:
7726330Sjc25722 					/*
7736330Sjc25722 					 * Check if the index/way is in NA state
7746330Sjc25722 					 */
7756440Sbala 			/*
7766440Sbala 			 * We bind ourself to a CPU and send cross trap to
7776440Sbala 			 * ourself. On return from xt_one we can rely on the
7786440Sbala 			 * data in tag_data being filled in. Normally one would
7796440Sbala 			 * do a xt_sync to make sure that the CPU has completed
7806440Sbala 			 * the cross trap call xt_one.
7816440Sbala 			 */
7826330Sjc25722 					xt_one(cache_info->cpu_id,
7836330Sjc25722 					    (xcfunc_t *)(get_l3_tag_tl1),
7846330Sjc25722 					    tag_addr, (uint64_t)(&tag_data));
7856330Sjc25722 					state = tag_data & CH_ECSTATE_MASK;
7866330Sjc25722 					if (state != PN_ECSTATE_NA) {
7876330Sjc25722 						affinity_clear();
7886330Sjc25722 						return (EINVAL);
7896330Sjc25722 					}
7906330Sjc25722 					if (tag_addr_collides(tag_addr,
7916330Sjc25722 					    cache_info->cache,
7926330Sjc25722 					    unretire_l3_start, unretire_l3_end))
7936330Sjc25722 						ret_val =
7946330Sjc25722 						    unretire_l3_alternate(
7956330Sjc25722 						    tag_addr, pattern);
7966330Sjc25722 					else
7976330Sjc25722 						ret_val =
7986330Sjc25722 						    unretire_l3(tag_addr,
7996330Sjc25722 						    pattern);
8006330Sjc25722 					if (ret_val != 0) {
8016330Sjc25722 						cmn_err(CE_WARN,
8026330Sjc25722 			"unretire_l3() failed. ret_val = %d index = 0x%x\n",
8036330Sjc25722 						    ret_val,
8046330Sjc25722 						    cache_info->index);
8056330Sjc25722 						affinity_clear();
8066330Sjc25722 						return (EIO);
8076330Sjc25722 					}
8086330Sjc25722 					break;
8096330Sjc25722 			}
8106330Sjc25722 			affinity_clear();
8116330Sjc25722 			break;
8126330Sjc25722 		case MEM_CACHE_ISRETIRED:
8136330Sjc25722 		case MEM_CACHE_STATE:
8146330Sjc25722 			return (ENOTSUP);
8156330Sjc25722 		case MEM_CACHE_READ_TAGS:
8166440Sbala #ifdef DEBUG
8176330Sjc25722 		case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
8186440Sbala #endif
8196330Sjc25722 			/*
8206330Sjc25722 			 * Read tag and data for all the ways at a given afar
8216330Sjc25722 			 */
8226330Sjc25722 			afar = (uint64_t)(cache_info->index
8236330Sjc25722 			    << PN_CACHE_LINE_SHIFT);
8246330Sjc25722 			affinity_set(cache_info->cpu_id);
8256440Sbala 			/*
8266440Sbala 			 * We bind ourself to a CPU and send cross trap to
8276440Sbala 			 * ourself. On return from xt_one we can rely on the
8286440Sbala 			 * data in clop being filled in. Normally one would
8296440Sbala 			 * do a xt_sync to make sure that the CPU has completed
8306440Sbala 			 * the cross trap call xt_one.
8316440Sbala 			 */
8326330Sjc25722 			xt_one(cache_info->cpu_id,
8336330Sjc25722 			    (xcfunc_t *)(get_ecache_dtags_tl1),
8346330Sjc25722 			    afar, (uint64_t)(&clop));
8356330Sjc25722 			switch (cache_info->cache) {
8366330Sjc25722 				case L2_CACHE_TAG:
8376330Sjc25722 					for (i = 0; i < PN_CACHE_NWAYS; i++) {
8386330Sjc25722 						Lxcache_tag_data[i] =
8396330Sjc25722 						    clop.clo_data.chd_l2_data
8406330Sjc25722 						    [i].ec_tag;
8416330Sjc25722 					}
8426330Sjc25722 					last_error_injected_bit =
8436330Sjc25722 					    last_l2tag_error_injected_bit;
8446330Sjc25722 					last_error_injected_way =
8456330Sjc25722 					    last_l2tag_error_injected_way;
8466330Sjc25722 					break;
8476330Sjc25722 				case L3_CACHE_TAG:
8486330Sjc25722 					for (i = 0; i < PN_CACHE_NWAYS; i++) {
8496330Sjc25722 						Lxcache_tag_data[i] =
8506330Sjc25722 						    clop.clo_data.chd_ec_data
8516330Sjc25722 						    [i].ec_tag;
8526330Sjc25722 					}
8536330Sjc25722 					last_error_injected_bit =
8546330Sjc25722 					    last_l3tag_error_injected_bit;
8556330Sjc25722 					last_error_injected_way =
8566330Sjc25722 					    last_l3tag_error_injected_way;
8576330Sjc25722 					break;
8586330Sjc25722 				default:
8596330Sjc25722 					affinity_clear();
8606330Sjc25722 					return (ENOTSUP);
8616330Sjc25722 			}	/* end if switch(cache) */
8626440Sbala #ifdef DEBUG
8636330Sjc25722 			if (cmd == MEM_CACHE_READ_ERROR_INJECTED_TAGS) {
8646330Sjc25722 				pattern = ((uint64_t)1 <<
8656330Sjc25722 				    last_error_injected_bit);
8666330Sjc25722 				/*
8676330Sjc25722 				 * If error bit is ECC we need to make sure
8686330Sjc25722 				 * ECC on all WAYS is corrupted.
8696330Sjc25722 				 */
8706330Sjc25722 				if ((last_error_injected_bit >= 6) &&
8716330Sjc25722 				    (last_error_injected_bit <= 14)) {
8726330Sjc25722 					for (i = 0; i < PN_CACHE_NWAYS; i++)
8736330Sjc25722 						Lxcache_tag_data[i] ^=
8746330Sjc25722 						    pattern;
8756330Sjc25722 				} else
8766330Sjc25722 					Lxcache_tag_data
8776330Sjc25722 					    [last_error_injected_way] ^=
8786330Sjc25722 					    pattern;
8796330Sjc25722 			}
8806440Sbala #endif
8816330Sjc25722 			if (ddi_copyout((caddr_t)Lxcache_tag_data,
8826330Sjc25722 			    (caddr_t)cache_info->datap,
8836330Sjc25722 			    sizeof (Lxcache_tag_data), mode)
8846330Sjc25722 			    != DDI_SUCCESS) {
8856330Sjc25722 				affinity_clear();
8866330Sjc25722 				return (EFAULT);
8876330Sjc25722 			}
8886330Sjc25722 			affinity_clear();
8896330Sjc25722 			break;	/* end of READ_TAGS */
8906330Sjc25722 		default:
8916330Sjc25722 			return (ENOTSUP);
8926330Sjc25722 	}	/* end if switch(cmd) */
8936330Sjc25722 	return (ret_val);
8946330Sjc25722 }
8956330Sjc25722 
/*
 * ioctl(9E) entry point for the mem_cache driver.
 *
 * Copies in a cache_info_t request from userland (converting from the
 * ILP32 layout when the caller is a 32-bit process), validates the
 * CPU id and CPU type, and dispatches supported commands to
 * mem_cache_ioctl_ops().  All ioctls on a given minor are serialized
 * by the per-instance softc mutex.
 *
 * Returns 0 on success, or:
 *	ENXIO	- no soft state attached for this minor
 *	EFAULT	- copyin of the request structure failed
 *	EINVAL	- cpu_id out of range
 *	ENOTSUP	- target CPU is not a Panther, or unsupported command
 *	EBADF	- retire/unretire attempted without FWRITE open mode
 * Other errno values may be propagated from mem_cache_ioctl_ops().
 */
/*ARGSUSED*/
static int
mem_cache_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
		int *rvalp)
{
	int	inst;
	struct mem_cache_softc *softc;
	cache_info_t	cache_info;
	cache_info32_t	cache_info32;
	int	ret_val;
	int	is_panther;

	inst = getminor(dev);
	if ((softc = getsoftc(inst)) == NULL)
		return (ENXIO);

	/* Serialize all ioctl processing on this instance. */
	mutex_enter(&softc->mutex);

#ifdef _MULTI_DATAMODEL
	/*
	 * A 32-bit caller hands us a cache_info32_t; copy it in and
	 * widen each field (the datap user pointer in particular) into
	 * the native cache_info_t.  Note the "} else" below pairs with
	 * the if statement that follows the #endif.
	 */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		if (ddi_copyin((cache_info32_t *)arg, &cache_info32,
		    sizeof (cache_info32), mode) != DDI_SUCCESS) {
			mutex_exit(&softc->mutex);
			return (EFAULT);
		}
		cache_info.cache = cache_info32.cache;
		cache_info.index = cache_info32.index;
		cache_info.way = cache_info32.way;
		cache_info.cpu_id = cache_info32.cpu_id;
		cache_info.bit = cache_info32.bit;
		cache_info.datap = (void *)((uint64_t)cache_info32.datap);
	} else
#endif
	if (ddi_copyin((cache_info_t *)arg, &cache_info,
	    sizeof (cache_info), mode) != DDI_SUCCESS) {
		mutex_exit(&softc->mutex);
		return (EFAULT);
	}

	/*
	 * Range-check the user-supplied CPU id before it is used to
	 * index the cpunodes[] array below.
	 */
	if ((cache_info.cpu_id < 0) || (cache_info.cpu_id >= NCPU)) {
		mutex_exit(&softc->mutex);
		return (EINVAL);
	}

	/* L2/L3 cacheline retire/unretire is only supported on Panther. */
	is_panther = IS_PANTHER(cpunodes[cache_info.cpu_id].implementation);
	if (!is_panther) {
		mutex_exit(&softc->mutex);
		return (ENOTSUP);
	}
	switch (cmd) {
		case MEM_CACHE_RETIRE:
		case MEM_CACHE_UNRETIRE:
			/*
			 * Retire/unretire modify cache state; require the
			 * device to have been opened for writing.
			 */
			if ((mode & FWRITE) == 0) {
				ret_val = EBADF;
				break;
			}
		/*FALLTHROUGH*/
		case MEM_CACHE_ISRETIRED:
		case MEM_CACHE_STATE:
		case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
		case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
#endif
			ret_val =  mem_cache_ioctl_ops(cmd, mode, &cache_info);
			break;
		default:
			ret_val = ENOTSUP;
			break;
	}
	mutex_exit(&softc->mutex);
	return (ret_val);
}
968