16330Sjc25722 /*
26330Sjc25722 * CDDL HEADER START
36330Sjc25722 *
46330Sjc25722 * The contents of this file are subject to the terms of the
56330Sjc25722 * Common Development and Distribution License (the "License").
66330Sjc25722 * You may not use this file except in compliance with the License.
76330Sjc25722 *
86330Sjc25722 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
96330Sjc25722 * or http://www.opensolaris.org/os/licensing.
106330Sjc25722 * See the License for the specific language governing permissions
116330Sjc25722 * and limitations under the License.
126330Sjc25722 *
136330Sjc25722 * When distributing Covered Code, include this CDDL HEADER in each
146330Sjc25722 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
156330Sjc25722 * If applicable, add the following below this CDDL HEADER, with the
166330Sjc25722 * fields enclosed by brackets "[]" replaced with your own identifying
176330Sjc25722 * information: Portions Copyright [yyyy] [name of copyright owner]
186330Sjc25722 *
196330Sjc25722 * CDDL HEADER END
206330Sjc25722 */
216330Sjc25722 /*
229543SChristopher.Baumbauer@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
236330Sjc25722 * Use is subject to license terms.
246330Sjc25722 */
256330Sjc25722
266330Sjc25722 /*
276330Sjc25722 * Driver to retire/unretire L2/L3 cachelines on panther
286330Sjc25722 */
296330Sjc25722 #include <sys/types.h>
306330Sjc25722 #include <sys/types32.h>
316330Sjc25722 #include <sys/time.h>
326330Sjc25722 #include <sys/errno.h>
336330Sjc25722 #include <sys/cmn_err.h>
346330Sjc25722 #include <sys/param.h>
356330Sjc25722 #include <sys/modctl.h>
366330Sjc25722 #include <sys/conf.h>
376330Sjc25722 #include <sys/open.h>
386330Sjc25722 #include <sys/stat.h>
396330Sjc25722 #include <sys/ddi.h>
406330Sjc25722 #include <sys/sunddi.h>
416330Sjc25722 #include <sys/file.h>
426330Sjc25722 #include <sys/cpuvar.h>
436330Sjc25722 #include <sys/x_call.h>
446330Sjc25722 #include <sys/cheetahregs.h>
456330Sjc25722 #include <sys/mem_cache.h>
466330Sjc25722 #include <sys/mem_cache_ioctl.h>
476330Sjc25722
486330Sjc25722 extern int retire_l2(uint64_t, uint64_t);
496330Sjc25722 extern int retire_l2_alternate(uint64_t, uint64_t);
506330Sjc25722 extern int unretire_l2(uint64_t, uint64_t);
516330Sjc25722 extern int unretire_l2_alternate(uint64_t, uint64_t);
526330Sjc25722 extern int retire_l3(uint64_t, uint64_t);
536330Sjc25722 extern int retire_l3_alternate(uint64_t, uint64_t);
546330Sjc25722 extern int unretire_l3(uint64_t, uint64_t);
556330Sjc25722 extern int unretire_l3_alternate(uint64_t, uint64_t);
566330Sjc25722
576330Sjc25722 extern void retire_l2_start(uint64_t, uint64_t);
586330Sjc25722 extern void retire_l2_end(uint64_t, uint64_t);
596330Sjc25722 extern void unretire_l2_start(uint64_t, uint64_t);
606330Sjc25722 extern void unretire_l2_end(uint64_t, uint64_t);
616330Sjc25722 extern void retire_l3_start(uint64_t, uint64_t);
626330Sjc25722 extern void retire_l3_end(uint64_t, uint64_t);
636330Sjc25722 extern void unretire_l3_start(uint64_t, uint64_t);
646330Sjc25722 extern void unretire_l3_end(uint64_t, uint64_t);
656330Sjc25722
666330Sjc25722 extern void get_ecache_dtags_tl1(uint64_t, ch_cpu_logout_t *);
676440Sbala extern void get_l2_tag_tl1(uint64_t, uint64_t);
686440Sbala extern void get_l3_tag_tl1(uint64_t, uint64_t);
6910784Ssinanallur.balasubramanian@sun.com extern const int _ncpu;
706330Sjc25722
716330Sjc25722 /* Macro for putting 64-bit onto stack as two 32-bit ints */
726330Sjc25722 #define PRTF_64_TO_32(x) (uint32_t)((x)>>32), (uint32_t)(x)
736330Sjc25722
746330Sjc25722
756330Sjc25722 uint_t l2_flush_retries_done = 0;
766330Sjc25722 int mem_cache_debug = 0x0;
776330Sjc25722 uint64_t pattern = 0;
786330Sjc25722 uint32_t retire_failures = 0;
7910784Ssinanallur.balasubramanian@sun.com #ifdef DEBUG
8010784Ssinanallur.balasubramanian@sun.com int inject_anonymous_tag_error = 0;
8110784Ssinanallur.balasubramanian@sun.com int32_t last_error_injected_way = 0;
826330Sjc25722 uint8_t last_error_injected_bit = 0;
83*10875Ssinanallur.balasubramanian@sun.com int32_t last_l3tag_error_injected_way;
84*10875Ssinanallur.balasubramanian@sun.com uint8_t last_l3tag_error_injected_bit;
85*10875Ssinanallur.balasubramanian@sun.com int32_t last_l2tag_error_injected_way;
86*10875Ssinanallur.balasubramanian@sun.com uint8_t last_l2tag_error_injected_bit;
8710784Ssinanallur.balasubramanian@sun.com #endif
886330Sjc25722
896330Sjc25722 /* dev_ops and cb_ops entry point function declarations */
906330Sjc25722 static int mem_cache_attach(dev_info_t *, ddi_attach_cmd_t);
916330Sjc25722 static int mem_cache_detach(dev_info_t *, ddi_detach_cmd_t);
926330Sjc25722 static int mem_cache_getinfo(dev_info_t *, ddi_info_cmd_t, void *,
936330Sjc25722 void **);
946330Sjc25722 static int mem_cache_open(dev_t *, int, int, cred_t *);
956330Sjc25722 static int mem_cache_close(dev_t, int, int, cred_t *);
966330Sjc25722 static int mem_cache_ioctl_ops(int, int, cache_info_t *);
976330Sjc25722 static int mem_cache_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
986330Sjc25722
996330Sjc25722 struct cb_ops mem_cache_cb_ops = {
1006330Sjc25722 mem_cache_open,
1016330Sjc25722 mem_cache_close,
1026330Sjc25722 nodev,
1036330Sjc25722 nodev,
1046330Sjc25722 nodev, /* dump */
1056330Sjc25722 nodev,
1066330Sjc25722 nodev,
1076330Sjc25722 mem_cache_ioctl,
1086330Sjc25722 nodev, /* devmap */
1096330Sjc25722 nodev,
1106330Sjc25722 ddi_segmap, /* segmap */
1116330Sjc25722 nochpoll,
1126330Sjc25722 ddi_prop_op,
1136330Sjc25722 NULL, /* for STREAMS drivers */
1146330Sjc25722 D_NEW | D_MP /* driver compatibility flag */
1156330Sjc25722 };
1166330Sjc25722
1176330Sjc25722 static struct dev_ops mem_cache_dev_ops = {
1186330Sjc25722 DEVO_REV, /* driver build version */
1196330Sjc25722 0, /* device reference count */
1206330Sjc25722 mem_cache_getinfo,
1216330Sjc25722 nulldev,
1226330Sjc25722 nulldev, /* probe */
1236330Sjc25722 mem_cache_attach,
1246330Sjc25722 mem_cache_detach,
1256330Sjc25722 nulldev, /* reset */
1266330Sjc25722 &mem_cache_cb_ops,
1276330Sjc25722 (struct bus_ops *)NULL,
12810784Ssinanallur.balasubramanian@sun.com nulldev, /* power */
1297656SSherry.Moore@Sun.COM ddi_quiesce_not_needed, /* quiesce */
1306330Sjc25722 };
1316330Sjc25722
1326330Sjc25722 /*
1336330Sjc25722 * Soft state
1346330Sjc25722 */
1356330Sjc25722 struct mem_cache_softc {
1366330Sjc25722 dev_info_t *dip;
1376330Sjc25722 kmutex_t mutex;
1386330Sjc25722 };
1396330Sjc25722 #define getsoftc(inst) ((struct mem_cache_softc *)ddi_get_soft_state(statep,\
1406330Sjc25722 (inst)))
1416330Sjc25722
1426330Sjc25722 /* module configuration stuff */
1436330Sjc25722 static void *statep;
1446330Sjc25722 extern struct mod_ops mod_driverops;
1456330Sjc25722
1466330Sjc25722 static struct modldrv modldrv = {
1476330Sjc25722 &mod_driverops,
1486330Sjc25722 "mem_cache_driver (08/01/30) ",
1496330Sjc25722 &mem_cache_dev_ops
1506330Sjc25722 };
1516330Sjc25722
1526330Sjc25722 static struct modlinkage modlinkage = {
1536330Sjc25722 MODREV_1,
1546330Sjc25722 &modldrv,
1556330Sjc25722 0
1566330Sjc25722 };
1576330Sjc25722
1589543SChristopher.Baumbauer@Sun.COM extern const int _ncpu; /* Pull the kernel's global _ncpu definition */
1599543SChristopher.Baumbauer@Sun.COM
1606330Sjc25722 int
_init(void)1616330Sjc25722 _init(void)
1626330Sjc25722 {
1636330Sjc25722 int e;
1646330Sjc25722
1656330Sjc25722 if (e = ddi_soft_state_init(&statep, sizeof (struct mem_cache_softc),
1666330Sjc25722 MAX_MEM_CACHE_INSTANCES)) {
1676330Sjc25722 return (e);
1686330Sjc25722 }
1696330Sjc25722
1706330Sjc25722 if ((e = mod_install(&modlinkage)) != 0)
1716330Sjc25722 ddi_soft_state_fini(&statep);
1726330Sjc25722
1736330Sjc25722 return (e);
1746330Sjc25722 }
1756330Sjc25722
1766330Sjc25722 int
_fini(void)1776330Sjc25722 _fini(void)
1786330Sjc25722 {
1796330Sjc25722 int e;
1806330Sjc25722
1816330Sjc25722 if ((e = mod_remove(&modlinkage)) != 0)
1826330Sjc25722 return (e);
1836330Sjc25722
1846330Sjc25722 ddi_soft_state_fini(&statep);
1856330Sjc25722
1866330Sjc25722 return (DDI_SUCCESS);
1876330Sjc25722 }
1886330Sjc25722
1896330Sjc25722 int
_info(struct modinfo * modinfop)1906330Sjc25722 _info(struct modinfo *modinfop)
1916330Sjc25722 {
1926330Sjc25722 return (mod_info(&modlinkage, modinfop));
1936330Sjc25722 }
1946330Sjc25722
1956330Sjc25722 /*ARGSUSED*/
1966330Sjc25722 static int
mem_cache_getinfo(dev_info_t * dip,ddi_info_cmd_t cmd,void * arg,void ** result)1976330Sjc25722 mem_cache_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
1986330Sjc25722 {
1996330Sjc25722 int inst;
2006330Sjc25722 int retval = DDI_SUCCESS;
2016330Sjc25722 struct mem_cache_softc *softc;
2026330Sjc25722
2036330Sjc25722 inst = getminor((dev_t)arg);
2046330Sjc25722
2056330Sjc25722 switch (cmd) {
2066330Sjc25722 case DDI_INFO_DEVT2DEVINFO:
2076330Sjc25722 if ((softc = getsoftc(inst)) == NULL) {
2086330Sjc25722 *result = (void *)NULL;
2096330Sjc25722 retval = DDI_FAILURE;
2106330Sjc25722 } else
2116330Sjc25722 *result = (void *)softc->dip;
2126330Sjc25722 break;
2136330Sjc25722
2146330Sjc25722 case DDI_INFO_DEVT2INSTANCE:
2156330Sjc25722 *result = (void *)((uintptr_t)inst);
2166330Sjc25722 break;
2176330Sjc25722
2186330Sjc25722 default:
2196330Sjc25722 retval = DDI_FAILURE;
2206330Sjc25722 }
2216330Sjc25722
2226330Sjc25722 return (retval);
2236330Sjc25722 }
2246330Sjc25722
2256330Sjc25722 static int
mem_cache_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)2266330Sjc25722 mem_cache_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2276330Sjc25722 {
2286330Sjc25722 int inst;
2296330Sjc25722 struct mem_cache_softc *softc = NULL;
2306330Sjc25722 char name[80];
2316330Sjc25722
2326330Sjc25722 switch (cmd) {
2336330Sjc25722 case DDI_ATTACH:
2346330Sjc25722 inst = ddi_get_instance(dip);
2356330Sjc25722 if (inst >= MAX_MEM_CACHE_INSTANCES) {
2366330Sjc25722 cmn_err(CE_WARN, "attach failed, too many instances\n");
2376330Sjc25722 return (DDI_FAILURE);
2386330Sjc25722 }
2396330Sjc25722 (void) sprintf(name, MEM_CACHE_DRIVER_NAME"%d", inst);
2406330Sjc25722 if (ddi_create_priv_minor_node(dip, name,
2416330Sjc25722 S_IFCHR,
2426330Sjc25722 inst,
2436330Sjc25722 DDI_PSEUDO,
2446330Sjc25722 0, NULL, "all", 0640) ==
2456330Sjc25722 DDI_FAILURE) {
2466330Sjc25722 ddi_remove_minor_node(dip, NULL);
2476330Sjc25722 return (DDI_FAILURE);
2486330Sjc25722 }
2496330Sjc25722
2506330Sjc25722 /* Allocate a soft state structure for this instance */
2516330Sjc25722 if (ddi_soft_state_zalloc(statep, inst) != DDI_SUCCESS) {
2526330Sjc25722 cmn_err(CE_WARN, " ddi_soft_state_zalloc() failed "
2536330Sjc25722 "for inst %d\n", inst);
2546330Sjc25722 goto attach_failed;
2556330Sjc25722 }
2566330Sjc25722
2576330Sjc25722 /* Setup soft state */
2586330Sjc25722 softc = getsoftc(inst);
2596330Sjc25722 softc->dip = dip;
2606330Sjc25722 mutex_init(&softc->mutex, NULL, MUTEX_DRIVER, NULL);
2616330Sjc25722
2626330Sjc25722 /* Create main environmental node */
2636330Sjc25722 ddi_report_dev(dip);
2646330Sjc25722
2656330Sjc25722 return (DDI_SUCCESS);
2666330Sjc25722
2676330Sjc25722 case DDI_RESUME:
2686330Sjc25722 return (DDI_SUCCESS);
2696330Sjc25722
2706330Sjc25722 default:
2716330Sjc25722 return (DDI_FAILURE);
2726330Sjc25722 }
2736330Sjc25722
2746330Sjc25722 attach_failed:
2756330Sjc25722
2766330Sjc25722 /* Free soft state, if allocated. remove minor node if added earlier */
2776330Sjc25722 if (softc)
2786330Sjc25722 ddi_soft_state_free(statep, inst);
2796330Sjc25722
2806330Sjc25722 ddi_remove_minor_node(dip, NULL);
2816330Sjc25722
2826330Sjc25722 return (DDI_FAILURE);
2836330Sjc25722 }
2846330Sjc25722
2856330Sjc25722 static int
mem_cache_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)2866330Sjc25722 mem_cache_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2876330Sjc25722 {
2886330Sjc25722 int inst;
2896330Sjc25722 struct mem_cache_softc *softc;
2906330Sjc25722
2916330Sjc25722 switch (cmd) {
2926330Sjc25722 case DDI_DETACH:
2936330Sjc25722 inst = ddi_get_instance(dip);
2946330Sjc25722 if ((softc = getsoftc(inst)) == NULL)
2956330Sjc25722 return (ENXIO);
2966330Sjc25722
2976330Sjc25722 /* Free the soft state and remove minor node added earlier */
2986330Sjc25722 mutex_destroy(&softc->mutex);
2996330Sjc25722 ddi_soft_state_free(statep, inst);
3006330Sjc25722 ddi_remove_minor_node(dip, NULL);
3016330Sjc25722 return (DDI_SUCCESS);
3026330Sjc25722
3036330Sjc25722 case DDI_SUSPEND:
3046330Sjc25722 return (DDI_SUCCESS);
3056330Sjc25722
3066330Sjc25722 default:
3076330Sjc25722 return (DDI_FAILURE);
3086330Sjc25722 }
3096330Sjc25722 }
3106330Sjc25722
3116330Sjc25722 /*ARGSUSED*/
3126330Sjc25722 static int
mem_cache_open(dev_t * devp,int flag,int otyp,cred_t * credp)3136330Sjc25722 mem_cache_open(dev_t *devp, int flag, int otyp, cred_t *credp)
3146330Sjc25722 {
3156330Sjc25722 int inst = getminor(*devp);
3166330Sjc25722
3176330Sjc25722 return (getsoftc(inst) == NULL ? ENXIO : 0);
3186330Sjc25722 }
3196330Sjc25722
3206330Sjc25722 /*ARGSUSED*/
3216330Sjc25722 static int
mem_cache_close(dev_t dev,int flag,int otyp,cred_t * credp)3226330Sjc25722 mem_cache_close(dev_t dev, int flag, int otyp, cred_t *credp)
3236330Sjc25722 {
3246330Sjc25722 int inst = getminor(dev);
3256330Sjc25722
3266330Sjc25722 return (getsoftc(inst) == NULL ? ENXIO : 0);
3276330Sjc25722 }
3286330Sjc25722
3296330Sjc25722 static char *tstate_to_desc[] = {
3306330Sjc25722 "Invalid", /* 0 */
3316330Sjc25722 "Shared", /* 1 */
3326330Sjc25722 "Exclusive", /* 2 */
3336330Sjc25722 "Owner", /* 3 */
3346330Sjc25722 "Modified", /* 4 */
3356330Sjc25722 "NA", /* 5 */
3366330Sjc25722 "Owner/Shared", /* 6 */
3376330Sjc25722 "Reserved(7)", /* 7 */
3386330Sjc25722 };
3396330Sjc25722
3406330Sjc25722 static char *
tag_state_to_desc(uint8_t tagstate)3416330Sjc25722 tag_state_to_desc(uint8_t tagstate)
3426330Sjc25722 {
3436330Sjc25722 return (tstate_to_desc[tagstate & CH_ECSTATE_MASK]);
3446330Sjc25722 }
3456330Sjc25722
3466330Sjc25722 void
print_l2_tag(uint64_t tag_addr,uint64_t l2_tag)3476330Sjc25722 print_l2_tag(uint64_t tag_addr, uint64_t l2_tag)
3486330Sjc25722 {
3496330Sjc25722 uint64_t l2_subaddr;
3506330Sjc25722 uint8_t l2_state;
3516330Sjc25722
3526330Sjc25722 l2_subaddr = PN_L2TAG_TO_PA(l2_tag);
3536330Sjc25722 l2_subaddr |= (tag_addr & PN_L2_INDEX_MASK);
3546330Sjc25722
3556330Sjc25722 l2_state = (l2_tag & CH_ECSTATE_MASK);
3566330Sjc25722 cmn_err(CE_CONT,
3576330Sjc25722 "PA=0x%08x.%08x E$tag 0x%08x.%08x E$state %s\n",
3586330Sjc25722 PRTF_64_TO_32(l2_subaddr),
3596330Sjc25722 PRTF_64_TO_32(l2_tag),
3606330Sjc25722 tag_state_to_desc(l2_state));
3616330Sjc25722 }
3626330Sjc25722
3636330Sjc25722 void
print_l2cache_line(ch_cpu_logout_t * clop)3646330Sjc25722 print_l2cache_line(ch_cpu_logout_t *clop)
3656330Sjc25722 {
3666330Sjc25722 uint64_t l2_subaddr;
3676330Sjc25722 int i, offset;
3686330Sjc25722 uint8_t way, l2_state;
3696330Sjc25722 ch_ec_data_t *ecp;
3706330Sjc25722
3716330Sjc25722
3726330Sjc25722 for (way = 0; way < PN_CACHE_NWAYS; way++) {
3736330Sjc25722 ecp = &clop->clo_data.chd_l2_data[way];
3746330Sjc25722 l2_subaddr = PN_L2TAG_TO_PA(ecp->ec_tag);
3756330Sjc25722 l2_subaddr |= (ecp->ec_idx & PN_L2_INDEX_MASK);
3766330Sjc25722
3776330Sjc25722 l2_state = (ecp->ec_tag & CH_ECSTATE_MASK);
3786330Sjc25722 cmn_err(CE_CONT,
3796330Sjc25722 "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
3806330Sjc25722 "E$tag 0x%08x.%08x E$state %s",
3816330Sjc25722 way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(l2_subaddr),
3826330Sjc25722 PRTF_64_TO_32(ecp->ec_tag),
3836330Sjc25722 tag_state_to_desc(l2_state));
3846330Sjc25722 /*
3856330Sjc25722 * Dump out Ecache subblock data captured.
3866330Sjc25722 * For Cheetah, we need to compute the ECC for each 16-byte
3876330Sjc25722 * chunk and compare it with the captured chunk ECC to figure
3886330Sjc25722 * out which chunk is bad.
3896330Sjc25722 */
3906330Sjc25722 for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
3916330Sjc25722 ec_data_elm_t *ecdptr;
3926330Sjc25722 uint64_t d_low, d_high;
3936330Sjc25722 uint32_t ecc;
3946330Sjc25722 int l2_data_idx = (i/2);
3956330Sjc25722
3966330Sjc25722 offset = i * 16;
3976330Sjc25722 ecdptr = &clop->clo_data.chd_l2_data[way].ec_data
3986330Sjc25722 [l2_data_idx];
3996330Sjc25722 if ((i & 1) == 0) {
4006330Sjc25722 ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
4016330Sjc25722 d_high = ecdptr->ec_d8[0];
4026330Sjc25722 d_low = ecdptr->ec_d8[1];
4036330Sjc25722 } else {
4046330Sjc25722 ecc = ecdptr->ec_eccd & 0x1ff;
4056330Sjc25722 d_high = ecdptr->ec_d8[2];
4066330Sjc25722 d_low = ecdptr->ec_d8[3];
4076330Sjc25722 }
4086330Sjc25722
4096330Sjc25722 cmn_err(CE_CONT,
4106330Sjc25722 "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
4116330Sjc25722 " ECC 0x%03x",
4126330Sjc25722 offset, PRTF_64_TO_32(d_high),
4136330Sjc25722 PRTF_64_TO_32(d_low), ecc);
4146330Sjc25722 }
4156330Sjc25722 } /* end of for way loop */
4166330Sjc25722 }
4176330Sjc25722
4186330Sjc25722 void
print_ecache_line(ch_cpu_logout_t * clop)4196330Sjc25722 print_ecache_line(ch_cpu_logout_t *clop)
4206330Sjc25722 {
4216330Sjc25722 uint64_t ec_subaddr;
4226330Sjc25722 int i, offset;
4236330Sjc25722 uint8_t way, ec_state;
4246330Sjc25722 ch_ec_data_t *ecp;
4256330Sjc25722
4266330Sjc25722
4276330Sjc25722 for (way = 0; way < PN_CACHE_NWAYS; way++) {
4286330Sjc25722 ecp = &clop->clo_data.chd_ec_data[way];
4296330Sjc25722 ec_subaddr = PN_L3TAG_TO_PA(ecp->ec_tag);
4306330Sjc25722 ec_subaddr |= (ecp->ec_idx & PN_L3_TAG_RD_MASK);
4316330Sjc25722
4326330Sjc25722 ec_state = (ecp->ec_tag & CH_ECSTATE_MASK);
4336330Sjc25722 cmn_err(CE_CONT,
4346330Sjc25722 "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
4356330Sjc25722 "E$tag 0x%08x.%08x E$state %s",
4366330Sjc25722 way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(ec_subaddr),
4376330Sjc25722 PRTF_64_TO_32(ecp->ec_tag),
4386330Sjc25722 tag_state_to_desc(ec_state));
4396330Sjc25722 /*
4406330Sjc25722 * Dump out Ecache subblock data captured.
4416330Sjc25722 * For Cheetah, we need to compute the ECC for each 16-byte
4426330Sjc25722 * chunk and compare it with the captured chunk ECC to figure
4436330Sjc25722 * out which chunk is bad.
4446330Sjc25722 */
4456330Sjc25722 for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
4466330Sjc25722 ec_data_elm_t *ecdptr;
4476330Sjc25722 uint64_t d_low, d_high;
4486330Sjc25722 uint32_t ecc;
4496330Sjc25722 int ec_data_idx = (i/2);
4506330Sjc25722
4516330Sjc25722 offset = i * 16;
4526330Sjc25722 ecdptr =
4536330Sjc25722 &clop->clo_data.chd_ec_data[way].ec_data
4546330Sjc25722 [ec_data_idx];
4556330Sjc25722 if ((i & 1) == 0) {
4566330Sjc25722 ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
4576330Sjc25722 d_high = ecdptr->ec_d8[0];
4586330Sjc25722 d_low = ecdptr->ec_d8[1];
4596330Sjc25722 } else {
4606330Sjc25722 ecc = ecdptr->ec_eccd & 0x1ff;
4616330Sjc25722 d_high = ecdptr->ec_d8[2];
4626330Sjc25722 d_low = ecdptr->ec_d8[3];
4636330Sjc25722 }
4646330Sjc25722
4656330Sjc25722 cmn_err(CE_CONT,
4666330Sjc25722 "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
4676330Sjc25722 " ECC 0x%03x",
4686330Sjc25722 offset, PRTF_64_TO_32(d_high),
4696330Sjc25722 PRTF_64_TO_32(d_low), ecc);
4706330Sjc25722 }
4716330Sjc25722 }
4726330Sjc25722 }
4736330Sjc25722
4746330Sjc25722 static boolean_t
tag_addr_collides(uint64_t tag_addr,cache_id_t type,retire_func_t start_of_func,retire_func_t end_of_func)4756330Sjc25722 tag_addr_collides(uint64_t tag_addr, cache_id_t type,
4766330Sjc25722 retire_func_t start_of_func, retire_func_t end_of_func)
4776330Sjc25722 {
4786330Sjc25722 uint64_t start_paddr, end_paddr;
4796330Sjc25722 char *type_str;
4806330Sjc25722
4816330Sjc25722 start_paddr = va_to_pa((void *)start_of_func);
4826330Sjc25722 end_paddr = va_to_pa((void *)end_of_func);
4836330Sjc25722 switch (type) {
4846330Sjc25722 case L2_CACHE_TAG:
4856330Sjc25722 case L2_CACHE_DATA:
4866330Sjc25722 tag_addr &= PN_L2_INDEX_MASK;
4876330Sjc25722 start_paddr &= PN_L2_INDEX_MASK;
4886330Sjc25722 end_paddr &= PN_L2_INDEX_MASK;
4896330Sjc25722 type_str = "L2:";
4906330Sjc25722 break;
4916330Sjc25722 case L3_CACHE_TAG:
4926330Sjc25722 case L3_CACHE_DATA:
4936330Sjc25722 tag_addr &= PN_L3_TAG_RD_MASK;
4946330Sjc25722 start_paddr &= PN_L3_TAG_RD_MASK;
4956330Sjc25722 end_paddr &= PN_L3_TAG_RD_MASK;
4966330Sjc25722 type_str = "L3:";
4976330Sjc25722 break;
4986330Sjc25722 default:
4996330Sjc25722 /*
5006330Sjc25722 * Should never reach here.
5016330Sjc25722 */
5026330Sjc25722 ASSERT(0);
5036330Sjc25722 return (B_FALSE);
5046330Sjc25722 }
5056330Sjc25722 if ((tag_addr > (start_paddr - 0x100)) &&
5066330Sjc25722 (tag_addr < (end_paddr + 0x100))) {
5076330Sjc25722 if (mem_cache_debug & 0x1)
5086330Sjc25722 cmn_err(CE_CONT,
5096330Sjc25722 "%s collision detected tag_addr = 0x%08x"
5106330Sjc25722 " start_paddr = 0x%08x end_paddr = 0x%08x\n",
5116330Sjc25722 type_str, (uint32_t)tag_addr, (uint32_t)start_paddr,
5126330Sjc25722 (uint32_t)end_paddr);
5136330Sjc25722 return (B_TRUE);
5146330Sjc25722 }
5156330Sjc25722 else
5166330Sjc25722 return (B_FALSE);
5176330Sjc25722 }
5186330Sjc25722
5196330Sjc25722 static uint64_t
get_tag_addr(cache_info_t * cache_info)5206330Sjc25722 get_tag_addr(cache_info_t *cache_info)
5216330Sjc25722 {
5226330Sjc25722 uint64_t tag_addr, scratch;
5236330Sjc25722
5246330Sjc25722 switch (cache_info->cache) {
5256330Sjc25722 case L2_CACHE_TAG:
5266330Sjc25722 case L2_CACHE_DATA:
5276330Sjc25722 tag_addr = (uint64_t)(cache_info->index <<
5286330Sjc25722 PN_CACHE_LINE_SHIFT);
5296330Sjc25722 scratch = (uint64_t)(cache_info->way <<
5306330Sjc25722 PN_L2_WAY_SHIFT);
5316330Sjc25722 tag_addr |= scratch;
5326330Sjc25722 tag_addr |= PN_L2_IDX_HW_ECC_EN;
5336330Sjc25722 break;
5346330Sjc25722 case L3_CACHE_TAG:
5356330Sjc25722 case L3_CACHE_DATA:
5366330Sjc25722 tag_addr = (uint64_t)(cache_info->index <<
5376330Sjc25722 PN_CACHE_LINE_SHIFT);
5386330Sjc25722 scratch = (uint64_t)(cache_info->way <<
5396330Sjc25722 PN_L3_WAY_SHIFT);
5406330Sjc25722 tag_addr |= scratch;
5416330Sjc25722 tag_addr |= PN_L3_IDX_HW_ECC_EN;
5426330Sjc25722 break;
5436330Sjc25722 default:
5446330Sjc25722 /*
5456330Sjc25722 * Should never reach here.
5466330Sjc25722 */
5476330Sjc25722 ASSERT(0);
5486330Sjc25722 return (uint64_t)(0);
5496330Sjc25722 }
5506330Sjc25722 return (tag_addr);
5516330Sjc25722 }
5526330Sjc25722
5536330Sjc25722 static int
mem_cache_ioctl_ops(int cmd,int mode,cache_info_t * cache_info)5546330Sjc25722 mem_cache_ioctl_ops(int cmd, int mode, cache_info_t *cache_info)
5556330Sjc25722 {
5566330Sjc25722 int ret_val = 0;
5576330Sjc25722 uint64_t afar, tag_addr;
5586330Sjc25722 ch_cpu_logout_t clop;
5596330Sjc25722 uint64_t Lxcache_tag_data[PN_CACHE_NWAYS];
5606330Sjc25722 int i, retire_retry_count;
5616330Sjc25722 cpu_t *cpu;
5626330Sjc25722 uint64_t tag_data;
5636330Sjc25722 uint8_t state;
5646330Sjc25722
56510784Ssinanallur.balasubramanian@sun.com if (cache_info->way >= PN_CACHE_NWAYS)
56610784Ssinanallur.balasubramanian@sun.com return (EINVAL);
5676330Sjc25722 switch (cache_info->cache) {
5686330Sjc25722 case L2_CACHE_TAG:
5696330Sjc25722 case L2_CACHE_DATA:
5706330Sjc25722 if (cache_info->index >=
5716330Sjc25722 (PN_L2_SET_SIZE/PN_L2_LINESIZE))
5726330Sjc25722 return (EINVAL);
5736330Sjc25722 break;
5746330Sjc25722 case L3_CACHE_TAG:
5756330Sjc25722 case L3_CACHE_DATA:
5766330Sjc25722 if (cache_info->index >=
5776330Sjc25722 (PN_L3_SET_SIZE/PN_L3_LINESIZE))
5786330Sjc25722 return (EINVAL);
5796330Sjc25722 break;
5806330Sjc25722 default:
5816330Sjc25722 return (ENOTSUP);
5826330Sjc25722 }
5836330Sjc25722 /*
5846330Sjc25722 * Check if we have a valid cpu ID and that
5856330Sjc25722 * CPU is ONLINE.
5866330Sjc25722 */
5876330Sjc25722 mutex_enter(&cpu_lock);
5886330Sjc25722 cpu = cpu_get(cache_info->cpu_id);
5896330Sjc25722 if ((cpu == NULL) || (!cpu_is_online(cpu))) {
5906330Sjc25722 mutex_exit(&cpu_lock);
5916330Sjc25722 return (EINVAL);
5926330Sjc25722 }
5936330Sjc25722 mutex_exit(&cpu_lock);
59410784Ssinanallur.balasubramanian@sun.com pattern = 0; /* default value of TAG PA when cacheline is retired. */
5956330Sjc25722 switch (cmd) {
5966330Sjc25722 case MEM_CACHE_RETIRE:
5976330Sjc25722 tag_addr = get_tag_addr(cache_info);
5986330Sjc25722 pattern |= PN_ECSTATE_NA;
5996330Sjc25722 retire_retry_count = 0;
6006330Sjc25722 affinity_set(cache_info->cpu_id);
6016330Sjc25722 switch (cache_info->cache) {
6026330Sjc25722 case L2_CACHE_DATA:
6036330Sjc25722 case L2_CACHE_TAG:
60410784Ssinanallur.balasubramanian@sun.com if ((cache_info->bit & MSB_BIT_MASK) ==
60510784Ssinanallur.balasubramanian@sun.com MSB_BIT_MASK)
60610784Ssinanallur.balasubramanian@sun.com pattern |= PN_L2TAG_PA_MASK;
6076330Sjc25722 retry_l2_retire:
6086330Sjc25722 if (tag_addr_collides(tag_addr,
6096330Sjc25722 cache_info->cache,
6106330Sjc25722 retire_l2_start, retire_l2_end))
6116330Sjc25722 ret_val =
6126330Sjc25722 retire_l2_alternate(
6136330Sjc25722 tag_addr, pattern);
6146330Sjc25722 else
6156330Sjc25722 ret_val = retire_l2(tag_addr,
6166330Sjc25722 pattern);
6176330Sjc25722 if (ret_val == 1) {
6186330Sjc25722 /*
6196330Sjc25722 * cacheline was in retired
6206330Sjc25722 * STATE already.
6216330Sjc25722 * so return success.
6226330Sjc25722 */
6236330Sjc25722 ret_val = 0;
6246330Sjc25722 }
6256330Sjc25722 if (ret_val < 0) {
6266330Sjc25722 cmn_err(CE_WARN,
6276330Sjc25722 "retire_l2() failed. index = 0x%x way %d. Retrying...\n",
6286330Sjc25722 cache_info->index,
6296330Sjc25722 cache_info->way);
6306330Sjc25722 if (retire_retry_count >= 2) {
6316330Sjc25722 retire_failures++;
6326330Sjc25722 affinity_clear();
6336330Sjc25722 return (EIO);
6346330Sjc25722 }
6356330Sjc25722 retire_retry_count++;
6366330Sjc25722 goto retry_l2_retire;
6376330Sjc25722 }
6386330Sjc25722 if (ret_val == 2)
6396330Sjc25722 l2_flush_retries_done++;
6406440Sbala /*
6416440Sbala * We bind ourself to a CPU and send cross trap to
6426440Sbala * ourself. On return from xt_one we can rely on the
6436440Sbala * data in tag_data being filled in. Normally one would
6446440Sbala * do a xt_sync to make sure that the CPU has completed
6456440Sbala * the cross trap call xt_one.
6466440Sbala */
6476330Sjc25722 xt_one(cache_info->cpu_id,
6486330Sjc25722 (xcfunc_t *)(get_l2_tag_tl1),
6496330Sjc25722 tag_addr, (uint64_t)(&tag_data));
6506330Sjc25722 state = tag_data & CH_ECSTATE_MASK;
6516330Sjc25722 if (state != PN_ECSTATE_NA) {
6526330Sjc25722 retire_failures++;
6536330Sjc25722 print_l2_tag(tag_addr,
6546330Sjc25722 tag_data);
6556330Sjc25722 cmn_err(CE_WARN,
6566330Sjc25722 "L2 RETIRE:failed for index 0x%x way %d. Retrying...\n",
6576330Sjc25722 cache_info->index,
6586330Sjc25722 cache_info->way);
6596330Sjc25722 if (retire_retry_count >= 2) {
6606330Sjc25722 retire_failures++;
6616330Sjc25722 affinity_clear();
6626330Sjc25722 return (EIO);
6636330Sjc25722 }
6646330Sjc25722 retire_retry_count++;
6656330Sjc25722 goto retry_l2_retire;
6666330Sjc25722 }
6676330Sjc25722 break;
6686330Sjc25722 case L3_CACHE_TAG:
6696330Sjc25722 case L3_CACHE_DATA:
67010784Ssinanallur.balasubramanian@sun.com if ((cache_info->bit & MSB_BIT_MASK) ==
67110784Ssinanallur.balasubramanian@sun.com MSB_BIT_MASK)
67210784Ssinanallur.balasubramanian@sun.com pattern |= PN_L3TAG_PA_MASK;
6736330Sjc25722 if (tag_addr_collides(tag_addr,
6746330Sjc25722 cache_info->cache,
6756330Sjc25722 retire_l3_start, retire_l3_end))
6766330Sjc25722 ret_val =
6776330Sjc25722 retire_l3_alternate(
6786330Sjc25722 tag_addr, pattern);
6796330Sjc25722 else
6806330Sjc25722 ret_val = retire_l3(tag_addr,
6816330Sjc25722 pattern);
6826330Sjc25722 if (ret_val == 1) {
6836330Sjc25722 /*
6846330Sjc25722 * cacheline was in retired
6856330Sjc25722 * STATE already.
6866330Sjc25722 * so return success.
6876330Sjc25722 */
6886330Sjc25722 ret_val = 0;
6896330Sjc25722 }
6906330Sjc25722 if (ret_val < 0) {
6916330Sjc25722 cmn_err(CE_WARN,
6926330Sjc25722 "retire_l3() failed. ret_val = %d index = 0x%x\n",
6936330Sjc25722 ret_val,
6946330Sjc25722 cache_info->index);
6956330Sjc25722 retire_failures++;
6966330Sjc25722 affinity_clear();
6976330Sjc25722 return (EIO);
6986330Sjc25722 }
6996440Sbala /*
7006440Sbala * We bind ourself to a CPU and send cross trap to
7016440Sbala * ourself. On return from xt_one we can rely on the
7026440Sbala * data in tag_data being filled in. Normally one would
7036440Sbala * do a xt_sync to make sure that the CPU has completed
7046440Sbala * the cross trap call xt_one.
7056440Sbala */
7066330Sjc25722 xt_one(cache_info->cpu_id,
7076330Sjc25722 (xcfunc_t *)(get_l3_tag_tl1),
7086330Sjc25722 tag_addr, (uint64_t)(&tag_data));
7096330Sjc25722 state = tag_data & CH_ECSTATE_MASK;
7106330Sjc25722 if (state != PN_ECSTATE_NA) {
7116330Sjc25722 cmn_err(CE_WARN,
7126330Sjc25722 "L3 RETIRE failed for index 0x%x\n",
7136330Sjc25722 cache_info->index);
7146330Sjc25722 retire_failures++;
7156330Sjc25722 affinity_clear();
7166330Sjc25722 return (EIO);
7176330Sjc25722 }
7186330Sjc25722
7196330Sjc25722 break;
7206330Sjc25722 }
7216330Sjc25722 affinity_clear();
7226330Sjc25722 break;
7236330Sjc25722 case MEM_CACHE_UNRETIRE:
7246330Sjc25722 tag_addr = get_tag_addr(cache_info);
7256330Sjc25722 pattern = PN_ECSTATE_INV;
7266330Sjc25722 affinity_set(cache_info->cpu_id);
7276330Sjc25722 switch (cache_info->cache) {
7286330Sjc25722 case L2_CACHE_DATA:
7296330Sjc25722 case L2_CACHE_TAG:
7306440Sbala /*
7316440Sbala * We bind ourself to a CPU and send cross trap to
7326440Sbala * ourself. On return from xt_one we can rely on the
7336440Sbala * data in tag_data being filled in. Normally one would
7346440Sbala * do a xt_sync to make sure that the CPU has completed
7356440Sbala * the cross trap call xt_one.
7366440Sbala */
7376330Sjc25722 xt_one(cache_info->cpu_id,
7386330Sjc25722 (xcfunc_t *)(get_l2_tag_tl1),
7396330Sjc25722 tag_addr, (uint64_t)(&tag_data));
7406330Sjc25722 state = tag_data & CH_ECSTATE_MASK;
7416330Sjc25722 if (state != PN_ECSTATE_NA) {
7426330Sjc25722 affinity_clear();
7436330Sjc25722 return (EINVAL);
7446330Sjc25722 }
7456330Sjc25722 if (tag_addr_collides(tag_addr,
7466330Sjc25722 cache_info->cache,
7476330Sjc25722 unretire_l2_start, unretire_l2_end))
7486330Sjc25722 ret_val =
7496330Sjc25722 unretire_l2_alternate(
7506330Sjc25722 tag_addr, pattern);
7516330Sjc25722 else
7526330Sjc25722 ret_val =
7536330Sjc25722 unretire_l2(tag_addr,
7546330Sjc25722 pattern);
7556330Sjc25722 if (ret_val != 0) {
7566330Sjc25722 cmn_err(CE_WARN,
7576330Sjc25722 "unretire_l2() failed. ret_val = %d index = 0x%x\n",
7586330Sjc25722 ret_val,
7596330Sjc25722 cache_info->index);
7606330Sjc25722 retire_failures++;
7616330Sjc25722 affinity_clear();
7626330Sjc25722 return (EIO);
7636330Sjc25722 }
7646330Sjc25722 break;
7656330Sjc25722 case L3_CACHE_TAG:
7666330Sjc25722 case L3_CACHE_DATA:
7676440Sbala /*
7686440Sbala * We bind ourself to a CPU and send cross trap to
7696440Sbala * ourself. On return from xt_one we can rely on the
7706440Sbala * data in tag_data being filled in. Normally one would
7716440Sbala * do a xt_sync to make sure that the CPU has completed
7726440Sbala * the cross trap call xt_one.
7736440Sbala */
7746330Sjc25722 xt_one(cache_info->cpu_id,
7756330Sjc25722 (xcfunc_t *)(get_l3_tag_tl1),
7766330Sjc25722 tag_addr, (uint64_t)(&tag_data));
7776330Sjc25722 state = tag_data & CH_ECSTATE_MASK;
7786330Sjc25722 if (state != PN_ECSTATE_NA) {
7796330Sjc25722 affinity_clear();
7806330Sjc25722 return (EINVAL);
7816330Sjc25722 }
7826330Sjc25722 if (tag_addr_collides(tag_addr,
7836330Sjc25722 cache_info->cache,
7846330Sjc25722 unretire_l3_start, unretire_l3_end))
7856330Sjc25722 ret_val =
7866330Sjc25722 unretire_l3_alternate(
7876330Sjc25722 tag_addr, pattern);
7886330Sjc25722 else
7896330Sjc25722 ret_val =
7906330Sjc25722 unretire_l3(tag_addr,
7916330Sjc25722 pattern);
7926330Sjc25722 if (ret_val != 0) {
7936330Sjc25722 cmn_err(CE_WARN,
7946330Sjc25722 "unretire_l3() failed. ret_val = %d index = 0x%x\n",
7956330Sjc25722 ret_val,
7966330Sjc25722 cache_info->index);
7976330Sjc25722 affinity_clear();
7986330Sjc25722 return (EIO);
7996330Sjc25722 }
8006330Sjc25722 break;
8016330Sjc25722 }
8026330Sjc25722 affinity_clear();
8036330Sjc25722 break;
8046330Sjc25722 case MEM_CACHE_ISRETIRED:
8056330Sjc25722 case MEM_CACHE_STATE:
8066330Sjc25722 return (ENOTSUP);
8076330Sjc25722 case MEM_CACHE_READ_TAGS:
8086440Sbala #ifdef DEBUG
8096330Sjc25722 case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
8106440Sbala #endif
8116330Sjc25722 /*
8126330Sjc25722 * Read tag and data for all the ways at a given afar
8136330Sjc25722 */
8146330Sjc25722 afar = (uint64_t)(cache_info->index
8156330Sjc25722 << PN_CACHE_LINE_SHIFT);
81610784Ssinanallur.balasubramanian@sun.com mutex_enter(&cpu_lock);
8176330Sjc25722 affinity_set(cache_info->cpu_id);
81810784Ssinanallur.balasubramanian@sun.com (void) pause_cpus(NULL);
81910784Ssinanallur.balasubramanian@sun.com mutex_exit(&cpu_lock);
8206440Sbala 		/*
8216440Sbala 		 * We bind ourselves to a CPU and send a cross trap to
8226440Sbala 		 * ourselves. On return from xt_one we can rely on the
8236440Sbala 		 * data in clop being filled in. Normally one would
8246440Sbala 		 * do an xt_sync to make sure that the CPU has completed
8256440Sbala 		 * the cross trap call xt_one.
8266440Sbala 		 */
8276330Sjc25722 xt_one(cache_info->cpu_id,
8286330Sjc25722 (xcfunc_t *)(get_ecache_dtags_tl1),
8296330Sjc25722 afar, (uint64_t)(&clop));
83010784Ssinanallur.balasubramanian@sun.com mutex_enter(&cpu_lock);
83110784Ssinanallur.balasubramanian@sun.com (void) start_cpus();
83210784Ssinanallur.balasubramanian@sun.com mutex_exit(&cpu_lock);
83310784Ssinanallur.balasubramanian@sun.com affinity_clear();
8346330Sjc25722 switch (cache_info->cache) {
8356330Sjc25722 case L2_CACHE_TAG:
8366330Sjc25722 for (i = 0; i < PN_CACHE_NWAYS; i++) {
8376330Sjc25722 Lxcache_tag_data[i] =
8386330Sjc25722 clop.clo_data.chd_l2_data
8396330Sjc25722 [i].ec_tag;
8406330Sjc25722 }
84110784Ssinanallur.balasubramanian@sun.com #ifdef DEBUG
8426330Sjc25722 last_error_injected_bit =
8436330Sjc25722 last_l2tag_error_injected_bit;
8446330Sjc25722 last_error_injected_way =
8456330Sjc25722 last_l2tag_error_injected_way;
84610784Ssinanallur.balasubramanian@sun.com #endif
8476330Sjc25722 break;
8486330Sjc25722 case L3_CACHE_TAG:
8496330Sjc25722 for (i = 0; i < PN_CACHE_NWAYS; i++) {
8506330Sjc25722 Lxcache_tag_data[i] =
8516330Sjc25722 clop.clo_data.chd_ec_data
8526330Sjc25722 [i].ec_tag;
8536330Sjc25722 }
85410784Ssinanallur.balasubramanian@sun.com #ifdef DEBUG
8556330Sjc25722 last_error_injected_bit =
8566330Sjc25722 last_l3tag_error_injected_bit;
8576330Sjc25722 last_error_injected_way =
8586330Sjc25722 last_l3tag_error_injected_way;
85910784Ssinanallur.balasubramanian@sun.com #endif
8606330Sjc25722 break;
8616330Sjc25722 default:
8626330Sjc25722 return (ENOTSUP);
8636330Sjc25722 } /* end if switch(cache) */
8646440Sbala #ifdef DEBUG
86510784Ssinanallur.balasubramanian@sun.com if ((cmd == MEM_CACHE_READ_ERROR_INJECTED_TAGS) &&
86610784Ssinanallur.balasubramanian@sun.com (inject_anonymous_tag_error == 0) &&
86710784Ssinanallur.balasubramanian@sun.com (last_error_injected_way >= 0) &&
86810784Ssinanallur.balasubramanian@sun.com (last_error_injected_way <= 3)) {
8696330Sjc25722 pattern = ((uint64_t)1 <<
8706330Sjc25722 last_error_injected_bit);
8716330Sjc25722 			/*
8726330Sjc25722 			 * If the error bit is ECC we need to make sure the
8736330Sjc25722 			 * ECC on all WAYS is corrupted.
8746330Sjc25722 			 */
8756330Sjc25722 if ((last_error_injected_bit >= 6) &&
8766330Sjc25722 (last_error_injected_bit <= 14)) {
8776330Sjc25722 for (i = 0; i < PN_CACHE_NWAYS; i++)
8786330Sjc25722 Lxcache_tag_data[i] ^=
8796330Sjc25722 pattern;
8806330Sjc25722 } else
8816330Sjc25722 Lxcache_tag_data
8826330Sjc25722 [last_error_injected_way] ^=
8836330Sjc25722 pattern;
8846330Sjc25722 }
8856440Sbala #endif
8866330Sjc25722 if (ddi_copyout((caddr_t)Lxcache_tag_data,
8876330Sjc25722 (caddr_t)cache_info->datap,
8886330Sjc25722 sizeof (Lxcache_tag_data), mode)
8896330Sjc25722 != DDI_SUCCESS) {
8906330Sjc25722 return (EFAULT);
8916330Sjc25722 }
8926330Sjc25722 break; /* end of READ_TAGS */
8936330Sjc25722 default:
8946330Sjc25722 return (ENOTSUP);
8956330Sjc25722 } /* end if switch(cmd) */
8966330Sjc25722 return (ret_val);
8976330Sjc25722 }
8986330Sjc25722
/*ARGSUSED*/
/*
 * ioctl(9E) entry point for the mem_cache driver.
 *
 * Copies the caller-supplied cache_info_t in from userland (handling
 * 32-bit callers via the ILP32 conversion path when _MULTI_DATAMODEL
 * is built in), validates the target CPU, and dispatches the supported
 * cache retire/unretire/read-tags commands to mem_cache_ioctl_ops().
 *
 * Returns 0 on success, or:
 *   ENXIO   - no softc for this minor (device instance not attached)
 *   EFAULT  - ddi_copyin of the argument structure failed
 *   EINVAL  - cpu_id out of the [0, _ncpu) range
 *   ENOTSUP - target CPU is not a Panther, or the command is unknown
 *   EBADF   - a mutating command was issued on a non-FWRITE open
 * plus whatever mem_cache_ioctl_ops() itself returns.
 *
 * All per-instance state is serialized by softc->mutex; every return
 * path below the mutex_enter() drops the mutex first.
 */
static int
mem_cache_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	int inst;
	struct mem_cache_softc *softc;
	cache_info_t cache_info;
	cache_info32_t cache_info32;
	int ret_val;
	int is_panther;

	inst = getminor(dev);
	if ((softc = getsoftc(inst)) == NULL)
		return (ENXIO);

	mutex_enter(&softc->mutex);

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		/* 32-bit caller: copy in the ILP32 layout and widen it. */
		if (ddi_copyin((cache_info32_t *)arg, &cache_info32,
		    sizeof (cache_info32), mode) != DDI_SUCCESS) {
			mutex_exit(&softc->mutex);
			return (EFAULT);
		}
		cache_info.cache = cache_info32.cache;
		cache_info.index = cache_info32.index;
		cache_info.way = cache_info32.way;
		cache_info.cpu_id = cache_info32.cpu_id;
		cache_info.bit = cache_info32.bit;
		/* Widen the 32-bit user pointer to a native pointer. */
		cache_info.datap = (void *)((uint64_t)cache_info32.datap);
	} else
#endif
	/*
	 * Native-sized caller (or a non-_MULTI_DATAMODEL build, where
	 * this is the only copyin path, not an else-arm).
	 */
	if (ddi_copyin((cache_info_t *)arg, &cache_info,
	    sizeof (cache_info), mode) != DDI_SUCCESS) {
		mutex_exit(&softc->mutex);
		return (EFAULT);
	}

	/* Range-check cpu_id before using it to index cpunodes[]. */
	if ((cache_info.cpu_id < 0) || (cache_info.cpu_id >= _ncpu)) {
		mutex_exit(&softc->mutex);
		return (EINVAL);
	}
	/* Only Panther (UltraSPARC-IV+) supports cacheline retirement. */
	is_panther = IS_PANTHER(cpunodes[cache_info.cpu_id].implementation);
	if (!is_panther) {
		mutex_exit(&softc->mutex);
		return (ENOTSUP);
	}
	switch (cmd) {
	case MEM_CACHE_RETIRE:
	case MEM_CACHE_UNRETIRE:
		/* Mutating commands require the device opened for write. */
		if ((mode & FWRITE) == 0) {
			ret_val = EBADF;
			break;
		}
		/*FALLTHROUGH*/
	case MEM_CACHE_ISRETIRED:
	case MEM_CACHE_STATE:
	case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
	case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
#endif
		ret_val = mem_cache_ioctl_ops(cmd, mode, &cache_info);
		break;
	default:
		ret_val = ENOTSUP;
		break;
	}
	mutex_exit(&softc->mutex);
	return (ret_val);
}
970