/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Driver to retire/unretire L2/L3 cachelines on panther
 */
#include <sys/types.h>
#include <sys/types32.h>
#include <sys/time.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/param.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/file.h>
#include <sys/cpuvar.h>
#include <sys/x_call.h>
#include <sys/cheetahregs.h>
#include <sys/mem_cache.h>
#include <sys/mem_cache_ioctl.h>

extern int retire_l2(uint64_t, uint64_t);
extern int retire_l2_alternate(uint64_t, uint64_t);
extern int unretire_l2(uint64_t, uint64_t);
extern int unretire_l2_alternate(uint64_t, uint64_t);
extern int retire_l3(uint64_t, uint64_t);
extern int retire_l3_alternate(uint64_t, uint64_t);
extern int unretire_l3(uint64_t, uint64_t);
extern int unretire_l3_alternate(uint64_t, uint64_t);

extern void retire_l2_start(uint64_t, uint64_t);
extern void retire_l2_end(uint64_t, uint64_t);
extern void unretire_l2_start(uint64_t, uint64_t);
extern void unretire_l2_end(uint64_t, uint64_t);
extern void retire_l3_start(uint64_t, uint64_t);
extern void retire_l3_end(uint64_t, uint64_t);
extern void unretire_l3_start(uint64_t, uint64_t);
extern void unretire_l3_end(uint64_t, uint64_t);

extern void get_ecache_dtags_tl1(uint64_t, ch_cpu_logout_t *);
extern void get_l2_tag_tl1(uint64_t, uint64_t);
extern void get_l3_tag_tl1(uint64_t, uint64_t);


/* Macro for putting 64-bit onto stack as two 32-bit ints */
#define PRTF_64_TO_32(x)    (uint32_t)((x)>>32), (uint32_t)(x)


uint_t l2_flush_retries_done = 0;
int mem_cache_debug = 0x0;
uint64_t pattern = 0;
uint32_t retire_failures = 0;
uint32_t last_error_injected_way = 0;
uint8_t last_error_injected_bit = 0;
uint32_t last_l3tag_error_injected_way = 0;
uint8_t last_l3tag_error_injected_bit = 0;
uint32_t last_l2tag_error_injected_way = 0;
uint8_t last_l2tag_error_injected_bit = 0;
uint32_t last_l3data_error_injected_way = 0;
uint8_t last_l3data_error_injected_bit = 0;
uint32_t last_l2data_error_injected_way = 0;
uint8_t last_l2data_error_injected_bit = 0;

/* dev_ops and cb_ops entry point function declarations */
static int mem_cache_attach(dev_info_t *, ddi_attach_cmd_t);
static int mem_cache_detach(dev_info_t *, ddi_detach_cmd_t);
static int mem_cache_getinfo(dev_info_t *, ddi_info_cmd_t, void *,
    void **);
static int mem_cache_open(dev_t *, int, int, cred_t *);
static int mem_cache_close(dev_t, int, int, cred_t *);
static int mem_cache_ioctl_ops(int, int, cache_info_t *);
static int mem_cache_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

struct cb_ops mem_cache_cb_ops = {
    mem_cache_open,
    mem_cache_close,
    nodev,
    nodev,
    nodev,          /* dump */
    nodev,
    nodev,
    mem_cache_ioctl,
    nodev,          /* devmap */
    nodev,
    ddi_segmap,     /* segmap */
    nochpoll,
    ddi_prop_op,
    NULL,           /* for STREAMS drivers */
    D_NEW | D_MP    /* driver compatibility flag */
};

static struct dev_ops mem_cache_dev_ops = {
    DEVO_REV,       /* driver build version */
    0,              /* device reference count */
    mem_cache_getinfo,
    nulldev,
    nulldev,        /* probe */
    mem_cache_attach,
    mem_cache_detach,
    nulldev,        /* reset */
    &mem_cache_cb_ops,
    (struct bus_ops *)NULL,
    nulldev,        /* power */
    ddi_quiesce_not_needed, /* quiesce */
};

/*
 * Soft state
 */
struct mem_cache_softc {
    dev_info_t *dip;
    kmutex_t mutex;
};
#define getsoftc(inst) ((struct mem_cache_softc *)ddi_get_soft_state(statep,\
    (inst)))

/* module configuration stuff */
static void *statep;
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
    &mod_driverops,
    "mem_cache_driver (08/01/30) ",
    &mem_cache_dev_ops
};

static struct modlinkage modlinkage = {
    MODREV_1,
    &modldrv,
    0
};

extern const int _ncpu; /* Pull the kernel's global _ncpu definition */

int
_init(void)
{
    int e;

    if (e = ddi_soft_state_init(&statep, sizeof (struct mem_cache_softc),
        MAX_MEM_CACHE_INSTANCES)) {
        return (e);
    }

    if ((e = mod_install(&modlinkage)) != 0)
        ddi_soft_state_fini(&statep);

    return (e);
}

int
_fini(void)
{
    int e;

    if ((e = mod_remove(&modlinkage)) != 0)
        return (e);

    ddi_soft_state_fini(&statep);

    return (DDI_SUCCESS);
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
mem_cache_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
    int inst;
    int retval = DDI_SUCCESS;
    struct mem_cache_softc *softc;

    inst = getminor((dev_t)arg);

    switch (cmd) {
    case DDI_INFO_DEVT2DEVINFO:
        if ((softc = getsoftc(inst)) == NULL) {
            *result = (void *)NULL;
            retval = DDI_FAILURE;
        } else
            *result = (void *)softc->dip;
        break;

    case DDI_INFO_DEVT2INSTANCE:
        *result = (void *)((uintptr_t)inst);
        break;

    default:
        retval = DDI_FAILURE;
    }

    return (retval);
}

static int
mem_cache_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    int inst;
    struct mem_cache_softc *softc = NULL;
    char name[80];

    switch (cmd) {
    case DDI_ATTACH:
        inst = ddi_get_instance(dip);
        if (inst >= MAX_MEM_CACHE_INSTANCES) {
            cmn_err(CE_WARN, "attach failed, too many instances\n");
            return (DDI_FAILURE);
        }
        (void) sprintf(name, MEM_CACHE_DRIVER_NAME"%d", inst);
        if (ddi_create_priv_minor_node(dip, name,
            S_IFCHR,
            inst,
            DDI_PSEUDO,
            0, NULL, "all", 0640) ==
            DDI_FAILURE) {
            ddi_remove_minor_node(dip, NULL);
            return (DDI_FAILURE);
        }

        /* Allocate a soft state structure for this instance */
        if (ddi_soft_state_zalloc(statep, inst) != DDI_SUCCESS) {
            cmn_err(CE_WARN, " ddi_soft_state_zalloc() failed "
                "for inst %d\n", inst);
            goto attach_failed;
        }

        /* Setup soft state */
        softc = getsoftc(inst);
        softc->dip = dip;
        mutex_init(&softc->mutex, NULL, MUTEX_DRIVER, NULL);

        /* Create main environmental node */
        ddi_report_dev(dip);

        return (DDI_SUCCESS);

    case DDI_RESUME:
        return (DDI_SUCCESS);

    default:
        return (DDI_FAILURE);
    }

attach_failed:

    /* Free soft state, if allocated. remove minor node if added earlier */
    if (softc)
        ddi_soft_state_free(statep, inst);

    ddi_remove_minor_node(dip, NULL);

    return (DDI_FAILURE);
}

static int
mem_cache_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    int inst;
    struct mem_cache_softc *softc;

    switch (cmd) {
    case DDI_DETACH:
        inst = ddi_get_instance(dip);
        if ((softc = getsoftc(inst)) == NULL)
            return (ENXIO);

        /* Free the soft state and remove minor node added earlier */
        mutex_destroy(&softc->mutex);
        ddi_soft_state_free(statep, inst);
        ddi_remove_minor_node(dip, NULL);
        return (DDI_SUCCESS);

    case DDI_SUSPEND:
        return (DDI_SUCCESS);

    default:
        return (DDI_FAILURE);
    }
}

/*ARGSUSED*/
static int
mem_cache_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
    int inst = getminor(*devp);

    return (getsoftc(inst) == NULL ? ENXIO : 0);
}

/*ARGSUSED*/
static int
mem_cache_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
    int inst = getminor(dev);

    return (getsoftc(inst) == NULL ? ENXIO : 0);
}

static char *tstate_to_desc[] = {
    "Invalid",          /* 0 */
    "Shared",           /* 1 */
    "Exclusive",        /* 2 */
    "Owner",            /* 3 */
    "Modified",         /* 4 */
    "NA",               /* 5 */
    "Owner/Shared",     /* 6 */
    "Reserved(7)",      /* 7 */
};

static char *
tag_state_to_desc(uint8_t tagstate)
{
    return (tstate_to_desc[tagstate & CH_ECSTATE_MASK]);
}

void
print_l2_tag(uint64_t tag_addr, uint64_t l2_tag)
{
    uint64_t l2_subaddr;
    uint8_t l2_state;

    l2_subaddr = PN_L2TAG_TO_PA(l2_tag);
    l2_subaddr |= (tag_addr & PN_L2_INDEX_MASK);

    l2_state = (l2_tag & CH_ECSTATE_MASK);
    cmn_err(CE_CONT,
        "PA=0x%08x.%08x E$tag 0x%08x.%08x E$state %s\n",
        PRTF_64_TO_32(l2_subaddr),
        PRTF_64_TO_32(l2_tag),
        tag_state_to_desc(l2_state));
}

void
print_l2cache_line(ch_cpu_logout_t *clop)
{
    uint64_t l2_subaddr;
    int i, offset;
    uint8_t way, l2_state;
    ch_ec_data_t *ecp;


    for (way = 0; way < PN_CACHE_NWAYS; way++) {
        ecp = &clop->clo_data.chd_l2_data[way];
        l2_subaddr = PN_L2TAG_TO_PA(ecp->ec_tag);
        l2_subaddr |= (ecp->ec_idx & PN_L2_INDEX_MASK);

        l2_state = (ecp->ec_tag & CH_ECSTATE_MASK);
        cmn_err(CE_CONT,
            "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
            "E$tag 0x%08x.%08x E$state %s",
            way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(l2_subaddr),
            PRTF_64_TO_32(ecp->ec_tag),
            tag_state_to_desc(l2_state));
        /*
         * Dump out Ecache subblock data captured.
         * For Cheetah, we need to compute the ECC for each 16-byte
         * chunk and compare it with the captured chunk ECC to figure
         * out which chunk is bad.
         */
        for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
            ec_data_elm_t *ecdptr;
            uint64_t d_low, d_high;
            uint32_t ecc;
            int l2_data_idx = (i/2);

            offset = i * 16;
            ecdptr = &clop->clo_data.chd_l2_data[way].ec_data
                [l2_data_idx];
            if ((i & 1) == 0) {
                ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
                d_high = ecdptr->ec_d8[0];
                d_low = ecdptr->ec_d8[1];
            } else {
                ecc = ecdptr->ec_eccd & 0x1ff;
                d_high = ecdptr->ec_d8[2];
                d_low = ecdptr->ec_d8[3];
            }

            cmn_err(CE_CONT,
                "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
                " ECC 0x%03x",
                offset, PRTF_64_TO_32(d_high),
                PRTF_64_TO_32(d_low), ecc);
        }
    }   /* end of for way loop */
}

void
print_ecache_line(ch_cpu_logout_t *clop)
{
    uint64_t ec_subaddr;
    int i, offset;
    uint8_t way, ec_state;
    ch_ec_data_t *ecp;


    for (way = 0; way < PN_CACHE_NWAYS; way++) {
        ecp = &clop->clo_data.chd_ec_data[way];
        ec_subaddr = PN_L3TAG_TO_PA(ecp->ec_tag);
        ec_subaddr |= (ecp->ec_idx & PN_L3_TAG_RD_MASK);

        ec_state = (ecp->ec_tag & CH_ECSTATE_MASK);
        cmn_err(CE_CONT,
            "\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
            "E$tag 0x%08x.%08x E$state %s",
            way, (uint32_t)ecp->ec_idx, PRTF_64_TO_32(ec_subaddr),
            PRTF_64_TO_32(ecp->ec_tag),
            tag_state_to_desc(ec_state));
        /*
         * Dump out Ecache subblock data captured.
         * For Cheetah, we need to compute the ECC for each 16-byte
         * chunk and compare it with the captured chunk ECC to figure
         * out which chunk is bad.
         */
        for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
            ec_data_elm_t *ecdptr;
            uint64_t d_low, d_high;
            uint32_t ecc;
            int ec_data_idx = (i/2);

            offset = i * 16;
            ecdptr =
                &clop->clo_data.chd_ec_data[way].ec_data
                [ec_data_idx];
            if ((i & 1) == 0) {
                ecc = (ecdptr->ec_eccd >> 9) & 0x1ff;
                d_high = ecdptr->ec_d8[0];
                d_low = ecdptr->ec_d8[1];
            } else {
                ecc = ecdptr->ec_eccd & 0x1ff;
                d_high = ecdptr->ec_d8[2];
                d_low = ecdptr->ec_d8[3];
            }

            cmn_err(CE_CONT,
                "\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
                " ECC 0x%03x",
                offset, PRTF_64_TO_32(d_high),
                PRTF_64_TO_32(d_low), ecc);
        }
    }
}

static boolean_t
tag_addr_collides(uint64_t tag_addr, cache_id_t type,
    retire_func_t start_of_func, retire_func_t end_of_func)
{
    uint64_t start_paddr, end_paddr;
    char *type_str;

    start_paddr = va_to_pa((void *)start_of_func);
    end_paddr = va_to_pa((void *)end_of_func);
    switch (type) {
    case L2_CACHE_TAG:
    case L2_CACHE_DATA:
        tag_addr &= PN_L2_INDEX_MASK;
        start_paddr &= PN_L2_INDEX_MASK;
        end_paddr &= PN_L2_INDEX_MASK;
        type_str = "L2:";
        break;
    case L3_CACHE_TAG:
    case L3_CACHE_DATA:
        tag_addr &= PN_L3_TAG_RD_MASK;
        start_paddr &= PN_L3_TAG_RD_MASK;
        end_paddr &= PN_L3_TAG_RD_MASK;
        type_str = "L3:";
        break;
    default:
        /*
         * Should never reach here.
         */
        ASSERT(0);
        return (B_FALSE);
    }
    if ((tag_addr > (start_paddr - 0x100)) &&
        (tag_addr < (end_paddr + 0x100))) {
        if (mem_cache_debug & 0x1)
            cmn_err(CE_CONT,
                "%s collision detected tag_addr = 0x%08x"
                " start_paddr = 0x%08x end_paddr = 0x%08x\n",
                type_str, (uint32_t)tag_addr, (uint32_t)start_paddr,
                (uint32_t)end_paddr);
        return (B_TRUE);
    } else
        return (B_FALSE);
}

static uint64_t
get_tag_addr(cache_info_t *cache_info)
{
    uint64_t tag_addr, scratch;

    switch (cache_info->cache) {
    case L2_CACHE_TAG:
    case L2_CACHE_DATA:
        tag_addr = (uint64_t)(cache_info->index <<
            PN_CACHE_LINE_SHIFT);
        scratch = (uint64_t)(cache_info->way <<
            PN_L2_WAY_SHIFT);
        tag_addr |= scratch;
        tag_addr |= PN_L2_IDX_HW_ECC_EN;
        break;
    case L3_CACHE_TAG:
    case L3_CACHE_DATA:
        tag_addr = (uint64_t)(cache_info->index <<
            PN_CACHE_LINE_SHIFT);
        scratch = (uint64_t)(cache_info->way <<
            PN_L3_WAY_SHIFT);
        tag_addr |= scratch;
        tag_addr |= PN_L3_IDX_HW_ECC_EN;
        break;
    default:
        /*
         * Should never reach here.
         */
        ASSERT(0);
        return (uint64_t)(0);
    }
    return (tag_addr);
}

static int
mem_cache_ioctl_ops(int cmd, int mode, cache_info_t *cache_info)
{
    int ret_val = 0;
    uint64_t afar, tag_addr;
    ch_cpu_logout_t clop;
    uint64_t Lxcache_tag_data[PN_CACHE_NWAYS];
    int i, retire_retry_count;
    cpu_t *cpu;
    uint64_t tag_data;
    uint8_t state;

    switch (cache_info->cache) {
    case L2_CACHE_TAG:
    case L2_CACHE_DATA:
        if (cache_info->way >= PN_CACHE_NWAYS)
            return (EINVAL);
        if (cache_info->index >=
            (PN_L2_SET_SIZE/PN_L2_LINESIZE))
            return (EINVAL);
        break;
    case L3_CACHE_TAG:
    case L3_CACHE_DATA:
        if (cache_info->way >= PN_CACHE_NWAYS)
            return (EINVAL);
        if (cache_info->index >=
            (PN_L3_SET_SIZE/PN_L3_LINESIZE))
            return (EINVAL);
        break;
    default:
        return (ENOTSUP);
    }
    /*
     * Check if we have a valid cpu ID and that
     * CPU is ONLINE.
     */
    mutex_enter(&cpu_lock);
    cpu = cpu_get(cache_info->cpu_id);
    if ((cpu == NULL) || (!cpu_is_online(cpu))) {
        mutex_exit(&cpu_lock);
        return (EINVAL);
    }
    mutex_exit(&cpu_lock);
    switch (cmd) {
    case MEM_CACHE_RETIRE:
        if ((cache_info->bit & MSB_BIT_MASK) ==
            MSB_BIT_MASK) {
            pattern = ((uint64_t)1 <<
                (cache_info->bit & TAG_BIT_MASK));
        } else {
            pattern = 0;
        }
        tag_addr = get_tag_addr(cache_info);
        pattern |= PN_ECSTATE_NA;
        retire_retry_count = 0;
        affinity_set(cache_info->cpu_id);
        switch (cache_info->cache) {
        case L2_CACHE_DATA:
        case L2_CACHE_TAG:
retry_l2_retire:
            if (tag_addr_collides(tag_addr,
                cache_info->cache,
                retire_l2_start, retire_l2_end))
                ret_val = retire_l2_alternate(
                    tag_addr, pattern);
            else
                ret_val = retire_l2(tag_addr,
                    pattern);
            if (ret_val == 1) {
                /*
                 * cacheline was already in the retired
                 * state, so return success.
                 */
                ret_val = 0;
            }
            if (ret_val < 0) {
                cmn_err(CE_WARN,
                    "retire_l2() failed. index = 0x%x way %d. Retrying...\n",
                    cache_info->index,
                    cache_info->way);
                if (retire_retry_count >= 2) {
                    retire_failures++;
                    affinity_clear();
                    return (EIO);
                }
                retire_retry_count++;
                goto retry_l2_retire;
            }
            if (ret_val == 2)
                l2_flush_retries_done++;
            /*
             * We bind ourselves to a CPU and send a cross trap to
             * ourselves. On return from xt_one() we can rely on the
             * data in tag_data being filled in. Normally one would
             * do an xt_sync() to make sure that the CPU has completed
             * the xt_one() cross trap call.
             */
            xt_one(cache_info->cpu_id,
                (xcfunc_t *)(get_l2_tag_tl1),
                tag_addr, (uint64_t)(&tag_data));
            state = tag_data & CH_ECSTATE_MASK;
            if (state != PN_ECSTATE_NA) {
                retire_failures++;
                print_l2_tag(tag_addr,
                    tag_data);
                cmn_err(CE_WARN,
                    "L2 RETIRE:failed for index 0x%x way %d. Retrying...\n",
                    cache_info->index,
                    cache_info->way);
                if (retire_retry_count >= 2) {
                    retire_failures++;
                    affinity_clear();
                    return (EIO);
                }
                retire_retry_count++;
                goto retry_l2_retire;
            }
            break;
        case L3_CACHE_TAG:
        case L3_CACHE_DATA:
            if (tag_addr_collides(tag_addr,
                cache_info->cache,
                retire_l3_start, retire_l3_end))
                ret_val = retire_l3_alternate(
                    tag_addr, pattern);
            else
                ret_val = retire_l3(tag_addr,
                    pattern);
            if (ret_val == 1) {
                /*
                 * cacheline was already in the retired
                 * state, so return success.
                 */
                ret_val = 0;
            }
            if (ret_val < 0) {
                cmn_err(CE_WARN,
                    "retire_l3() failed. ret_val = %d index = 0x%x\n",
                    ret_val,
                    cache_info->index);
                retire_failures++;
                affinity_clear();
                return (EIO);
            }
            /*
             * We bind ourselves to a CPU and send a cross trap to
             * ourselves. On return from xt_one() we can rely on the
             * data in tag_data being filled in. Normally one would
             * do an xt_sync() to make sure that the CPU has completed
             * the xt_one() cross trap call.
             */
            xt_one(cache_info->cpu_id,
                (xcfunc_t *)(get_l3_tag_tl1),
                tag_addr, (uint64_t)(&tag_data));
            state = tag_data & CH_ECSTATE_MASK;
            if (state != PN_ECSTATE_NA) {
                cmn_err(CE_WARN,
                    "L3 RETIRE failed for index 0x%x\n",
                    cache_info->index);
                retire_failures++;
                affinity_clear();
                return (EIO);
            }

            break;
        }
        affinity_clear();
        break;
    case MEM_CACHE_UNRETIRE:
        tag_addr = get_tag_addr(cache_info);
        pattern = PN_ECSTATE_INV;
        affinity_set(cache_info->cpu_id);
        switch (cache_info->cache) {
        case L2_CACHE_DATA:
        case L2_CACHE_TAG:
            /*
             * Check if the index/way is in NA state
             */
            /*
             * We bind ourselves to a CPU and send a cross trap to
             * ourselves. On return from xt_one() we can rely on the
             * data in tag_data being filled in. Normally one would
             * do an xt_sync() to make sure that the CPU has completed
             * the xt_one() cross trap call.
             */
            xt_one(cache_info->cpu_id,
                (xcfunc_t *)(get_l2_tag_tl1),
                tag_addr, (uint64_t)(&tag_data));
            state = tag_data & CH_ECSTATE_MASK;
            if (state != PN_ECSTATE_NA) {
                affinity_clear();
                return (EINVAL);
            }
            if (tag_addr_collides(tag_addr,
                cache_info->cache,
                unretire_l2_start, unretire_l2_end))
                ret_val = unretire_l2_alternate(
                    tag_addr, pattern);
            else
                ret_val = unretire_l2(tag_addr,
                    pattern);
            if (ret_val != 0) {
                cmn_err(CE_WARN,
                    "unretire_l2() failed. ret_val = %d index = 0x%x\n",
                    ret_val,
                    cache_info->index);
                retire_failures++;
                affinity_clear();
                return (EIO);
            }
            break;
        case L3_CACHE_TAG:
        case L3_CACHE_DATA:
            /*
             * Check if the index/way is in NA state
             */
            /*
             * We bind ourselves to a CPU and send a cross trap to
             * ourselves. On return from xt_one() we can rely on the
             * data in tag_data being filled in. Normally one would
             * do an xt_sync() to make sure that the CPU has completed
             * the xt_one() cross trap call.
             */
            xt_one(cache_info->cpu_id,
                (xcfunc_t *)(get_l3_tag_tl1),
                tag_addr, (uint64_t)(&tag_data));
            state = tag_data & CH_ECSTATE_MASK;
            if (state != PN_ECSTATE_NA) {
                affinity_clear();
                return (EINVAL);
            }
            if (tag_addr_collides(tag_addr,
                cache_info->cache,
                unretire_l3_start, unretire_l3_end))
                ret_val = unretire_l3_alternate(
                    tag_addr, pattern);
            else
                ret_val = unretire_l3(tag_addr,
                    pattern);
            if (ret_val != 0) {
                cmn_err(CE_WARN,
                    "unretire_l3() failed. ret_val = %d index = 0x%x\n",
                    ret_val,
                    cache_info->index);
                affinity_clear();
                return (EIO);
            }
            break;
        }
        affinity_clear();
        break;
    case MEM_CACHE_ISRETIRED:
    case MEM_CACHE_STATE:
        return (ENOTSUP);
    case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
    case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
#endif
        /*
         * Read tag and data for all the ways at a given afar
         */
        afar = (uint64_t)(cache_info->index
            << PN_CACHE_LINE_SHIFT);
        affinity_set(cache_info->cpu_id);
        /*
         * We bind ourselves to a CPU and send a cross trap to
         * ourselves. On return from xt_one() we can rely on the
         * data in clop being filled in. Normally one would
         * do an xt_sync() to make sure that the CPU has completed
         * the xt_one() cross trap call.
         */
        xt_one(cache_info->cpu_id,
            (xcfunc_t *)(get_ecache_dtags_tl1),
            afar, (uint64_t)(&clop));
        switch (cache_info->cache) {
        case L2_CACHE_TAG:
            for (i = 0; i < PN_CACHE_NWAYS; i++) {
                Lxcache_tag_data[i] =
                    clop.clo_data.chd_l2_data[i].ec_tag;
            }
            last_error_injected_bit =
                last_l2tag_error_injected_bit;
            last_error_injected_way =
                last_l2tag_error_injected_way;
            break;
        case L3_CACHE_TAG:
            for (i = 0; i < PN_CACHE_NWAYS; i++) {
                Lxcache_tag_data[i] =
                    clop.clo_data.chd_ec_data[i].ec_tag;
            }
            last_error_injected_bit =
                last_l3tag_error_injected_bit;
            last_error_injected_way =
                last_l3tag_error_injected_way;
            break;
        default:
            affinity_clear();
            return (ENOTSUP);
        }   /* end of switch (cache) */
#ifdef DEBUG
        if (cmd == MEM_CACHE_READ_ERROR_INJECTED_TAGS) {
            pattern = ((uint64_t)1 <<
                last_error_injected_bit);
            /*
             * If the error bit is an ECC bit, we need to make
             * sure the ECC on all WAYS is corrupted.
             */
            if ((last_error_injected_bit >= 6) &&
                (last_error_injected_bit <= 14)) {
                for (i = 0; i < PN_CACHE_NWAYS; i++)
                    Lxcache_tag_data[i] ^=
                        pattern;
            } else
                Lxcache_tag_data
                    [last_error_injected_way] ^=
                    pattern;
        }
#endif
        if (ddi_copyout((caddr_t)Lxcache_tag_data,
            (caddr_t)cache_info->datap,
            sizeof (Lxcache_tag_data), mode)
            != DDI_SUCCESS) {
            affinity_clear();
            return (EFAULT);
        }
        affinity_clear();
        break;  /* end of READ_TAGS */
    default:
        return (ENOTSUP);
    }   /* end of switch (cmd) */
    return (ret_val);
}

/*ARGSUSED*/
static int
mem_cache_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
    int inst;
    struct mem_cache_softc *softc;
    cache_info_t cache_info;
    cache_info32_t cache_info32;
    int ret_val;
    int is_panther;

    inst = getminor(dev);
    if ((softc = getsoftc(inst)) == NULL)
        return (ENXIO);

    mutex_enter(&softc->mutex);

#ifdef _MULTI_DATAMODEL
    if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
        if (ddi_copyin((cache_info32_t *)arg, &cache_info32,
            sizeof (cache_info32), mode) != DDI_SUCCESS) {
            mutex_exit(&softc->mutex);
            return (EFAULT);
        }
        cache_info.cache = cache_info32.cache;
        cache_info.index = cache_info32.index;
        cache_info.way = cache_info32.way;
        cache_info.cpu_id = cache_info32.cpu_id;
        cache_info.bit = cache_info32.bit;
        cache_info.datap = (void *)((uint64_t)cache_info32.datap);
    } else
#endif
    if (ddi_copyin((cache_info_t *)arg, &cache_info,
        sizeof (cache_info), mode) != DDI_SUCCESS) {
        mutex_exit(&softc->mutex);
        return (EFAULT);
    }

    if ((cache_info.cpu_id < 0) || (cache_info.cpu_id >= _ncpu)) {
        mutex_exit(&softc->mutex);
        return (EINVAL);
    }

    is_panther = IS_PANTHER(cpunodes[cache_info.cpu_id].implementation);
    if (!is_panther) {
        mutex_exit(&softc->mutex);
        return (ENOTSUP);
    }
    switch (cmd) {
    case MEM_CACHE_RETIRE:
    case MEM_CACHE_UNRETIRE:
        if ((mode & FWRITE) == 0) {
            ret_val = EBADF;
            break;
        }
        /*FALLTHROUGH*/
    case MEM_CACHE_ISRETIRED:
    case MEM_CACHE_STATE:
    case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
    case MEM_CACHE_READ_ERROR_INJECTED_TAGS:
#endif
        ret_val = mem_cache_ioctl_ops(cmd, mode, &cache_info);
        break;
    default:
        ret_val = ENOTSUP;
        break;
    }
    mutex_exit(&softc->mutex);
    return (ret_val);
}
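
/*
 * Illustrative userland usage (editor's sketch, not part of the original
 * driver): reading the L2 tags for one cache index on a given CPU through
 * the MEM_CACHE_READ_TAGS ioctl. The cache_info_t fields and the ioctl
 * command are taken from this file; the device path "/dev/mem_cache0", the
 * userland visibility of <sys/mem_cache.h> and <sys/mem_cache_ioctl.h>, and
 * the four-way tag buffer size (PN_CACHE_NWAYS) are assumptions to verify
 * against the installed headers and the minor node created at attach time.
 *
 *	#include <sys/types.h>
 *	#include <sys/mem_cache.h>
 *	#include <sys/mem_cache_ioctl.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	#define	EXAMPLE_NWAYS	4	(assumed value of PN_CACHE_NWAYS)
 *
 *	int
 *	read_l2_tags(int cpu_id, uint32_t index, uint64_t tags[EXAMPLE_NWAYS])
 *	{
 *		cache_info_t ci;
 *		int fd, ret;
 *
 *		fd = open("/dev/mem_cache0", O_RDONLY);
 *		if (fd < 0)
 *			return (-1);
 *		ci.cache = L2_CACHE_TAG;
 *		ci.cpu_id = cpu_id;	(must be an online Panther CPU)
 *		ci.index = index;	(cache line index, not a byte address)
 *		ci.way = 0;		(READ_TAGS returns all ways regardless)
 *		ci.bit = 0;
 *		ci.datap = tags;	(driver ddi_copyout()s one tag per way)
 *		ret = ioctl(fd, MEM_CACHE_READ_TAGS, &ci);
 *		(void) close(fd);
 *		return (ret);
 *	}
 */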