/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * IOSRAM leaf driver to SBBC nexus driver.  This driver is used
 * by Starcat Domain SW to read/write from/to the IO sram.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/obpdefs.h>
#include <sys/promif.h>
#include <sys/prom_plat.h>
#include <sys/cmn_err.h>
#include <sys/conf.h>		/* req. by dev_ops flags MTSAFE etc. */
#include <sys/modctl.h>		/* for modldrv */
#include <sys/stat.h>		/* ddi_create_minor_node S_IFCHR */
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/debug.h>

#include <sys/axq.h>
#include <sys/iosramreg.h>
#include <sys/iosramio.h>
#include <sys/iosramvar.h>


#if defined(DEBUG)
int	iosram_debug = 0;
static void iosram_dprintf(const char *fmt, ...);
#define	DPRINTF(level, arg) \
	{ if (iosram_debug >= level) iosram_dprintf arg; }
#else	/* !DEBUG */
#define	DPRINTF(level, arg)
#endif	/* !DEBUG */
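
/*
 * Debugging note (illustrative): on a DEBUG build the trace level above is a
 * plain module global, so it can be raised without recompiling, e.g. via
 * /etc/system:
 *
 *	set iosram:iosram_debug = 2
 *
 * Level 2 additionally enables the more verbose DPRINTF(2, ...) traces, such
 * as those in the hardware interrupt path below.
 */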


/*
 * IOSRAM module global state
 */
static void	*iosramsoft_statep;	/* IOSRAM state pointer */
static kmutex_t	iosram_mutex;		/* mutex lock */

static iosram_chunk_t	*chunks = NULL;		/* array of TOC entries */
static int	nchunks = 0;			/* # of TOC entries */
static iosram_chunk_t	*iosram_hashtab[IOSRAM_HASHSZ];	/* key hash table */

static kcondvar_t	iosram_tswitch_wait;	/* tunnel switch wait cv */
static int	iosram_tswitch_wakeup = 0;	/* flag indicating one or */
						/* more threads waiting on */
						/* iosram_tswitch_wait cv */
static int	iosram_tswitch_active = 0;	/* tunnel switch active flag */
static int	iosram_tswitch_aborted = 0;	/* tunnel switch abort flag */
static clock_t	iosram_tswitch_tstamp = 0;	/* lbolt of last tswitch end */
static kcondvar_t	iosram_rw_wait;		/* read/write wait cv */
static int	iosram_rw_wakeup = 0;		/* flag indicating one or */
						/* more threads waiting on */
						/* iosram_rw_wait cv */
static int	iosram_rw_active = 0;		/* # threads accessing IOSRAM */
#if defined(DEBUG)
static int	iosram_rw_active_max = 0;
#endif

static struct iosramsoft	*iosram_new_master = NULL; /* new tunnel target */
static struct iosramsoft	*iosram_master = NULL;	   /* master tunnel */
static struct iosramsoft	*iosram_instances = NULL;  /* list of softstates */

static ddi_acc_handle_t	iosram_handle = NULL;	/* master IOSRAM map handle */

static void	(*iosram_hdrchange_handler)() = NULL;

#if IOSRAM_STATS
static struct iosram_stat	iosram_stats;	/* IOSRAM statistics */
static void	iosram_print_stats();		/* forward declaration */
#endif	/* IOSRAM_STATS */


#if IOSRAM_LOG
kmutex_t	iosram_log_mutex;
int		iosram_log_level = 1;
int		iosram_log_print = 0;		/* print log when recorded */
uint32_t	iosram_logseq;
iosram_log_t	iosram_logbuf[IOSRAM_MAXLOG];
static void	iosram_print_log(int cnt);	/* forward declaration */
#endif	/* IOSRAM_LOG */


/* driver entry point fn definitions */
static int	iosram_open(dev_t *, int, int, cred_t *);
static int	iosram_close(dev_t, int, int, cred_t *);
static int	iosram_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/* configuration entry point fn definitions */
static int	iosram_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int	iosram_attach(dev_info_t *, ddi_attach_cmd_t);
static int	iosram_detach(dev_info_t *, ddi_detach_cmd_t);


/* forward declarations */
static iosram_chunk_t	*iosram_find_chunk(uint32_t key);
static void	iosram_set_master(struct iosramsoft *softp);
static int	iosram_is_chosen(struct iosramsoft *softp);
static int	iosram_tunnel_capable(struct iosramsoft *softp);
static int	iosram_read_toc(struct iosramsoft *softp);
static void	iosram_init_hashtab(void);
static void	iosram_update_addrs(struct iosramsoft *softp);

static int	iosram_setup_map(struct iosramsoft *softp);
static void	iosram_remove_map(struct iosramsoft *softp);
static int	iosram_add_intr(iosramsoft_t *);
static int	iosram_remove_intr(iosramsoft_t *);

static void	iosram_add_instance(struct iosramsoft *softp);
static void	iosram_remove_instance(int instance);
static int	iosram_switch_tunnel(iosramsoft_t *softp);
static void	iosram_abort_tswitch();

#if defined(DEBUG)
/* forward declarations for debugging */
static int	iosram_get_keys(iosram_toc_entry_t *buf, uint32_t *len);
static void	iosram_print_cback();
static void	iosram_print_state(int);
static void	iosram_print_flags();
#endif



/*
 * cb_ops
 */
static struct cb_ops iosram_cb_ops = {
	iosram_open,		/* cb_open */
	iosram_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	iosram_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	(int)(D_NEW | D_MP | D_HOTPLUG)	/* cb_flag */
};

/*
 * Declare ops vectors for auto configuration.
 */
struct dev_ops iosram_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	iosram_getinfo,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	iosram_attach,		/* devo_attach */
	iosram_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&iosram_cb_ops,		/* devo_cb_ops */
	(struct bus_ops *)NULL,	/* devo_bus_ops */
	nulldev			/* devo_power */
};

/*
 * Loadable module support.
 */
extern struct mod_ops mod_driverops;

static struct modldrv iosrammodldrv = {
	&mod_driverops,		/* type of module - driver */
	"IOSRAM Leaf driver v%I%",
	&iosram_ops,
};

static struct modlinkage iosrammodlinkage = {
	MODREV_1,
	&iosrammodldrv,
	NULL
};


int
_init(void)
{
	int	error;
	int	i;

	mutex_init(&iosram_mutex, NULL, MUTEX_DRIVER, (void *)NULL);
	cv_init(&iosram_tswitch_wait, NULL, CV_DRIVER, NULL);
	cv_init(&iosram_rw_wait, NULL, CV_DRIVER, NULL);
#if defined(IOSRAM_LOG)
	mutex_init(&iosram_log_mutex, NULL, MUTEX_DRIVER, (void *)NULL);
#endif

	DPRINTF(1, ("_init:IOSRAM\n"));

	for (i = 0; i < IOSRAM_HASHSZ; i++) {
		iosram_hashtab[i] = NULL;
	}

	if ((error = ddi_soft_state_init(&iosramsoft_statep,
	    sizeof (struct iosramsoft), 1)) != 0) {
		goto failed;
	}
	if ((error = mod_install(&iosrammodlinkage)) != 0) {
		ddi_soft_state_fini(&iosramsoft_statep);
		goto failed;
	}

	IOSRAMLOG(0, "_init:IOSRAM ... error:%d statep:%p\n",
	    error, iosramsoft_statep, NULL, NULL);

	return (error);

failed:
	cv_destroy(&iosram_tswitch_wait);
	cv_destroy(&iosram_rw_wait);
	mutex_destroy(&iosram_mutex);
#if defined(IOSRAM_LOG)
	mutex_destroy(&iosram_log_mutex);
#endif
	IOSRAMLOG(0, "_init:IOSRAM ... error:%d statep:%p\n",
	    error, iosramsoft_statep, NULL, NULL);

	return (error);
}


int
_fini(void)
{
#ifndef	DEBUG
	return (EBUSY);
#else	/* !DEBUG */
	int	error;

	if ((error = mod_remove(&iosrammodlinkage)) == 0) {
		ddi_soft_state_fini(&iosramsoft_statep);

		cv_destroy(&iosram_tswitch_wait);
		cv_destroy(&iosram_rw_wait);
		mutex_destroy(&iosram_mutex);
#if defined(IOSRAM_LOG)
		mutex_destroy(&iosram_log_mutex);
#endif
	}
	DPRINTF(1, ("_fini:IOSRAM error:%d\n", error));

	return (error);
#endif	/* !DEBUG */
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&iosrammodlinkage, modinfop));
}


static int
iosram_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			instance;
	int			propval;
	int			length;
	char			name[32];
	struct iosramsoft	*softp;

	instance = ddi_get_instance(dip);

	DPRINTF(1, ("iosram(%d): attach dip:%p\n", instance));

	IOSRAMLOG(1, "ATTACH: dip:%p instance %d ... start\n",
	    dip, instance, NULL, NULL);
	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		if (!(softp = ddi_get_soft_state(iosramsoft_statep,
		    instance))) {
			return (DDI_FAILURE);
		}
		mutex_enter(&iosram_mutex);
		mutex_enter(&softp->intr_mutex);
		if (!softp->suspended) {
			mutex_exit(&softp->intr_mutex);
			mutex_exit(&iosram_mutex);
			return (DDI_FAILURE);
		}
		softp->suspended = 0;

		/*
		 * enable SBBC interrupts if SBBC is mapped in
		 * restore the value saved during detach
		 */
		if (softp->sbbc_region) {
			ddi_put32(softp->sbbc_handle,
			    &(softp->sbbc_region->int_enable.reg),
			    softp->int_enable_sav);
		}

		/*
		 * Trigger soft interrupt handler to process any pending
		 * interrupts.
		 */
		if (softp->intr_pending && !softp->intr_busy &&
		    (softp->softintr_id != NULL)) {
			ddi_trigger_softintr(softp->softintr_id);
		}

		mutex_exit(&softp->intr_mutex);
		mutex_exit(&iosram_mutex);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(iosramsoft_statep, instance) != 0) {
		return (DDI_FAILURE);
	}

	if ((softp = ddi_get_soft_state(iosramsoft_statep, instance)) == NULL) {
		return (DDI_FAILURE);
	}
	softp->dip = dip;
	softp->instance = instance;
	softp->sbbc_region = NULL;

	/*
	 * If this instance is not tunnel capable, we don't attach it.
	 */
	if (iosram_tunnel_capable(softp) == 0) {
		DPRINTF(1, ("iosram(%d): not tunnel_capable\n", instance));
		IOSRAMLOG(1, "ATTACH(%d): not tunnel_capable\n", instance, NULL,
		    NULL, NULL);
		goto attach_fail;
	}

	/*
	 * Need to create an "interrupt-priorities" property to define the PIL
	 * to be used with the interrupt service routine.
	 */
	if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "interrupt-priorities", &length) == DDI_PROP_NOT_FOUND) {
		DPRINTF(1, ("iosram(%d): creating interrupt priority property",
		    instance));
		propval = IOSRAM_PIL;
		if (ddi_prop_create(DDI_DEV_T_NONE, dip, 0,
		    "interrupt-priorities", (caddr_t)&propval, sizeof (propval))
		    != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN,
			    "iosram_attach: failed to create property");
			goto attach_fail;
		}
	}

	/*
	 * Get interrupt block cookies and initialize per-instance mutexes
	 */
	if (ddi_get_iblock_cookie(softp->dip, 0, &softp->real_iblk)
	    != DDI_SUCCESS) {
		IOSRAMLOG(1, "ATTACH(%d): cannot get soft intr cookie\n",
		    instance, NULL, NULL, NULL);
		goto attach_fail;
	}
	mutex_init(&softp->intr_mutex, NULL, MUTEX_DRIVER,
	    (void *)softp->real_iblk);

	/*
	 * Add this instance to the iosram_instances list so that it can be
	 * used as a tunnel target in the future.
	 */
	mutex_enter(&iosram_mutex);
	softp->state = IOSRAM_STATE_INIT;
	iosram_add_instance(softp);

	/*
	 * If this is the chosen IOSRAM and there is no master IOSRAM yet, then
	 * let's set this instance as the master.
	 */
	if (iosram_master == NULL && iosram_is_chosen(softp)) {
		iosram_switch_tunnel(softp);

		/*
		 * XXX Do we need to panic if unable to setup master IOSRAM?
		 */
		if (iosram_master == NULL) {
			cmn_err(CE_WARN,
			    "iosram(%d): can't setup master tunnel\n",
			    instance);
			softp->state = 0;
			iosram_remove_instance(softp->instance);
			mutex_exit(&iosram_mutex);
			mutex_destroy(&softp->intr_mutex);
			goto attach_fail;
		}
	}

	mutex_exit(&iosram_mutex);

	/*
	 * Create minor node
	 */
	(void) sprintf(name, "iosram%d", instance);
	if (ddi_create_minor_node(dip, name, S_IFCHR, instance, NULL, NULL) ==
	    DDI_FAILURE) {
		/*
		 * The minor node seems to be needed only for debugging
		 * purposes, so there is no need to fail this attach request.
		 * Simply print a message out.
		 */
		cmn_err(CE_NOTE, "!iosram(%d): can't create minor node\n",
		    instance);
	}
	ddi_report_dev(dip);

	DPRINTF(1, ("iosram_attach(%d): success.\n", instance));
	IOSRAMLOG(1, "ATTACH: dip:%p instance:%d ... success softp:%p\n",
	    dip, instance, softp, NULL);

	return (DDI_SUCCESS);

attach_fail:
	DPRINTF(1, ("iosram_attach(%d):failed.\n", instance));
	IOSRAMLOG(1, "ATTACH: dip:%p instance:%d ... failed.\n",
	    dip, instance, NULL, NULL);

	ddi_soft_state_free(iosramsoft_statep, instance);
	return (DDI_FAILURE);
}


static int
iosram_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int			instance;
	struct iosramsoft	*softp;

	instance = ddi_get_instance(dip);
	if (!(softp = ddi_get_soft_state(iosramsoft_statep, instance))) {
		return (DDI_FAILURE);
	}

	IOSRAMLOG(1, "DETACH: dip:%p instance %d softp:%p\n",
	    dip, instance, softp, NULL);

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		mutex_enter(&iosram_mutex);
		mutex_enter(&softp->intr_mutex);
		if (softp->suspended) {
			mutex_exit(&softp->intr_mutex);
			mutex_exit(&iosram_mutex);
			return (DDI_FAILURE);
		}
		softp->suspended = 1;
		/*
		 * Disable SBBC interrupts if SBBC is mapped in
		 */
		if (softp->sbbc_region) {
			/* save current interrupt enable register */
			softp->int_enable_sav = ddi_get32(softp->sbbc_handle,
			    &(softp->sbbc_region->int_enable.reg));
			ddi_put32(softp->sbbc_handle,
			    &(softp->sbbc_region->int_enable.reg), 0x0);
		}
		mutex_exit(&softp->intr_mutex);
		mutex_exit(&iosram_mutex);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}


	/*
	 * Indicate that this instance is being detached so that this instance
	 * does not become a target for a tunnel switch in the future.
	 */
	mutex_enter(&iosram_mutex);
	softp->state |= IOSRAM_STATE_DETACH;

	/*
	 * If this instance is currently the master or the target of the tunnel
	 * switch, then we need to wait and switch tunnel, if necessary.
	 */
	if (iosram_master == softp || (softp->state & IOSRAM_STATE_TSWITCH)) {
		mutex_exit(&iosram_mutex);
		iosram_switchfrom(instance);
		mutex_enter(&iosram_mutex);
	}

	/*
	 * If the tunnel switch is in progress and we are the master or target
	 * of tunnel relocation, then we can't detach this instance right now.
	 */
	if (softp->state & IOSRAM_STATE_TSWITCH) {
		softp->state &= ~IOSRAM_STATE_DETACH;
		mutex_exit(&iosram_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * We can't allow the master IOSRAM to be detached, as we won't be able
	 * to communicate otherwise.
	 */
	if (iosram_master == softp) {
		softp->state &= ~IOSRAM_STATE_DETACH;
		mutex_exit(&iosram_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * Now remove our instance from the iosram_instances list.
	 */
	iosram_remove_instance(instance);
	mutex_exit(&iosram_mutex);

	/*
	 * Instances should only ever be mapped if they are the master and/or
	 * participating in a tunnel switch.  Neither should be the case here.
	 */
	ASSERT((softp->state & IOSRAM_STATE_MAPPED) == 0);

	/*
	 * Destroy per-instance mutexes
	 */
	mutex_destroy(&softp->intr_mutex);

	ddi_remove_minor_node(dip, NULL);

	/*
	 * Finally remove our soft state structure
	 */
	ddi_soft_state_free(iosramsoft_statep, instance);

	return (DDI_SUCCESS);
}


/* ARGSUSED0 */
static int
iosram_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result)
{
	dev_t			dev = (dev_t)arg;
	struct iosramsoft	*softp;
	int			instance, ret;

	instance = getminor(dev);

	IOSRAMLOG(2, "GETINFO: dip:%x instance %d dev:%x infocmd:%x\n",
	    dip, instance, dev, infocmd);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		softp = ddi_get_soft_state(iosramsoft_statep, instance);
		if (softp == NULL) {
			*result = NULL;
			ret = DDI_FAILURE;
		} else {
			*result = softp->dip;
			ret = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		ret = DDI_SUCCESS;
		break;
	default:
		ret = DDI_FAILURE;
		break;
	}

	return (ret);
}


/*ARGSUSED1*/
static int
iosram_open(dev_t *dev, int flag, int otype, cred_t *credp)
{
	struct iosramsoft	*softp;
	int			instance;

	instance = getminor(*dev);
	softp = ddi_get_soft_state(iosramsoft_statep, instance);

	if (softp == NULL) {
		return (ENXIO);
	}

	IOSRAMLOG(1, "OPEN: dev:%p otype:%x ... instance:%d softp:%p\n",
	    *dev, otype, softp->instance, softp);

	return (0);
}


/*ARGSUSED1*/
static int
iosram_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	struct iosramsoft	*softp;
	int			instance;

	instance = getminor(dev);
	softp = ddi_get_soft_state(iosramsoft_statep, instance);
	if (softp == NULL) {
		return (ENXIO);
	}

	IOSRAMLOG(1, "CLOSE: dev:%p otype:%x ... instance:%d softp:%p\n",
	    dev, otype, softp->instance, softp);

	return (0);
}


int
iosram_rd(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr)
{
	iosram_chunk_t		*chunkp;
	uint32_t		chunk_len;
	uint8_t			*iosramp;
	ddi_acc_handle_t	handle;
	int			boff;
	union {
		uchar_t	cbuf[UINT32SZ];
		uint32_t  data;
	} word;

	int			error = 0;
	uint8_t			*buf = (uint8_t *)dptr;

	/*
	 * We try to read from the IOSRAM using double word or word access
	 * provided both "off" and "buf" are (or can be) double word or word
	 * aligned.  Otherwise, we try to align the "off" to a word boundary
	 * and then try to read data from the IOSRAM using word access, but
	 * store it into the buf buffer using byte access.
	 *
	 * If the leading/trailing portion of the IOSRAM data is not word
	 * aligned, it will always be copied using byte access.
	 */
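	/*
	 * Worked example (illustrative): for an 11-byte read where both the
	 * IOSRAM address and "buf" end in ...2, the code below copies 2
	 * leading bytes with ddi_rep_get8() to reach a word boundary, bulk
	 * copies 2 words with ddi_rep_get32() (ddi_rep_get64() would be used
	 * instead if both pointers had become double word aligned), and then
	 * copies the 1 trailing byte with ddi_rep_get8().
	 */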
	IOSRAMLOG(1, "RD: key: 0x%x off:%x len:%x buf:%p\n",
	    key, off, len, buf);

	/*
	 * Acquire lock and look for the requested chunk.  If it exists, make
	 * sure the requested read is within the chunk's bounds and no tunnel
	 * switch is active.
	 */
	mutex_enter(&iosram_mutex);
	chunkp = iosram_find_chunk(key);
	chunk_len = (chunkp != NULL) ? chunkp->toc_data.len : 0;

	if (iosram_master == NULL) {
		error = EIO;
	} else if (chunkp == NULL) {
		error = EINVAL;
	} else if ((off >= chunk_len) || (len > chunk_len) ||
	    ((off + len) > chunk_len)) {
		error = EMSGSIZE;
	} else if (iosram_tswitch_active) {
		error = EAGAIN;
	}

	if (error) {
		mutex_exit(&iosram_mutex);
		return (error);
	}

	/*
	 * Bump the reference count to indicate the number of threads accessing
	 * the IOSRAM and release the lock.
	 */
	iosram_rw_active++;
#if defined(DEBUG)
	if (iosram_rw_active > iosram_rw_active_max) {
		iosram_rw_active_max = iosram_rw_active;
	}
#endif
	mutex_exit(&iosram_mutex);

	IOSRAM_STAT(read);
	IOSRAM_STAT_ADD(bread, len);

	/* Get starting address and map handle */
	iosramp = chunkp->basep + off;
	handle = iosram_handle;

	/*
	 * Align the off to a word boundary and then try reading/writing data
	 * using double word or word access.
	 */
	if ((boff = ((uintptr_t)iosramp & (UINT32SZ - 1))) != 0) {
		int cnt = UINT32SZ - boff;

		if (cnt > len) {
			cnt = len;
		}
		IOSRAMLOG(2,
		    "RD: align rep_get8(buf:%p sramp:%p cnt:%x) len:%x\n",
		    buf, iosramp, cnt, len);
		ddi_rep_get8(handle, buf, iosramp, cnt, DDI_DEV_AUTOINCR);
		buf += cnt;
		iosramp += cnt;
		len -= cnt;
	}

	if ((len >= UINT64SZ) &&
	    ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT64SZ - 1)) == 0)) {
		/*
		 * Both source and destination are double word aligned
		 */
		int cnt = len/UINT64SZ;

		IOSRAMLOG(2,
		    "RD: rep_get64(buf:%p sramp:%p cnt:%x) len:%x\n",
		    buf, iosramp, cnt, len);
		ddi_rep_get64(handle, (uint64_t *)buf, (uint64_t *)iosramp,
		    cnt, DDI_DEV_AUTOINCR);
		iosramp += cnt * UINT64SZ;
		buf += cnt * UINT64SZ;
		len -= cnt * UINT64SZ;

		/*
		 * read remaining data using word and byte access
		 */
		if (len >= UINT32SZ) {
			IOSRAMLOG(2,
			    "RD: get32(buf:%p sramp:%p) len:%x\n",
			    buf, iosramp, len, NULL);
			*(uint32_t *)buf = ddi_get32(handle,
			    (uint32_t *)iosramp);
			iosramp += UINT32SZ;
			buf += UINT32SZ;
			len -= UINT32SZ;
		}

		if (len != 0) {
			ddi_rep_get8(handle, buf, iosramp, len,
			    DDI_DEV_AUTOINCR);
		}
	} else if ((len >= UINT32SZ) &&
	    ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT32SZ - 1)) == 0)) {
		/*
		 * Both source and destination are word aligned
		 */
		int cnt = len/UINT32SZ;

		IOSRAMLOG(2,
		    "RD: rep_get32(buf:%p sramp:%p cnt:%x) len:%x\n",
		    buf, iosramp, cnt, len);
		ddi_rep_get32(handle, (uint32_t *)buf, (uint32_t *)iosramp,
		    cnt, DDI_DEV_AUTOINCR);
		iosramp += cnt * UINT32SZ;
		buf += cnt * UINT32SZ;
		len -= cnt * UINT32SZ;

		/*
		 * copy the remainder using byte access
		 */
		if (len != 0) {
			ddi_rep_get8(handle, buf, iosramp, len,
			    DDI_DEV_AUTOINCR);
		}
	} else if (len != 0) {
		/*
		 * We know that the "off" (i.e. iosramp) is at least word
		 * aligned.  We need to read the IOSRAM a word at a time and
		 * copy it a byte at a time.
		 */
		ASSERT(((uintptr_t)iosramp & (UINT32SZ - 1)) == 0);

		IOSRAMLOG(2,
		    "RD: unaligned get32(buf:%p sramp:%p) len:%x\n",
		    buf, iosramp, len, NULL);
		for (; len >= UINT32SZ; len -= UINT32SZ, iosramp += UINT32SZ) {
			word.data = ddi_get32(handle, (uint32_t *)iosramp);
			*buf++ = word.cbuf[0];
			*buf++ = word.cbuf[1];
			*buf++ = word.cbuf[2];
			*buf++ = word.cbuf[3];
		}

		/*
		 * copy the remaining data using byte access
		 */
		if (len != 0) {
			ddi_rep_get8(handle, buf, iosramp, len,
			    DDI_DEV_AUTOINCR);
		}
	}

	/*
	 * Reacquire the mutex lock, decrement the refcnt, and if the refcnt is
	 * now 0 and any threads are waiting for r/w activity to complete, wake
	 * them up.
	 */
	mutex_enter(&iosram_mutex);
	ASSERT(iosram_rw_active > 0);

	if ((--iosram_rw_active == 0) && iosram_rw_wakeup) {
		iosram_rw_wakeup = 0;
		cv_broadcast(&iosram_rw_wait);
	}
	mutex_exit(&iosram_mutex);

	return (error);
}


/*
 * _iosram_write(key, off, len, dptr, force)
 *	Internal common routine to write to the IOSRAM.
 */
static int
_iosram_write(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr, int force)
{
	iosram_chunk_t		*chunkp;
	uint32_t		chunk_len;
	uint8_t			*iosramp;
	ddi_acc_handle_t	handle;
	int			boff;
	union {
		uint8_t	cbuf[UINT32SZ];
		uint32_t data;
	} word;

	int			error = 0;
	uint8_t			*buf = (uint8_t *)dptr;

	/*
	 * We try to write to the IOSRAM using double word or word access
	 * provided both "off" and "buf" are (or can be) double word or word
	 * aligned.  Otherwise, we try to align the "off" to a word boundary
	 * and then try to write data to the IOSRAM using word access, but read
	 * data from the buf buffer using byte access.
	 *
	 * If the leading/trailing portion of the IOSRAM data is not word
	 * aligned, it will always be written using byte access.
	 */
	IOSRAMLOG(1, "WR: key: 0x%x off:%x len:%x buf:%p\n",
	    key, off, len, buf);

	/*
	 * Acquire lock and look for the requested chunk.  If it exists, make
	 * sure the requested write is within the chunk's bounds and no tunnel
	 * switch is active.
	 */
	mutex_enter(&iosram_mutex);
	chunkp = iosram_find_chunk(key);
	chunk_len = (chunkp != NULL) ? chunkp->toc_data.len : 0;

	if (iosram_master == NULL) {
		error = EIO;
	} else if (chunkp == NULL) {
		error = EINVAL;
	} else if ((off >= chunk_len) || (len > chunk_len) ||
	    ((off + len) > chunk_len)) {
		error = EMSGSIZE;
	} else if (iosram_tswitch_active && !force) {
		error = EAGAIN;
	}

	if (error) {
		mutex_exit(&iosram_mutex);
		return (error);
	}

	/*
	 * If this is a forced write and there's a tunnel switch in progress,
	 * abort the switch.
	 */
	if (iosram_tswitch_active && force) {
		cmn_err(CE_NOTE, "!iosram: Aborting tswitch on force_write");
		iosram_abort_tswitch();
	}

	/*
	 * Bump the reference count to indicate the number of threads accessing
	 * the IOSRAM and release the lock.
	 */
	iosram_rw_active++;
#if defined(DEBUG)
	if (iosram_rw_active > iosram_rw_active_max) {
		iosram_rw_active_max = iosram_rw_active;
	}
#endif
	mutex_exit(&iosram_mutex);


	IOSRAM_STAT(write);
	IOSRAM_STAT_ADD(bwrite, len);

	/* Get starting address and map handle */
	iosramp = chunkp->basep + off;
	handle = iosram_handle;

	/*
	 * Align the off to a word boundary and then try reading/writing data
	 * using double word or word access.
	 */
	if ((boff = ((uintptr_t)iosramp & (UINT32SZ - 1))) != 0) {
		int cnt = UINT32SZ - boff;

		if (cnt > len) {
			cnt = len;
		}
		IOSRAMLOG(2,
		    "WR: align rep_put8(buf:%p sramp:%p cnt:%x) len:%x\n",
		    buf, iosramp, cnt, len);
		ddi_rep_put8(handle, buf, iosramp, cnt, DDI_DEV_AUTOINCR);
		buf += cnt;
		iosramp += cnt;
		len -= cnt;
	}

	if ((len >= UINT64SZ) &&
	    ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT64SZ - 1)) == 0)) {
		/*
		 * Both source and destination are double word aligned
		 */
		int cnt = len/UINT64SZ;

		IOSRAMLOG(2,
		    "WR: rep_put64(buf:%p sramp:%p cnt:%x) len:%x\n",
		    buf, iosramp, cnt, len);
		ddi_rep_put64(handle, (uint64_t *)buf, (uint64_t *)iosramp,
		    cnt, DDI_DEV_AUTOINCR);
		iosramp += cnt * UINT64SZ;
		buf += cnt * UINT64SZ;
		len -= cnt * UINT64SZ;

		/*
		 * Copy the remaining data using word & byte access
		 */
		if (len >= UINT32SZ) {
			IOSRAMLOG(2,
			    "WR: put32(buf:%p sramp:%p) len:%x\n", buf, iosramp,
			    len, NULL);
			ddi_put32(handle, (uint32_t *)iosramp,
			    *(uint32_t *)buf);
			iosramp += UINT32SZ;
			buf += UINT32SZ;
			len -= UINT32SZ;
		}

		if (len != 0) {
			ddi_rep_put8(handle, buf, iosramp, len,
			    DDI_DEV_AUTOINCR);
		}
	} else if ((len >= UINT32SZ) &&
	    ((((uintptr_t)iosramp | (uintptr_t)buf) & (UINT32SZ - 1)) == 0)) {
		/*
		 * Both source and destination are word aligned
		 */
		int cnt = len/UINT32SZ;

		IOSRAMLOG(2,
		    "WR: rep_put32(buf:%p sramp:%p cnt:%x) len:%x\n",
		    buf, iosramp, cnt, len);
		ddi_rep_put32(handle, (uint32_t *)buf, (uint32_t *)iosramp,
		    cnt, DDI_DEV_AUTOINCR);
		iosramp += cnt * UINT32SZ;
		buf += cnt * UINT32SZ;
		len -= cnt * UINT32SZ;

		/*
		 * copy the remainder using byte access
		 */
		if (len != 0) {
			ddi_rep_put8(handle, buf, iosramp, len,
			    DDI_DEV_AUTOINCR);
		}
	} else if (len != 0) {
		/*
		 * We know that the "off" is at least word aligned.  We need to
		 * read data from the buf buffer a byte at a time, and write it
		 * to the IOSRAM a word at a time.
		 */

		ASSERT(((uintptr_t)iosramp & (UINT32SZ - 1)) == 0);

		IOSRAMLOG(2,
		    "WR: unaligned put32(buf:%p sramp:%p) len:%x\n",
		    buf, iosramp, len, NULL);
		for (; len >= UINT32SZ; len -= UINT32SZ, iosramp += UINT32SZ) {
			word.cbuf[0] = *buf++;
			word.cbuf[1] = *buf++;
			word.cbuf[2] = *buf++;
			word.cbuf[3] = *buf++;
			ddi_put32(handle, (uint32_t *)iosramp, word.data);
		}

		/*
		 * copy the remaining data using byte access
		 */
		if (len != 0) {
			ddi_rep_put8(handle, buf, iosramp,
			    len, DDI_DEV_AUTOINCR);
		}
	}

	/*
	 * Reacquire the mutex lock, decrement the refcnt, and if the refcnt is
	 * now 0 and any threads are waiting for r/w activity to complete, wake
	 * them up.
	 */
	mutex_enter(&iosram_mutex);
	ASSERT(iosram_rw_active > 0);

	if ((--iosram_rw_active == 0) && iosram_rw_wakeup) {
		iosram_rw_wakeup = 0;
		cv_broadcast(&iosram_rw_wait);
	}
	mutex_exit(&iosram_mutex);

	return (error);
}


int
iosram_force_write(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr)
{
	return (_iosram_write(key, off, len, dptr, 1 /* force */));
}


int
iosram_wr(uint32_t key, uint32_t off, uint32_t len, caddr_t dptr)
{
	return (_iosram_write(key, off, len, dptr, 0));
}


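/*
 * Usage sketch (illustrative, not part of the driver proper): a kernel client
 * that has agreed on a chunk key with SMS might read and update that chunk
 * roughly as follows, where IOSRAM_KEY_EXAMPLE and example_req_t are
 * hypothetical:
 *
 *	example_req_t req;
 *	int error;
 *
 *	error = iosram_rd(IOSRAM_KEY_EXAMPLE, 0, sizeof (req), (caddr_t)&req);
 *	if (error == 0) {
 *		req.seq++;
 *		error = iosram_wr(IOSRAM_KEY_EXAMPLE, 0, sizeof (req),
 *		    (caddr_t)&req);
 *	}
 *
 * EAGAIN indicates that a tunnel switch is in progress and the call should be
 * retried; iosram_force_write() is reserved for callers that must complete
 * even at the cost of aborting an active tunnel switch.
 */

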
/*
 * iosram_register(key, handler, arg)
 *	Register a handler and an arg for the specified chunk.  This handler
 *	will be invoked when an interrupt is received from the other side and
 *	the int_pending flag for the corresponding key is marked
 *	IOSRAM_INT_TO_DOM.
 */
/* ARGSUSED */
int
iosram_register(uint32_t key, void (*handler)(), void *arg)
{
	struct iosram_chunk	*chunkp;
	int			error = 0;

	/*
	 * Acquire lock and look for the requested chunk.  If it exists, and no
	 * other callback is registered, proceed with the registration.
	 */
	mutex_enter(&iosram_mutex);
	chunkp = iosram_find_chunk(key);

	if (iosram_master == NULL) {
		error = EIO;
	} else if (chunkp == NULL) {
		error = EINVAL;
	} else if (chunkp->cback.handler != NULL) {
		error = EBUSY;
	} else {
		chunkp->cback.busy = 0;
		chunkp->cback.unregister = 0;
		chunkp->cback.handler = handler;
		chunkp->cback.arg = arg;
	}
	mutex_exit(&iosram_mutex);

	IOSRAMLOG(1, "REG: key: 0x%x hdlr:%p arg:%p error:%d\n",
	    key, handler, arg, error);

	return (error);
}


/*
 * iosram_unregister()
 *	Unregister handler associated with the specified chunk.
 */
int
iosram_unregister(uint32_t key)
{
	struct iosram_chunk	*chunkp;
	int			error = 0;

	/*
	 * Acquire lock and look for the requested chunk.  If it exists and has
	 * a callback registered, unregister it.
	 */
	mutex_enter(&iosram_mutex);
	chunkp = iosram_find_chunk(key);

	if (iosram_master == NULL) {
		error = EIO;
	} else if (chunkp == NULL) {
		error = EINVAL;
	} else if (chunkp->cback.busy) {
		/*
		 * If the handler is already busy (being invoked), then we flag
		 * it so it will be unregistered after the invocation completes.
		 */
		DPRINTF(1, ("IOSRAM(%d): unregister: delaying unreg k:0x%08x\n",
		    iosram_master->instance, key));
		chunkp->cback.unregister = 1;
	} else if (chunkp->cback.handler != NULL) {
		chunkp->cback.handler = NULL;
		chunkp->cback.arg = NULL;
	}
	mutex_exit(&iosram_mutex);

	IOSRAMLOG(1, "UNREG: key:%x error:%d\n", key, error, NULL, NULL);
	return (error);
}


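/*
 * Callback sketch (illustrative): a client typically registers a handler once
 * its chunk key is known and unregisters it before detaching.  The handler is
 * invoked from the IOSRAM soft interrupt handler, so it should do minimal
 * work and defer anything lengthy.  The names below (example_handler,
 * example_softp, IOSRAM_KEY_EXAMPLE) are hypothetical:
 *
 *	static void
 *	example_handler(void *arg)
 *	{
 *		struct example_soft *example_softp = arg;
 *
 *		... note that new data is available and wake a worker ...
 *	}
 *
 *	if (iosram_register(IOSRAM_KEY_EXAMPLE, example_handler,
 *	    example_softp) == EBUSY) {
 *		... some other handler already owns this chunk ...
 *	}
 *	...
 *	(void) iosram_unregister(IOSRAM_KEY_EXAMPLE);
 */

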
/*
 * iosram_get_flag():
 *	Get data_valid and/or int_pending flags associated with the
 *	specified key.
 */
int
iosram_get_flag(uint32_t key, uint8_t *data_valid, uint8_t *int_pending)
{
	iosram_chunk_t	*chunkp;
	iosram_flags_t	flags;
	int		error = 0;

	/*
	 * Acquire lock and look for the requested chunk.  If it exists, and no
	 * tunnel switch is in progress, read the chunk's flags.
	 */
	mutex_enter(&iosram_mutex);
	chunkp = iosram_find_chunk(key);

	if (iosram_master == NULL) {
		error = EIO;
	} else if (chunkp == NULL) {
		error = EINVAL;
	} else if (iosram_tswitch_active) {
		error = EAGAIN;
	} else {
		IOSRAM_STAT(getflag);

		/*
		 * Read the flags
		 */
		ddi_rep_get8(iosram_handle, (uint8_t *)&flags,
		    (uint8_t *)(chunkp->flagsp), sizeof (iosram_flags_t),
		    DDI_DEV_AUTOINCR);

		/*
		 * Get each flag value that the caller is interested in.
		 */
		if (data_valid != NULL) {
			*data_valid = flags.data_valid;
		}

		if (int_pending != NULL) {
			*int_pending = flags.int_pending;
		}
	}
	mutex_exit(&iosram_mutex);

	IOSRAMLOG(1, "GetFlag key:%x data_valid:%x int_pending:%x error:%d\n",
	    key, flags.data_valid, flags.int_pending, error);
	return (error);
}


/*
 * iosram_set_flag():
 *	Set data_valid and int_pending flags associated with the specified key.
 */
int
iosram_set_flag(uint32_t key, uint8_t data_valid, uint8_t int_pending)
{
	iosram_chunk_t	*chunkp;
	iosram_flags_t	flags;
	int		error = 0;

	/*
	 * Acquire lock and look for the requested chunk.  If it exists, and no
	 * tunnel switch is in progress, write the chunk's flags.
	 */
	mutex_enter(&iosram_mutex);
	chunkp = iosram_find_chunk(key);

	if (iosram_master == NULL) {
		error = EIO;
	} else if ((chunkp == NULL) ||
	    ((data_valid != IOSRAM_DATA_INVALID) &&
	    (data_valid != IOSRAM_DATA_VALID)) ||
	    ((int_pending != IOSRAM_INT_NONE) &&
	    (int_pending != IOSRAM_INT_TO_SSC) &&
	    (int_pending != IOSRAM_INT_TO_DOM))) {
		error = EINVAL;
	} else if (iosram_tswitch_active) {
		error = EAGAIN;
	} else {
		IOSRAM_STAT(setflag);
		flags.data_valid = data_valid;
		flags.int_pending = int_pending;
		ddi_rep_put8(iosram_handle, (uint8_t *)&flags,
		    (uint8_t *)(chunkp->flagsp), sizeof (iosram_flags_t),
		    DDI_DEV_AUTOINCR);
	}
	mutex_exit(&iosram_mutex);

	IOSRAMLOG(1, "SetFlag key:%x data_valid:%x int_pending:%x error:%d\n",
	    key, flags.data_valid, flags.int_pending, error);
	return (error);
}


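/*
 * Flag usage sketch (illustrative): after writing fresh data into a chunk, a
 * producer on the domain side would typically mark the data valid, direct an
 * interrupt at the SC, and then notify it, e.g.:
 *
 *	if (iosram_wr(key, 0, len, buf) == 0 &&
 *	    iosram_set_flag(key, IOSRAM_DATA_VALID, IOSRAM_INT_TO_SSC) == 0) {
 *		iosram_send_intr();
 *	}
 *
 * A consumer reacts to its registered callback (or polls iosram_get_flag()),
 * reads the data, and then clears the flags with IOSRAM_DATA_INVALID and
 * IOSRAM_INT_NONE.
 */

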
/*
 * iosram_ctrl()
 *	This function provides access to a variety of services not available
 *	through the basic API.
 */
int
iosram_ctrl(uint32_t key, uint32_t cmd, void *arg)
{
	struct iosram_chunk	*chunkp;
	int			error = 0;

	/*
	 * Acquire lock and do some argument sanity checking.
	 */
	mutex_enter(&iosram_mutex);
	chunkp = iosram_find_chunk(key);

	if (iosram_master == NULL) {
		error = EIO;
	} else if (chunkp == NULL) {
		error = EINVAL;
	}

	if (error != 0) {
		mutex_exit(&iosram_mutex);
		return (error);
	}

	/*
	 * Arguments seem okay so far, so process the command.
	 */
	switch (cmd) {
	case IOSRAM_CMD_CHUNKLEN:
		/*
		 * Return the length of the chunk indicated by the key.
		 */
		if (arg == NULL) {
			error = EINVAL;
			break;
		}

		*(uint32_t *)arg = chunkp->toc_data.len;
		break;

	default:
		error = ENOTSUP;
		break;
	}

	mutex_exit(&iosram_mutex);
	return (error);
}


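/*
 * For example (illustrative, with "buf" being a caller-supplied caddr_t): a
 * caller can size its transfer to an entire chunk by querying the chunk
 * length first:
 *
 *	uint32_t len;
 *
 *	if (iosram_ctrl(key, IOSRAM_CMD_CHUNKLEN, &len) == 0) {
 *		buf = kmem_alloc(len, KM_SLEEP);
 *		(void) iosram_rd(key, 0, len, buf);
 *	}
 */

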
/*
 * iosram_hdr_ctrl()
 *	This function provides an interface for the Mailbox Protocol
 *	implementation to use when interacting with the IOSRAM header.
 */
int
iosram_hdr_ctrl(uint32_t cmd, void *arg)
{
	int	error = 0;

	/*
	 * Acquire lock and do some argument sanity checking.
	 */
	mutex_enter(&iosram_mutex);

	if (iosram_master == NULL) {
		error = EIO;
	}

	if (error != 0) {
		mutex_exit(&iosram_mutex);
		return (error);
	}

	switch (cmd) {
	case IOSRAM_HDRCMD_GET_SMS_MBOX_VER:
		/*
		 * Return the value of the sms_mbox_version field.
		 */
		if (arg == NULL) {
			error = EINVAL;
			break;
		}

		*(uint32_t *)arg = IOSRAM_GET_HDRFIELD32(iosram_master,
		    sms_mbox_version);
		break;

	case IOSRAM_HDRCMD_SET_OS_MBOX_VER:
		/*
		 * Set the value of the os_mbox_version field.
		 */
		IOSRAM_SET_HDRFIELD32(iosram_master, os_mbox_version,
		    (uint32_t)(uintptr_t)arg);
		IOSRAM_SET_HDRFIELD32(iosram_master, os_change_mask,
		    IOSRAM_HDRFIELD_OS_MBOX_VER);
		iosram_send_intr();
		break;

	case IOSRAM_HDRCMD_REG_CALLBACK:
		iosram_hdrchange_handler = (void (*)())arg;
		break;

	default:
		error = ENOTSUP;
		break;
	}

	mutex_exit(&iosram_mutex);
	return (error);
}


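/*
 * Illustrative use by a mailbox implementation: negotiate versions by reading
 * the SC side's version and then publishing its own.  Note that for the SET
 * command the value is passed through the arg pointer itself, as the code
 * above shows; MY_OS_MBOX_VER is a hypothetical constant supplied by that
 * client:
 *
 *	uint32_t sms_ver;
 *
 *	if (iosram_hdr_ctrl(IOSRAM_HDRCMD_GET_SMS_MBOX_VER, &sms_ver) == 0) {
 *		(void) iosram_hdr_ctrl(IOSRAM_HDRCMD_SET_OS_MBOX_VER,
 *		    (void *)(uintptr_t)MY_OS_MBOX_VER);
 *	}
 */

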
/*
 * iosram_softintr()
 *	IOSRAM soft interrupt handler
 */
static uint_t
iosram_softintr(caddr_t arg)
{
	uint32_t	hdr_changes;
	iosramsoft_t	*softp = (iosramsoft_t *)arg;
	iosram_chunk_t	*chunkp;
	void		(*handler)();
	int		i;
	uint8_t		flag;

	DPRINTF(1, ("iosram(%d): in iosram_softintr\n", softp->instance));

	IOSRAMLOG(2, "SINTR arg/softp:%p pending:%d busy:%d\n",
	    arg, softp->intr_pending, softp->intr_busy, NULL);

	mutex_enter(&iosram_mutex);
	mutex_enter(&softp->intr_mutex);

	/*
	 * Do not process interrupt if interrupt handler is already running or
	 * no interrupts are pending.
	 */
	if (softp->intr_busy || !softp->intr_pending) {
		mutex_exit(&softp->intr_mutex);
		mutex_exit(&iosram_mutex);
		DPRINTF(1, ("IOSRAM(%d): softintr: busy=%d pending=%d\n",
		    softp->instance, softp->intr_busy, softp->intr_pending));
		return (softp->intr_pending ? DDI_INTR_CLAIMED :
		    DDI_INTR_UNCLAIMED);
	}

	/*
	 * It's possible for the SC to send an interrupt on the new master
	 * before we are able to set our internal state.  If so, we'll
	 * retrigger the soft interrupt right after tunnel switch completion.
	 */
	if (softp->state & IOSRAM_STATE_TSWITCH) {
		mutex_exit(&softp->intr_mutex);
		mutex_exit(&iosram_mutex);
		DPRINTF(1, ("IOSRAM(%d): softintr: doing switch "
		    "state=0x%x\n", softp->instance, softp->state));
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * Do not process interrupt if we are not the master.
	 */
	if (!(softp->state & IOSRAM_STATE_MASTER)) {
		mutex_exit(&softp->intr_mutex);
		mutex_exit(&iosram_mutex);
		DPRINTF(1, ("IOSRAM(%d): softintr: no master state=0x%x\n ",
		    softp->instance, softp->state));
		return (DDI_INTR_CLAIMED);
	}

	IOSRAM_STAT(sintr_recv);

	/*
	 * If the driver is suspended, then we should not process any
	 * interrupts.  Instead, we trigger a soft interrupt when the driver
	 * resumes.
	 */
	if (softp->suspended) {
		mutex_exit(&softp->intr_mutex);
		mutex_exit(&iosram_mutex);
		DPRINTF(1, ("IOSRAM(%d): softintr: suspended\n",
		    softp->instance));
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * Indicate that the IOSRAM interrupt handler is busy.  Note that this
	 * includes incrementing the reader/writer count, since we don't want
	 * any tunnel switches to start up while we're processing callbacks.
	 */
	softp->intr_busy = 1;
	iosram_rw_active++;
#if defined(DEBUG)
	if (iosram_rw_active > iosram_rw_active_max) {
		iosram_rw_active_max = iosram_rw_active;
	}
#endif
1502*1708Sstevel */ 1503*1708Sstevel for (i = 0, chunkp = chunks; i < nchunks; i++, 1504*1708Sstevel chunkp++) { 1505*1708Sstevel #if DEBUG 1506*1708Sstevel flag = ddi_get8(iosram_handle, 1507*1708Sstevel &(chunkp->flagsp->int_pending)); 1508*1708Sstevel DPRINTF(1, ("IOSRAM(%d): softintr chunk #%d " 1509*1708Sstevel "flag=0x%x handler=%p\n", 1510*1708Sstevel softp->instance, i, (int)flag, 1511*1708Sstevel chunkp->cback.handler)); 1512*1708Sstevel #endif 1513*1708Sstevel if ((handler = chunkp->cback.handler) == NULL) { 1514*1708Sstevel continue; 1515*1708Sstevel } 1516*1708Sstevel flag = ddi_get8(iosram_handle, 1517*1708Sstevel &(chunkp->flagsp->int_pending)); 1518*1708Sstevel if (flag == IOSRAM_INT_TO_DOM) { 1519*1708Sstevel DPRINTF(1, 1520*1708Sstevel ("IOSRAM(%d): softintr: invoking handler\n", 1521*1708Sstevel softp->instance)); 1522*1708Sstevel IOSRAMLOG(1, 1523*1708Sstevel "SINTR invoking hdlr:%p arg:%p index:%d\n", 1524*1708Sstevel handler, chunkp->cback.arg, i, NULL); 1525*1708Sstevel IOSRAM_STAT(callbacks); 1526*1708Sstevel 1527*1708Sstevel ddi_put8(iosram_handle, 1528*1708Sstevel &(chunkp->flagsp->int_pending), 1529*1708Sstevel IOSRAM_INT_NONE); 1530*1708Sstevel chunkp->cback.busy = 1; 1531*1708Sstevel mutex_exit(&iosram_mutex); 1532*1708Sstevel (*handler)(chunkp->cback.arg); 1533*1708Sstevel mutex_enter(&iosram_mutex); 1534*1708Sstevel chunkp->cback.busy = 0; 1535*1708Sstevel 1536*1708Sstevel /* 1537*1708Sstevel * If iosram_unregister was called while the 1538*1708Sstevel * callback was being invoked, complete the 1539*1708Sstevel * unregistration here. 1540*1708Sstevel */ 1541*1708Sstevel if (chunkp->cback.unregister) { 1542*1708Sstevel DPRINTF(1, ("IOSRAM(%d): softintr: " 1543*1708Sstevel "delayed unreg k:0x%08x\n", 1544*1708Sstevel softp->instance, 1545*1708Sstevel chunkp->toc_data.key)); 1546*1708Sstevel chunkp->cback.handler = NULL; 1547*1708Sstevel chunkp->cback.arg = NULL; 1548*1708Sstevel chunkp->cback.unregister = 0; 1549*1708Sstevel } 1550*1708Sstevel } 1551*1708Sstevel 1552*1708Sstevel /* 1553*1708Sstevel * If there's a tunnel switch waiting to run, give it 1554*1708Sstevel * higher priority than these callbacks by bailing out. 1555*1708Sstevel * They'll still be invoked on the new master iosram 1556*1708Sstevel * when the tunnel switch is done. 
1557*1708Sstevel */ 1558*1708Sstevel if (iosram_tswitch_active) { 1559*1708Sstevel break; 1560*1708Sstevel } 1561*1708Sstevel } 1562*1708Sstevel 1563*1708Sstevel mutex_enter(&softp->intr_mutex); 1564*1708Sstevel 1565*1708Sstevel } while (softp->intr_pending && !softp->suspended && 1566*1708Sstevel !iosram_tswitch_active); 1567*1708Sstevel 1568*1708Sstevel /* 1569*1708Sstevel * Indicate IOSRAM interrupt handler is not BUSY any more 1570*1708Sstevel */ 1571*1708Sstevel softp->intr_busy = 0; 1572*1708Sstevel 1573*1708Sstevel ASSERT(iosram_rw_active > 0); 1574*1708Sstevel if ((--iosram_rw_active == 0) && iosram_rw_wakeup) { 1575*1708Sstevel iosram_rw_wakeup = 0; 1576*1708Sstevel cv_broadcast(&iosram_rw_wait); 1577*1708Sstevel } 1578*1708Sstevel 1579*1708Sstevel mutex_exit(&softp->intr_mutex); 1580*1708Sstevel mutex_exit(&iosram_mutex); 1581*1708Sstevel 1582*1708Sstevel DPRINTF(1, ("iosram(%d): softintr exit\n", softp->instance)); 1583*1708Sstevel 1584*1708Sstevel return (DDI_INTR_CLAIMED); 1585*1708Sstevel } 1586*1708Sstevel 1587*1708Sstevel 1588*1708Sstevel /* 1589*1708Sstevel * iosram_intr() 1590*1708Sstevel * IOSRAM real interrupt handler 1591*1708Sstevel */ 1592*1708Sstevel static uint_t 1593*1708Sstevel iosram_intr(caddr_t arg) 1594*1708Sstevel { 1595*1708Sstevel iosramsoft_t *softp = (iosramsoft_t *)arg; 1596*1708Sstevel int result = DDI_INTR_UNCLAIMED; 1597*1708Sstevel uint32_t int_status; 1598*1708Sstevel 1599*1708Sstevel DPRINTF(2, ("iosram(%d): in iosram_intr\n", softp->instance)); 1600*1708Sstevel 1601*1708Sstevel mutex_enter(&softp->intr_mutex); 1602*1708Sstevel 1603*1708Sstevel if (softp->sbbc_handle == NULL) { 1604*1708Sstevel /* 1605*1708Sstevel * The SBBC registers region is not mapped in. 1606*1708Sstevel * Set the interrupt pending flag here, and process the 1607*1708Sstevel * interrupt after the tunnel switch. 1608*1708Sstevel */ 1609*1708Sstevel DPRINTF(1, ("IOSRAM(%d): iosram_intr: SBBC not mapped\n", 1610*1708Sstevel softp->instance)); 1611*1708Sstevel softp->intr_pending = 1; 1612*1708Sstevel mutex_exit(&softp->intr_mutex); 1613*1708Sstevel return (DDI_INTR_UNCLAIMED); 1614*1708Sstevel } 1615*1708Sstevel 1616*1708Sstevel int_status = ddi_get32(softp->sbbc_handle, 1617*1708Sstevel &(softp->sbbc_region->int_status.reg)); 1618*1708Sstevel DPRINTF(1, ("iosram_intr: int_status = 0x%08x\n", int_status)); 1619*1708Sstevel 1620*1708Sstevel if (int_status & IOSRAM_SBBC_INT0) { 1621*1708Sstevel result = DDI_INTR_CLAIMED; 1622*1708Sstevel DPRINTF(1, ("iosram_intr: int0 detected!\n")); 1623*1708Sstevel } 1624*1708Sstevel 1625*1708Sstevel if (int_status & IOSRAM_SBBC_INT1) { 1626*1708Sstevel result = DDI_INTR_CLAIMED; 1627*1708Sstevel DPRINTF(1, ("iosram_intr: int1 detected!\n")); 1628*1708Sstevel } 1629*1708Sstevel 1630*1708Sstevel if (result == DDI_INTR_CLAIMED) { 1631*1708Sstevel ddi_put32(softp->sbbc_handle, 1632*1708Sstevel &(softp->sbbc_region->int_status.reg), int_status); 1633*1708Sstevel int_status = ddi_get32(softp->sbbc_handle, 1634*1708Sstevel &(softp->sbbc_region->int_status.reg)); 1635*1708Sstevel DPRINTF(1, ("iosram_intr: int_status = 0x%08x\n", 1636*1708Sstevel int_status)); 1637*1708Sstevel 1638*1708Sstevel softp->intr_pending = 1; 1639*1708Sstevel /* 1640*1708Sstevel * Trigger soft interrupt if not executing and 1641*1708Sstevel * not suspended. 
1642*1708Sstevel */ 1643*1708Sstevel if (!softp->intr_busy && !softp->suspended && 1644*1708Sstevel (softp->softintr_id != NULL)) { 1645*1708Sstevel DPRINTF(1, ("iosram(%d): trigger softint\n", 1646*1708Sstevel softp->instance)); 1647*1708Sstevel ddi_trigger_softintr(softp->softintr_id); 1648*1708Sstevel } 1649*1708Sstevel } 1650*1708Sstevel 1651*1708Sstevel IOSRAM_STAT(intr_recv); 1652*1708Sstevel 1653*1708Sstevel mutex_exit(&softp->intr_mutex); 1654*1708Sstevel 1655*1708Sstevel IOSRAMLOG(2, "INTR arg/softp:%p pending:%d busy:%d\n", 1656*1708Sstevel arg, softp->intr_pending, softp->intr_busy, NULL); 1657*1708Sstevel DPRINTF(1, ("iosram(%d): iosram_intr exit\n", softp->instance)); 1658*1708Sstevel 1659*1708Sstevel return (result); 1660*1708Sstevel } 1661*1708Sstevel 1662*1708Sstevel 1663*1708Sstevel /* 1664*1708Sstevel * iosram_send_intr() 1665*1708Sstevel * Send an interrupt to the SSP side via AXQ driver 1666*1708Sstevel */ 1667*1708Sstevel int 1668*1708Sstevel iosram_send_intr() 1669*1708Sstevel { 1670*1708Sstevel IOSRAMLOG(1, "SendIntr called\n", NULL, NULL, NULL, NULL); 1671*1708Sstevel IOSRAM_STAT(intr_send); 1672*1708Sstevel DPRINTF(1, ("iosram iosram_send_intr invoked\n")); 1673*1708Sstevel 1674*1708Sstevel return (axq_cpu2ssc_intr(0)); 1675*1708Sstevel } 1676*1708Sstevel 1677*1708Sstevel 1678*1708Sstevel #if defined(DEBUG) 1679*1708Sstevel static void 1680*1708Sstevel iosram_dummy_cback(void *arg) 1681*1708Sstevel { 1682*1708Sstevel DPRINTF(1, ("iosram_dummy_cback invoked arg:%p\n", arg)); 1683*1708Sstevel } 1684*1708Sstevel #endif /* DEBUG */ 1685*1708Sstevel 1686*1708Sstevel 1687*1708Sstevel /*ARGSUSED1*/ 1688*1708Sstevel static int 1689*1708Sstevel iosram_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, 1690*1708Sstevel int *rvalp) 1691*1708Sstevel { 1692*1708Sstevel struct iosramsoft *softp; 1693*1708Sstevel int error = DDI_SUCCESS; 1694*1708Sstevel 1695*1708Sstevel softp = ddi_get_soft_state(iosramsoft_statep, getminor(dev)); 1696*1708Sstevel if (softp == NULL) { 1697*1708Sstevel return (ENXIO); 1698*1708Sstevel } 1699*1708Sstevel IOSRAMLOG(1, "IOCTL: dev:%p cmd:%x arg:%p ... 
instance %d\n", 1700*1708Sstevel dev, cmd, arg, softp->instance); 1701*1708Sstevel 1702*1708Sstevel switch (cmd) { 1703*1708Sstevel #if defined(DEBUG) 1704*1708Sstevel case IOSRAM_GET_FLAG: 1705*1708Sstevel { 1706*1708Sstevel iosram_io_t req; 1707*1708Sstevel uint8_t data_valid, int_pending; 1708*1708Sstevel 1709*1708Sstevel if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) { 1710*1708Sstevel return (EFAULT); 1711*1708Sstevel } 1712*1708Sstevel 1713*1708Sstevel DPRINTF(2, ("IOSRAM_GET_FLAG(key:%x\n", req.key)); 1714*1708Sstevel 1715*1708Sstevel req.retval = iosram_get_flag(req.key, &data_valid, 1716*1708Sstevel &int_pending); 1717*1708Sstevel req.data_valid = (uint32_t)data_valid; 1718*1708Sstevel req.int_pending = (uint32_t)int_pending; 1719*1708Sstevel 1720*1708Sstevel if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) { 1721*1708Sstevel DPRINTF(1, 1722*1708Sstevel ("IOSRAM_GET_FLAG: can't copyout req.retval (%x)", 1723*1708Sstevel req.retval)); 1724*1708Sstevel error = EFAULT; 1725*1708Sstevel } 1726*1708Sstevel 1727*1708Sstevel return (error); 1728*1708Sstevel } 1729*1708Sstevel 1730*1708Sstevel case IOSRAM_SET_FLAG: 1731*1708Sstevel { 1732*1708Sstevel iosram_io_t req; 1733*1708Sstevel 1734*1708Sstevel if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) { 1735*1708Sstevel return (EFAULT); 1736*1708Sstevel } 1737*1708Sstevel 1738*1708Sstevel DPRINTF(2, ("IOSRAM_SET_FLAG(key:%x data_valid:%x " 1739*1708Sstevel "int_pending:%x\n", req.key, req.data_valid, 1740*1708Sstevel req.int_pending)); 1741*1708Sstevel 1742*1708Sstevel req.retval = iosram_set_flag(req.key, req.data_valid, 1743*1708Sstevel req.int_pending); 1744*1708Sstevel 1745*1708Sstevel if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) { 1746*1708Sstevel DPRINTF(1, ("IOSRAM_SET_FLAG: can't copyout req.retval" 1747*1708Sstevel " (%x)\n", req.retval)); 1748*1708Sstevel error = EFAULT; 1749*1708Sstevel } 1750*1708Sstevel 1751*1708Sstevel return (error); 1752*1708Sstevel } 1753*1708Sstevel 1754*1708Sstevel case IOSRAM_RD: 1755*1708Sstevel { 1756*1708Sstevel caddr_t bufp; 1757*1708Sstevel int len; 1758*1708Sstevel iosram_io_t req; 1759*1708Sstevel 1760*1708Sstevel if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) { 1761*1708Sstevel return (EFAULT); 1762*1708Sstevel } 1763*1708Sstevel 1764*1708Sstevel DPRINTF(2, ("IOSRAM_RD(k:%x o:%x len:%x bufp:%p\n", req.key, 1765*1708Sstevel req.off, req.len, (void *)(uintptr_t)req.bufp)); 1766*1708Sstevel 1767*1708Sstevel len = req.len; 1768*1708Sstevel bufp = kmem_alloc(len, KM_SLEEP); 1769*1708Sstevel 1770*1708Sstevel req.retval = iosram_rd(req.key, req.off, req.len, bufp); 1771*1708Sstevel 1772*1708Sstevel if (ddi_copyout(bufp, (void *)(uintptr_t)req.bufp, len, mode)) { 1773*1708Sstevel DPRINTF(1, ("IOSRAM_RD: copyout(%p, %p,%x,%x) failed\n", 1774*1708Sstevel bufp, (void *)(uintptr_t)req.bufp, len, mode)); 1775*1708Sstevel error = EFAULT; 1776*1708Sstevel } else if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) { 1777*1708Sstevel DPRINTF(1, ("IOSRAM_RD: can't copyout retval (%x)\n", 1778*1708Sstevel req.retval)); 1779*1708Sstevel error = EFAULT; 1780*1708Sstevel } 1781*1708Sstevel 1782*1708Sstevel kmem_free(bufp, len); 1783*1708Sstevel return (error); 1784*1708Sstevel } 1785*1708Sstevel 1786*1708Sstevel case IOSRAM_WR: 1787*1708Sstevel { 1788*1708Sstevel caddr_t bufp; 1789*1708Sstevel iosram_io_t req; 1790*1708Sstevel int len; 1791*1708Sstevel 1792*1708Sstevel if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) { 1793*1708Sstevel return (EFAULT); 1794*1708Sstevel 
} 1795*1708Sstevel 1796*1708Sstevel DPRINTF(2, ("IOSRAM_WR(k:%x o:%x len:%x bufp:%p\n", 1797*1708Sstevel req.key, req.off, req.len, req.bufp)); 1798*1708Sstevel len = req.len; 1799*1708Sstevel bufp = kmem_alloc(len, KM_SLEEP); 1800*1708Sstevel if (ddi_copyin((void *)(uintptr_t)req.bufp, bufp, len, mode)) { 1801*1708Sstevel error = EFAULT; 1802*1708Sstevel } else { 1803*1708Sstevel req.retval = iosram_wr(req.key, req.off, req.len, 1804*1708Sstevel bufp); 1805*1708Sstevel 1806*1708Sstevel if (ddi_copyout(&req, (void *)arg, sizeof (req), 1807*1708Sstevel mode)) { 1808*1708Sstevel error = EFAULT; 1809*1708Sstevel } 1810*1708Sstevel } 1811*1708Sstevel kmem_free(bufp, len); 1812*1708Sstevel return (error); 1813*1708Sstevel } 1814*1708Sstevel 1815*1708Sstevel case IOSRAM_TOC: 1816*1708Sstevel { 1817*1708Sstevel caddr_t bufp; 1818*1708Sstevel int len; 1819*1708Sstevel iosram_io_t req; 1820*1708Sstevel 1821*1708Sstevel if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) { 1822*1708Sstevel return (EFAULT); 1823*1708Sstevel } 1824*1708Sstevel 1825*1708Sstevel DPRINTF(2, ("IOSRAM_TOC (req.bufp:%x req.len:%x) \n", 1826*1708Sstevel req.bufp, req.len)); 1827*1708Sstevel 1828*1708Sstevel len = req.len; 1829*1708Sstevel bufp = kmem_alloc(len, KM_SLEEP); 1830*1708Sstevel 1831*1708Sstevel req.retval = iosram_get_keys((iosram_toc_entry_t *)bufp, 1832*1708Sstevel &req.len); 1833*1708Sstevel 1834*1708Sstevel if (ddi_copyout(bufp, (void *)(uintptr_t)req.bufp, req.len, 1835*1708Sstevel mode)) { 1836*1708Sstevel DPRINTF(1, 1837*1708Sstevel ("IOSRAM_TOC: copyout(%p, %p,%x,%x) failed\n", 1838*1708Sstevel bufp, (void *)(uintptr_t)req.bufp, req.len, mode)); 1839*1708Sstevel error = EFAULT; 1840*1708Sstevel } else if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) { 1841*1708Sstevel DPRINTF(1, ("IOSRAM_TOC: can't copyout retval (%x)\n", 1842*1708Sstevel req.retval)); 1843*1708Sstevel error = EFAULT; 1844*1708Sstevel } 1845*1708Sstevel kmem_free(bufp, len); 1846*1708Sstevel return (error); 1847*1708Sstevel } 1848*1708Sstevel 1849*1708Sstevel case IOSRAM_SEND_INTR: 1850*1708Sstevel { 1851*1708Sstevel DPRINTF(2, ("IOSRAM_SEND_INTR\n")); 1852*1708Sstevel 1853*1708Sstevel switch ((int)arg) { 1854*1708Sstevel case 0x11: 1855*1708Sstevel case 0x22: 1856*1708Sstevel case 0x44: 1857*1708Sstevel case 0x88: 1858*1708Sstevel ddi_put32(softp->sbbc_handle, 1859*1708Sstevel &(softp->sbbc_region->int_enable.reg), (int)arg); 1860*1708Sstevel DPRINTF(1, ("Wrote 0x%x to int_enable.reg\n", 1861*1708Sstevel (int)arg)); 1862*1708Sstevel break; 1863*1708Sstevel case 0xBB: 1864*1708Sstevel ddi_put32(softp->sbbc_handle, 1865*1708Sstevel &(softp->sbbc_region->p0_int_gen.reg), 1); 1866*1708Sstevel DPRINTF(1, ("Wrote 1 to p0_int_gen.reg\n")); 1867*1708Sstevel break; 1868*1708Sstevel default: 1869*1708Sstevel error = iosram_send_intr(); 1870*1708Sstevel } 1871*1708Sstevel 1872*1708Sstevel return (error); 1873*1708Sstevel } 1874*1708Sstevel 1875*1708Sstevel case IOSRAM_PRINT_CBACK: 1876*1708Sstevel iosram_print_cback(); 1877*1708Sstevel break; 1878*1708Sstevel 1879*1708Sstevel case IOSRAM_PRINT_STATE: 1880*1708Sstevel iosram_print_state((int)arg); 1881*1708Sstevel break; 1882*1708Sstevel 1883*1708Sstevel #if IOSRAM_STATS 1884*1708Sstevel case IOSRAM_PRINT_STATS: 1885*1708Sstevel iosram_print_stats(); 1886*1708Sstevel break; 1887*1708Sstevel #endif 1888*1708Sstevel 1889*1708Sstevel #if IOSRAM_LOG 1890*1708Sstevel case IOSRAM_PRINT_LOG: 1891*1708Sstevel iosram_print_log((int)arg); 1892*1708Sstevel break; 1893*1708Sstevel #endif 
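/*
 * Illustrative sketch only (not part of the original driver): a DEBUG-only
 * userland test could drive the IOSRAM_RD case above through the same
 * iosram_io_t copyin/copyout protocol the handler implements (the caller
 * fills in key/off/len/bufp; the driver sets retval and copies the chunk
 * data out to bufp).  The device path, open flags, chunk key, and field
 * widths below are assumptions for the example, not values defined by this
 * file; a real key could be obtained via the IOSRAM_TOC ioctl instead.
 *
 *	const char *iosram_dev_path = "...";	platform-specific minor node
 *	iosram_io_t req;
 *	char buf[64];
 *	int fd = open(iosram_dev_path, O_RDWR);
 *
 *	req.key = MY_CHUNK_KEY;			hypothetical chunk key
 *	req.off = 0;
 *	req.len = sizeof (buf);
 *	req.bufp = (uint64_t)(uintptr_t)buf;
 *	if (ioctl(fd, IOSRAM_RD, &req) == 0 && req.retval == 0) {
 *		the first req.len bytes of the chunk are now in buf
 *	}
 */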
1894*1708Sstevel 1895*1708Sstevel case IOSRAM_TUNNEL_SWITCH: 1896*1708Sstevel error = iosram_switchfrom((int)arg); 1897*1708Sstevel break; 1898*1708Sstevel 1899*1708Sstevel case IOSRAM_PRINT_FLAGS: 1900*1708Sstevel iosram_print_flags(); 1901*1708Sstevel break; 1902*1708Sstevel 1903*1708Sstevel case IOSRAM_REG_CBACK: 1904*1708Sstevel { 1905*1708Sstevel iosram_io_t req; 1906*1708Sstevel 1907*1708Sstevel if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) { 1908*1708Sstevel return (EFAULT); 1909*1708Sstevel } 1910*1708Sstevel 1911*1708Sstevel DPRINTF(2, ("IOSRAM_REG_CBACK(k:%x)\n", req.key)); 1912*1708Sstevel 1913*1708Sstevel req.retval = iosram_register(req.key, iosram_dummy_cback, 1914*1708Sstevel (void *)(uintptr_t)req.key); 1915*1708Sstevel if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) { 1916*1708Sstevel error = EFAULT; 1917*1708Sstevel } 1918*1708Sstevel 1919*1708Sstevel return (error); 1920*1708Sstevel } 1921*1708Sstevel 1922*1708Sstevel case IOSRAM_UNREG_CBACK: 1923*1708Sstevel { 1924*1708Sstevel iosram_io_t req; 1925*1708Sstevel 1926*1708Sstevel if (ddi_copyin((void *)arg, &req, sizeof (req), mode)) { 1927*1708Sstevel return (EFAULT); 1928*1708Sstevel } 1929*1708Sstevel 1930*1708Sstevel DPRINTF(2, ("IOSRAM_REG_CBACK(k:%x)\n", req.key)); 1931*1708Sstevel 1932*1708Sstevel req.retval = iosram_unregister(req.key); 1933*1708Sstevel if (ddi_copyout(&req, (void *)arg, sizeof (req), mode)) { 1934*1708Sstevel error = EFAULT; 1935*1708Sstevel } 1936*1708Sstevel 1937*1708Sstevel return (error); 1938*1708Sstevel } 1939*1708Sstevel 1940*1708Sstevel case IOSRAM_SEMA_ACQUIRE: 1941*1708Sstevel { 1942*1708Sstevel DPRINTF(1, ("IOSRAM_SEMA_ACQUIRE\n")); 1943*1708Sstevel error = iosram_sema_acquire(NULL); 1944*1708Sstevel return (error); 1945*1708Sstevel } 1946*1708Sstevel 1947*1708Sstevel case IOSRAM_SEMA_RELEASE: 1948*1708Sstevel { 1949*1708Sstevel DPRINTF(1, ("IOSRAM_SEMA_RELEASE\n")); 1950*1708Sstevel error = iosram_sema_release(); 1951*1708Sstevel return (error); 1952*1708Sstevel } 1953*1708Sstevel 1954*1708Sstevel #endif /* DEBUG */ 1955*1708Sstevel 1956*1708Sstevel default: 1957*1708Sstevel DPRINTF(1, ("iosram_ioctl: Illegal command %x\n", cmd)); 1958*1708Sstevel error = ENOTTY; 1959*1708Sstevel } 1960*1708Sstevel 1961*1708Sstevel return (error); 1962*1708Sstevel } 1963*1708Sstevel 1964*1708Sstevel 1965*1708Sstevel /* 1966*1708Sstevel * iosram_switch_tunnel(softp) 1967*1708Sstevel * Switch master tunnel to the specified instance 1968*1708Sstevel * Must be called while holding iosram_mutex 1969*1708Sstevel */ 1970*1708Sstevel /*ARGSUSED*/ 1971*1708Sstevel static int 1972*1708Sstevel iosram_switch_tunnel(iosramsoft_t *softp) 1973*1708Sstevel { 1974*1708Sstevel #ifdef DEBUG 1975*1708Sstevel int instance = softp->instance; 1976*1708Sstevel #endif 1977*1708Sstevel int error = 0; 1978*1708Sstevel iosramsoft_t *prev_master; 1979*1708Sstevel 1980*1708Sstevel ASSERT(mutex_owned(&iosram_mutex)); 1981*1708Sstevel 1982*1708Sstevel DPRINTF(1, ("tunnel switch new master:%p (%d) current master:%p (%d)\n", 1983*1708Sstevel softp, instance, iosram_master, 1984*1708Sstevel ((iosram_master) ? iosram_master->instance : -1))); 1985*1708Sstevel IOSRAMLOG(1, "TSWTCH: new_master:%p (%p) iosram_master:%p (%d)\n", 1986*1708Sstevel softp, instance, iosram_master, 1987*1708Sstevel ((iosram_master) ? 
iosram_master->instance : -1)); 1988*1708Sstevel 1989*1708Sstevel if (softp == NULL || (softp->state & IOSRAM_STATE_DETACH)) { 1990*1708Sstevel return (ENXIO); 1991*1708Sstevel } 1992*1708Sstevel if (iosram_master == softp) { 1993*1708Sstevel return (0); 1994*1708Sstevel } 1995*1708Sstevel 1996*1708Sstevel 1997*1708Sstevel /* 1998*1708Sstevel * We protect against the softp structure being deallocated by setting 1999*1708Sstevel * the IOSRAM_STATE_TSWITCH state flag. The detach routine will check 2000*1708Sstevel * for this flag and if set, it will wait for this flag to be reset or 2001*1708Sstevel * refuse the detach operation. 2002*1708Sstevel */ 2003*1708Sstevel iosram_new_master = softp; 2004*1708Sstevel softp->state |= IOSRAM_STATE_TSWITCH; 2005*1708Sstevel prev_master = iosram_master; 2006*1708Sstevel if (prev_master) { 2007*1708Sstevel prev_master->state |= IOSRAM_STATE_TSWITCH; 2008*1708Sstevel } 2009*1708Sstevel mutex_exit(&iosram_mutex); 2010*1708Sstevel 2011*1708Sstevel /* 2012*1708Sstevel * Map the target IOSRAM, read the TOC, and register interrupts if not 2013*1708Sstevel * already done. 2014*1708Sstevel */ 2015*1708Sstevel DPRINTF(1, ("iosram(%d): mapping IOSRAM and SBBC\n", 2016*1708Sstevel softp->instance)); 2017*1708Sstevel IOSRAMLOG(1, "TSWTCH: mapping instance:%d softp:%p\n", 2018*1708Sstevel instance, softp, NULL, NULL); 2019*1708Sstevel 2020*1708Sstevel if (iosram_setup_map(softp) != DDI_SUCCESS) { 2021*1708Sstevel error = ENXIO; 2022*1708Sstevel } else if ((chunks == NULL) && (iosram_read_toc(softp) != 0)) { 2023*1708Sstevel iosram_remove_map(softp); 2024*1708Sstevel error = EINVAL; 2025*1708Sstevel } else if (iosram_add_intr(softp) != DDI_SUCCESS) { 2026*1708Sstevel /* 2027*1708Sstevel * If there was no previous master, purge the TOC data that 2028*1708Sstevel * iosram_read_toc() created. 2029*1708Sstevel */ 2030*1708Sstevel if ((prev_master == NULL) && (chunks != NULL)) { 2031*1708Sstevel kmem_free(chunks, nchunks * sizeof (iosram_chunk_t)); 2032*1708Sstevel chunks = NULL; 2033*1708Sstevel nchunks = 0; 2034*1708Sstevel iosram_init_hashtab(); 2035*1708Sstevel } 2036*1708Sstevel iosram_remove_map(softp); 2037*1708Sstevel error = ENXIO; 2038*1708Sstevel } 2039*1708Sstevel 2040*1708Sstevel /* 2041*1708Sstevel * If we are asked to abort tunnel switch, do so now, before invoking 2042*1708Sstevel * the OBP callback. 2043*1708Sstevel */ 2044*1708Sstevel if (iosram_tswitch_aborted) { 2045*1708Sstevel 2046*1708Sstevel /* 2047*1708Sstevel * Once the tunnel switch is aborted, this thread should not 2048*1708Sstevel * resume. If it does, we simply log a message. We can't unmap 2049*1708Sstevel * the new master IOSRAM as it may be accessed in 2050*1708Sstevel * iosram_abort_tswitch(). It will be unmapped when it is 2051*1708Sstevel * detached. 2052*1708Sstevel */ 2053*1708Sstevel IOSRAMLOG(1, 2054*1708Sstevel "TSWTCH: aborted (pre OBP cback). Thread resumed.\n", 2055*1708Sstevel NULL, NULL, NULL, NULL); 2056*1708Sstevel error = EIO; 2057*1708Sstevel } 2058*1708Sstevel 2059*1708Sstevel if (error) { 2060*1708Sstevel IOSRAMLOG(1, 2061*1708Sstevel "TSWTCH: map failed instance:%d softp:%p error:%x\n", 2062*1708Sstevel instance, softp, error, NULL); 2063*1708Sstevel goto done; 2064*1708Sstevel } 2065*1708Sstevel 2066*1708Sstevel if (prev_master != NULL) { 2067*1708Sstevel int result; 2068*1708Sstevel 2069*1708Sstevel /* 2070*1708Sstevel * Now invoke the OBP interface to do the tunnel switch. 
2071*1708Sstevel */ 2072*1708Sstevel result = prom_starcat_switch_tunnel(softp->portid, 2073*1708Sstevel OBP_TSWITCH_REQREPLY); 2074*1708Sstevel if (result != 0) { 2075*1708Sstevel error = EIO; 2076*1708Sstevel } 2077*1708Sstevel IOSRAMLOG(1, 2078*1708Sstevel "TSWTCH: OBP tswitch portid:%x result:%x error:%x\n", 2079*1708Sstevel softp->portid, result, error, NULL); 2080*1708Sstevel IOSRAM_STAT(tswitch); 2081*1708Sstevel iosram_tswitch_tstamp = ddi_get_lbolt(); 2082*1708Sstevel } 2083*1708Sstevel 2084*1708Sstevel mutex_enter(&iosram_mutex); 2085*1708Sstevel if (iosram_tswitch_aborted) { 2086*1708Sstevel /* 2087*1708Sstevel * Tunnel switch aborted. This thread should not resume. 2088*1708Sstevel * For now, we simply log a message, but don't unmap any 2089*1708Sstevel * IOSRAM at this stage as it may be accessed within the 2090*1708Sstevel * isoram_abort_tswitch(). The IOSRAM will be unmapped 2091*1708Sstevel * when that instance is detached. 2092*1708Sstevel */ 2093*1708Sstevel if (iosram_tswitch_aborted) { 2094*1708Sstevel IOSRAMLOG(1, 2095*1708Sstevel "TSWTCH: aborted (post OBP cback). Thread" 2096*1708Sstevel " resumed.\n", NULL, NULL, NULL, NULL); 2097*1708Sstevel error = EIO; 2098*1708Sstevel mutex_exit(&iosram_mutex); 2099*1708Sstevel } 2100*1708Sstevel } else if (error) { 2101*1708Sstevel /* 2102*1708Sstevel * Tunnel switch failed. Continue using previous tunnel. 2103*1708Sstevel * However, unmap new (target) IOSRAM. 2104*1708Sstevel */ 2105*1708Sstevel iosram_new_master = NULL; 2106*1708Sstevel mutex_exit(&iosram_mutex); 2107*1708Sstevel iosram_remove_intr(softp); 2108*1708Sstevel iosram_remove_map(softp); 2109*1708Sstevel } else { 2110*1708Sstevel /* 2111*1708Sstevel * Tunnel switch was successful. Set the new master. 2112*1708Sstevel * Also unmap old master IOSRAM and remove any interrupts 2113*1708Sstevel * associated with that. 2114*1708Sstevel * 2115*1708Sstevel * Note that a call to iosram_force_write() allows access 2116*1708Sstevel * to the IOSRAM while tunnel switch is in progress. That 2117*1708Sstevel * means we need to set the new master before unmapping 2118*1708Sstevel * the old master. 2119*1708Sstevel */ 2120*1708Sstevel iosram_set_master(softp); 2121*1708Sstevel iosram_new_master = NULL; 2122*1708Sstevel mutex_exit(&iosram_mutex); 2123*1708Sstevel 2124*1708Sstevel if (prev_master) { 2125*1708Sstevel IOSRAMLOG(1, "TSWTCH: unmapping prev_master:%p (%d)\n", 2126*1708Sstevel prev_master, prev_master->instance, NULL, NULL); 2127*1708Sstevel iosram_remove_intr(prev_master); 2128*1708Sstevel iosram_remove_map(prev_master); 2129*1708Sstevel } 2130*1708Sstevel } 2131*1708Sstevel 2132*1708Sstevel done: 2133*1708Sstevel mutex_enter(&iosram_mutex); 2134*1708Sstevel 2135*1708Sstevel /* 2136*1708Sstevel * Clear the tunnel switch flag on the source and destination 2137*1708Sstevel * instances. 2138*1708Sstevel */ 2139*1708Sstevel if (prev_master) { 2140*1708Sstevel prev_master->state &= ~IOSRAM_STATE_TSWITCH; 2141*1708Sstevel } 2142*1708Sstevel softp->state &= ~IOSRAM_STATE_TSWITCH; 2143*1708Sstevel 2144*1708Sstevel /* 2145*1708Sstevel * Since incoming interrupts could get lost during a tunnel switch, 2146*1708Sstevel * trigger a soft interrupt just in case. No harm other than a bit 2147*1708Sstevel * of wasted effort will be caused if no interrupts were dropped. 
2148*1708Sstevel 	 */
2149*1708Sstevel 	mutex_enter(&softp->intr_mutex);
2150*1708Sstevel 	iosram_master->intr_pending = 1;
2151*1708Sstevel 	if ((iosram_master->softintr_id != NULL) &&
2152*1708Sstevel 	    (iosram_master->intr_busy == 0)) {
2153*1708Sstevel 		ddi_trigger_softintr(iosram_master->softintr_id);
2154*1708Sstevel 	}
2155*1708Sstevel 	mutex_exit(&softp->intr_mutex);
2156*1708Sstevel 
2157*1708Sstevel 	IOSRAMLOG(1, "TSWTCH: done error:%d iosram_master:%p instance:%d\n",
2158*1708Sstevel 	    error, iosram_master,
2159*1708Sstevel 	    (iosram_master) ? iosram_master->instance : -1, NULL);
2160*1708Sstevel 
2161*1708Sstevel 	return (error);
2162*1708Sstevel }
2163*1708Sstevel 
2164*1708Sstevel 
2165*1708Sstevel /*
2166*1708Sstevel  * iosram_abort_tswitch()
2167*1708Sstevel  * Must be called while holding iosram_mutex.
2168*1708Sstevel  */
2169*1708Sstevel static void
2170*1708Sstevel iosram_abort_tswitch()
2171*1708Sstevel {
2172*1708Sstevel 	uint32_t master_valid, new_master_valid;
2173*1708Sstevel 
2174*1708Sstevel 	ASSERT(mutex_owned(&iosram_mutex));
2175*1708Sstevel 
2176*1708Sstevel 	if ((!iosram_tswitch_active) || iosram_tswitch_aborted) {
2177*1708Sstevel 		return;
2178*1708Sstevel 	}
2179*1708Sstevel 
2180*1708Sstevel 	ASSERT(iosram_master != NULL);
2181*1708Sstevel 
2182*1708Sstevel 	IOSRAMLOG(1, "ABORT: iosram_master:%p (%d) iosram_new_master:%p (%d)\n",
2183*1708Sstevel 	    iosram_master, iosram_master->instance, iosram_new_master,
2184*1708Sstevel 	    (iosram_new_master == NULL) ? -1 : iosram_new_master->instance);
2185*1708Sstevel 
2186*1708Sstevel 	/*
2187*1708Sstevel 	 * The first call to iosram_force_write() in the middle of tunnel switch
2188*1708Sstevel 	 * will get here. We lookup IOSRAM VALID location and setup appropriate
2189*1708Sstevel 	 * master, if one is still valid. We also set iosram_tswitch_aborted to
2190*1708Sstevel 	 * prevent reentering this code and to catch if the OBP callback thread
2191*1708Sstevel 	 * somehow resumes.
2192*1708Sstevel 	 */
2193*1708Sstevel 	iosram_tswitch_aborted = 1;
2194*1708Sstevel 
2195*1708Sstevel 	if ((iosram_new_master == NULL) ||
2196*1708Sstevel 	    (iosram_new_master == iosram_master)) {
2197*1708Sstevel 		/*
2198*1708Sstevel 		 * New master hasn't been selected yet, or OBP callback
2199*1708Sstevel 		 * succeeded and we already selected new IOSRAM as master, but
2200*1708Sstevel 		 * system crashed in the middle of unmapping previous master or
2201*1708Sstevel 		 * cleaning up state. Use the existing master.
2202*1708Sstevel 		 */
2203*1708Sstevel 		ASSERT(iosram_master->iosramp != NULL);
2204*1708Sstevel 		ASSERT(IOSRAM_GET_HDRFIELD32(iosram_master, status) ==
2205*1708Sstevel 		    IOSRAM_VALID);
2206*1708Sstevel 		IOSRAMLOG(1, "ABORT: master (%d) already determined.\n",
2207*1708Sstevel 		    iosram_master->instance, NULL, NULL, NULL);
2208*1708Sstevel 
2209*1708Sstevel 		return;
2210*1708Sstevel 	}
2211*1708Sstevel 
2212*1708Sstevel 	/*
2213*1708Sstevel 	 * System crashed in the middle of tunnel switch and we know that the
2214*1708Sstevel 	 * new target has not been marked master yet. That means, the old
2215*1708Sstevel 	 * master should still be mapped. We need to abort the tunnel switch
2216*1708Sstevel 	 * and setup a valid master, if possible, so that we can write to the
2217*1708Sstevel 	 * IOSRAM.
2218*1708Sstevel * 2219*1708Sstevel * We select a new master based upon the IOSRAM header status fields in 2220*1708Sstevel * the previous master IOSRAM and the target IOSRAM as follows: 2221*1708Sstevel * 2222*1708Sstevel * iosram_master iosram-tswitch 2223*1708Sstevel * (Prev Master) (New Target) Decision 2224*1708Sstevel * --------------- --------------- ----------- 2225*1708Sstevel * VALID don't care prev master 2226*1708Sstevel * INTRANSIT INVALID prev master 2227*1708Sstevel * INTRANSIT INTRANSIT prev master 2228*1708Sstevel * INTRANSIT VALID new target 2229*1708Sstevel * INVALID INVALID shouldn't ever happen 2230*1708Sstevel * INVALID INTRANSIT shouldn't ever happen 2231*1708Sstevel * INVALID VALID new target 2232*1708Sstevel */ 2233*1708Sstevel 2234*1708Sstevel master_valid = (iosram_master->iosramp != NULL) ? 2235*1708Sstevel IOSRAM_GET_HDRFIELD32(iosram_master, status) : IOSRAM_INVALID; 2236*1708Sstevel new_master_valid = (iosram_new_master->iosramp != NULL) ? 2237*1708Sstevel IOSRAM_GET_HDRFIELD32(iosram_new_master, status) : IOSRAM_INVALID; 2238*1708Sstevel 2239*1708Sstevel if (master_valid == IOSRAM_VALID) { 2240*1708Sstevel /* EMPTY */ 2241*1708Sstevel /* 2242*1708Sstevel * OBP hasn't been called yet or, if it has, it hasn't started 2243*1708Sstevel * copying yet. Use the existing master. Note that the new 2244*1708Sstevel * master may not be mapped yet. 2245*1708Sstevel */ 2246*1708Sstevel IOSRAMLOG(1, "ABORT: prev master(%d) is VALID\n", 2247*1708Sstevel iosram_master->instance, NULL, NULL, NULL); 2248*1708Sstevel } else if (master_valid == IOSRAM_INTRANSIT) { 2249*1708Sstevel /* 2250*1708Sstevel * The system crashed after OBP started processing the tunnel 2251*1708Sstevel * switch but before the iosram driver determined that it was 2252*1708Sstevel * complete. Use the new master if it has been marked valid, 2253*1708Sstevel * meaning that OBP finished copying data to it, or the old 2254*1708Sstevel * master otherwise. 2255*1708Sstevel */ 2256*1708Sstevel IOSRAMLOG(1, "ABORT: prev master(%d) is INTRANSIT\n", 2257*1708Sstevel iosram_master->instance, NULL, NULL, NULL); 2258*1708Sstevel 2259*1708Sstevel if (new_master_valid == IOSRAM_VALID) { 2260*1708Sstevel iosram_set_master(iosram_new_master); 2261*1708Sstevel IOSRAMLOG(1, "ABORT: new master(%d) is VALID\n", 2262*1708Sstevel iosram_new_master->instance, NULL, NULL, 2263*1708Sstevel NULL); 2264*1708Sstevel } else { 2265*1708Sstevel prom_starcat_switch_tunnel(iosram_master->portid, 2266*1708Sstevel OBP_TSWITCH_NOREPLY); 2267*1708Sstevel 2268*1708Sstevel IOSRAMLOG(1, "ABORT: new master(%d) is INVALID\n", 2269*1708Sstevel iosram_new_master->instance, NULL, NULL, 2270*1708Sstevel NULL); 2271*1708Sstevel } 2272*1708Sstevel } else { 2273*1708Sstevel /* 2274*1708Sstevel * The system crashed after OBP marked the old master INVALID, 2275*1708Sstevel * which means the new master is the way to go. 
2276*1708Sstevel */ 2277*1708Sstevel IOSRAMLOG(1, "ABORT: prev master(%d) is INVALID\n", 2278*1708Sstevel iosram_master->instance, NULL, NULL, NULL); 2279*1708Sstevel 2280*1708Sstevel ASSERT(new_master_valid == IOSRAM_VALID); 2281*1708Sstevel 2282*1708Sstevel iosram_set_master(iosram_new_master); 2283*1708Sstevel } 2284*1708Sstevel 2285*1708Sstevel IOSRAMLOG(1, "ABORT: Instance %d selected as master\n", 2286*1708Sstevel iosram_master->instance, NULL, NULL, NULL); 2287*1708Sstevel } 2288*1708Sstevel 2289*1708Sstevel 2290*1708Sstevel /* 2291*1708Sstevel * iosram_switchfrom(instance) 2292*1708Sstevel * Switch master tunnel away from the specified instance 2293*1708Sstevel */ 2294*1708Sstevel /*ARGSUSED*/ 2295*1708Sstevel int 2296*1708Sstevel iosram_switchfrom(int instance) 2297*1708Sstevel { 2298*1708Sstevel struct iosramsoft *softp; 2299*1708Sstevel int error = 0; 2300*1708Sstevel int count; 2301*1708Sstevel clock_t current_tstamp; 2302*1708Sstevel clock_t tstamp_interval; 2303*1708Sstevel struct iosramsoft *last_master = NULL; 2304*1708Sstevel static int last_master_instance = -1; 2305*1708Sstevel 2306*1708Sstevel IOSRAMLOG(1, "SwtchFrom: instance:%d iosram_master:%p (%d)\n", 2307*1708Sstevel instance, iosram_master, 2308*1708Sstevel ((iosram_master) ? iosram_master->instance : -1), NULL); 2309*1708Sstevel 2310*1708Sstevel mutex_enter(&iosram_mutex); 2311*1708Sstevel 2312*1708Sstevel /* 2313*1708Sstevel * Wait if another tunnel switch is in progress 2314*1708Sstevel */ 2315*1708Sstevel for (count = 0; iosram_tswitch_active && count < IOSRAM_TSWITCH_RETRY; 2316*1708Sstevel count++) { 2317*1708Sstevel iosram_tswitch_wakeup = 1; 2318*1708Sstevel cv_wait(&iosram_tswitch_wait, &iosram_mutex); 2319*1708Sstevel } 2320*1708Sstevel 2321*1708Sstevel if (iosram_tswitch_active) { 2322*1708Sstevel mutex_exit(&iosram_mutex); 2323*1708Sstevel return (EAGAIN); 2324*1708Sstevel } 2325*1708Sstevel 2326*1708Sstevel /* 2327*1708Sstevel * Check if the specified instance holds the tunnel. If not, 2328*1708Sstevel * then we are done. 2329*1708Sstevel */ 2330*1708Sstevel if ((iosram_master == NULL) || (iosram_master->instance != instance)) { 2331*1708Sstevel mutex_exit(&iosram_mutex); 2332*1708Sstevel return (0); 2333*1708Sstevel } 2334*1708Sstevel 2335*1708Sstevel /* 2336*1708Sstevel * Before beginning the tunnel switch process, wait for any outstanding 2337*1708Sstevel * read/write activity to complete. 2338*1708Sstevel */ 2339*1708Sstevel iosram_tswitch_active = 1; 2340*1708Sstevel while (iosram_rw_active) { 2341*1708Sstevel iosram_rw_wakeup = 1; 2342*1708Sstevel cv_wait(&iosram_rw_wait, &iosram_mutex); 2343*1708Sstevel } 2344*1708Sstevel 2345*1708Sstevel /* 2346*1708Sstevel * If a previous tunnel switch just completed, we have to make sure 2347*1708Sstevel * HWAD has enough time to find the new tunnel before we switch 2348*1708Sstevel * away from it. Otherwise, OBP's mailbox message to OSD will never 2349*1708Sstevel * get through. Just to be paranoid about synchronization of lbolt 2350*1708Sstevel * across different CPUs, make sure the current attempt isn't noted 2351*1708Sstevel * as starting _before_ the last tunnel switch completed. 
2352*1708Sstevel */ 2353*1708Sstevel current_tstamp = ddi_get_lbolt(); 2354*1708Sstevel if (current_tstamp > iosram_tswitch_tstamp) { 2355*1708Sstevel tstamp_interval = current_tstamp - iosram_tswitch_tstamp; 2356*1708Sstevel } else { 2357*1708Sstevel tstamp_interval = 0; 2358*1708Sstevel } 2359*1708Sstevel if (drv_hztousec(tstamp_interval) < IOSRAM_TSWITCH_DELAY_US) { 2360*1708Sstevel mutex_exit(&iosram_mutex); 2361*1708Sstevel delay(drv_usectohz(IOSRAM_TSWITCH_DELAY_US) - tstamp_interval); 2362*1708Sstevel mutex_enter(&iosram_mutex); 2363*1708Sstevel } 2364*1708Sstevel 2365*1708Sstevel /* 2366*1708Sstevel * The specified instance holds the tunnel. We need to move it to some 2367*1708Sstevel * other IOSRAM. Try out all possible IOSRAMs listed in 2368*1708Sstevel * iosram_instances. For now, we always search from the first entry. 2369*1708Sstevel * In future, it may be desirable to start where we left off. 2370*1708Sstevel */ 2371*1708Sstevel for (softp = iosram_instances; softp != NULL; softp = softp->next) { 2372*1708Sstevel if (iosram_tswitch_aborted) { 2373*1708Sstevel break; 2374*1708Sstevel } 2375*1708Sstevel 2376*1708Sstevel /* we can't switch _to_ the instance we're switching _from_ */ 2377*1708Sstevel if (softp->instance == instance) { 2378*1708Sstevel continue; 2379*1708Sstevel } 2380*1708Sstevel 2381*1708Sstevel /* skip over instances being detached */ 2382*1708Sstevel if (softp->state & IOSRAM_STATE_DETACH) { 2383*1708Sstevel continue; 2384*1708Sstevel } 2385*1708Sstevel 2386*1708Sstevel /* 2387*1708Sstevel * Try to avoid reverting to the last instance we switched away 2388*1708Sstevel * from, as we expect that one to be detached eventually. Keep 2389*1708Sstevel * track of it, though, so we can go ahead and try switching to 2390*1708Sstevel * it if no other viable candidates are found. 2391*1708Sstevel */ 2392*1708Sstevel if (softp->instance == last_master_instance) { 2393*1708Sstevel last_master = softp; 2394*1708Sstevel continue; 2395*1708Sstevel } 2396*1708Sstevel 2397*1708Sstevel /* 2398*1708Sstevel * Do the tunnel switch. If successful, record the instance of 2399*1708Sstevel * the master we just left behind so we can try to avoid 2400*1708Sstevel * reverting to it next time. 2401*1708Sstevel */ 2402*1708Sstevel if (iosram_switch_tunnel(softp) == 0) { 2403*1708Sstevel last_master_instance = instance; 2404*1708Sstevel break; 2405*1708Sstevel } 2406*1708Sstevel } 2407*1708Sstevel 2408*1708Sstevel /* 2409*1708Sstevel * If we failed to switch the tunnel, but we skipped over an instance 2410*1708Sstevel * that had previously been switched out of because we expected it to be 2411*1708Sstevel * detached, go ahead and try it anyway (unless the tswitch was aborted 2412*1708Sstevel * or the instance we skipped is finally being detached). 2413*1708Sstevel */ 2414*1708Sstevel if ((softp == NULL) && (last_master != NULL) && 2415*1708Sstevel !iosram_tswitch_aborted && 2416*1708Sstevel !(last_master->state & IOSRAM_STATE_DETACH)) { 2417*1708Sstevel if (iosram_switch_tunnel(last_master) == 0) { 2418*1708Sstevel softp = last_master; 2419*1708Sstevel last_master_instance = instance; 2420*1708Sstevel } 2421*1708Sstevel } 2422*1708Sstevel 2423*1708Sstevel if ((softp == NULL) || (iosram_tswitch_aborted)) { 2424*1708Sstevel error = EIO; 2425*1708Sstevel } 2426*1708Sstevel 2427*1708Sstevel /* 2428*1708Sstevel * If there are additional tunnel switches queued up waiting for this 2429*1708Sstevel * one to complete, wake them up. 
2430*1708Sstevel 	 */
2431*1708Sstevel 	if (iosram_tswitch_wakeup) {
2432*1708Sstevel 		iosram_tswitch_wakeup = 0;
2433*1708Sstevel 		cv_broadcast(&iosram_tswitch_wait);
2434*1708Sstevel 	}
2435*1708Sstevel 	iosram_tswitch_active = 0;
2436*1708Sstevel 	mutex_exit(&iosram_mutex);
2437*1708Sstevel 	return (error);
2438*1708Sstevel }
2439*1708Sstevel 
2440*1708Sstevel 
2441*1708Sstevel /*
2442*1708Sstevel  * iosram_tunnel_capable(softp)
2443*1708Sstevel  * Check if this IOSRAM instance is tunnel-capable by looking at the
2444*1708Sstevel  * "tunnel-capable" property.
2445*1708Sstevel  */
2446*1708Sstevel static int
2447*1708Sstevel iosram_tunnel_capable(struct iosramsoft *softp)
2448*1708Sstevel {
2449*1708Sstevel 	int proplen;
2450*1708Sstevel 	int tunnel_capable;
2451*1708Sstevel 
2452*1708Sstevel 	/*
2453*1708Sstevel 	 * Look up IOSRAM_TUNNELOK_PROP property, if any.
2454*1708Sstevel 	 */
2455*1708Sstevel 	proplen = sizeof (tunnel_capable);
2456*1708Sstevel 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, softp->dip,
2457*1708Sstevel 	    DDI_PROP_DONTPASS, IOSRAM_TUNNELOK_PROP, (caddr_t)&tunnel_capable,
2458*1708Sstevel 	    &proplen) != DDI_PROP_SUCCESS) {
2459*1708Sstevel 		tunnel_capable = 0;
2460*1708Sstevel 	}
2461*1708Sstevel 	return (tunnel_capable);
2462*1708Sstevel }
2463*1708Sstevel 
2464*1708Sstevel 
2465*1708Sstevel static int
2466*1708Sstevel iosram_sbbc_setup_map(struct iosramsoft *softp)
2467*1708Sstevel {
2468*1708Sstevel 	int rv;
2469*1708Sstevel 	struct ddi_device_acc_attr attr;
2470*1708Sstevel 	dev_info_t *dip = softp->dip;
2471*1708Sstevel 	uint32_t sema_val;
2472*1708Sstevel 
2473*1708Sstevel 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2474*1708Sstevel 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2475*1708Sstevel 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2476*1708Sstevel 
2477*1708Sstevel 	mutex_enter(&iosram_mutex);
2478*1708Sstevel 	mutex_enter(&softp->intr_mutex);
2479*1708Sstevel 
2480*1708Sstevel 	/*
2481*1708Sstevel 	 * Map SBBC region in
2482*1708Sstevel 	 */
2483*1708Sstevel 	if ((rv = ddi_regs_map_setup(dip, IOSRAM_SBBC_MAP_INDEX,
2484*1708Sstevel 	    (caddr_t *)&softp->sbbc_region,
2485*1708Sstevel 	    IOSRAM_SBBC_MAP_OFFSET, sizeof (iosram_sbbc_region_t),
2486*1708Sstevel 	    &attr, &softp->sbbc_handle)) != DDI_SUCCESS) {
2487*1708Sstevel 		DPRINTF(1, ("Failed to map SBBC region.\n"));
2488*1708Sstevel 		mutex_exit(&softp->intr_mutex);
2489*1708Sstevel 		mutex_exit(&iosram_mutex);
2490*1708Sstevel 		return (rv);
2491*1708Sstevel 	}
2492*1708Sstevel 
2493*1708Sstevel 	/*
2494*1708Sstevel 	 * Disable SBBC interrupts. SBBC interrupts are enabled
2495*1708Sstevel 	 * once the interrupt handler is registered.
2496*1708Sstevel 	 */
2497*1708Sstevel 	ddi_put32(softp->sbbc_handle,
2498*1708Sstevel 	    &(softp->sbbc_region->int_enable.reg), 0x0);
2499*1708Sstevel 
2500*1708Sstevel 	/*
2501*1708Sstevel 	 * Clear hardware semaphore value if appropriate.
2502*1708Sstevel 	 * When the first SBBC is mapped in by the IOSRAM driver,
2503*1708Sstevel 	 * the value of the semaphore should be initialized only
2504*1708Sstevel 	 * if it is not held by SMS. For subsequent SBBCs, the
2505*1708Sstevel 	 * semaphore will always be initialized.
2506*1708Sstevel */ 2507*1708Sstevel sema_val = IOSRAM_SEMA_RD(softp); 2508*1708Sstevel 2509*1708Sstevel if (!iosram_master) { 2510*1708Sstevel /* the first SBBC is being mapped in */ 2511*1708Sstevel if (!(IOSRAM_SEMA_IS_HELD(sema_val) && 2512*1708Sstevel IOSRAM_SEMA_GET_IDX(sema_val) == IOSRAM_SEMA_SMS_IDX)) { 2513*1708Sstevel /* not held by SMS, we clear the semaphore */ 2514*1708Sstevel IOSRAM_SEMA_WR(softp, 0); 2515*1708Sstevel } 2516*1708Sstevel } else { 2517*1708Sstevel /* not the first SBBC, we clear the semaphore */ 2518*1708Sstevel IOSRAM_SEMA_WR(softp, 0); 2519*1708Sstevel } 2520*1708Sstevel 2521*1708Sstevel mutex_exit(&softp->intr_mutex); 2522*1708Sstevel mutex_exit(&iosram_mutex); 2523*1708Sstevel return (0); 2524*1708Sstevel } 2525*1708Sstevel 2526*1708Sstevel 2527*1708Sstevel static int 2528*1708Sstevel iosram_setup_map(struct iosramsoft *softp) 2529*1708Sstevel { 2530*1708Sstevel int instance = softp->instance; 2531*1708Sstevel dev_info_t *dip = softp->dip; 2532*1708Sstevel int portid; 2533*1708Sstevel int proplen; 2534*1708Sstevel caddr_t propvalue; 2535*1708Sstevel struct ddi_device_acc_attr attr; 2536*1708Sstevel 2537*1708Sstevel attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 2538*1708Sstevel attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 2539*1708Sstevel attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; 2540*1708Sstevel 2541*1708Sstevel /* 2542*1708Sstevel * Lookup IOSRAM_REG_PROP property to find out our IOSRAM length 2543*1708Sstevel */ 2544*1708Sstevel if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 2545*1708Sstevel DDI_PROP_DONTPASS, IOSRAM_REG_PROP, (caddr_t)&propvalue, 2546*1708Sstevel &proplen) != DDI_PROP_SUCCESS) { 2547*1708Sstevel cmn_err(CE_WARN, "iosram(%d): can't find register property.\n", 2548*1708Sstevel instance); 2549*1708Sstevel return (DDI_FAILURE); 2550*1708Sstevel } else { 2551*1708Sstevel iosram_reg_t *regprop = (iosram_reg_t *)propvalue; 2552*1708Sstevel 2553*1708Sstevel DPRINTF(1, ("SetupMap(%d): Got reg prop: %x %x %x\n", 2554*1708Sstevel instance, regprop->addr_hi, 2555*1708Sstevel regprop->addr_lo, regprop->size)); 2556*1708Sstevel 2557*1708Sstevel softp->iosramlen = regprop->size; 2558*1708Sstevel 2559*1708Sstevel kmem_free(propvalue, proplen); 2560*1708Sstevel } 2561*1708Sstevel DPRINTF(1, ("SetupMap(%d): IOSRAM length: 0x%x\n", instance, 2562*1708Sstevel softp->iosramlen)); 2563*1708Sstevel softp->handle = NULL; 2564*1708Sstevel 2565*1708Sstevel /* 2566*1708Sstevel * To minimize boot time, we map the entire IOSRAM as opposed to 2567*1708Sstevel * mapping individual chunk via ddi_regs_map_setup() call. 
2568*1708Sstevel */ 2569*1708Sstevel if (ddi_regs_map_setup(dip, 0, (caddr_t *)&softp->iosramp, 2570*1708Sstevel 0x0, softp->iosramlen, &attr, &softp->handle) != DDI_SUCCESS) { 2571*1708Sstevel cmn_err(CE_WARN, "iosram(%d): failed to map IOSRAM len:%x\n", 2572*1708Sstevel instance, softp->iosramlen); 2573*1708Sstevel iosram_remove_map(softp); 2574*1708Sstevel return (DDI_FAILURE); 2575*1708Sstevel } 2576*1708Sstevel 2577*1708Sstevel /* 2578*1708Sstevel * Lookup PORTID property on my parent hierarchy 2579*1708Sstevel */ 2580*1708Sstevel proplen = sizeof (portid); 2581*1708Sstevel if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 2582*1708Sstevel 0, IOSRAM_PORTID_PROP, (caddr_t)&portid, 2583*1708Sstevel &proplen) != DDI_PROP_SUCCESS) { 2584*1708Sstevel cmn_err(CE_WARN, "iosram(%d): can't find portid property.\n", 2585*1708Sstevel instance); 2586*1708Sstevel iosram_remove_map(softp); 2587*1708Sstevel return (DDI_FAILURE); 2588*1708Sstevel } 2589*1708Sstevel softp->portid = portid; 2590*1708Sstevel 2591*1708Sstevel if (iosram_sbbc_setup_map(softp) != DDI_SUCCESS) { 2592*1708Sstevel cmn_err(CE_WARN, "iosram(%d): can't map SBBC region.\n", 2593*1708Sstevel instance); 2594*1708Sstevel iosram_remove_map(softp); 2595*1708Sstevel return (DDI_FAILURE); 2596*1708Sstevel } 2597*1708Sstevel 2598*1708Sstevel mutex_enter(&iosram_mutex); 2599*1708Sstevel softp->state |= IOSRAM_STATE_MAPPED; 2600*1708Sstevel mutex_exit(&iosram_mutex); 2601*1708Sstevel 2602*1708Sstevel return (DDI_SUCCESS); 2603*1708Sstevel } 2604*1708Sstevel 2605*1708Sstevel 2606*1708Sstevel static void 2607*1708Sstevel iosram_remove_map(struct iosramsoft *softp) 2608*1708Sstevel { 2609*1708Sstevel mutex_enter(&iosram_mutex); 2610*1708Sstevel 2611*1708Sstevel ASSERT((softp->state & IOSRAM_STATE_MASTER) == 0); 2612*1708Sstevel 2613*1708Sstevel if (softp->handle) { 2614*1708Sstevel ddi_regs_map_free(&softp->handle); 2615*1708Sstevel softp->handle = NULL; 2616*1708Sstevel } 2617*1708Sstevel softp->iosramp = NULL; 2618*1708Sstevel 2619*1708Sstevel /* 2620*1708Sstevel * Umap SBBC registers region. Shared with handler for SBBC 2621*1708Sstevel * interrupts, take intr_mutex. 2622*1708Sstevel */ 2623*1708Sstevel mutex_enter(&softp->intr_mutex); 2624*1708Sstevel if (softp->sbbc_region) { 2625*1708Sstevel ddi_regs_map_free(&softp->sbbc_handle); 2626*1708Sstevel softp->sbbc_region = NULL; 2627*1708Sstevel } 2628*1708Sstevel mutex_exit(&softp->intr_mutex); 2629*1708Sstevel 2630*1708Sstevel softp->state &= ~IOSRAM_STATE_MAPPED; 2631*1708Sstevel 2632*1708Sstevel mutex_exit(&iosram_mutex); 2633*1708Sstevel } 2634*1708Sstevel 2635*1708Sstevel 2636*1708Sstevel /* 2637*1708Sstevel * iosram_is_chosen(struct iosramsoft *softp) 2638*1708Sstevel * 2639*1708Sstevel * Looks up "chosen" node property to 2640*1708Sstevel * determine if it is the chosen IOSRAM. 2641*1708Sstevel */ 2642*1708Sstevel static int 2643*1708Sstevel iosram_is_chosen(struct iosramsoft *softp) 2644*1708Sstevel { 2645*1708Sstevel char chosen_iosram[MAXNAMELEN]; 2646*1708Sstevel char pn[MAXNAMELEN]; 2647*1708Sstevel int nodeid; 2648*1708Sstevel int chosen; 2649*1708Sstevel pnode_t dnode; 2650*1708Sstevel 2651*1708Sstevel /* 2652*1708Sstevel * Get /chosen node info. prom interface will handle errors. 
2653*1708Sstevel */ 2654*1708Sstevel dnode = prom_chosennode(); 2655*1708Sstevel 2656*1708Sstevel /* 2657*1708Sstevel * Look for the "iosram" property on the chosen node with a prom 2658*1708Sstevel * interface as ddi_find_devinfo() couldn't be used (calls 2659*1708Sstevel * ddi_walk_devs() that creates one extra lock on the device tree). 2660*1708Sstevel */ 2661*1708Sstevel if (prom_getprop(dnode, IOSRAM_CHOSEN_PROP, (caddr_t)&nodeid) <= 0) { 2662*1708Sstevel /* 2663*1708Sstevel * Can't find IOSRAM_CHOSEN_PROP property under chosen node 2664*1708Sstevel */ 2665*1708Sstevel cmn_err(CE_WARN, 2666*1708Sstevel "iosram(%d): can't find chosen iosram property\n", 2667*1708Sstevel softp->instance); 2668*1708Sstevel return (0); 2669*1708Sstevel } 2670*1708Sstevel 2671*1708Sstevel DPRINTF(1, ("iosram(%d): Got '%x' for chosen '%s' property\n", 2672*1708Sstevel softp->instance, nodeid, IOSRAM_CHOSEN_PROP)); 2673*1708Sstevel 2674*1708Sstevel /* 2675*1708Sstevel * get the full OBP pathname of this node 2676*1708Sstevel */ 2677*1708Sstevel if (prom_phandle_to_path((phandle_t)nodeid, chosen_iosram, 2678*1708Sstevel sizeof (chosen_iosram)) < 0) { 2679*1708Sstevel cmn_err(CE_NOTE, "prom_phandle_to_path(%x) failed\n", nodeid); 2680*1708Sstevel return (0); 2681*1708Sstevel } 2682*1708Sstevel DPRINTF(1, ("iosram(%d): prom_phandle_to_path(%x) is '%s'\n", 2683*1708Sstevel softp->instance, nodeid, chosen_iosram)); 2684*1708Sstevel 2685*1708Sstevel (void) ddi_pathname(softp->dip, pn); 2686*1708Sstevel DPRINTF(1, ("iosram(%d): ddi_pathname(%p) is '%s'\n", 2687*1708Sstevel softp->instance, softp->dip, pn)); 2688*1708Sstevel 2689*1708Sstevel chosen = (strcmp(chosen_iosram, pn) == 0) ? 1 : 0; 2690*1708Sstevel DPRINTF(1, ("iosram(%d): ... %s\n", softp->instance, 2691*1708Sstevel chosen ? "MASTER" : "SLAVE")); 2692*1708Sstevel IOSRAMLOG(1, "iosram(%d): ... %s\n", softp->instance, 2693*1708Sstevel (chosen ? "MASTER" : "SLAVE"), NULL, NULL); 2694*1708Sstevel 2695*1708Sstevel return (chosen); 2696*1708Sstevel } 2697*1708Sstevel 2698*1708Sstevel 2699*1708Sstevel /* 2700*1708Sstevel * iosram_set_master(struct iosramsoft *softp) 2701*1708Sstevel * 2702*1708Sstevel * Set master tunnel to the specified IOSRAM 2703*1708Sstevel * Must be called while holding iosram_mutex. 
2704*1708Sstevel */ 2705*1708Sstevel static void 2706*1708Sstevel iosram_set_master(struct iosramsoft *softp) 2707*1708Sstevel { 2708*1708Sstevel ASSERT(mutex_owned(&iosram_mutex)); 2709*1708Sstevel ASSERT(softp != NULL); 2710*1708Sstevel ASSERT(softp->state & IOSRAM_STATE_MAPPED); 2711*1708Sstevel ASSERT(IOSRAM_GET_HDRFIELD32(softp, status) == IOSRAM_VALID); 2712*1708Sstevel 2713*1708Sstevel /* 2714*1708Sstevel * Clear MASTER flag on any previous IOSRAM master, if any 2715*1708Sstevel */ 2716*1708Sstevel if (iosram_master && (iosram_master != softp)) { 2717*1708Sstevel iosram_master->state &= ~IOSRAM_STATE_MASTER; 2718*1708Sstevel } 2719*1708Sstevel 2720*1708Sstevel /* 2721*1708Sstevel * Setup new IOSRAM master 2722*1708Sstevel */ 2723*1708Sstevel iosram_update_addrs(softp); 2724*1708Sstevel iosram_handle = softp->handle; 2725*1708Sstevel softp->state |= IOSRAM_STATE_MASTER; 2726*1708Sstevel softp->tswitch_ok++; 2727*1708Sstevel iosram_master = softp; 2728*1708Sstevel 2729*1708Sstevel IOSRAMLOG(1, "SETMASTER: softp:%p instance:%d\n", softp, 2730*1708Sstevel softp->instance, NULL, NULL); 2731*1708Sstevel } 2732*1708Sstevel 2733*1708Sstevel 2734*1708Sstevel /* 2735*1708Sstevel * iosram_read_toc() 2736*1708Sstevel * 2737*1708Sstevel * Read the TOC from an IOSRAM instance that has been mapped in. 2738*1708Sstevel * If the TOC is flawed or the IOSRAM isn't valid, return an error. 2739*1708Sstevel */ 2740*1708Sstevel static int 2741*1708Sstevel iosram_read_toc(struct iosramsoft *softp) 2742*1708Sstevel { 2743*1708Sstevel int i; 2744*1708Sstevel int instance = softp->instance; 2745*1708Sstevel uint8_t *toc_entryp; 2746*1708Sstevel iosram_flags_t *flagsp = NULL; 2747*1708Sstevel int new_nchunks; 2748*1708Sstevel iosram_chunk_t *new_chunks; 2749*1708Sstevel iosram_chunk_t *chunkp; 2750*1708Sstevel iosram_chunk_t *old_chunkp; 2751*1708Sstevel iosram_toc_entry_t index; 2752*1708Sstevel 2753*1708Sstevel /* 2754*1708Sstevel * Never try to read the TOC out of an unmapped IOSRAM. 2755*1708Sstevel */ 2756*1708Sstevel ASSERT(softp->state & IOSRAM_STATE_MAPPED); 2757*1708Sstevel 2758*1708Sstevel mutex_enter(&iosram_mutex); 2759*1708Sstevel 2760*1708Sstevel /* 2761*1708Sstevel * Check to make sure this IOSRAM is marked valid. Return 2762*1708Sstevel * an error if it isn't. 2763*1708Sstevel */ 2764*1708Sstevel if (IOSRAM_GET_HDRFIELD32(softp, status) != IOSRAM_VALID) { 2765*1708Sstevel DPRINTF(1, ("iosram_read_toc(%d): IOSRAM not flagged valid\n", 2766*1708Sstevel instance)); 2767*1708Sstevel mutex_exit(&iosram_mutex); 2768*1708Sstevel return (EINVAL); 2769*1708Sstevel } 2770*1708Sstevel 2771*1708Sstevel /* 2772*1708Sstevel * Get the location of the TOC. 2773*1708Sstevel */ 2774*1708Sstevel toc_entryp = softp->iosramp + IOSRAM_GET_HDRFIELD32(softp, toc_offset); 2775*1708Sstevel 2776*1708Sstevel /* 2777*1708Sstevel * Read the index entry from the TOC and make sure it looks correct. 
2778*1708Sstevel */ 2779*1708Sstevel ddi_rep_get8(softp->handle, (uint8_t *)&index, toc_entryp, 2780*1708Sstevel sizeof (iosram_toc_entry_t), DDI_DEV_AUTOINCR); 2781*1708Sstevel if ((index.key != IOSRAM_INDEX_KEY) || 2782*1708Sstevel (index.off != IOSRAM_INDEX_OFF)) { 2783*1708Sstevel cmn_err(CE_WARN, "iosram(%d): invalid TOC index.\n", instance); 2784*1708Sstevel mutex_exit(&iosram_mutex); 2785*1708Sstevel return (EINVAL); 2786*1708Sstevel } 2787*1708Sstevel 2788*1708Sstevel /* 2789*1708Sstevel * Allocate storage for the new chunks array and initialize it with data 2790*1708Sstevel * from the TOC and callback data from the corresponding old chunk, if 2791*1708Sstevel * it exists. 2792*1708Sstevel */ 2793*1708Sstevel new_nchunks = index.len - 1; 2794*1708Sstevel new_chunks = (iosram_chunk_t *)kmem_zalloc(new_nchunks * 2795*1708Sstevel sizeof (iosram_chunk_t), KM_SLEEP); 2796*1708Sstevel for (i = 0, chunkp = new_chunks; i < new_nchunks; i++, chunkp++) { 2797*1708Sstevel toc_entryp += sizeof (iosram_toc_entry_t); 2798*1708Sstevel ddi_rep_get8(softp->handle, (uint8_t *)&(chunkp->toc_data), 2799*1708Sstevel toc_entryp, sizeof (iosram_toc_entry_t), DDI_DEV_AUTOINCR); 2800*1708Sstevel chunkp->hash = NULL; 2801*1708Sstevel if ((chunkp->toc_data.off < softp->iosramlen) && 2802*1708Sstevel (chunkp->toc_data.len <= softp->iosramlen) && 2803*1708Sstevel ((chunkp->toc_data.off + chunkp->toc_data.len) <= 2804*1708Sstevel softp->iosramlen)) { 2805*1708Sstevel chunkp->basep = softp->iosramp + chunkp->toc_data.off; 2806*1708Sstevel DPRINTF(1, 2807*1708Sstevel ("iosram_read_toc(%d): k:%x o:%x l:%x p:%x\n", 2808*1708Sstevel instance, chunkp->toc_data.key, 2809*1708Sstevel chunkp->toc_data.off, chunkp->toc_data.len, 2810*1708Sstevel chunkp->basep)); 2811*1708Sstevel } else { 2812*1708Sstevel cmn_err(CE_WARN, "iosram(%d): TOC entry %d" 2813*1708Sstevel "out of range... off:%x len:%x\n", 2814*1708Sstevel instance, i + 1, chunkp->toc_data.off, 2815*1708Sstevel chunkp->toc_data.len); 2816*1708Sstevel kmem_free(new_chunks, new_nchunks * 2817*1708Sstevel sizeof (iosram_chunk_t)); 2818*1708Sstevel mutex_exit(&iosram_mutex); 2819*1708Sstevel return (EINVAL); 2820*1708Sstevel } 2821*1708Sstevel 2822*1708Sstevel /* 2823*1708Sstevel * Note the existence of the flags chunk, which is required in 2824*1708Sstevel * a correct TOC. 2825*1708Sstevel */ 2826*1708Sstevel if (chunkp->toc_data.key == IOSRAM_FLAGS_KEY) { 2827*1708Sstevel flagsp = (iosram_flags_t *)chunkp->basep; 2828*1708Sstevel } 2829*1708Sstevel 2830*1708Sstevel /* 2831*1708Sstevel * If there was an entry for this chunk in the old list, copy 2832*1708Sstevel * the callback data from old to new storage. 2833*1708Sstevel */ 2834*1708Sstevel if ((nchunks > 0) && 2835*1708Sstevel ((old_chunkp = iosram_find_chunk(chunkp->toc_data.key)) != 2836*1708Sstevel NULL)) { 2837*1708Sstevel bcopy(&(old_chunkp->cback), &(chunkp->cback), 2838*1708Sstevel sizeof (iosram_cback_t)); 2839*1708Sstevel } 2840*1708Sstevel } 2841*1708Sstevel /* 2842*1708Sstevel * The TOC is malformed if there is no entry for the flags chunk. 2843*1708Sstevel */ 2844*1708Sstevel if (flagsp == NULL) { 2845*1708Sstevel kmem_free(new_chunks, new_nchunks * sizeof (iosram_chunk_t)); 2846*1708Sstevel mutex_exit(&iosram_mutex); 2847*1708Sstevel return (EINVAL); 2848*1708Sstevel } 2849*1708Sstevel 2850*1708Sstevel /* 2851*1708Sstevel * Free any memory that is no longer needed and install the new data 2852*1708Sstevel * as current data. 
2853*1708Sstevel */ 2854*1708Sstevel if (chunks != NULL) { 2855*1708Sstevel kmem_free(chunks, nchunks * sizeof (iosram_chunk_t)); 2856*1708Sstevel } 2857*1708Sstevel chunks = new_chunks; 2858*1708Sstevel nchunks = new_nchunks; 2859*1708Sstevel iosram_init_hashtab(); 2860*1708Sstevel 2861*1708Sstevel mutex_exit(&iosram_mutex); 2862*1708Sstevel return (0); 2863*1708Sstevel } 2864*1708Sstevel 2865*1708Sstevel 2866*1708Sstevel /* 2867*1708Sstevel * iosram_init_hashtab() 2868*1708Sstevel * 2869*1708Sstevel * Initialize the hash table and populate it with the IOSRAM 2870*1708Sstevel * chunks previously read from the TOC. The caller must hold the 2871*1708Sstevel * ioram_mutex lock. 2872*1708Sstevel */ 2873*1708Sstevel static void 2874*1708Sstevel iosram_init_hashtab(void) 2875*1708Sstevel { 2876*1708Sstevel int i, bucket; 2877*1708Sstevel iosram_chunk_t *chunkp; 2878*1708Sstevel 2879*1708Sstevel ASSERT(mutex_owned(&iosram_mutex)); 2880*1708Sstevel 2881*1708Sstevel for (i = 0; i < IOSRAM_HASHSZ; i++) { 2882*1708Sstevel iosram_hashtab[i] = NULL; 2883*1708Sstevel } 2884*1708Sstevel 2885*1708Sstevel if (chunks) { 2886*1708Sstevel for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) { 2887*1708Sstevel /* 2888*1708Sstevel * Hide the flags chunk by leaving it out of the hash 2889*1708Sstevel * table. 2890*1708Sstevel */ 2891*1708Sstevel if (chunkp->toc_data.key == IOSRAM_FLAGS_KEY) { 2892*1708Sstevel continue; 2893*1708Sstevel } 2894*1708Sstevel 2895*1708Sstevel /* 2896*1708Sstevel * Add the current chunk to the hash table. 2897*1708Sstevel */ 2898*1708Sstevel bucket = IOSRAM_HASH(chunkp->toc_data.key); 2899*1708Sstevel chunkp->hash = iosram_hashtab[bucket]; 2900*1708Sstevel iosram_hashtab[bucket] = chunkp; 2901*1708Sstevel } 2902*1708Sstevel } 2903*1708Sstevel } 2904*1708Sstevel 2905*1708Sstevel 2906*1708Sstevel /* 2907*1708Sstevel * iosram_update_addrs() 2908*1708Sstevel * 2909*1708Sstevel * Process the chunk list, updating each chunk's basep, which is a pointer 2910*1708Sstevel * to the beginning of the chunk's memory in kvaddr space. Record the 2911*1708Sstevel * basep value of the flags chunk to speed up flag access. The caller 2912*1708Sstevel * must hold the iosram_mutex lock. 2913*1708Sstevel */ 2914*1708Sstevel static void 2915*1708Sstevel iosram_update_addrs(struct iosramsoft *softp) 2916*1708Sstevel { 2917*1708Sstevel int i; 2918*1708Sstevel iosram_flags_t *flagsp; 2919*1708Sstevel iosram_chunk_t *chunkp; 2920*1708Sstevel 2921*1708Sstevel ASSERT(mutex_owned(&iosram_mutex)); 2922*1708Sstevel 2923*1708Sstevel /* 2924*1708Sstevel * First go through all of the chunks updating their base pointers and 2925*1708Sstevel * looking for the flags chunk. 2926*1708Sstevel */ 2927*1708Sstevel for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) { 2928*1708Sstevel chunkp->basep = softp->iosramp + chunkp->toc_data.off; 2929*1708Sstevel if (chunkp->toc_data.key == IOSRAM_FLAGS_KEY) { 2930*1708Sstevel flagsp = (iosram_flags_t *)(chunkp->basep); 2931*1708Sstevel DPRINTF(1, 2932*1708Sstevel ("iosram_update_addrs flags: o:0x%08x p:%p", 2933*1708Sstevel chunkp->toc_data.off, flagsp)); 2934*1708Sstevel } 2935*1708Sstevel } 2936*1708Sstevel 2937*1708Sstevel /* 2938*1708Sstevel * Now, go through and update each chunk's flags pointer. This can't be 2939*1708Sstevel * done in the first loop because we don't have the address of the flags 2940*1708Sstevel * chunk yet. 
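 *
 * (Layout note, implied by the assignment below: the flags chunk is treated
 * as a contiguous array of iosram_flags_t entries, one entry for each chunk
 * in the chunk list and in the same order, so handing out successive entries
 * via flagsp++ pairs each chunk with its own flags structure.)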

/*
 * iosram_find_chunk(key)
 *
 * Return a pointer to iosram_chunk structure corresponding to the
 * "key" IOSRAM chunk.  The caller must hold the iosram_mutex lock.
 */
static iosram_chunk_t *
iosram_find_chunk(uint32_t key)
{
	iosram_chunk_t	*chunkp;
	int		index = IOSRAM_HASH(key);

	ASSERT(mutex_owned(&iosram_mutex));

	for (chunkp = iosram_hashtab[index]; chunkp; chunkp = chunkp->hash) {
		if (chunkp->toc_data.key == key) {
			break;
		}
	}

	return (chunkp);
}


/*
 * iosram_add_intr(iosramsoft_t *)
 */
static int
iosram_add_intr(iosramsoft_t *softp)
{
	IOSRAMLOG(2, "ADDINTR: softp:%p instance:%d\n",
	    softp, softp->instance, NULL, NULL);

	if (ddi_add_softintr(softp->dip, DDI_SOFTINT_MED,
	    &softp->softintr_id, &softp->soft_iblk, NULL,
	    iosram_softintr, (caddr_t)softp) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "iosram(%d): Can't register softintr.\n",
		    softp->instance);
		return (DDI_FAILURE);
	}

	if (ddi_add_intr(softp->dip, 0, &softp->real_iblk, NULL,
	    iosram_intr, (caddr_t)softp) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "iosram(%d): Can't register intr"
		    " handler.\n", softp->instance);
		ddi_remove_softintr(softp->softintr_id);
		return (DDI_FAILURE);
	}

	/*
	 * Enable SBBC interrupts
	 */
	ddi_put32(softp->sbbc_handle, &(softp->sbbc_region->int_enable.reg),
	    IOSRAM_SBBC_INT0|IOSRAM_SBBC_INT1);

	return (DDI_SUCCESS);
}


/*
 * iosram_remove_intr(iosramsoft_t *)
 */
static int
iosram_remove_intr(iosramsoft_t *softp)
{
	IOSRAMLOG(2, "REMINTR: softp:%p instance:%d\n",
	    softp, softp->instance, NULL, NULL);

	/*
	 * Disable SBBC interrupts if SBBC is mapped in
	 */
	if (softp->sbbc_region) {
		ddi_put32(softp->sbbc_handle,
		    &(softp->sbbc_region->int_enable.reg), 0);
	}

	/*
	 * Remove SBBC interrupt handler
	 */
	ddi_remove_intr(softp->dip, 0, softp->real_iblk);

	/*
	 * Remove soft interrupt handler
	 */
	mutex_enter(&iosram_mutex);
	if (softp->softintr_id != NULL) {
		ddi_remove_softintr(softp->softintr_id);
		softp->softintr_id = NULL;
	}
	mutex_exit(&iosram_mutex);

	return (0);
}


/*
 * iosram_add_instance(iosramsoft_t *)
 * Must be called while holding iosram_mutex
 */
static void
iosram_add_instance(iosramsoft_t *new_softp)
{
#ifdef DEBUG
	int		instance = new_softp->instance;
	iosramsoft_t	*softp;
#endif

	ASSERT(mutex_owned(&iosram_mutex));

#if defined(DEBUG)
	/* Verify that this instance is not in the list */
	for (softp = iosram_instances; softp != NULL; softp = softp->next) {
		ASSERT(softp->instance != instance);
	}
#endif

	/*
	 * Add this instance to the list
	 */
	if (iosram_instances != NULL) {
		iosram_instances->prev = new_softp;
	}
	new_softp->next = iosram_instances;
	new_softp->prev = NULL;
	iosram_instances = new_softp;
}


/*
 * iosram_remove_instance(int instance)
 * Must be called while holding iosram_mutex
 */
static void
iosram_remove_instance(int instance)
{
	iosramsoft_t	*softp;

	/*
	 * Remove the specified instance from the iosram_instances list so
	 * that it can't be chosen as the tunnel in the future.
	 */
	ASSERT(mutex_owned(&iosram_mutex));

	for (softp = iosram_instances; softp != NULL; softp = softp->next) {
		if (softp->instance == instance) {
			if (softp->next != NULL) {
				softp->next->prev = softp->prev;
			}
			if (softp->prev != NULL) {
				softp->prev->next = softp->next;
			}
			if (iosram_instances == softp) {
				iosram_instances = softp->next;
			}

			return;
		}
	}
}


/*
 * iosram_sema_acquire: Acquire hardware semaphore.
 * Return 0 if the semaphore could be acquired, or one of the following
 * possible values:
 *	EAGAIN: there is a tunnel switch in progress
 *	EBUSY: the semaphore was already "held"
 *	ENXIO: an IO error occurred (e.g. SBBC not mapped)
 * If old_value is not NULL, the location it points to will be updated
 * with the semaphore value read when attempting to acquire it.
 */
int
iosram_sema_acquire(uint32_t *old_value)
{
	struct iosramsoft	*softp;
	int			rv;
	uint32_t		sema_val;

	DPRINTF(2, ("IOSRAM: in iosram_sema_acquire\n"));

	mutex_enter(&iosram_mutex);

	/*
	 * Disallow access if there is a tunnel switch in progress.
	 */
	if (iosram_tswitch_active) {
		mutex_exit(&iosram_mutex);
		return (EAGAIN);
	}

	/*
	 * Use current master IOSRAM for operation, fail if none is
	 * currently active.
	 */
	if ((softp = iosram_master) == NULL) {
		mutex_exit(&iosram_mutex);
		DPRINTF(1, ("IOSRAM: iosram_sema_acquire: no master\n"));
		return (ENXIO);
	}

	mutex_enter(&softp->intr_mutex);

	/*
	 * Fail if SBBC region has not been mapped.  This shouldn't
	 * happen if we have a master IOSRAM, but we double-check.
	 */
	if (softp->sbbc_region == NULL) {
		mutex_exit(&softp->intr_mutex);
		mutex_exit(&iosram_mutex);
		DPRINTF(1, ("IOSRAM(%d): iosram_sema_acquire: "
		    "SBBC not mapped\n", softp->instance));
		return (ENXIO);
	}

	/* read semaphore value */
	sema_val = IOSRAM_SEMA_RD(softp);
	if (old_value != NULL)
		*old_value = sema_val;

	if (IOSRAM_SEMA_IS_HELD(sema_val)) {
		/* semaphore was held by someone else */
		rv = EBUSY;
	} else {
		/* semaphore was not held, we just acquired it */
		rv = 0;
	}

	mutex_exit(&softp->intr_mutex);
	mutex_exit(&iosram_mutex);

	DPRINTF(1, ("IOSRAM(%d): iosram_sema_acquire: "
	    "old value=0x%x rv=%d\n", softp->instance, sema_val, rv));

	return (rv);
}


/*
 * iosram_sema_release: Release hardware semaphore.
 * This function will "release" the hardware semaphore, and return 0 on
 * success.  If an error occurred, one of the following values will be
 * returned:
 *	EAGAIN: there is a tunnel switch in progress
 *	ENXIO: an IO error occurred (e.g. SBBC not mapped)
 */
int
iosram_sema_release(void)
{
	struct iosramsoft	*softp;

	DPRINTF(2, ("IOSRAM: in iosram_sema_release\n"));

	mutex_enter(&iosram_mutex);

	/*
	 * Disallow access if there is a tunnel switch in progress.
	 */
	if (iosram_tswitch_active) {
		mutex_exit(&iosram_mutex);
		return (EAGAIN);
	}

	/*
	 * Use current master IOSRAM for operation, fail if none is
	 * currently active.
	 */
	if ((softp = iosram_master) == NULL) {
		mutex_exit(&iosram_mutex);
		DPRINTF(1, ("IOSRAM: iosram_sema_release: no master\n"));
		return (ENXIO);
	}

	mutex_enter(&softp->intr_mutex);

	/*
	 * Fail if SBBC region has not been mapped in.  This shouldn't
	 * happen if we have a master IOSRAM, but we double-check.
	 */
	if (softp->sbbc_region == NULL) {
		mutex_exit(&softp->intr_mutex);
		mutex_exit(&iosram_mutex);
		DPRINTF(1, ("IOSRAM(%d): iosram_sema_release: "
		    "SBBC not mapped\n", softp->instance));
		return (ENXIO);
	}

	/* Release semaphore by clearing our semaphore register */
	IOSRAM_SEMA_WR(softp, 0);

	mutex_exit(&softp->intr_mutex);
	mutex_exit(&iosram_mutex);

	DPRINTF(1, ("IOSRAM(%d): iosram_sema_release: success\n",
	    softp->instance));

	return (0);
}
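
/*
 * The block below is a minimal usage sketch for the two semaphore routines
 * above, kept under #if 0 so it is never compiled.  The function name, the
 * retry count, and the 10ms backoff are arbitrary choices for illustration;
 * they are not part of this driver.
 */
#if 0
static int
iosram_sema_example(void)
{
	uint32_t	old_value;
	int		rv;
	int		retries = 10;

	for (;;) {
		rv = iosram_sema_acquire(&old_value);
		if (rv == 0)
			break;			/* we now hold the semaphore */
		if ((rv != EBUSY && rv != EAGAIN) || --retries == 0)
			return (rv);		/* hard failure, or gave up */
		delay(drv_usectohz(10000));	/* back off 10ms, then retry */
	}

	/* ... work protected by the hardware semaphore goes here ... */

	return (iosram_sema_release());
}
#endif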


#if defined(IOSRAM_LOG)
void
iosram_log(caddr_t fmt, intptr_t a1, intptr_t a2, intptr_t a3, intptr_t a4)
{
	uint32_t	seq;
	iosram_log_t	*logp;

	mutex_enter(&iosram_log_mutex);

	seq = iosram_logseq++;
	logp = &iosram_logbuf[seq % IOSRAM_MAXLOG];
	logp->seq = seq;
	logp->tstamp = lbolt;
	logp->fmt = fmt;
	logp->arg1 = a1;
	logp->arg2 = a2;
	logp->arg3 = a3;
	logp->arg4 = a4;

	mutex_exit(&iosram_log_mutex);

	if (iosram_log_print) {
		cmn_err(CE_CONT, "#%x @%lx ", logp->seq, logp->tstamp);
		if (logp->fmt) {
			cmn_err(CE_CONT, logp->fmt, logp->arg1, logp->arg2,
			    logp->arg3, logp->arg4);
			if (logp->fmt[strlen(logp->fmt)-1] != '\n') {
				cmn_err(CE_CONT, "\n");
			}
		} else {
			cmn_err(CE_CONT, "fmt:%p args: %lx %lx %lx %lx\n",
			    logp->fmt, logp->arg1, logp->arg2, logp->arg3,
			    logp->arg4);
		}
	}
}
#endif	/* IOSRAM_LOG */


#if defined(DEBUG)
/*
 * iosram_get_keys(buf, len)
 *	Return IOSRAM TOC in the specified buffer
 */
static int
iosram_get_keys(iosram_toc_entry_t *bufp, uint32_t *len)
{
	struct iosram_chunk	*chunkp;
	int			error = 0;
	int			i;
	int			cnt = (*len) / sizeof (iosram_toc_entry_t);

	IOSRAMLOG(2, "iosram_get_keys(bufp:%p *len:%x)\n", bufp, *len, NULL,
	    NULL);

	/*
	 * Copy data while holding the lock to prevent any data
	 * corruption or invalid pointer dereferencing.
	 */
	mutex_enter(&iosram_mutex);

	if (iosram_master == NULL) {
		error = EIO;
	} else {
		for (i = 0, chunkp = chunks; i < nchunks && i < cnt;
		    i++, chunkp++) {
			bufp[i].key = chunkp->toc_data.key;
			bufp[i].off = chunkp->toc_data.off;
			bufp[i].len = chunkp->toc_data.len;
			bufp[i].unused = chunkp->toc_data.unused;
		}
		*len = i * sizeof (iosram_toc_entry_t);
	}

	mutex_exit(&iosram_mutex);
	return (error);
}
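
/*
 * A minimal usage sketch for iosram_get_keys(), kept under #if 0 so it is
 * never compiled.  The function name, the 32-entry buffer size, and the
 * cmn_err() output format are arbitrary choices for illustration; they are
 * not part of this driver.
 */
#if 0
static void
iosram_dump_keys_example(void)
{
	iosram_toc_entry_t	buf[32];
	uint32_t		len = sizeof (buf);
	uint32_t		i;

	if (iosram_get_keys(buf, &len) != 0) {
		cmn_err(CE_CONT, "IOSRAM TOC not accessible\n");
		return;
	}

	/* len now holds the number of bytes actually filled in */
	for (i = 0; i < len / sizeof (iosram_toc_entry_t); i++) {
		cmn_err(CE_CONT, " key:0x%x off:0x%x len:0x%x\n",
		    buf[i].key, buf[i].off, buf[i].len);
	}
}
#endif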


/*
 * iosram_print_state(instance)
 */
static void
iosram_print_state(int instance)
{
	struct iosramsoft	*softp;
	char			pn[MAXNAMELEN];

	if (instance < 0) {
		softp = iosram_master;
	} else {
		softp = ddi_get_soft_state(iosramsoft_statep, instance);
	}

	if (softp == NULL) {
		cmn_err(CE_CONT, "iosram_print_state: Can't find instance %d\n",
		    instance);
		return;
	}
	instance = softp->instance;

	mutex_enter(&iosram_mutex);
	mutex_enter(&softp->intr_mutex);

	cmn_err(CE_CONT, "iosram_print_state(%d): ... %s\n", instance,
	    ((softp == iosram_master) ? "MASTER" : "SLAVE"));

	(void) ddi_pathname(softp->dip, pn);
	cmn_err(CE_CONT, " pathname:%s\n", pn);
	cmn_err(CE_CONT, " instance:%d portid:%d iosramlen:0x%x\n",
	    softp->instance, softp->portid, softp->iosramlen);
	cmn_err(CE_CONT, " softp:%p handle:%p iosramp:%p\n", softp,
	    softp->handle, softp->iosramp);
	cmn_err(CE_CONT, " state:0x%x tswitch_ok:%x tswitch_fail:%x\n",
	    softp->state, softp->tswitch_ok, softp->tswitch_fail);
	cmn_err(CE_CONT, " softintr_id:%p intr_busy:%x intr_pending:%x\n",
	    softp->softintr_id, softp->intr_busy, softp->intr_pending);

	mutex_exit(&softp->intr_mutex);
	mutex_exit(&iosram_mutex);
}


/*
 * iosram_print_stats()
 */
static void
iosram_print_stats()
{
	uint32_t	calls;

	cmn_err(CE_CONT, "iosram_stats:\n");
	calls = iosram_stats.read;
	cmn_err(CE_CONT, " read  ... calls:%x bytes:%lx avg_sz:%x\n",
	    calls, iosram_stats.bread,
	    (uint32_t)((calls != 0) ? (iosram_stats.bread/calls) : 0));

	calls = iosram_stats.write;
	cmn_err(CE_CONT, " write ... calls:%x bytes:%lx avg_sz:%x\n",
	    calls, iosram_stats.bwrite,
	    (uint32_t)((calls != 0) ? (iosram_stats.bwrite/calls) : 0));

	cmn_err(CE_CONT, " intr recv (real:%x soft:%x) sent:%x cback:%x\n",
	    iosram_stats.intr_recv, iosram_stats.sintr_recv,
	    iosram_stats.intr_send, iosram_stats.callbacks);

	cmn_err(CE_CONT, " tswitch: %x getflag:%x setflag:%x\n",
	    iosram_stats.tswitch, iosram_stats.getflag,
	    iosram_stats.setflag);

	cmn_err(CE_CONT, " iosram_rw_active_max: %x\n", iosram_rw_active_max);
}


static void
iosram_print_cback()
{
	iosram_chunk_t	*chunkp;
	int		i;

	/*
	 * Print callback handlers
	 */
	mutex_enter(&iosram_mutex);

	cmn_err(CE_CONT, "IOSRAM callbacks:\n");
	for (i = 0, chunkp = chunks; i < nchunks; i++, chunkp++) {
		if (chunkp->cback.handler) {
			cmn_err(CE_CONT, " %2d: key:0x%x hdlr:%p arg:%p "
			    "busy:%d unreg:%d\n", i, chunkp->toc_data.key,
			    chunkp->cback.handler, chunkp->cback.arg,
			    chunkp->cback.busy, chunkp->cback.unregister);
		}
	}
	mutex_exit(&iosram_mutex);
}


static void
iosram_print_flags()
{
	int		i;
	uint32_t	*keys;
	iosram_flags_t	*flags;

	mutex_enter(&iosram_mutex);

	if (iosram_master == NULL) {
		mutex_exit(&iosram_mutex);
		cmn_err(CE_CONT, "IOSRAM Flags: not accessible\n");
		return;
	}

	keys = kmem_alloc(nchunks * sizeof (uint32_t), KM_SLEEP);
	flags = kmem_alloc(nchunks * sizeof (iosram_flags_t), KM_SLEEP);

	for (i = 0; i < nchunks; i++) {
		keys[i] = chunks[i].toc_data.key;
		ddi_rep_get8(iosram_handle, (uint8_t *)&(flags[i]),
		    (uint8_t *)(chunks[i].flagsp), sizeof (iosram_flags_t),
		    DDI_DEV_AUTOINCR);
	}

	mutex_exit(&iosram_mutex);

	cmn_err(CE_CONT, "IOSRAM Flags:\n");
	for (i = 0; i < nchunks; i++) {
		cmn_err(CE_CONT,
		    " %2d: key: 0x%x data_valid:%x int_pending:%x\n",
		    i, keys[i], flags[i].data_valid, flags[i].int_pending);
	}

	kmem_free(keys, nchunks * sizeof (uint32_t));
	kmem_free(flags, nchunks * sizeof (iosram_flags_t));
}


/*PRINTFLIKE1*/
static void
iosram_dprintf(const char *fmt, ...)
{
	char	msg_buf[256];
	va_list	adx;

	va_start(adx, fmt);
	vsprintf(msg_buf, fmt, adx);
	va_end(adx);

	cmn_err(CE_CONT, "%s", msg_buf);
}
#endif	/* DEBUG */


#if IOSRAM_LOG
/*
 * iosram_print_log(int cnt)
 *	Print last few entries of the IOSRAM log in reverse order
 */
static void
iosram_print_log(int cnt)
{
	int	i;

	if (cnt <= 0) {
		cnt = 20;
	} else if (cnt > IOSRAM_MAXLOG) {
		cnt = IOSRAM_MAXLOG;
	}


	cmn_err(CE_CONT,
	    "\niosram_logseq: 0x%x lbolt: %lx iosram_log_level:%x\n",
	    iosram_logseq, lbolt, iosram_log_level);
	cmn_err(CE_CONT, "iosram_logbuf: %p max entries:0x%x\n",
	    iosram_logbuf, IOSRAM_MAXLOG);
	for (i = iosram_logseq; --i >= 0 && --cnt >= 0; ) {
		iosram_log_t	*logp;

		mutex_enter(&iosram_log_mutex);

		logp = &iosram_logbuf[i % IOSRAM_MAXLOG];
		cmn_err(CE_CONT, "#%x @%lx ", logp->seq, logp->tstamp);

		if (logp->fmt) {
			cmn_err(CE_CONT, logp->fmt, logp->arg1, logp->arg2,
			    logp->arg3, logp->arg4);
			if (logp->fmt[strlen(logp->fmt)-1] != '\n') {
				cmn_err(CE_CONT, "\n");
			}
		} else {
			cmn_err(CE_CONT, "fmt:%p args: %lx %lx %lx %lx\n",
			    logp->fmt, logp->arg1, logp->arg2,
			    logp->arg3, logp->arg4);
		}

		mutex_exit(&iosram_log_mutex);
	}
}
#endif	/* IOSRAM_LOG */