xref: /onnv-gate/usr/src/uts/sun4u/starcat/io/axq.c (revision 7656:2621e50fdf4a)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */



#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/obpdefs.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/autoconf.h>
#include <sys/modctl.h>
#include <sys/sunndi.h>

#include <sys/axq.h>
#include <sys/promif.h>
#include <sys/cpuvar.h>
#include <sys/starcat.h>
#include <sys/callb.h>

#define	REG_ADDR(b, o)	(uint32_t *)((caddr_t)(b) + (o))

/*
 * Function prototypes
 */

/* autoconfig entry point function definitions */
static int axq_attach(dev_info_t *, ddi_attach_cmd_t);
static int axq_detach(dev_info_t *, ddi_detach_cmd_t);
static int axq_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);

/* internal axq definitions */
static void axq_init(struct axq_soft_state *);
static void axq_init_local(struct axq_local_regs *);

/* axq kstats */
static void axq_add_picN_kstats(dev_info_t *dip);
static void axq_add_kstats(struct axq_soft_state *);
static int axq_counters_kstat_update(kstat_t *, int);

/*
 * Configuration data structures
 */
static struct cb_ops axq_cb_ops = {
	nulldev,			/* open */
	nulldev,			/* close */
	nulldev,			/* strategy */
	nulldev,			/* print */
	nodev,				/* dump */
	nulldev,			/* read */
	nulldev,			/* write */
	nulldev,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_MP | D_NEW,			/* Driver compatibility flag */
	CB_REV,				/* rev */
	nodev,				/* cb_aread */
	nodev				/* cb_awrite */
};

static struct dev_ops axq_ops = {
	DEVO_REV,			/* rev */
	0,				/* refcnt  */
	axq_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	axq_attach,			/* attach */
	axq_detach,			/* detach */
	nulldev,			/* reset */
	&axq_cb_ops,			/* cb_ops */
	(struct bus_ops *)0,		/* bus_ops */
	nulldev,			/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};


/*
 * AXQ globals
 */
struct axq_soft_state *axq_array[AXQ_MAX_EXP][AXQ_MAX_SLOT_PER_EXP];
krwlock_t axq_array_lock;
struct axq_local_regs axq_local;
int use_axq_iopause = 1;	/* enable AXQ iopause by default */
/*
 * If non-zero, iopause will be asserted during DDI_SUSPEND.
 * Clients using the axq_iopause_*_all interfaces should set this to zero.
 */
int axq_suspend_iopause = 1;
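
/*
 * Illustrative sketch (not part of the original source): both tunables
 * above are plain module globals, so an administrator could override
 * them at boot with /etc/system entries such as
 *
 *	set axq:use_axq_iopause = 0
 *	set axq:axq_suspend_iopause = 0
 *
 * or patch them at runtime with mdb -kw.  Exact usage depends on the
 * platform's supported tuning procedures.
 */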

/*
 * loadable module support
 */
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	"AXQ driver",	/* name of module */
	&axq_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

static void *axq_softp;

/*
 * AXQ Performance counters
 * We statically declare an array of the known
 * AXQ event names and event masks. The number
 * of events in this array is AXQ_NUM_EVENTS.
 */
static axq_event_mask_t axq_events[AXQ_NUM_EVENTS] = {
	{"count_clk", COUNT_CLK}, {"freeze_cnt", FREEZE_CNT},
	{"ha_input_fifo", HA_INPUT_FIFO}, {"ha_intr_info", HA_INTR_INFO},
	{"ha_pio_fifo", HA_PIO_FIFO}, {"ha_adr_fifo_lk3", HA_ADR_FIFO_LK3},
	{"ha_adr_fifo_lk2", HA_ADR_FIFO_LK2},
	{"ha_adr_fifo_lk1", HA_ADR_FIFO_LK1},
	{"ha_adr_fifo_lk0", HA_ADR_FIFO_LK0},
	{"ha_dump_q", HA_DUMP_Q},
	{"ha_rd_f_stb_q", HA_RD_F_STB_Q},
	{"ha_dp_wr_q", HA_DP_WR_Q},
	{"ha_int_q", HA_INT_Q},
	{"ha_wrb_q", HA_WRB_Q},
	{"ha_wr_mp_q", HA_WR_MP_Q},
	{"ha_wrtag_q", HA_WRTAG_Q},
	{"ha_wt_wait_fifo", HA_WT_WAIT_FIFO},
	{"ha_wrb_stb_fifo", HA_WRB_STB_FIFO},
	{"ha_ap0_q", HA_AP0_Q},
	{"ha_ap1_q", HA_AP1_Q},
	{"ha_new_wr_q", HA_NEW_WR_Q},
	{"ha_dp_rd_q", HA_DP_RD_Q},
	{"ha_unlock_q", HA_UNLOCK_Q},
	{"ha_cdc_upd_q", HA_CDC_UPD_Q},
	{"ha_ds_q", HA_DS_Q},
	{"ha_unlk_wait_q", HA_UNLK_WAIT_Q},
	{"ha_rd_mp_q", HA_RD_MP_Q},
	{"l2_io_q", L2_IO_Q},
	{"l2_sb_q", L2_SB_Q},
	{"l2_ra_q", L2_RA_Q},
	{"l2_ha_q", L2_HA_Q},
	{"l2_sa_q", L2_SA_Q},
	{"ra_wait_fifo", RA_WAIT_FIFO},
	{"ra_wrb_inv_fifo", RA_WRB_INV_FIFO},
	{"ra_wrb_fifo", RA_WRB_FIFO},
	{"ra_cc_ptr_fifo", RA_CC_PTR_FIFO},
	{"ra_io_ptr_fifo", RA_IO_PTR_FIFO},
	{"ra_int_ptr_fifo", RA_INT_PTR_FIFO},
	{"ra_rp_q", RA_RP_Q},
	{"ra_wrb_rp_q", RA_WRB_RP_Q},
	{"ra_dp_q", RA_DP_Q},
	{"ra_dp_stb_q", RA_DP_STB_Q},
	{"ra_gtarg_q", RA_GTARG_Q},
	{"sdc_recv_q",	SDC_RECV_Q},
	{"sdc_redir_io_q", SDC_REDIR_IO_Q},
	{"sdc_redir_sb_q", SDC_REDIR_SB_Q},
	{"sdc_outb_io_q", SDC_OUTB_IO_Q},
	{"sdc_outb_sb_q", SDC_OUTB_SB_Q},
	{"sa_add1_input_q", SA_ADD1_INPUT_Q},
	{"sa_add2_input_q", SA_ADD2_INPUT_Q},
	{"sa_inv_q", SA_INV_Q},
	{"sa_no_inv_q", SA_NO_INV_Q},
	{"sa_int_dp_q", SA_INT_DP_Q},
	{"sa_dp_q", SA_DP_Q},
	{"sl_wrtag_q", SL_WRTAG_Q},
	{"sl_rto_dp_q", SL_RTO_DP_Q},
	{"syreg_input_q", SYSREG_INPUT_Q},
	{"sdi_sys_status1", SDI_SYS_STATUS1},
	{"sdi_sys_status0", SDI_SYS_STATUS0},
	{"cdc_hits", CDC_HITS},
	{"total_cdc_read", TOTAL_CDC_READ},
	{"ha_watranid_sd", HA_WATRANID_SD},
	{"ha_stb_sd", HA_STB_SD},
	{"ha_l2_irq_sd", HA_L2_IRQ_SD},
	{"ha_sl_wrtag_sd", HA_SL_WRTAG_SD},
	{"aa_home_cc_full", AA_HOME_CC_FULL},
	{"aa_home_io_full", AA_HOME_IO_FULL},
	{"aa_slave_full", AA_SLAVE_FULL},
	{"aa_rp_full", AA_RP_FULL}
};

static kstat_t *axq_picN_ksp[AXQ_NUM_PICS];	/* picN kstats */
static int axq_attachcnt = 0;		/* # of instances attached */
static kmutex_t axq_attachcnt_lock;	/* lock for attachcnt */

static int axq_map_phys(dev_info_t *, struct regspec *,  caddr_t *,
    ddi_device_acc_attr_t *, ddi_acc_handle_t *);
static void axq_unmap_phys(ddi_acc_handle_t *);

int starcat_axq_pio_workaround(dev_info_t *);
static int axq_slot1_idle(struct axq_soft_state *);

static boolean_t axq_panic_callb(void *, int);
static callb_id_t axq_panic_cb_id;

/*
 * These are the module initialization routines.
 */

int
_init(void)
{
	int error;

	if ((error = ddi_soft_state_init(&axq_softp,
	    sizeof (struct axq_soft_state), 1)) != 0)
		return (error);

	rw_init(&axq_array_lock, NULL, RW_DEFAULT, NULL);

	mutex_init(&axq_local.axq_local_lock, NULL, MUTEX_DRIVER, NULL);

	mutex_init(&axq_attachcnt_lock, NULL, MUTEX_DRIVER, NULL);

	axq_local.initflag = 0;

	if ((error = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&axq_softp);
		mutex_destroy(&axq_attachcnt_lock);
		mutex_destroy(&axq_local.axq_local_lock);
		rw_destroy(&axq_array_lock);
		return (error);
	}

	axq_panic_cb_id = callb_add(axq_panic_callb, (void *)NULL,
	    CB_CL_PANIC, "axq_panic");

	return (0);
}

int
_fini(void)
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0)
		return (error);

	ddi_soft_state_fini(&axq_softp);
	mutex_destroy(&axq_attachcnt_lock);
	mutex_destroy(&axq_local.axq_local_lock);
	rw_destroy(&axq_array_lock);

	(void) callb_delete(axq_panic_cb_id);

	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

static int
axq_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int instance;
	struct axq_soft_state *softsp;
	ddi_device_acc_attr_t attr;
	extern uint64_t va_to_pa(void *);

	instance = ddi_get_instance(devi);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		/*
		 * Reenable the axq io pause if it is
		 * employed. See the DDI_SUSPEND comments.
		 */
		softsp = ddi_get_soft_state(axq_softp, instance);
		if (softsp->slotnum && softsp->paused && use_axq_iopause &&
		    axq_suspend_iopause) {
			*softsp->axq_domain_ctrl &= ~AXQ_DOMCTRL_PAUSE;
			softsp->paused = 0;
		}
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(axq_softp, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);

	softsp = ddi_get_soft_state(axq_softp, instance);

	/* Set the dip in the soft state */
	softsp->dip = devi;

	/* Get the "portid" property */
	if ((softsp->portid = (int)ddi_getprop(DDI_DEV_T_ANY, softsp->dip,
	    DDI_PROP_DONTPASS, "portid", -1)) == -1) {
		cmn_err(CE_WARN, "Unable to retrieve safari portid "
		    "property.");
		goto bad;
	}

	softsp->expid = softsp->portid >> 5;

	/*
	 * derive the slot # from the portid - for starcat, it is
	 * either 0 or 1 based on the lsb of the axq portid.
	 */
	softsp->slotnum = softsp->portid & 0x1;

	/*
	 * map in the regs. There are two regspecs - one
	 * in safari config space and the other in local space.
	 */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	if (ddi_regs_map_setup(softsp->dip, 0, &softsp->address, 0, 0,
	    &attr, &softsp->ac0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: unable to map reg set 0\n",
		    ddi_get_name(softsp->dip),
		    ddi_get_instance(softsp->dip));
		goto bad;
	}

	/*
	 * This is a hack to support DR copy-rename scripting.
	 * Get the physical address of the start of the
	 * AXQ config space and save it.
	 */
	softsp->axq_phyaddr = va_to_pa((caddr_t)softsp->address);

	axq_init(softsp);

	/*
	 * Map in the regs for local space access.
	 * This is global for all axq instances.
	 * Make sure that some axq instance does
	 * it for the rest of the gang.
	 * Note that this mapping is never removed.
	 */
	mutex_enter(&axq_local.axq_local_lock);
	if (!axq_local.initflag) {
		/* initialize and map in the local space */
		if (ddi_regs_map_setup(softsp->dip, 1,
		    &axq_local.laddress, 0, 0,
		    &attr, &axq_local.ac) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: unable to map reg set 1\n",
			    ddi_get_name(softsp->dip),
			    ddi_get_instance(softsp->dip));
			ddi_regs_map_free(&softsp->ac0);
			mutex_exit(&axq_local.axq_local_lock);
			goto bad;
		}
		axq_init_local(&axq_local);
		axq_local.initflag = 1;
	}
	mutex_exit(&axq_local.axq_local_lock);

	mutex_init(&softsp->axq_lock, NULL, MUTEX_DRIVER, NULL);

	/* update the axq array for this new instance */
	rw_enter(&axq_array_lock, RW_WRITER);
	ASSERT(axq_array[softsp->expid][softsp->slotnum] == NULL);
	axq_array[softsp->expid][softsp->slotnum] = softsp;
	rw_exit(&axq_array_lock);

	axq_add_kstats(softsp);

	ddi_report_dev(devi);

	return (DDI_SUCCESS);

bad:
	ddi_soft_state_free(axq_softp, instance);
	return (DDI_FAILURE);
}


static void
axq_init(struct axq_soft_state *softsp)
{
	int i;

	/*
	 * Setup the AXQ registers
	 * Some offsets and availability are dependent on the slot type
	 */
	if (softsp->slotnum == 0) {
		/* This is a slot type 0 AXQ */
		softsp->axq_domain_ctrl = REG_ADDR(softsp->address,
		    AXQ_SLOT0_DOMCTRL);
		softsp->axq_cdc_addrtest = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_ADR_TEST);
		softsp->axq_cdc_ctrltest = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_CTL_TEST);
		softsp->axq_cdc_datawrite0 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_DATA_WR0);
		softsp->axq_cdc_datawrite1 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_DATA_WR1);
		softsp->axq_cdc_datawrite2 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_DATA_WR2);
		softsp->axq_cdc_datawrite3 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_DATA_WR3);
		softsp->axq_cdc_counter = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_CNT_TEST);
		softsp->axq_cdc_readdata0 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_RD_DATA0);
		softsp->axq_cdc_readdata1 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_RD_DATA1);
		softsp->axq_cdc_readdata2 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_RD_DATA2);
		softsp->axq_cdc_readdata3 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_CDC_RD_DATA3);
		softsp->axq_pcr = REG_ADDR(softsp->address,
		    AXQ_SLOT0_PERFCNT_SEL);
		softsp->axq_pic0 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_PERFCNT0);
		softsp->axq_pic1 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_PERFCNT1);
		softsp->axq_pic2 = REG_ADDR(softsp->address,
		    AXQ_SLOT0_PERFCNT2);
		softsp->axq_nasm = REG_ADDR(softsp->address, AXQ_SLOT0_NASM);
	} else {
		/* slot type 1 AXQ */
		softsp->axq_domain_ctrl = REG_ADDR(softsp->address,
		    AXQ_SLOT1_DOMCTRL);
		softsp->axq_pcr = REG_ADDR(softsp->address,
		    AXQ_SLOT1_PERFCNT_SEL);
		softsp->axq_pic0 = REG_ADDR(softsp->address,
		    AXQ_SLOT1_PERFCNT0);
		softsp->axq_pic1 = REG_ADDR(softsp->address,
		    AXQ_SLOT1_PERFCNT1);
		softsp->axq_pic2 = REG_ADDR(softsp->address,
		    AXQ_SLOT1_PERFCNT2);
		softsp->axq_nasm = REG_ADDR(softsp->address, AXQ_SLOT1_NASM);
	}

	/* setup CASM slots */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		softsp->axq_casm_slot[i] = REG_ADDR(softsp->address,
		    (AXQ_CASM_SLOT_START + AXQ_REGOFF(i)));
	}

	/* setup SDI timeout register accesses */
	softsp->axq_sdi_timeout_rd = REG_ADDR(softsp->address,
	    AXQ_SLOT_SDI_TIMEOUT_RD);
	softsp->axq_sdi_timeout_rdclr = REG_ADDR(softsp->address,
	    AXQ_SLOT_SDI_TIMEOUT_RDCLR);

	/*
	 * Save the CDC state (enabled or disabled)
	 * as originally set up by POST.
	 */
	if (softsp->slotnum == 0) {
		softsp->axq_cdc_state = *softsp->axq_cdc_ctrltest &
		    AXQ_CDC_DIS;
	}

#ifndef _AXQ_LOCAL_ACCESS_SUPPORTED
	/*
	 * Setup cpu2ssc intr register in explicit expander
	 * space. Local space addressing for this is broken,
	 * we'll use explicit addressing for now.
	 */
	softsp->axq_cpu2ssc_intr = REG_ADDR(softsp->address,
	    AXQ_SLOT_CPU2SSC_INTR);
#endif /* _AXQ_LOCAL_ACCESS_SUPPORTED */
}


static void
axq_init_local(struct axq_local_regs *localregs)
{
	/*
	 * local access to cpu2ssc intr register will
	 * be the only one that may work properly in the
	 * next revision of the AXQ asics.
	 * Set it up here for now.
	 */
	localregs->axq_cpu2ssc_intr = REG_ADDR(localregs->laddress,
	    AXQ_SLOT_CPU2SSC_INTR);
}

/* ARGSUSED */
static int
axq_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int instance;
	int i;
	struct axq_soft_state *softsp;
	processorid_t cpuid;

	/* get the instance of this devi */
	instance = ddi_get_instance(devi);

	/* get the soft state pointer for this device node */
	softsp = ddi_get_soft_state(axq_softp, instance);

	switch (cmd) {
	case DDI_SUSPEND:
		/*
		 * Depending on the variable "use_axq_iopause",
		 * we set the AXQ iopause bit as a paranoid
		 * safety net. This assumes all the devices
		 * associated with the slot are already suspended.
		 * Care must be taken not to set iopause when CPUs
		 * are known to be present on the slot 1 board,
		 * i.e. the MCPU board type.
		 * This io pause bit only applies to the slot 1 AXQ.
		 */
		if (softsp->slotnum && use_axq_iopause && axq_suspend_iopause) {
			/*
			 * Do not enable AXQ_DOMCTRL_PAUSE if CPUs are
			 * known to be present in slot 1.
			 */
			mutex_enter(&cpu_lock);
			for (i = 0; i < STARCAT_SLOT1_CPU_MAX; i++) {
				cpuid = MAKE_CPUID(softsp->expid,
				    softsp->slotnum, i);
				if (cpu[cpuid]) {
					mutex_exit(&cpu_lock);
					return (DDI_SUCCESS);
				}
			}
			mutex_exit(&cpu_lock);

			/*
			 * Make sure that there is no outstanding
			 * I/O activity by reading the domain ctrl reg.
			 * A non-zero lsb indicates no I/O activity.
			 */
			if (axq_slot1_idle(softsp) == DDI_FAILURE) {
				cmn_err(CE_WARN, "%s%d: busy! suspend failed",
				    ddi_get_name(softsp->dip),
				    ddi_get_instance(softsp->dip));
				return (DDI_FAILURE);
			}

			*softsp->axq_domain_ctrl |= AXQ_DOMCTRL_PAUSE;
			softsp->paused = 1;
		}
		return (DDI_SUCCESS);

	case DDI_DETACH:
		rw_enter(&axq_array_lock, RW_WRITER);
		ASSERT(axq_array[softsp->expid][softsp->slotnum]
		    != NULL);
		axq_array[softsp->expid][softsp->slotnum] = NULL;
		rw_exit(&axq_array_lock);

		ddi_regs_map_free(&softsp->ac0);

		/*
		 * remove counter kstats for this device
		 */
		if (softsp->axq_counters_ksp != (kstat_t *)NULL) {
			kstat_delete(softsp->axq_counters_ksp);
		}

		/*
		 * See if we are the last instance to detach.
		 * If so, we need to remove the picN kstats
		 */
		mutex_enter(&axq_attachcnt_lock);
		if (--axq_attachcnt == 0) {
			for (i = 0; i < AXQ_NUM_PICS; i++) {
				if (axq_picN_ksp[i] != (kstat_t *)NULL) {
					kstat_delete(axq_picN_ksp[i]);
					axq_picN_ksp[i] = NULL;
				}
			}
		}
		mutex_exit(&axq_attachcnt_lock);

		ddi_soft_state_free(axq_softp, instance);

		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}


/* ARGSUSED0 */
static int
axq_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t dev = (dev_t)arg;
	struct axq_soft_state *softsp;
	int instance, ret;

	instance = getminor(dev);

	switch (infocmd) {
		case DDI_INFO_DEVT2DEVINFO:
			softsp = (struct axq_soft_state *)
			    ddi_get_soft_state(axq_softp, instance);
			if (softsp == NULL) {
				ret = DDI_FAILURE;
			} else {
				*result = softsp->dip;
				ret = DDI_SUCCESS;
			}
			break;
		case DDI_INFO_DEVT2INSTANCE:
			*result = (void *)(uintptr_t)instance;
			ret = DDI_SUCCESS;
			break;
		default:
			ret = DDI_FAILURE;
			break;
	}
	return (ret);
}

/*
 * Flush the CDC Sram of the slot0 axq
 * indicated by the expid argument
 */
int
axq_cdc_flush(uint32_t expid, int held, int disabled)
{
	struct axq_soft_state *softsp;
	uint32_t axq_ctrl_test_save0;
	uint32_t tmpval;
	int retval = 0;
	int i;

	if (!held)
		rw_enter(&axq_array_lock, RW_READER);

	ASSERT(axq_array[expid][SLOT0_AXQ] != NULL);

	softsp = axq_array[expid][SLOT0_AXQ];

	mutex_enter(&softsp->axq_lock);

	/* save the value of the ctrl test reg */
	axq_ctrl_test_save0 = *softsp->axq_cdc_ctrltest;

	/* disable sram and setup the ctrl test reg for flushing */
	tmpval = axq_ctrl_test_save0 & (AXQ_CDC_DATA_ECC_CHK_EN |
	    AXQ_CDC_ADR_PAR_CHK_EN |
	    AXQ_CDC_DATA_ECC_GEN_EN |
	    AXQ_CDC_ADR_PAR_GEN_EN);
	*softsp->axq_cdc_ctrltest = tmpval | AXQ_CDC_TMODE_WR
	    | AXQ_CDC_DATA2PAR_MUX_SEL_DATA
	    | AXQ_CDC_ADR2SRAM_MUX_SEL_TEST
	    | AXQ_CDC_ADR_INCR_XOR_CTRL
	    | AXQ_CDC_DIS;

	/* Enable CDC test in the CDC Address test reg */
	*softsp->axq_cdc_addrtest = AXQ_CDC_ADR_TEST_EN;

	/* clear the CDC Data write regs */
	*softsp->axq_cdc_datawrite0 = *softsp->axq_cdc_datawrite1 = 0;
	*softsp->axq_cdc_datawrite2 = *softsp->axq_cdc_datawrite3 = 0;

	/*
	 * write in the size of the sram to clear
	 * into the CDC Counter test reg
	 */
	*softsp->axq_cdc_counter = AXQ_CDC_SRAM_SIZE;

	/* wait for flush to complete */
	for (i = 0; i < AXQ_CDC_FLUSH_WAIT; i++) {
		DELAY(3000); /* should take only 1750 usecs */
		if (((*softsp->axq_cdc_counter) &
		    AXQ_CDC_CNT_TEST_DONE) != 0) {
			break;
		}
	}
	if (i >= AXQ_CDC_FLUSH_WAIT) {
		retval = DDI_FAILURE;
		cmn_err(CE_WARN, "axq_cdc_flush failed on expander %d",
		    expid);
	}

	/*
	 * Disable test mode in CDC address test reg
	 */
	*softsp->axq_cdc_addrtest = 0;

	/*
	 * If "disabled" option is requested, leave
	 * the CDC disabled.
	 */
	if (disabled) {
		axq_ctrl_test_save0 |= AXQ_CDC_DIS;
		*softsp->axq_cdc_ctrltest = axq_ctrl_test_save0;
	} else {
		*softsp->axq_cdc_ctrltest = axq_ctrl_test_save0;
	}

	mutex_exit(&softsp->axq_lock);

	if (!held)
		rw_exit(&axq_array_lock);

	return (retval);
}
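
/*
 * Illustrative sketch (not in the original source): a DR client that
 * already knows the expander id could flush that board's CDC SRAM
 * directly, letting this routine take axq_array_lock itself and
 * leaving the CDC enabled afterwards:
 *
 *	if (axq_cdc_flush(expid, 0, 0) != DDI_SUCCESS)
 *		cmn_err(CE_WARN, "CDC flush failed on expander %d", expid);
 *
 * Passing held != 0 is only valid when the caller already holds
 * axq_array_lock, as axq_cdc_flush_all() below does.
 */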


/*
 * Flush all the CDC srams for all the AXQs in
 * the local domain.
 */
int
axq_cdc_flush_all()
{
	int retval;
	int i;

	rw_enter(&axq_array_lock, RW_READER);

	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if (axq_array[i][SLOT0_AXQ] != NULL) {
			retval = axq_cdc_flush(i, 1, 0);
			if (retval != DDI_SUCCESS) break;
		}
	}
	rw_exit(&axq_array_lock);
	return (retval);
}

/*
 * Disable and flush all CDC srams for all the AXQs
 * in the local domain.
 */
int
axq_cdc_disable_flush_all()
{
	int retval;
	int i;

	rw_enter(&axq_array_lock, RW_READER);

	/*
	 * Disable and flush all the CDC srams
	 */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if (axq_array[i][SLOT0_AXQ] != NULL) {
			retval = axq_cdc_flush(i, 1, 1);
			if (retval != DDI_SUCCESS) break;
		}
	}
	rw_exit(&axq_array_lock);

	if (retval != DDI_SUCCESS) {
		axq_cdc_enable_all();
	}
	return (retval);
}


/*
 * Enable the CDC srams for all the AXQs in
 * the local domain. This routine is used in
 * conjunction with axq_cdc_disable_flush_all().
 */
void
axq_cdc_enable_all()
{
	struct axq_soft_state *softsp;
	int i;

	rw_enter(&axq_array_lock, RW_READER);

	/*
	 * Enable all the CDC sram
	 */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT0_AXQ]) != NULL) {
			mutex_enter(&softsp->axq_lock);
			if (softsp->axq_cdc_state != AXQ_CDC_DIS) {
				*softsp->axq_cdc_ctrltest &= ~AXQ_CDC_DIS;
			}
			mutex_exit(&softsp->axq_lock);
		}
	}
	rw_exit(&axq_array_lock);
}
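
/*
 * Illustrative sketch (not in the original source): a copy-rename
 * style sequence would typically bracket its critical section with
 * the two routines above, re-enabling the CDC only on boards where
 * POST left it enabled (axq_cdc_enable_all() checks axq_cdc_state):
 *
 *	if (axq_cdc_disable_flush_all() == DDI_SUCCESS) {
 *		... perform the memory copy/rename ...
 *		axq_cdc_enable_all();
 *	}
 *
 * The exact calling sequence is owned by the DR/copy-rename code, not
 * by this driver.
 */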

/*
 * Interface for DR to enable slot1 iopause after cpus have been idled.
 * Precondition is for all devices to have been suspended (including axq).
 * This routine avoids locks as it is called by DR with cpus paused.
 */
int
axq_iopause_enable_all(uint32_t *errexp)
{
	int i, j;
	int retval = DDI_SUCCESS;
	processorid_t cpuid;
	struct axq_soft_state *softsp;

	DELAY(1000);
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT1_AXQ]) != NULL &&
		    use_axq_iopause) {
			/*
			 * Do not enable if cpus configured in slot1.
			 * Unconfigured cpus should be idle in nc space.
			 */
			for (j = 0; j < STARCAT_SLOT1_CPU_MAX; j++) {
				cpuid = MAKE_CPUID(softsp->expid,
				    softsp->slotnum, j);
				if (cpu[cpuid]) {
					break;
				}
			}
			if (j < STARCAT_SLOT1_CPU_MAX) {
				continue;
			}

			retval = axq_slot1_idle(softsp);
			if (retval == DDI_FAILURE) {
				break;
			}

			*softsp->axq_domain_ctrl |= AXQ_DOMCTRL_PAUSE;
			softsp->paused = 1;
		}
	}

	if (retval != DDI_SUCCESS) {
		ASSERT(errexp);
		*errexp = i;
		axq_iopause_disable_all();
	}
	return (retval);
}

/*
 * De-assert axq iopause on all slot1 boards. This routine avoids locks
 * as it is called by DR with cpus paused.
 */
void
axq_iopause_disable_all()
{
	int i;
	struct axq_soft_state *softsp;

	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT1_AXQ]) != NULL &&
		    softsp->paused) {
			*softsp->axq_domain_ctrl &= ~AXQ_DOMCTRL_PAUSE;
			softsp->paused = 0;
		}
	}
}

/*
 * Attempt to wait for slot1 activity to go idle.
 */
static int
axq_slot1_idle(struct axq_soft_state *softsp)
{
	int i;

	ASSERT(softsp->slotnum == SLOT1_AXQ);
	for (i = 0; i < 10; i++) {
		if ((*(softsp->axq_domain_ctrl) & AXQ_DOMCTRL_BUSY) != 0) {
			return (DDI_SUCCESS);
		}
		DELAY(50);
	}
	return (DDI_FAILURE);
}

/*
 * Read a particular NASM entry
 */
int
axq_nasm_read(uint32_t expid, uint32_t slot, uint32_t nasm_entry,
    uint32_t *data)
{
	axq_nasm_read_u aread;
	axq_nasm_write_u awrite;
	struct axq_soft_state *softsp;

	if (slot > AXQ_MAX_SLOT_PER_EXP ||
	    expid > AXQ_MAX_EXP ||
	    nasm_entry > AXQ_NASM_SIZE) {
		return (DDI_FAILURE);
	}

	awrite.bit.rw = 0;	/* read operation */
	awrite.bit.addr = nasm_entry;
	awrite.bit.data = 0;

	rw_enter(&axq_array_lock, RW_READER);

	softsp = axq_array[expid][slot];
	if (softsp == NULL) {
		rw_exit(&axq_array_lock);
		return (DDI_FAILURE);
	}

	mutex_enter(&softsp->axq_lock);

	*(softsp->axq_nasm) = awrite.val;
	aread.val = *(softsp->axq_nasm);

	mutex_exit(&softsp->axq_lock);
	rw_exit(&axq_array_lock);

	if (aread.bit.valid) {
		*data = aread.bit.data;
		return (DDI_SUCCESS);
	}
	return (DDI_FAILURE);
}

/*
 * Write a particular NASM entry
 */
static int
axq_nasm_write_one(uint32_t expid, uint32_t slot, uint32_t nasm_entry,
    uint32_t data)
{
	axq_nasm_write_u awrite;
	struct axq_soft_state *softsp;

	/*
	 * Note: need to make sure axq_array_lock held first, so that a
	 * paused thread is not holding softsp->axq_lock, which could
	 * result in deadlock.
	 */
	ASSERT(RW_LOCK_HELD(&axq_array_lock));

	if (slot > AXQ_MAX_SLOT_PER_EXP ||
	    expid > AXQ_MAX_EXP ||
	    nasm_entry > AXQ_NASM_SIZE) {
		return (DDI_FAILURE);
	}

	awrite.bit.rw = 1;	/* write operation */
	awrite.bit.addr = nasm_entry;
	awrite.bit.data = data;

	softsp = axq_array[expid][slot];
	if (softsp == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&softsp->axq_lock);

	*(softsp->axq_nasm) = awrite.val;

	mutex_exit(&softsp->axq_lock);

	return (DDI_SUCCESS);
}

int
axq_nasm_write(uint32_t expid, uint32_t slot, uint32_t nasm_entry,
    uint32_t data)
{
	int rc;

	rw_enter(&axq_array_lock, RW_READER);
	rc = axq_nasm_write_one(expid, slot, nasm_entry, data);
	rw_exit(&axq_array_lock);
	return (rc);
}
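
/*
 * Illustrative sketch (not in the original source): a caller could use
 * axq_nasm_read()/axq_nasm_write() as a simple read-modify-write pair
 * on one NASM entry of a given expander/slot:
 *
 *	uint32_t val;
 *
 *	if (axq_nasm_read(expid, SLOT0_AXQ, entry, &val) == DDI_SUCCESS)
 *		(void) axq_nasm_write(expid, SLOT0_AXQ, entry, val | bit);
 *
 * "bit" here is a hypothetical mask; the real NASM layout is defined by
 * the AXQ hardware documentation and sys/axq.h.
 */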

/*
 * Write a particular NASM entry for all the
 * axqs in the domain.
 * Note: other CPUs are paused when this function is called.
 */
int
axq_nasm_write_all(uint32_t nasm_entry, uint32_t data)
{
	int i;
	int rc;

	ASSERT(RW_WRITE_HELD(&axq_array_lock));

	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if (axq_array[i][SLOT0_AXQ] != NULL) {
			rc = axq_nasm_write_one(i, SLOT0_AXQ, nasm_entry,
			    data);
			if (rc != DDI_SUCCESS) {
				return (DDI_FAILURE);
			}
		}
		if (axq_array[i][SLOT1_AXQ] != NULL) {
			rc = axq_nasm_write_one(i, SLOT1_AXQ, nasm_entry,
			    data);
			if (rc != DDI_SUCCESS) {
				return (DDI_FAILURE);
			}
		}
	}

	return (DDI_SUCCESS);
}

/*
 * Take write lock for axq_nasm_write_all() outside
 * critical section where other CPUs are paused.
 */
void
axq_array_rw_enter(void)
{
	rw_enter(&axq_array_lock, RW_WRITER);
}

/*
 * Release write lock for axq_nasm_write_all() outside
 * critical section where other CPUs are paused.
 */
void
axq_array_rw_exit(void)
{
	rw_exit(&axq_array_lock);
}
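
/*
 * Illustrative sketch (not in the original source): the comments above
 * imply the following ordering for a caller that must update every
 * NASM while other CPUs are paused; pause_cpus()/start_cpus() are the
 * usual kernel primitives, shown here only to make the ordering
 * concrete:
 *
 *	axq_array_rw_enter();
 *	pause_cpus(NULL);
 *	(void) axq_nasm_write_all(entry, data);
 *	start_cpus();
 *	axq_array_rw_exit();
 */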

/*
 * Read a particular CASM entry
 */
uint32_t
axq_casm_read(uint32_t expid, uint32_t slot, int casmslot)
{
	struct axq_soft_state *softsp;
	uint32_t retval;

	rw_enter(&axq_array_lock, RW_READER);

	ASSERT(axq_array[expid][slot] != NULL);
	ASSERT(casmslot >= 0 && casmslot < AXQ_MAX_EXP);

	softsp = axq_array[expid][slot];

	mutex_enter(&softsp->axq_lock);

	retval = *(softsp->axq_casm_slot[casmslot]);

	mutex_exit(&softsp->axq_lock);
	rw_exit(&axq_array_lock);

	return (retval);
}


/*
 * Write a particular CASM entry
 */

int
axq_casm_write(uint32_t expid, uint32_t slot, int casmslot,
	uint32_t value)
{
	struct axq_soft_state *softsp;
	int retval;

	rw_enter(&axq_array_lock, RW_READER);

	ASSERT(axq_array[expid][slot] != NULL);
	ASSERT(casmslot >= 0 && casmslot < AXQ_MAX_EXP);

	softsp = axq_array[expid][slot];

	mutex_enter(&softsp->axq_lock);

	/*
	 * first read the casm slot in question
	 * it should be non-zero to indicate that
	 * we have write permission to update it.
	 * Note that if we write it without valid
	 * permission, we can get an exception.
	 */
	if (*(softsp->axq_casm_slot[casmslot])) {
		*(softsp->axq_casm_slot[casmslot]) = value;
		retval = DDI_SUCCESS;
	} else {
		retval = DDI_FAILURE;
	}

	mutex_exit(&softsp->axq_lock);
	rw_exit(&axq_array_lock);
	return (retval);
}

/*
 * Write a particular CASM entry for all the
 * axqs in the domain
 */

int
axq_casm_write_all(int casmslot, uint32_t value)
{
	int i;
	struct axq_soft_state *softsp;

	/*
	 * Since we are updating all the AXQs,
	 * it will be easier to simply grab
	 * exclusive access to the AXQs by obtaining
	 * the RW_WRITER access to the axq_array.
	 */
	rw_enter(&axq_array_lock, RW_WRITER);

	/*
	 * Paranoid check: run thru all the avail AXQs
	 * and make sure we can write into that slot in question
	 * We check it by reading the slot and it should be
	 * non-zero.
	 */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT0_AXQ]) != NULL) {
			if (*(softsp->axq_casm_slot[casmslot])
			    == 0) {
				break;
			}
		}
		if ((softsp = axq_array[i][SLOT1_AXQ]) != NULL) {
			if (*(softsp->axq_casm_slot[casmslot])
			    == 0) {
				break;
			}
		}
	}

	if (i < AXQ_MAX_EXP) {
		/*
		 * We have no write permission for some AXQ
		 * for the CASM slot in question. Flag it
		 * as an error
		 */
		rw_exit(&axq_array_lock);
		return (DDI_FAILURE);
	}

	/*
	 * everything looks good - do the update
	 */
	for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][SLOT0_AXQ]) != NULL) {
			*softsp->axq_casm_slot[casmslot] = value;
		}
		if ((softsp = axq_array[i][SLOT1_AXQ]) != NULL) {
			*softsp->axq_casm_slot[casmslot] = value;
		}
	}

	rw_exit(&axq_array_lock);
	return (DDI_SUCCESS);
}


/*
 * Construct a script of <physicaladdr, data> tuple pairs that
 * reprogram all the AXQs in the local domain to swap the
 * contents of casmslot0 with casmslot1.
 */
int
axq_do_casm_rename_script(uint64_t **script_elm, int casmslot0,
	int casmslot1)
{
	struct axq_soft_state *softsp;
	int i, slot;
	uint32_t val0, val1;
	uint64_t *s_elm = *script_elm;
	uint64_t paddr;

	/*
	 * There should be some global locking at the
	 * DR level to do this - since this is one of
	 * the sequence of steps in copyrename.
	 * For now, we grab the RW_WRITER lock for
	 * script construction.
	 */
	rw_enter(&axq_array_lock, RW_WRITER);

	/*
	 * Construct the <physicaladdr, data> tuple pairs
	 * for reprogramming the AXQs so that the value in
	 * casmslot0 is swapped with the content in casmslot1.
	 * Paranoid check: We make sure that we can write to
	 * both slots in all the AXQs by reading the slots and
	 * they should be non-zero.
	 */
	for (slot = SLOT0_AXQ; slot <= SLOT1_AXQ; slot++) {
		for (i = 0; i < AXQ_MAX_EXP; i++) {
		if ((softsp = axq_array[i][slot]) != NULL) {
			paddr = softsp->axq_phyaddr;
			val0 = *(softsp->axq_casm_slot[casmslot0]);
			val1 = *(softsp->axq_casm_slot[casmslot1]);
			if (val0 != 0 && val1 != 0) {
				*s_elm++ = paddr + AXQ_CASM_SLOT_START +
				    AXQ_REGOFF(casmslot0);
				*s_elm++ = val1;
				*s_elm++ = paddr + AXQ_CASM_SLOT_START +
				    AXQ_REGOFF(casmslot1);
				*s_elm++ = val0;
			} else {
				/*
				 * Somehow we can't access one of
				 * the casm slots - quit.
				 */
				break;
			}
		}
		}
		if (i < AXQ_MAX_EXP)
			break;
	}

	rw_exit(&axq_array_lock);

	if (slot > SLOT1_AXQ) {
		/* successful */
		*script_elm = s_elm;
		return (DDI_SUCCESS);
	} else {
		return (DDI_FAILURE);
	}
}
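
/*
 * Illustrative sketch (not in the original source): the script built
 * above is a flat array of <physical address, 32-bit data> pairs, so a
 * hypothetical consumer running from the copy-rename script would walk
 * it in pairs and store each value to its register address, e.g.:
 *
 *	for (p = script_start; p < script_end; p += 2)
 *		*(volatile uint32_t *)(uintptr_t)p[0] = (uint32_t)p[1];
 *
 * The real consumer is the DR copy-rename code, which executes the
 * script while normal memory traffic is quiesced.
 */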


/*
 * Send an interrupt to the SSC passing
 * an 8-bit cookie value
 */
int
axq_cpu2ssc_intr(uint8_t cookie)
{
	int retval, i;
	volatile uint32_t *intr_reg;

#ifndef	_AXQ_LOCAL_SPACE_SUPPORTED
	/* Local space access not available */

	int exp, slot;

	rw_enter(&axq_array_lock, RW_READER);

	/* Make sure the current cpu is not switched out */
	kpreempt_disable();

	/*
	 * Compute the exp# and slot# of the current cpu
	 * so that we know which AXQ cpu2ssc intr reg to
	 * use.
	 */
	exp = CPU->cpu_id >> 5;
	slot = (CPU->cpu_id >> 3) & 0x1;

	intr_reg = axq_array[exp][slot]->axq_cpu2ssc_intr;
#else
	/* use local space */
	intr_reg = axq_local.axq_cpu2ssc_intr;
#endif /* _AXQ_LOCAL_SPACE_SUPPORTED */

	ASSERT(intr_reg != 0);

	retval = DDI_FAILURE;
	for (i = 0; i < AXQ_INTR_PEND_WAIT; i++) {
		if (!(*intr_reg & AXQ_CPU2SSC_INTR_PEND)) {
			*intr_reg = cookie;
			retval = DDI_SUCCESS;
			break;
		}
		DELAY(200);
	}

#ifndef	_AXQ_LOCAL_SPACE_SUPPORTED
	kpreempt_enable();
	rw_exit(&axq_array_lock);
#endif
	return (retval);
}


/*
 * Read the SDI timeout register (SRD use)
 * This routine accepts a clear flag to indicate
 * whether the register should be cleared after
 * the read.
 */
uint32_t
axq_read_sdi_timeout_reg(uint32_t expid, uint32_t slot, int clearflag)
{
	struct axq_soft_state *softsp;
	uint32_t retval;

	rw_enter(&axq_array_lock, RW_READER);

	ASSERT(axq_array[expid][slot] != NULL);

	softsp = axq_array[expid][slot];

	mutex_enter(&softsp->axq_lock);

	if (clearflag) {
		/* read and then clear register */
		retval = *softsp->axq_sdi_timeout_rdclr;
	} else {
		retval = *softsp->axq_sdi_timeout_rd;
	}

	mutex_exit(&softsp->axq_lock);
	rw_exit(&axq_array_lock);

	return (retval);
}


/*
 * Routine to create a kstat for each %pic that
 * the AXQ has (there are 3 of them). These read-only
 * kstats export event names that the respective %pic
 * supports. Pic0 and Pic1 are similar and they both have
 * a 128-input mux. Pic2 counts the clock and can be set up
 * to count or freeze.
 * Note that all AXQ instances use the same events, so we only
 * need to create one set of the picN kstats.
 */
static void
axq_add_picN_kstats(dev_info_t *dip)
{
	struct kstat_named *axq_pic_named_data;
	int event, pic;
	int instance = ddi_get_instance(dip);
	int pic_shift = 0;

	/*
	 * Create the picN kstat for Pic0 and Pic1
	 * Both have similar set of events. Add one
	 * extra event for the clear_event mask.
	 */
	for (pic = 0; pic < AXQ_NUM_PICS; pic++) {
		char pic_name[20];
		int num_events, i;

		(void) sprintf(pic_name, "pic%d", pic);

		num_events = (pic <= 1) ? AXQ_PIC0_1_NUM_EVENTS :
		    AXQ_PIC2_NUM_EVENTS;

		if ((axq_picN_ksp[pic] = kstat_create("axq",
		    instance, pic_name, "bus", KSTAT_TYPE_NAMED,
		    num_events + 1, NULL)) == NULL) {
			cmn_err(CE_WARN, "axq %s: kstat_create failed",
			    pic_name);

			/* remove pic kstats that were created earlier */
13851708Sstevel 			for (i = 0; i < pic; i++) {
13861708Sstevel 				kstat_delete(axq_picN_ksp[i]);
13871708Sstevel 				axq_picN_ksp[i] = NULL;
13881708Sstevel 			}
13891708Sstevel 			return;
13901708Sstevel 		}
13911708Sstevel 
13921708Sstevel 		axq_pic_named_data =
1393*7656SSherry.Moore@Sun.COM 		    (struct kstat_named *)(axq_picN_ksp[pic]->ks_data);
13941708Sstevel 
13951708Sstevel 		pic_shift = pic * AXQ_PIC_SHIFT;
13961708Sstevel 
13971708Sstevel 		/*
13981708Sstevel 		 * for each picN event, write a kstat record of
13991708Sstevel 		 * name = EVENT & value.ui64 = PCR_MASK.
14001708Sstevel 		 */
14011708Sstevel 		for (event = 0; event < num_events; event++) {
14021708Sstevel 			/* pcr_mask */
14031708Sstevel 			axq_pic_named_data[event].value.ui64 =
1404*7656SSherry.Moore@Sun.COM 			    axq_events[event].pcr_mask << pic_shift;
14051708Sstevel 
14061708Sstevel 			/* event name */
14071708Sstevel 			kstat_named_init(&axq_pic_named_data[event],
1408*7656SSherry.Moore@Sun.COM 			    axq_events[event].event_name,
1409*7656SSherry.Moore@Sun.COM 			    KSTAT_DATA_UINT64);
14101708Sstevel 		}
14111708Sstevel 
14121708Sstevel 		/*
14131708Sstevel 		 * Add the clear pic event and mask as the last
14141708Sstevel 		 * record in the kstat.
14151708Sstevel 		 */
14161708Sstevel 		axq_pic_named_data[num_events].value.ui64 =
1417*7656SSherry.Moore@Sun.COM 		    (uint32_t)~(AXQ_PIC_CLEAR_MASK << pic_shift);
14181708Sstevel 
14191708Sstevel 		kstat_named_init(&axq_pic_named_data[num_events],
1420*7656SSherry.Moore@Sun.COM 		    "clear_pic", KSTAT_DATA_UINT64);
14211708Sstevel 
14221708Sstevel 		kstat_install(axq_picN_ksp[pic]);
14231708Sstevel 	}
14241708Sstevel }
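
/*
 * Illustrative sketch (not part of the driver): a user-level consumer
 * (busstat(1M) works roughly this way) can use the picN kstats to map
 * an event name to a %pcr mask.  "some_event" is a placeholder for one
 * of the names in axq_events[]; the picN kstats are created by the
 * first instance to attach, so instance -1 (any) is used in the lookup.
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *pic0 = kstat_lookup(kc, "axq", -1, "pic0");
 *	kstat_named_t *kn;
 *	uint64_t pcr = 0;
 *
 *	(void) kstat_read(kc, pic0, NULL);
 *	kn = kstat_data_lookup(pic0, "some_event");
 *	if (kn != NULL)
 *		pcr |= kn->value.ui64;		already shifted for pic0
 *
 * The last record, "clear_pic", holds the complement mask used to
 * clear the pic0 field of a previously programmed %pcr value.
 */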
14251708Sstevel 
14261708Sstevel 
14271708Sstevel static  void
14281708Sstevel axq_add_kstats(struct axq_soft_state *softsp)
14291708Sstevel {
14301708Sstevel 	struct kstat *axq_counters_ksp;
14311708Sstevel 	struct kstat_named *axq_counters_named_data;
14321708Sstevel 
14331708Sstevel 	/*
14341708Sstevel 	 * Create the picN kstats if we are the first instance
14351708Sstevel 	 * to attach. We use axq_attachcnt as a count of how
14361708Sstevel 	 * many instances have attached. This is protected by
14371708Sstevel 	 * a lock.
14381708Sstevel 	 */
14391708Sstevel 	mutex_enter(&axq_attachcnt_lock);
14401708Sstevel 	if (axq_attachcnt++ == 0)
14411708Sstevel 		axq_add_picN_kstats(softsp->dip);
14421708Sstevel 
14431708Sstevel 	mutex_exit(&axq_attachcnt_lock);
14441708Sstevel 
14451708Sstevel 	/*
14461708Sstevel 	 * A "counters" kstat is created for each axq
14471708Sstevel 	 * instance that provides access to the %pcr and %pic
14481708Sstevel 	 * registers for that instance.
14491708Sstevel 	 *
14501708Sstevel 	 * The size of this kstat is AXQ_NUM_PICS + 1 (one extra record for %pcr).
14511708Sstevel 	 */
14521708Sstevel 	if ((axq_counters_ksp = kstat_create("axq",
1453*7656SSherry.Moore@Sun.COM 	    ddi_get_instance(softsp->dip), "counters",
1454*7656SSherry.Moore@Sun.COM 	    "bus", KSTAT_TYPE_NAMED, AXQ_NUM_PICS + 1,
1455*7656SSherry.Moore@Sun.COM 	    KSTAT_FLAG_WRITABLE)) == NULL) {
14561708Sstevel 		cmn_err(CE_WARN, "axq%d counters: kstat_create"
14571708Sstevel 		    " failed", ddi_get_instance(softsp->dip));
14581708Sstevel 		return;
14591708Sstevel 	}
14601708Sstevel 
14611708Sstevel 	axq_counters_named_data =
1462*7656SSherry.Moore@Sun.COM 	    (struct kstat_named *)(axq_counters_ksp->ks_data);
14631708Sstevel 
14641708Sstevel 	/* initialize the named kstats */
14651708Sstevel 	kstat_named_init(&axq_counters_named_data[0],
1466*7656SSherry.Moore@Sun.COM 	    "pcr", KSTAT_DATA_UINT32);
14671708Sstevel 
14681708Sstevel 	kstat_named_init(&axq_counters_named_data[1],
1469*7656SSherry.Moore@Sun.COM 	    "pic0", KSTAT_DATA_UINT32);
14701708Sstevel 
14711708Sstevel 	kstat_named_init(&axq_counters_named_data[2],
1472*7656SSherry.Moore@Sun.COM 	    "pic1", KSTAT_DATA_UINT32);
14731708Sstevel 
14741708Sstevel 	kstat_named_init(&axq_counters_named_data[3],
1475*7656SSherry.Moore@Sun.COM 	    "pic2", KSTAT_DATA_UINT32);
14761708Sstevel 
14771708Sstevel 	axq_counters_ksp->ks_update = axq_counters_kstat_update;
14781708Sstevel 	axq_counters_ksp->ks_private = (void *)softsp;
14791708Sstevel 
14801708Sstevel 	kstat_install(axq_counters_ksp);
14811708Sstevel 
14821708Sstevel 	/* update the softstate */
14831708Sstevel 	softsp->axq_counters_ksp = axq_counters_ksp;
14841708Sstevel }
14851708Sstevel 
14861708Sstevel 
14871708Sstevel static  int
14881708Sstevel axq_counters_kstat_update(kstat_t *ksp, int rw)
14891708Sstevel {
14901708Sstevel 	struct kstat_named *axq_counters_data;
14911708Sstevel 	struct axq_soft_state *softsp;
14921708Sstevel 
14931708Sstevel 	axq_counters_data = (struct kstat_named *)ksp->ks_data;
14941708Sstevel 	softsp = (struct axq_soft_state *)ksp->ks_private;
14951708Sstevel 
14961708Sstevel 	if (rw == KSTAT_WRITE) {
14971708Sstevel 		/*
14981708Sstevel 		 * Write the pcr value to the hardware through
14991708Sstevel 		 * softsp->axq_pcr.  The pic registers are read-only
15001708Sstevel 		 * so we don't attempt to write to them.
15011708Sstevel 		 */
15021708Sstevel 		*softsp->axq_pcr = (uint32_t)axq_counters_data[0].value.ui64;
15031708Sstevel 	} else {
15041708Sstevel 		/*
15051708Sstevel 		 * Read %pcr and %pic register values and write them
15061708Sstevel 		 * into the counters kstat.
15071708Sstevel 		 *
15081708Sstevel 		 */
15091708Sstevel 
15101708Sstevel 		/* pcr */
15111708Sstevel 		axq_counters_data[0].value.ui64 = (uint64_t)
1512*7656SSherry.Moore@Sun.COM 		    (*softsp->axq_pcr);
15131708Sstevel 
15141708Sstevel 		/* pic0 */
15151708Sstevel 		axq_counters_data[1].value.ui64 = (uint64_t)
1516*7656SSherry.Moore@Sun.COM 		    (*softsp->axq_pic0);
15171708Sstevel 
15181708Sstevel 		/* pic1 */
15191708Sstevel 		axq_counters_data[2].value.ui64 = (uint64_t)
1520*7656SSherry.Moore@Sun.COM 		    *softsp->axq_pic1;
15211708Sstevel 
15221708Sstevel 		/* pic2 */
15231708Sstevel 		axq_counters_data[3].value.ui64 = (uint64_t)
1524*7656SSherry.Moore@Sun.COM 		    *softsp->axq_pic2;
15251708Sstevel 	}
15261708Sstevel 	return (0);
15271708Sstevel }
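
/*
 * Illustrative sketch (not part of the driver): programming the
 * performance counters through the writable per-instance "counters"
 * kstat.  Only the "pcr" record is honored on write; the pic records
 * simply reflect the hardware counters.  "kc", "inst" and "pcr_value"
 * are hypothetical and would come from the caller.
 *
 *	kstat_t *ksp = kstat_lookup(kc, "axq", inst, "counters");
 *	kstat_named_t *d;
 *
 *	(void) kstat_read(kc, ksp, NULL);
 *	d = kstat_data_lookup(ksp, "pcr");
 *	d->value.ui64 = pcr_value;		mask built from picN kstats
 *	(void) kstat_write(kc, ksp, NULL);	requires privilege
 *
 *	(void) kstat_read(kc, ksp, NULL);	later: sample pic0/pic1/pic2
 */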
15281708Sstevel 
15291708Sstevel struct gptwo_phys_spec {
15301708Sstevel 	uint_t gptwo_phys_hi;   /* child's address, hi word */
15311708Sstevel 	uint_t gptwo_phys_low;  /* child's address, low word */
15321708Sstevel 	uint_t gptwo_size_hi;   /* high word of size field */
15331708Sstevel 	uint_t gptwo_size_low;  /* low word of size field */
15341708Sstevel };
15351708Sstevel 
15361708Sstevel int axq_pio_workaround_disable = 0;
15371708Sstevel int axq_pio_limit = 3;
15381708Sstevel 
15391708Sstevel int
15401708Sstevel starcat_axq_pio_workaround(dev_info_t *dip)
15411708Sstevel {
15421708Sstevel 	dev_info_t *axq_dip, *cdip, *pdip;
15431708Sstevel 	int portid, axq_portid;
15441708Sstevel 	char *name;
15451708Sstevel 	int size, circ;
15461708Sstevel 	uint_t *base_addr, *io_domain_control_addr;
15471708Sstevel 	int32_t io_domain_control;
15481708Sstevel 	ddi_device_acc_attr_t acc;
15491708Sstevel 	ddi_acc_handle_t handle;
15501708Sstevel 	struct gptwo_phys_spec *gptwo_spec;
15511708Sstevel 	struct regspec phys_spec;
15521708Sstevel 
15531708Sstevel 	if (axq_pio_workaround_disable)
15541708Sstevel 		return (0);
15551708Sstevel 
15561708Sstevel 	/*
15571708Sstevel 	 * Get the portid for the PCI (Schizo) device.
15581708Sstevel 	 */
15591708Sstevel 	if ((portid = ddi_getprop(DDI_DEV_T_ANY, dip, 0, "portid", -1)) < 0) {
15601708Sstevel 		cmn_err(CE_WARN, "%s: no portid\n", ddi_get_name(dip));
15611708Sstevel 		return (0);
15621708Sstevel 	}
15631708Sstevel 
15641708Sstevel 	/*
15651708Sstevel 	 * Calculate the portid for the Slot 1 AXQ.  The portids are:
15661708Sstevel 	 * Schizo 0 EEEEE11100
15671708Sstevel 	 * Schizo 1 EEEEE11101
15681708Sstevel 	 * AXQ 0    EEEEE11110
15691708Sstevel 	 * AXQ 1    EEEEE11111
15701708Sstevel 	 * where EEEEE is the 5-bit expander number.  So the portid for
15711708Sstevel 	 * AXQ 1 can easily be calculated by ORing 3 into the portid of
15721708Sstevel 	 * Schizo 0 or 1.
15731708Sstevel 	 */
15741708Sstevel 	axq_portid = portid | 3;
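
	/*
	 * Worked example (illustrative): for expander 5 (EEEEE = 00101),
	 * Schizo 0 has portid 0010111100 = 0xbc and Schizo 1 has 0xbd;
	 * ORing in 3 gives 0010111111 = 0xbf, the Slot 1 AXQ portid,
	 * regardless of which Schizo portid we started from.
	 */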
15751708Sstevel 
15761708Sstevel 	/*
15771708Sstevel 	 * Look for AXQ nodes that have the portid we calculated.
15781708Sstevel 	 */
15791708Sstevel 	axq_dip = NULL;
15801708Sstevel 	pdip = ddi_root_node();
15811708Sstevel 	ndi_devi_enter(pdip, &circ);
15821708Sstevel 	for (cdip = ddi_get_child(pdip); cdip != NULL;
1583*7656SSherry.Moore@Sun.COM 	    cdip = ddi_get_next_sibling(cdip)) {
15841708Sstevel 
15851708Sstevel 		if (ddi_getlongprop(DDI_DEV_T_ANY, cdip,
15861708Sstevel 		    DDI_PROP_DONTPASS, "name", (caddr_t)&name, &size)
15871708Sstevel 		    != DDI_PROP_SUCCESS) {
15881708Sstevel 			continue;
15891708Sstevel 		}
15901708Sstevel 
15911708Sstevel 		if (strcmp(name, "address-extender-queue") != 0) {
15921708Sstevel 			kmem_free(name, size);
15931708Sstevel 			continue;
15941708Sstevel 		}
15951708Sstevel 
15961708Sstevel 		/*
15971708Sstevel 		 * Found an AXQ node.
15981708Sstevel 		 */
15991708Sstevel 
16001708Sstevel 		kmem_free(name, size);
16011708Sstevel 
16021708Sstevel 		portid = ddi_getprop(DDI_DEV_T_ANY, cdip, 0, "portid", -1);
16031708Sstevel 
16041708Sstevel 		if (portid == axq_portid) {
16051708Sstevel 
16061708Sstevel 			/*
16071708Sstevel 			 * We found the correct AXQ node.
16081708Sstevel 			 */
16091708Sstevel 			ndi_hold_devi(cdip);
16101708Sstevel 			axq_dip = cdip;
16111708Sstevel 			break;
16121708Sstevel 		}
16131708Sstevel 	}
16141708Sstevel 	ndi_devi_exit(pdip, circ);
16151708Sstevel 
16161708Sstevel 	if (axq_dip == NULL) {
16171708Sstevel 		cmn_err(CE_WARN, "can't find axq node with portid=0x%x\n",
16181708Sstevel 		    axq_portid);
16191708Sstevel 		return (0);
16201708Sstevel 	}
16211708Sstevel 
16221708Sstevel 	if (ddi_getlongprop(DDI_DEV_T_ANY, axq_dip, DDI_PROP_DONTPASS, "reg",
16231708Sstevel 	    (caddr_t)&gptwo_spec, &size) != DDI_PROP_SUCCESS) {
16241708Sstevel 		cmn_err(CE_WARN, "%s: no regspec\n", ddi_get_name(axq_dip));
16251708Sstevel 		ndi_rele_devi(axq_dip);
16261708Sstevel 		return (0);
16271708Sstevel 	}
16281708Sstevel 
16291708Sstevel 	phys_spec.regspec_bustype = gptwo_spec->gptwo_phys_hi;
16301708Sstevel 	phys_spec.regspec_addr = gptwo_spec->gptwo_phys_low;
16311708Sstevel 	phys_spec.regspec_size = gptwo_spec->gptwo_size_low;
16321708Sstevel 
16331708Sstevel 	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
16341708Sstevel 	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
16351708Sstevel 	acc.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
16361708Sstevel 
16371708Sstevel 	if (axq_map_phys(axq_dip, &phys_spec, (caddr_t *)&base_addr,
16381708Sstevel 	    &acc, &handle)) {
16391708Sstevel 		cmn_err(CE_WARN, "%s: map phys failed\n",
16401708Sstevel 		    ddi_get_name(axq_dip));
16411708Sstevel 		kmem_free(gptwo_spec, size);
16421708Sstevel 		ndi_rele_devi(axq_dip);
16431708Sstevel 		return (0);
16441708Sstevel 	}
16451708Sstevel 
16461708Sstevel 	kmem_free(gptwo_spec, size);
16471708Sstevel 
16481708Sstevel 	io_domain_control_addr = REG_ADDR(base_addr, AXQ_SLOT1_DOMCTRL);
16491708Sstevel 
16501708Sstevel 	if (ddi_peek32(axq_dip, (int32_t *)io_domain_control_addr,
16511708Sstevel 	    (int32_t *)&io_domain_control)) {
16521708Sstevel 		cmn_err(CE_WARN, "%s: peek failed\n", ddi_get_name(axq_dip));
16531708Sstevel 		ndi_rele_devi(axq_dip);
16541708Sstevel 		return (0);
16551708Sstevel 	}
16561708Sstevel 
16571708Sstevel 	axq_unmap_phys(&handle);
16581708Sstevel 
16591708Sstevel 	ndi_rele_devi(axq_dip);
16601708Sstevel 
16611708Sstevel 	/*
16621708Sstevel 	 * If bit 6 of the IO Domain Control Register is a one,
16631708Sstevel 	 * then this AXQ version does not have the PIO Limit problem.
16641708Sstevel 	 */
16651708Sstevel 	if (io_domain_control & AXQ_DOMCTRL_PIOFIX)
16661708Sstevel 		return (0);
16671708Sstevel 
16681708Sstevel 	return (axq_pio_limit);
16691708Sstevel }
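
/*
 * Illustrative sketch (not part of this excerpt): a PCI nexus driver
 * on Starcat can consult this routine at attach time and throttle its
 * outstanding PIO accesses accordingly.  The variable name below is
 * hypothetical.
 *
 *	int pio_limit = starcat_axq_pio_workaround(dip);
 *
 *	if (pio_limit != 0)
 *		restrict concurrent PIOs to pio_limit (3 by default)
 *
 * A return value of 0 means the workaround is disabled or the AXQ
 * advertises the PIO fix (AXQ_DOMCTRL_PIOFIX), so no throttling is
 * needed.
 */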
16701708Sstevel 
16711708Sstevel static int
16721708Sstevel axq_map_phys(dev_info_t *dip, struct regspec *phys_spec,
16731708Sstevel 	caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
16741708Sstevel 	ddi_acc_handle_t *handlep)
16751708Sstevel {
16761708Sstevel 	ddi_map_req_t mr;
16771708Sstevel 	ddi_acc_hdl_t *hp;
16781708Sstevel 	int result;
16791708Sstevel 	struct regspec *ph;
16801708Sstevel 
16811708Sstevel 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
16821708Sstevel 	hp = impl_acc_hdl_get(*handlep);
16831708Sstevel 	hp->ah_vers = VERS_ACCHDL;
16841708Sstevel 	hp->ah_dip = dip;
16851708Sstevel 	hp->ah_rnumber = 0;
16861708Sstevel 	hp->ah_offset = 0;
16871708Sstevel 	hp->ah_len = 0;
16881708Sstevel 	hp->ah_acc = *accattrp;
16891708Sstevel 	ph = kmem_zalloc(sizeof (struct regspec), KM_SLEEP);
16901708Sstevel 	*ph = *phys_spec;
16911708Sstevel 	hp->ah_bus_private = ph;	/* cache a copy of the reg spec */
16921708Sstevel 
16931708Sstevel 	mr.map_op = DDI_MO_MAP_LOCKED;
16941708Sstevel 	mr.map_type = DDI_MT_REGSPEC;
16951708Sstevel 	mr.map_obj.rp = phys_spec;
16961708Sstevel 	mr.map_prot = PROT_READ | PROT_WRITE;
16971708Sstevel 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
16981708Sstevel 	mr.map_handlep = hp;
16991708Sstevel 	mr.map_vers = DDI_MAP_VERSION;
17001708Sstevel 
17011708Sstevel 	result = ddi_map(dip, &mr, 0, 0, addrp);
17021708Sstevel 
17031708Sstevel 	if (result != DDI_SUCCESS) {
17041708Sstevel 		impl_acc_hdl_free(*handlep);
17051708Sstevel 		*handlep = NULL;
17061708Sstevel 	} else {
17071708Sstevel 		hp->ah_addr = *addrp;
17081708Sstevel 	}
17091708Sstevel 
17101708Sstevel 	return (result);
17111708Sstevel }
17121708Sstevel 
17131708Sstevel static void
17141708Sstevel axq_unmap_phys(ddi_acc_handle_t *handlep)
17151708Sstevel {
17161708Sstevel 	ddi_map_req_t mr;
17171708Sstevel 	ddi_acc_hdl_t *hp;
17181708Sstevel 	struct regspec *ph;
17191708Sstevel 
17201708Sstevel 	hp = impl_acc_hdl_get(*handlep);
17211708Sstevel 	ASSERT(hp);
17221708Sstevel 	ph = hp->ah_bus_private;
17231708Sstevel 
17241708Sstevel 	mr.map_op = DDI_MO_UNMAP;
17251708Sstevel 	mr.map_type = DDI_MT_REGSPEC;
17261708Sstevel 	mr.map_obj.rp = ph;
17271708Sstevel 	mr.map_prot = PROT_READ | PROT_WRITE;
17281708Sstevel 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
17291708Sstevel 	mr.map_handlep = hp;
17301708Sstevel 	mr.map_vers = DDI_MAP_VERSION;
17311708Sstevel 
17321708Sstevel 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
1733*7656SSherry.Moore@Sun.COM 	    hp->ah_len, &hp->ah_addr);
17341708Sstevel 
17351708Sstevel 	impl_acc_hdl_free(*handlep);
17361708Sstevel 	kmem_free(ph, sizeof (struct regspec));	/* Free the cached copy */
17371708Sstevel 	*handlep = NULL;
17381708Sstevel }
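
/*
 * Illustrative sketch (not part of the driver): the map/peek/unmap
 * pattern used by starcat_axq_pio_workaround() above.  phys_spec, acc
 * and offset are assumed to have been set up as in that routine.
 *
 *	caddr_t base;
 *	ddi_acc_handle_t h;
 *	int32_t val;
 *
 *	if (axq_map_phys(dip, &phys_spec, &base, &acc, &h) == DDI_SUCCESS) {
 *		(void) ddi_peek32(dip, (int32_t *)REG_ADDR(base, offset),
 *		    &val);
 *		axq_unmap_phys(&h);
 *	}
 *
 * axq_map_phys() caches a private copy of the regspec in the access
 * handle (ah_bus_private) so that axq_unmap_phys() can issue the
 * matching DDI_MO_UNMAP without the caller keeping the regspec around.
 */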
17391708Sstevel 
17401708Sstevel /* ARGSUSED */
17411708Sstevel static boolean_t
17421708Sstevel axq_panic_callb(void *arg, int code)
17431708Sstevel {
17441708Sstevel 	axq_iopause_disable_all();
17451708Sstevel 	return (B_TRUE);
17461708Sstevel }
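
/*
 * Illustrative sketch (not part of this excerpt): a panic callback
 * like the one above would typically be registered with the callback
 * framework during attach, e.g.
 *
 *	callb_id_t cid = callb_add(axq_panic_callb, NULL, CB_CL_PANIC,
 *	    "axq_panic");
 *
 * so that any active IO pause is backed out before the system dumps;
 * callb_delete(cid) would remove the registration at detach time.
 * The callback id variable and string name here are hypothetical.
 */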
1747