xref: /onnv-gate/usr/src/uts/sun4u/io/px/px_lib4u.c (revision 1772:78cca3d2cc4b)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51540Skini  * Common Development and Distribution License (the "License").
61540Skini  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
221531Skini  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
270Sstevel@tonic-gate 
280Sstevel@tonic-gate #include <sys/types.h>
290Sstevel@tonic-gate #include <sys/kmem.h>
300Sstevel@tonic-gate #include <sys/conf.h>
310Sstevel@tonic-gate #include <sys/ddi.h>
320Sstevel@tonic-gate #include <sys/sunddi.h>
3327Sjchu #include <sys/fm/protocol.h>
3427Sjchu #include <sys/fm/util.h>
350Sstevel@tonic-gate #include <sys/modctl.h>
360Sstevel@tonic-gate #include <sys/disp.h>
370Sstevel@tonic-gate #include <sys/stat.h>
380Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
390Sstevel@tonic-gate #include <sys/vmem.h>
400Sstevel@tonic-gate #include <sys/iommutsb.h>
410Sstevel@tonic-gate #include <sys/cpuvar.h>
4227Sjchu #include <sys/ivintr.h>
43383Set142600 #include <sys/byteorder.h>
441531Skini #include <sys/hotplug/pci/pciehpc.h>
450Sstevel@tonic-gate #include <px_obj.h>
460Sstevel@tonic-gate #include <pcie_pwr.h>
47*1772Sjl139090 #include "px_tools_var.h"
480Sstevel@tonic-gate #include <px_regs.h>
490Sstevel@tonic-gate #include <px_csr.h>
5027Sjchu #include <sys/machsystm.h>
510Sstevel@tonic-gate #include "px_lib4u.h"
5227Sjchu #include "px_err.h"
53*1772Sjl139090 #include "oberon_regs.h"
540Sstevel@tonic-gate 
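/*
 * jbus_stst_order exists only in certain CPU modules; it is declared
 * weak here, and px_lib_dma_sync() checks its address at run time
 * before calling it.
 */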
550Sstevel@tonic-gate #pragma weak jbus_stst_order
560Sstevel@tonic-gate 
570Sstevel@tonic-gate extern void jbus_stst_order();
580Sstevel@tonic-gate 
590Sstevel@tonic-gate ulong_t px_mmu_dvma_end = 0xfffffffful;
600Sstevel@tonic-gate uint_t px_ranges_phi_mask = 0xfffffffful;
61*1772Sjl139090 uint64_t *px_oberon_ubc_scratch_regs;
620Sstevel@tonic-gate 
630Sstevel@tonic-gate static int px_goto_l23ready(px_t *px_p);
64118Sjchu static int px_goto_l0(px_t *px_p);
65118Sjchu static int px_pre_pwron_check(px_t *px_p);
660Sstevel@tonic-gate static uint32_t px_identity_chip(px_t *px_p);
67435Sjchu static boolean_t px_cpr_callb(void *arg, int code);
681648Sjchu static uint_t px_cb_intr(caddr_t arg);
6927Sjchu 
7027Sjchu /*
7127Sjchu  * px_lib_map_regs
7227Sjchu  *
7327Sjchu  * This function is called from the attach routine to map the registers
7427Sjchu  * accessed by this driver.
7527Sjchu  *
7627Sjchu  * used by: px_attach()
7727Sjchu  *
7827Sjchu  * return value: DDI_FAILURE on failure
7927Sjchu  */
8027Sjchu int
8127Sjchu px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip)
8227Sjchu {
8327Sjchu 	ddi_device_acc_attr_t	attr;
8427Sjchu 	px_reg_bank_t		reg_bank = PX_REG_CSR;
8527Sjchu 
8627Sjchu 	DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n",
8727Sjchu 	    pxu_p, dip);
8827Sjchu 
8927Sjchu 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
9027Sjchu 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
9127Sjchu 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
9227Sjchu 
9327Sjchu 	/*
9427Sjchu 	 * PCI CSR Base
9527Sjchu 	 */
9627Sjchu 	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
9727Sjchu 	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
9827Sjchu 		goto fail;
9927Sjchu 	}
10027Sjchu 
10127Sjchu 	reg_bank++;
10227Sjchu 
10327Sjchu 	/*
10427Sjchu 	 * XBUS CSR Base
10527Sjchu 	 */
10627Sjchu 	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
10727Sjchu 	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
10827Sjchu 		goto fail;
10927Sjchu 	}
11027Sjchu 
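	/*
	 * The XBC register offsets appear to be defined relative to the
	 * start of the block, so bias the mapped base back by
	 * FIRE_CONTROL_STATUS before they are applied (assumption based
	 * on the adjustment below).
	 */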
11127Sjchu 	pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS;
11227Sjchu 
11327Sjchu done:
11427Sjchu 	for (; reg_bank >= PX_REG_CSR; reg_bank--) {
11527Sjchu 		DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n",
11627Sjchu 		    reg_bank, pxu_p->px_address[reg_bank]);
11727Sjchu 	}
11827Sjchu 
11927Sjchu 	return (DDI_SUCCESS);
12027Sjchu 
12127Sjchu fail:
12227Sjchu 	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
12327Sjchu 	    ddi_driver_name(dip), ddi_get_instance(dip), reg_bank);
12427Sjchu 
12527Sjchu 	for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) {
12627Sjchu 		pxu_p->px_address[reg_bank] = NULL;
12727Sjchu 		ddi_regs_map_free(&pxu_p->px_ac[reg_bank]);
12827Sjchu 	}
12927Sjchu 
13027Sjchu 	return (DDI_FAILURE);
13127Sjchu }
13227Sjchu 
13327Sjchu /*
13427Sjchu  * px_lib_unmap_regs:
13527Sjchu  *
13627Sjchu  * This routine unmaps the registers mapped by px_lib_map_regs().
13727Sjchu  *
13827Sjchu  * used by: px_detach(), and error conditions in px_attach()
13927Sjchu  *
14027Sjchu  * return value: none
14127Sjchu  */
14227Sjchu void
14327Sjchu px_lib_unmap_regs(pxu_t *pxu_p)
14427Sjchu {
14527Sjchu 	int i;
14627Sjchu 
14727Sjchu 	for (i = 0; i < PX_REG_MAX; i++) {
14827Sjchu 		if (pxu_p->px_ac[i])
14927Sjchu 			ddi_regs_map_free(&pxu_p->px_ac[i]);
15027Sjchu 	}
15127Sjchu }
1520Sstevel@tonic-gate 
1530Sstevel@tonic-gate int
1540Sstevel@tonic-gate px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
1550Sstevel@tonic-gate {
1560Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
15727Sjchu 	caddr_t		xbc_csr_base, csr_base;
1580Sstevel@tonic-gate 	px_dvma_range_prop_t	px_dvma_range;
1590Sstevel@tonic-gate 	uint32_t	chip_id;
1600Sstevel@tonic-gate 	pxu_t		*pxu_p;
1610Sstevel@tonic-gate 
1620Sstevel@tonic-gate 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip);
1630Sstevel@tonic-gate 
1640Sstevel@tonic-gate 	if ((chip_id = px_identity_chip(px_p)) == PX_CHIP_UNIDENTIFIED)
1650Sstevel@tonic-gate 		return (DDI_FAILURE);
1660Sstevel@tonic-gate 
1670Sstevel@tonic-gate 	switch (chip_id) {
1680Sstevel@tonic-gate 	case FIRE_VER_10:
169225Sess 		cmn_err(CE_WARN, "FIRE Hardware Version 1.0 is not supported");
170225Sess 		return (DDI_FAILURE);
1710Sstevel@tonic-gate 	case FIRE_VER_20:
1720Sstevel@tonic-gate 		DBG(DBG_ATTACH, dip, "FIRE Hardware Version 2.0\n");
1730Sstevel@tonic-gate 		break;
174*1772Sjl139090 	case OBERON_VER_10:
175*1772Sjl139090 		DBG(DBG_ATTACH, dip, "Oberon Hardware Version 1.0\n");
176*1772Sjl139090 		break;
1770Sstevel@tonic-gate 	default:
178*1772Sjl139090 		cmn_err(CE_WARN, "%s%d: PX Hardware Version Unknown\n",
1790Sstevel@tonic-gate 		    ddi_driver_name(dip), ddi_get_instance(dip));
1800Sstevel@tonic-gate 		return (DDI_FAILURE);
1810Sstevel@tonic-gate 	}
1820Sstevel@tonic-gate 
1830Sstevel@tonic-gate 	/*
1840Sstevel@tonic-gate 	 * Allocate platform specific structure and link it to
1850Sstevel@tonic-gate 	 * the px state structure.
1860Sstevel@tonic-gate 	 */
1870Sstevel@tonic-gate 	pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
1880Sstevel@tonic-gate 	pxu_p->chip_id = chip_id;
1890Sstevel@tonic-gate 	pxu_p->portid  = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1900Sstevel@tonic-gate 	    "portid", -1);
1910Sstevel@tonic-gate 
19227Sjchu 	/* Map in the registers */
19327Sjchu 	if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) {
19427Sjchu 		kmem_free(pxu_p, sizeof (pxu_t));
19527Sjchu 
19627Sjchu 		return (DDI_FAILURE);
19727Sjchu 	}
19827Sjchu 
19927Sjchu 	xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
20027Sjchu 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
20127Sjchu 
2020Sstevel@tonic-gate 	pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
2030Sstevel@tonic-gate 	pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
2040Sstevel@tonic-gate 	pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);
2050Sstevel@tonic-gate 
206*1772Sjl139090 	pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr);
207*1772Sjl139090 
2080Sstevel@tonic-gate 	/*
2090Sstevel@tonic-gate 	 * Create "virtual-dma" property to support child devices
2100Sstevel@tonic-gate 	 * needing to know DVMA range.
2110Sstevel@tonic-gate 	 */
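	/*
	 * The computation below assumes each 8-byte TSB entry maps one
	 * MMU page, so the DVMA window spans (tsb_size / 8) pages and
	 * ends at px_mmu_dvma_end.
	 */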
2120Sstevel@tonic-gate 	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
2130Sstevel@tonic-gate 	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
2140Sstevel@tonic-gate 	px_dvma_range.dvma_len = (uint32_t)
2150Sstevel@tonic-gate 	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;
2160Sstevel@tonic-gate 
2170Sstevel@tonic-gate 	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
2180Sstevel@tonic-gate 		"virtual-dma", (caddr_t)&px_dvma_range,
2190Sstevel@tonic-gate 		sizeof (px_dvma_range_prop_t));
2200Sstevel@tonic-gate 	/*
2210Sstevel@tonic-gate 	 * Initialize all Fire hardware specific blocks.
2220Sstevel@tonic-gate 	 */
2230Sstevel@tonic-gate 	hvio_cb_init(xbc_csr_base, pxu_p);
2240Sstevel@tonic-gate 	hvio_ib_init(csr_base, pxu_p);
2250Sstevel@tonic-gate 	hvio_pec_init(csr_base, pxu_p);
2260Sstevel@tonic-gate 	hvio_mmu_init(csr_base, pxu_p);
2270Sstevel@tonic-gate 
2280Sstevel@tonic-gate 	px_p->px_plat_p = (void *)pxu_p;
2290Sstevel@tonic-gate 
23027Sjchu 	/*
23127Sjchu 	 * Enable all the error interrupt registers
23227Sjchu 	 */
233*1772Sjl139090 	switch (PX_CHIP_TYPE(pxu_p)) {
234*1772Sjl139090 	case PX_CHIP_OBERON:
235*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_UBC);
236*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_MMU);
237*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_IMU);
238*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_TLU_UE);
239*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_TLU_CE);
240*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_TLU_OE);
241*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_ILU);
242*1772Sjl139090 
243*1772Sjl139090 		px_fabric_die_rc_ue |= PCIE_AER_UCE_UC;
244*1772Sjl139090 		break;
245*1772Sjl139090 
246*1772Sjl139090 	case PX_CHIP_FIRE:
247*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_JBC);
248*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_MMU);
249*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_IMU);
250*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_TLU_UE);
251*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_TLU_CE);
252*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_TLU_OE);
253*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_ILU);
254*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_LPU_LINK);
255*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_LPU_PHY);
256*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_LPU_RX);
257*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_LPU_TX);
258*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_LPU_LTSSM);
259*1772Sjl139090 		px_err_reg_enable(px_p, PX_ERR_LPU_GIGABLZ);
260*1772Sjl139090 		break;
261*1772Sjl139090 	default:
262*1772Sjl139090 		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
263*1772Sjl139090 		    ddi_driver_name(dip), ddi_get_instance(dip));
264*1772Sjl139090 		return (DDI_FAILURE);
265*1772Sjl139090 	}
26627Sjchu 
2670Sstevel@tonic-gate 	/* Initialize device handle */
2680Sstevel@tonic-gate 	*dev_hdl = (devhandle_t)csr_base;
2690Sstevel@tonic-gate 
2700Sstevel@tonic-gate 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);
2710Sstevel@tonic-gate 
2720Sstevel@tonic-gate 	return (DDI_SUCCESS);
2730Sstevel@tonic-gate }
2740Sstevel@tonic-gate 
2750Sstevel@tonic-gate int
2760Sstevel@tonic-gate px_lib_dev_fini(dev_info_t *dip)
2770Sstevel@tonic-gate {
2780Sstevel@tonic-gate 	px_t	*px_p = DIP_TO_STATE(dip);
2790Sstevel@tonic-gate 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
2800Sstevel@tonic-gate 
2810Sstevel@tonic-gate 	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);
2820Sstevel@tonic-gate 
28327Sjchu 	/*
28427Sjchu 	 * Disable all the error interrupt registers
28527Sjchu 	 */
286*1772Sjl139090 	switch (PX_CHIP_TYPE(pxu_p)) {
287*1772Sjl139090 	case PX_CHIP_OBERON:
288*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_UBC);
289*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_MMU);
290*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_IMU);
291*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_TLU_UE);
292*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_TLU_CE);
293*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_TLU_OE);
294*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_ILU);
295*1772Sjl139090 		break;
296*1772Sjl139090 	case PX_CHIP_FIRE:
297*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_JBC);
298*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_MMU);
299*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_IMU);
300*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_TLU_UE);
301*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_TLU_CE);
302*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_TLU_OE);
303*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_ILU);
304*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_LPU_LINK);
305*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_LPU_PHY);
306*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_LPU_RX);
307*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_LPU_TX);
308*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_LPU_LTSSM);
309*1772Sjl139090 		px_err_reg_disable(px_p, PX_ERR_LPU_GIGABLZ);
310*1772Sjl139090 		break;
311*1772Sjl139090 	default:
312*1772Sjl139090 		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
313*1772Sjl139090 		    ddi_driver_name(dip), ddi_get_instance(dip));
314*1772Sjl139090 		return (DDI_FAILURE);
315*1772Sjl139090 	}
31627Sjchu 
3170Sstevel@tonic-gate 	iommu_tsb_free(pxu_p->tsb_cookie);
3180Sstevel@tonic-gate 
31927Sjchu 	px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
32027Sjchu 	kmem_free(px_p->px_plat_p, sizeof (pxu_t));
3210Sstevel@tonic-gate 	px_p->px_plat_p = NULL;
3220Sstevel@tonic-gate 
3230Sstevel@tonic-gate 	return (DDI_SUCCESS);
3240Sstevel@tonic-gate }
3250Sstevel@tonic-gate 
3260Sstevel@tonic-gate /*ARGSUSED*/
3270Sstevel@tonic-gate int
3280Sstevel@tonic-gate px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
3290Sstevel@tonic-gate     sysino_t *sysino)
3300Sstevel@tonic-gate {
3310Sstevel@tonic-gate 	px_t	*px_p = DIP_TO_STATE(dip);
3320Sstevel@tonic-gate 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
3330Sstevel@tonic-gate 	uint64_t	ret;
3340Sstevel@tonic-gate 
3350Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
3360Sstevel@tonic-gate 	    "devino 0x%x\n", dip, devino);
3370Sstevel@tonic-gate 
3380Sstevel@tonic-gate 	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
3390Sstevel@tonic-gate 	    pxu_p, devino, sysino)) != H_EOK) {
3400Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip,
3410Sstevel@tonic-gate 		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
3420Sstevel@tonic-gate 		return (DDI_FAILURE);
3430Sstevel@tonic-gate 	}
3440Sstevel@tonic-gate 
3450Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
3460Sstevel@tonic-gate 	    *sysino);
3470Sstevel@tonic-gate 
3480Sstevel@tonic-gate 	return (DDI_SUCCESS);
3490Sstevel@tonic-gate }
3500Sstevel@tonic-gate 
3510Sstevel@tonic-gate /*ARGSUSED*/
3520Sstevel@tonic-gate int
3530Sstevel@tonic-gate px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
3540Sstevel@tonic-gate     intr_valid_state_t *intr_valid_state)
3550Sstevel@tonic-gate {
3560Sstevel@tonic-gate 	uint64_t	ret;
3570Sstevel@tonic-gate 
3580Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
3590Sstevel@tonic-gate 	    dip, sysino);
3600Sstevel@tonic-gate 
3610Sstevel@tonic-gate 	if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip),
3620Sstevel@tonic-gate 	    sysino, intr_valid_state)) != H_EOK) {
3630Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
3640Sstevel@tonic-gate 		    ret);
3650Sstevel@tonic-gate 		return (DDI_FAILURE);
3660Sstevel@tonic-gate 	}
3670Sstevel@tonic-gate 
3680Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
3690Sstevel@tonic-gate 	    *intr_valid_state);
3700Sstevel@tonic-gate 
3710Sstevel@tonic-gate 	return (DDI_SUCCESS);
3720Sstevel@tonic-gate }
3730Sstevel@tonic-gate 
3740Sstevel@tonic-gate /*ARGSUSED*/
3750Sstevel@tonic-gate int
3760Sstevel@tonic-gate px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
3770Sstevel@tonic-gate     intr_valid_state_t intr_valid_state)
3780Sstevel@tonic-gate {
3790Sstevel@tonic-gate 	uint64_t	ret;
3800Sstevel@tonic-gate 
3810Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
3820Sstevel@tonic-gate 	    "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);
3830Sstevel@tonic-gate 
3840Sstevel@tonic-gate 	if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip),
3850Sstevel@tonic-gate 	    sysino, intr_valid_state)) != H_EOK) {
3860Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
3870Sstevel@tonic-gate 		    ret);
3880Sstevel@tonic-gate 		return (DDI_FAILURE);
3890Sstevel@tonic-gate 	}
3900Sstevel@tonic-gate 
3910Sstevel@tonic-gate 	return (DDI_SUCCESS);
3920Sstevel@tonic-gate }
3930Sstevel@tonic-gate 
3940Sstevel@tonic-gate /*ARGSUSED*/
3950Sstevel@tonic-gate int
3960Sstevel@tonic-gate px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
3970Sstevel@tonic-gate     intr_state_t *intr_state)
3980Sstevel@tonic-gate {
3990Sstevel@tonic-gate 	uint64_t	ret;
4000Sstevel@tonic-gate 
4010Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
4020Sstevel@tonic-gate 	    dip, sysino);
4030Sstevel@tonic-gate 
4040Sstevel@tonic-gate 	if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip),
4050Sstevel@tonic-gate 	    sysino, intr_state)) != H_EOK) {
4060Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
4070Sstevel@tonic-gate 		    ret);
4080Sstevel@tonic-gate 		return (DDI_FAILURE);
4090Sstevel@tonic-gate 	}
4100Sstevel@tonic-gate 
4110Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
4120Sstevel@tonic-gate 	    *intr_state);
4130Sstevel@tonic-gate 
4140Sstevel@tonic-gate 	return (DDI_SUCCESS);
4150Sstevel@tonic-gate }
4160Sstevel@tonic-gate 
4170Sstevel@tonic-gate /*ARGSUSED*/
4180Sstevel@tonic-gate int
4190Sstevel@tonic-gate px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
4200Sstevel@tonic-gate     intr_state_t intr_state)
4210Sstevel@tonic-gate {
4220Sstevel@tonic-gate 	uint64_t	ret;
4230Sstevel@tonic-gate 
4240Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
4250Sstevel@tonic-gate 	    "intr_state 0x%x\n", dip, sysino, intr_state);
4260Sstevel@tonic-gate 
4270Sstevel@tonic-gate 	if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip),
4280Sstevel@tonic-gate 	    sysino, intr_state)) != H_EOK) {
4290Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
4300Sstevel@tonic-gate 		    ret);
4310Sstevel@tonic-gate 		return (DDI_FAILURE);
4320Sstevel@tonic-gate 	}
4330Sstevel@tonic-gate 
4340Sstevel@tonic-gate 	return (DDI_SUCCESS);
4350Sstevel@tonic-gate }
4360Sstevel@tonic-gate 
4370Sstevel@tonic-gate /*ARGSUSED*/
4380Sstevel@tonic-gate int
4390Sstevel@tonic-gate px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
4400Sstevel@tonic-gate {
441*1772Sjl139090 	px_t		*px_p = DIP_TO_STATE(dip);
442*1772Sjl139090 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
4430Sstevel@tonic-gate 	uint64_t	ret;
4440Sstevel@tonic-gate 
4450Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
4460Sstevel@tonic-gate 	    dip, sysino);
4470Sstevel@tonic-gate 
448*1772Sjl139090 	if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p,
4490Sstevel@tonic-gate 	    sysino, cpuid)) != H_EOK) {
4500Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n",
4510Sstevel@tonic-gate 		    ret);
4520Sstevel@tonic-gate 		return (DDI_FAILURE);
4530Sstevel@tonic-gate 	}
4540Sstevel@tonic-gate 
4550Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", cpuid);
4560Sstevel@tonic-gate 
4570Sstevel@tonic-gate 	return (DDI_SUCCESS);
4580Sstevel@tonic-gate }
4590Sstevel@tonic-gate 
4600Sstevel@tonic-gate /*ARGSUSED*/
4610Sstevel@tonic-gate int
4620Sstevel@tonic-gate px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
4630Sstevel@tonic-gate {
464*1772Sjl139090 	px_t		*px_p = DIP_TO_STATE(dip);
465*1772Sjl139090 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
4660Sstevel@tonic-gate 	uint64_t	ret;
4670Sstevel@tonic-gate 
4680Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
4690Sstevel@tonic-gate 	    "cpuid 0x%x\n", dip, sysino, cpuid);
4700Sstevel@tonic-gate 
471*1772Sjl139090 	if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p,
4720Sstevel@tonic-gate 	    sysino, cpuid)) != H_EOK) {
4730Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n",
4740Sstevel@tonic-gate 		    ret);
4750Sstevel@tonic-gate 		return (DDI_FAILURE);
4760Sstevel@tonic-gate 	}
4770Sstevel@tonic-gate 
4780Sstevel@tonic-gate 	return (DDI_SUCCESS);
4790Sstevel@tonic-gate }
4800Sstevel@tonic-gate 
4810Sstevel@tonic-gate /*ARGSUSED*/
4820Sstevel@tonic-gate int
4830Sstevel@tonic-gate px_lib_intr_reset(dev_info_t *dip)
4840Sstevel@tonic-gate {
4850Sstevel@tonic-gate 	devino_t	ino;
4860Sstevel@tonic-gate 	sysino_t	sysino;
4870Sstevel@tonic-gate 
4880Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);
4890Sstevel@tonic-gate 
4900Sstevel@tonic-gate 	/* Reset all Interrupts */
4910Sstevel@tonic-gate 	for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) {
4920Sstevel@tonic-gate 		if (px_lib_intr_devino_to_sysino(dip, ino,
4930Sstevel@tonic-gate 		    &sysino) != DDI_SUCCESS)
4940Sstevel@tonic-gate 			return (BF_FATAL);
4950Sstevel@tonic-gate 
4960Sstevel@tonic-gate 		if (px_lib_intr_setstate(dip, sysino,
4970Sstevel@tonic-gate 		    INTR_IDLE_STATE) != DDI_SUCCESS)
4980Sstevel@tonic-gate 			return (BF_FATAL);
4990Sstevel@tonic-gate 	}
5000Sstevel@tonic-gate 
5010Sstevel@tonic-gate 	return (BF_NONE);
5020Sstevel@tonic-gate }
5030Sstevel@tonic-gate 
5040Sstevel@tonic-gate /*ARGSUSED*/
5050Sstevel@tonic-gate int
5060Sstevel@tonic-gate px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
5071617Sgovinda     io_attributes_t attr, void *addr, size_t pfn_index, int flags)
5080Sstevel@tonic-gate {
5090Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
5100Sstevel@tonic-gate 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
5110Sstevel@tonic-gate 	uint64_t	ret;
5120Sstevel@tonic-gate 
5130Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
5141617Sgovinda 	    "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx flags 0x%x\n",
5151617Sgovinda 	    dip, tsbid, pages, attr, addr, pfn_index, flags);
5160Sstevel@tonic-gate 
5170Sstevel@tonic-gate 	if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
5181617Sgovinda 	    attr, addr, pfn_index, flags)) != H_EOK) {
5190Sstevel@tonic-gate 		DBG(DBG_LIB_DMA, dip,
5200Sstevel@tonic-gate 		    "px_lib_iommu_map failed, ret 0x%lx\n", ret);
5210Sstevel@tonic-gate 		return (DDI_FAILURE);
5220Sstevel@tonic-gate 	}
5230Sstevel@tonic-gate 
5240Sstevel@tonic-gate 	return (DDI_SUCCESS);
5250Sstevel@tonic-gate }
5260Sstevel@tonic-gate 
5270Sstevel@tonic-gate /*ARGSUSED*/
5280Sstevel@tonic-gate int
5290Sstevel@tonic-gate px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
5300Sstevel@tonic-gate {
5310Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
5320Sstevel@tonic-gate 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
5330Sstevel@tonic-gate 	uint64_t	ret;
5340Sstevel@tonic-gate 
5350Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
5360Sstevel@tonic-gate 	    "pages 0x%x\n", dip, tsbid, pages);
5370Sstevel@tonic-gate 
5380Sstevel@tonic-gate 	if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
5390Sstevel@tonic-gate 	    != H_EOK) {
5400Sstevel@tonic-gate 		DBG(DBG_LIB_DMA, dip,
5410Sstevel@tonic-gate 		    "px_lib_iommu_demap failed, ret 0x%lx\n", ret);
5420Sstevel@tonic-gate 
5430Sstevel@tonic-gate 		return (DDI_FAILURE);
5440Sstevel@tonic-gate 	}
5450Sstevel@tonic-gate 
5460Sstevel@tonic-gate 	return (DDI_SUCCESS);
5470Sstevel@tonic-gate }
5480Sstevel@tonic-gate 
5490Sstevel@tonic-gate /*ARGSUSED*/
5500Sstevel@tonic-gate int
5511617Sgovinda px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
5521617Sgovinda     r_addr_t *r_addr_p)
5530Sstevel@tonic-gate {
5540Sstevel@tonic-gate 	px_t	*px_p = DIP_TO_STATE(dip);
5550Sstevel@tonic-gate 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
5560Sstevel@tonic-gate 	uint64_t	ret;
5570Sstevel@tonic-gate 
5580Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
5590Sstevel@tonic-gate 	    dip, tsbid);
5600Sstevel@tonic-gate 
5610Sstevel@tonic-gate 	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
5621617Sgovinda 	    attr_p, r_addr_p)) != H_EOK) {
5630Sstevel@tonic-gate 		DBG(DBG_LIB_DMA, dip,
5640Sstevel@tonic-gate 		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);
5650Sstevel@tonic-gate 
5660Sstevel@tonic-gate 		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE);
5670Sstevel@tonic-gate 	}
5680Sstevel@tonic-gate 
5690Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n",
5701617Sgovinda 	    *attr_p, *r_addr_p);
5710Sstevel@tonic-gate 
5720Sstevel@tonic-gate 	return (DDI_SUCCESS);
5730Sstevel@tonic-gate }
5740Sstevel@tonic-gate 
5750Sstevel@tonic-gate 
5760Sstevel@tonic-gate /*
5770Sstevel@tonic-gate  * Checks DMA attributes against the system bypass range.
5780Sstevel@tonic-gate  * The bypass range is determined by the hardware; return its bounds so
5790Sstevel@tonic-gate  * the common code can do generic checking against them.
5800Sstevel@tonic-gate  */
5810Sstevel@tonic-gate /*ARGSUSED*/
5820Sstevel@tonic-gate int
583*1772Sjl139090 px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p,
584*1772Sjl139090     uint64_t *lo_p, uint64_t *hi_p)
5850Sstevel@tonic-gate {
586*1772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
587*1772Sjl139090 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
588*1772Sjl139090 
589*1772Sjl139090 	*lo_p = hvio_get_bypass_base(pxu_p);
590*1772Sjl139090 	*hi_p = hvio_get_bypass_end(pxu_p);
5910Sstevel@tonic-gate 
5920Sstevel@tonic-gate 	return (DDI_SUCCESS);
5930Sstevel@tonic-gate }
5940Sstevel@tonic-gate 
5950Sstevel@tonic-gate 
5960Sstevel@tonic-gate /*ARGSUSED*/
5970Sstevel@tonic-gate int
5981617Sgovinda px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr,
5991617Sgovinda     io_addr_t *io_addr_p)
6000Sstevel@tonic-gate {
6010Sstevel@tonic-gate 	uint64_t	ret;
602*1772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
603*1772Sjl139090 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
6040Sstevel@tonic-gate 
6050Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
6061617Sgovinda 	    "attr 0x%x\n", dip, ra, attr);
6070Sstevel@tonic-gate 
608*1772Sjl139090 	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra,
609*1772Sjl139090 	    attr, io_addr_p)) != H_EOK) {
6100Sstevel@tonic-gate 		DBG(DBG_LIB_DMA, dip,
6110Sstevel@tonic-gate 		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
6120Sstevel@tonic-gate 		return (DDI_FAILURE);
6130Sstevel@tonic-gate 	}
6140Sstevel@tonic-gate 
6150Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
6160Sstevel@tonic-gate 	    *io_addr_p);
6170Sstevel@tonic-gate 
6180Sstevel@tonic-gate 	return (DDI_SUCCESS);
6190Sstevel@tonic-gate }
6200Sstevel@tonic-gate 
6210Sstevel@tonic-gate /*
6220Sstevel@tonic-gate  * bus dma sync entry point.
6230Sstevel@tonic-gate  */
6240Sstevel@tonic-gate /*ARGSUSED*/
6250Sstevel@tonic-gate int
6260Sstevel@tonic-gate px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
6271617Sgovinda     off_t off, size_t len, uint_t cache_flags)
6280Sstevel@tonic-gate {
6290Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
630*1772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
631*1772Sjl139090 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
6320Sstevel@tonic-gate 
6330Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
6340Sstevel@tonic-gate 	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
6350Sstevel@tonic-gate 	    dip, rdip, handle, off, len, cache_flags);
6360Sstevel@tonic-gate 
6370Sstevel@tonic-gate 	/*
638*1772Sjl139090 	 * No flush needed for Oberon
639*1772Sjl139090 	 */
640*1772Sjl139090 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
641*1772Sjl139090 		return (DDI_SUCCESS);
642*1772Sjl139090 
643*1772Sjl139090 	/*
6440Sstevel@tonic-gate 	 * jbus_stst_order is found only in certain cpu modules.
6450Sstevel@tonic-gate 	 * Just return success if not present.
6460Sstevel@tonic-gate 	 */
6470Sstevel@tonic-gate 	if (&jbus_stst_order == NULL)
6480Sstevel@tonic-gate 		return (DDI_SUCCESS);
6490Sstevel@tonic-gate 
650909Segillett 	if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) {
65127Sjchu 		cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
65227Sjchu 		    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);
65327Sjchu 
6540Sstevel@tonic-gate 		return (DDI_FAILURE);
6550Sstevel@tonic-gate 	}
6560Sstevel@tonic-gate 
657909Segillett 	if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC)
6580Sstevel@tonic-gate 		return (DDI_SUCCESS);
6590Sstevel@tonic-gate 
6600Sstevel@tonic-gate 	/*
6610Sstevel@tonic-gate 	 * No flush needed when sending data from memory to device.
6620Sstevel@tonic-gate 	 * Nothing to do to "sync" memory to what device would already see.
6630Sstevel@tonic-gate 	 */
6640Sstevel@tonic-gate 	if (!(mp->dmai_rflags & DDI_DMA_READ) ||
6650Sstevel@tonic-gate 	    ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
6660Sstevel@tonic-gate 		return (DDI_SUCCESS);
6670Sstevel@tonic-gate 
6680Sstevel@tonic-gate 	/*
6690Sstevel@tonic-gate 	 * Perform necessary cpu workaround to ensure jbus ordering.
6700Sstevel@tonic-gate 	 * CPU's internal "invalidate FIFOs" are flushed.
6710Sstevel@tonic-gate 	 */
6720Sstevel@tonic-gate 
6730Sstevel@tonic-gate #if !defined(lint)
6740Sstevel@tonic-gate 	kpreempt_disable();
6750Sstevel@tonic-gate #endif
6760Sstevel@tonic-gate 	jbus_stst_order();
6770Sstevel@tonic-gate #if !defined(lint)
6780Sstevel@tonic-gate 	kpreempt_enable();
6790Sstevel@tonic-gate #endif
6800Sstevel@tonic-gate 	return (DDI_SUCCESS);
6810Sstevel@tonic-gate }
6820Sstevel@tonic-gate 
6830Sstevel@tonic-gate /*
6840Sstevel@tonic-gate  * MSIQ Functions:
6850Sstevel@tonic-gate  */
6860Sstevel@tonic-gate /*ARGSUSED*/
6870Sstevel@tonic-gate int
6880Sstevel@tonic-gate px_lib_msiq_init(dev_info_t *dip)
6890Sstevel@tonic-gate {
6900Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
6910Sstevel@tonic-gate 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
6920Sstevel@tonic-gate 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
6930Sstevel@tonic-gate 	caddr_t		msiq_addr;
6940Sstevel@tonic-gate 	px_dvma_addr_t	pg_index;
6950Sstevel@tonic-gate 	size_t		size;
6960Sstevel@tonic-gate 	int		ret;
6970Sstevel@tonic-gate 
6980Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);
6990Sstevel@tonic-gate 
7000Sstevel@tonic-gate 	/*
7010Sstevel@tonic-gate 	 * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
7020Sstevel@tonic-gate 	 * and then initialize the base address register.
7030Sstevel@tonic-gate 	 *
7040Sstevel@tonic-gate 	 * Allocate entries from Fire IOMMU so that the resulting address
7050Sstevel@tonic-gate 	 * is properly aligned.  Calculate the index of the first allocated
7060Sstevel@tonic-gate 	 * entry.  Note: The size of the mapping is assumed to be a multiple
7070Sstevel@tonic-gate 	 * of the page size.
7080Sstevel@tonic-gate 	 */
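	/* Round the EQ buffer VA up to the next MMU page boundary. */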
7090Sstevel@tonic-gate 	msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p +
7100Sstevel@tonic-gate 	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);
7110Sstevel@tonic-gate 
7120Sstevel@tonic-gate 	size = msiq_state_p->msiq_cnt *
7130Sstevel@tonic-gate 	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
7140Sstevel@tonic-gate 
7150Sstevel@tonic-gate 	pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
7160Sstevel@tonic-gate 	    size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);
7170Sstevel@tonic-gate 
7180Sstevel@tonic-gate 	if (pxu_p->msiq_mapped_p == NULL)
7190Sstevel@tonic-gate 		return (DDI_FAILURE);
7200Sstevel@tonic-gate 
7210Sstevel@tonic-gate 	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
7220Sstevel@tonic-gate 	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
7230Sstevel@tonic-gate 
7240Sstevel@tonic-gate 	if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
7250Sstevel@tonic-gate 	    MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0,
7260Sstevel@tonic-gate 	    MMU_MAP_BUF)) != DDI_SUCCESS) {
7270Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
7280Sstevel@tonic-gate 		    "hvio_msiq_init failed, ret 0x%lx\n", ret);
7290Sstevel@tonic-gate 
7300Sstevel@tonic-gate 		(void) px_lib_msiq_fini(dip);
7310Sstevel@tonic-gate 		return (DDI_FAILURE);
7320Sstevel@tonic-gate 	}
7330Sstevel@tonic-gate 
7340Sstevel@tonic-gate 	(void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p);
7350Sstevel@tonic-gate 
7360Sstevel@tonic-gate 	return (DDI_SUCCESS);
7370Sstevel@tonic-gate }
7380Sstevel@tonic-gate 
7390Sstevel@tonic-gate /*ARGSUSED*/
7400Sstevel@tonic-gate int
7410Sstevel@tonic-gate px_lib_msiq_fini(dev_info_t *dip)
7420Sstevel@tonic-gate {
7430Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
7440Sstevel@tonic-gate 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
7450Sstevel@tonic-gate 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
7460Sstevel@tonic-gate 	px_dvma_addr_t	pg_index;
7470Sstevel@tonic-gate 	size_t		size;
7480Sstevel@tonic-gate 
7490Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);
7500Sstevel@tonic-gate 
7510Sstevel@tonic-gate 	/*
7520Sstevel@tonic-gate 	 * Unmap and free the EQ memory that had been mapped
7530Sstevel@tonic-gate 	 * into the Fire IOMMU.
7540Sstevel@tonic-gate 	 */
7550Sstevel@tonic-gate 	size = msiq_state_p->msiq_cnt *
7560Sstevel@tonic-gate 	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
7570Sstevel@tonic-gate 
7580Sstevel@tonic-gate 	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
7590Sstevel@tonic-gate 	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
7600Sstevel@tonic-gate 
7610Sstevel@tonic-gate 	(void) px_lib_iommu_demap(px_p->px_dip,
7620Sstevel@tonic-gate 	    PCI_TSBID(0, pg_index), MMU_BTOP(size));
7630Sstevel@tonic-gate 
7640Sstevel@tonic-gate 	/* Free the entries from the Fire MMU */
7650Sstevel@tonic-gate 	vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
7660Sstevel@tonic-gate 	    (void *)pxu_p->msiq_mapped_p, size);
7670Sstevel@tonic-gate 
7680Sstevel@tonic-gate 	return (DDI_SUCCESS);
7690Sstevel@tonic-gate }
7700Sstevel@tonic-gate 
7710Sstevel@tonic-gate /*ARGSUSED*/
7720Sstevel@tonic-gate int
7730Sstevel@tonic-gate px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
7740Sstevel@tonic-gate     uint_t *msiq_rec_cnt_p)
7750Sstevel@tonic-gate {
7760Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
7770Sstevel@tonic-gate 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
7780Sstevel@tonic-gate 	uint64_t	*msiq_addr;
7790Sstevel@tonic-gate 	size_t		msiq_size;
7800Sstevel@tonic-gate 
7810Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
7820Sstevel@tonic-gate 	    dip, msiq_id);
7830Sstevel@tonic-gate 
7840Sstevel@tonic-gate 	msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p +
7850Sstevel@tonic-gate 	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);
7860Sstevel@tonic-gate 	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
7870Sstevel@tonic-gate 	*ra_p = (r_addr_t)((caddr_t)msiq_addr + (msiq_id * msiq_size));
7880Sstevel@tonic-gate 
7890Sstevel@tonic-gate 	*msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;
7900Sstevel@tonic-gate 
7910Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n",
7920Sstevel@tonic-gate 	    ra_p, *msiq_rec_cnt_p);
7930Sstevel@tonic-gate 
7940Sstevel@tonic-gate 	return (DDI_SUCCESS);
7950Sstevel@tonic-gate }
7960Sstevel@tonic-gate 
7970Sstevel@tonic-gate /*ARGSUSED*/
7980Sstevel@tonic-gate int
7990Sstevel@tonic-gate px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
8000Sstevel@tonic-gate     pci_msiq_valid_state_t *msiq_valid_state)
8010Sstevel@tonic-gate {
8020Sstevel@tonic-gate 	uint64_t	ret;
8030Sstevel@tonic-gate 
8040Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
8050Sstevel@tonic-gate 	    dip, msiq_id);
8060Sstevel@tonic-gate 
8070Sstevel@tonic-gate 	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
8080Sstevel@tonic-gate 	    msiq_id, msiq_valid_state)) != H_EOK) {
8090Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
8100Sstevel@tonic-gate 		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
8110Sstevel@tonic-gate 		return (DDI_FAILURE);
8120Sstevel@tonic-gate 	}
8130Sstevel@tonic-gate 
8140Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
8150Sstevel@tonic-gate 	    *msiq_valid_state);
8160Sstevel@tonic-gate 
8170Sstevel@tonic-gate 	return (DDI_SUCCESS);
8180Sstevel@tonic-gate }
8190Sstevel@tonic-gate 
8200Sstevel@tonic-gate /*ARGSUSED*/
8210Sstevel@tonic-gate int
8220Sstevel@tonic-gate px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
8230Sstevel@tonic-gate     pci_msiq_valid_state_t msiq_valid_state)
8240Sstevel@tonic-gate {
8250Sstevel@tonic-gate 	uint64_t	ret;
8260Sstevel@tonic-gate 
8270Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
8280Sstevel@tonic-gate 	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);
8290Sstevel@tonic-gate 
8300Sstevel@tonic-gate 	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
8310Sstevel@tonic-gate 	    msiq_id, msiq_valid_state)) != H_EOK) {
8320Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
8330Sstevel@tonic-gate 		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
8340Sstevel@tonic-gate 		return (DDI_FAILURE);
8350Sstevel@tonic-gate 	}
8360Sstevel@tonic-gate 
8370Sstevel@tonic-gate 	return (DDI_SUCCESS);
8380Sstevel@tonic-gate }
8390Sstevel@tonic-gate 
8400Sstevel@tonic-gate /*ARGSUSED*/
8410Sstevel@tonic-gate int
8420Sstevel@tonic-gate px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
8430Sstevel@tonic-gate     pci_msiq_state_t *msiq_state)
8440Sstevel@tonic-gate {
8450Sstevel@tonic-gate 	uint64_t	ret;
8460Sstevel@tonic-gate 
8470Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
8480Sstevel@tonic-gate 	    dip, msiq_id);
8490Sstevel@tonic-gate 
8500Sstevel@tonic-gate 	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
8510Sstevel@tonic-gate 	    msiq_id, msiq_state)) != H_EOK) {
8520Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
8530Sstevel@tonic-gate 		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
8540Sstevel@tonic-gate 		return (DDI_FAILURE);
8550Sstevel@tonic-gate 	}
8560Sstevel@tonic-gate 
8570Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
8580Sstevel@tonic-gate 	    *msiq_state);
8590Sstevel@tonic-gate 
8600Sstevel@tonic-gate 	return (DDI_SUCCESS);
8610Sstevel@tonic-gate }
8620Sstevel@tonic-gate 
8630Sstevel@tonic-gate /*ARGSUSED*/
8640Sstevel@tonic-gate int
8650Sstevel@tonic-gate px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
8660Sstevel@tonic-gate     pci_msiq_state_t msiq_state)
8670Sstevel@tonic-gate {
8680Sstevel@tonic-gate 	uint64_t	ret;
8690Sstevel@tonic-gate 
8700Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
8710Sstevel@tonic-gate 	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);
8720Sstevel@tonic-gate 
8730Sstevel@tonic-gate 	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
8740Sstevel@tonic-gate 	    msiq_id, msiq_state)) != H_EOK) {
8750Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
8760Sstevel@tonic-gate 		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
8770Sstevel@tonic-gate 		return (DDI_FAILURE);
8780Sstevel@tonic-gate 	}
8790Sstevel@tonic-gate 
8800Sstevel@tonic-gate 	return (DDI_SUCCESS);
8810Sstevel@tonic-gate }
8820Sstevel@tonic-gate 
8830Sstevel@tonic-gate /*ARGSUSED*/
8840Sstevel@tonic-gate int
8850Sstevel@tonic-gate px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
8860Sstevel@tonic-gate     msiqhead_t *msiq_head)
8870Sstevel@tonic-gate {
8880Sstevel@tonic-gate 	uint64_t	ret;
8890Sstevel@tonic-gate 
8900Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
8910Sstevel@tonic-gate 	    dip, msiq_id);
8920Sstevel@tonic-gate 
8930Sstevel@tonic-gate 	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
8940Sstevel@tonic-gate 	    msiq_id, msiq_head)) != H_EOK) {
8950Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
8960Sstevel@tonic-gate 		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
8970Sstevel@tonic-gate 		return (DDI_FAILURE);
8980Sstevel@tonic-gate 	}
8990Sstevel@tonic-gate 
9000Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
9010Sstevel@tonic-gate 	    *msiq_head);
9020Sstevel@tonic-gate 
9030Sstevel@tonic-gate 	return (DDI_SUCCESS);
9040Sstevel@tonic-gate }
9050Sstevel@tonic-gate 
9060Sstevel@tonic-gate /*ARGSUSED*/
9070Sstevel@tonic-gate int
9080Sstevel@tonic-gate px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
9090Sstevel@tonic-gate     msiqhead_t msiq_head)
9100Sstevel@tonic-gate {
9110Sstevel@tonic-gate 	uint64_t	ret;
9120Sstevel@tonic-gate 
9130Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
9140Sstevel@tonic-gate 	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);
9150Sstevel@tonic-gate 
9160Sstevel@tonic-gate 	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
9170Sstevel@tonic-gate 	    msiq_id, msiq_head)) != H_EOK) {
9180Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
9190Sstevel@tonic-gate 		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
9200Sstevel@tonic-gate 		return (DDI_FAILURE);
9210Sstevel@tonic-gate 	}
9220Sstevel@tonic-gate 
9230Sstevel@tonic-gate 	return (DDI_SUCCESS);
9240Sstevel@tonic-gate }
9250Sstevel@tonic-gate 
9260Sstevel@tonic-gate /*ARGSUSED*/
9270Sstevel@tonic-gate int
9280Sstevel@tonic-gate px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
9290Sstevel@tonic-gate     msiqtail_t *msiq_tail)
9300Sstevel@tonic-gate {
9310Sstevel@tonic-gate 	uint64_t	ret;
9320Sstevel@tonic-gate 
9330Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
9340Sstevel@tonic-gate 	    dip, msiq_id);
9350Sstevel@tonic-gate 
9360Sstevel@tonic-gate 	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
9370Sstevel@tonic-gate 	    msiq_id, msiq_tail)) != H_EOK) {
9380Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
9390Sstevel@tonic-gate 		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
9400Sstevel@tonic-gate 		return (DDI_FAILURE);
9410Sstevel@tonic-gate 	}
9420Sstevel@tonic-gate 
9430Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
9440Sstevel@tonic-gate 	    *msiq_tail);
9450Sstevel@tonic-gate 
9460Sstevel@tonic-gate 	return (DDI_SUCCESS);
9470Sstevel@tonic-gate }
9480Sstevel@tonic-gate 
9490Sstevel@tonic-gate /*ARGSUSED*/
9500Sstevel@tonic-gate void
9510Sstevel@tonic-gate px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p)
9520Sstevel@tonic-gate {
9530Sstevel@tonic-gate 	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_p->msiq_curr;
9540Sstevel@tonic-gate 
9550Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
9560Sstevel@tonic-gate 	    dip, eq_rec_p);
9570Sstevel@tonic-gate 
958287Smg140465 	if (!eq_rec_p->eq_rec_fmt_type) {
959287Smg140465 		/* Set msiq_rec_type to zero */
960287Smg140465 		msiq_rec_p->msiq_rec_type = 0;
9610Sstevel@tonic-gate 
9620Sstevel@tonic-gate 		return;
9630Sstevel@tonic-gate 	}
9640Sstevel@tonic-gate 
9650Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
9660Sstevel@tonic-gate 	    "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
9670Sstevel@tonic-gate 	    "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
9680Sstevel@tonic-gate 	    "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
9690Sstevel@tonic-gate 	    "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
9700Sstevel@tonic-gate 	    eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
9710Sstevel@tonic-gate 	    eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
9720Sstevel@tonic-gate 	    eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);
9730Sstevel@tonic-gate 
9740Sstevel@tonic-gate 	/*
9750Sstevel@tonic-gate 	 * Only the upper 4 bits of eq_rec_fmt_type are used
9760Sstevel@tonic-gate 	 * to identify the EQ record type.
9770Sstevel@tonic-gate 	 */
9780Sstevel@tonic-gate 	switch (eq_rec_p->eq_rec_fmt_type >> 3) {
9790Sstevel@tonic-gate 	case EQ_REC_MSI32:
9800Sstevel@tonic-gate 		msiq_rec_p->msiq_rec_type = MSI32_REC;
9810Sstevel@tonic-gate 
982225Sess 		msiq_rec_p->msiq_rec_data.msi.msi_data =
983225Sess 		    eq_rec_p->eq_rec_data0;
9840Sstevel@tonic-gate 		break;
9850Sstevel@tonic-gate 	case EQ_REC_MSI64:
9860Sstevel@tonic-gate 		msiq_rec_p->msiq_rec_type = MSI64_REC;
9870Sstevel@tonic-gate 
988225Sess 		msiq_rec_p->msiq_rec_data.msi.msi_data =
989225Sess 		    eq_rec_p->eq_rec_data0;
9900Sstevel@tonic-gate 		break;
9910Sstevel@tonic-gate 	case EQ_REC_MSG:
9920Sstevel@tonic-gate 		msiq_rec_p->msiq_rec_type = MSG_REC;
9930Sstevel@tonic-gate 
9940Sstevel@tonic-gate 		msiq_rec_p->msiq_rec_data.msg.msg_route =
9950Sstevel@tonic-gate 		    eq_rec_p->eq_rec_fmt_type & 7;
9960Sstevel@tonic-gate 		msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
9970Sstevel@tonic-gate 		msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
9980Sstevel@tonic-gate 		break;
9990Sstevel@tonic-gate 	default:
10000Sstevel@tonic-gate 		cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
1001671Skrishnae 		    "0x%x is an unknown EQ record type",
10020Sstevel@tonic-gate 		    ddi_driver_name(dip), ddi_get_instance(dip),
1003671Skrishnae 		    (int)eq_rec_p->eq_rec_fmt_type);
10040Sstevel@tonic-gate 		break;
10050Sstevel@tonic-gate 	}
10060Sstevel@tonic-gate 
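	/*
	 * Reassemble the MSI address; judging from the shifts below,
	 * eq_rec_addr0 carries address bits 15:2 and eq_rec_addr1 the
	 * bits above bit 15.
	 */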
10070Sstevel@tonic-gate 	msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
10080Sstevel@tonic-gate 	msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
10090Sstevel@tonic-gate 	    (eq_rec_p->eq_rec_addr0 << 2));
10100Sstevel@tonic-gate 
1011287Smg140465 	/* Zero out eq_rec_fmt_type field */
1012287Smg140465 	eq_rec_p->eq_rec_fmt_type = 0;
10130Sstevel@tonic-gate }
10140Sstevel@tonic-gate 
10150Sstevel@tonic-gate /*
10160Sstevel@tonic-gate  * MSI Functions:
10170Sstevel@tonic-gate  */
10180Sstevel@tonic-gate /*ARGSUSED*/
10190Sstevel@tonic-gate int
10200Sstevel@tonic-gate px_lib_msi_init(dev_info_t *dip)
10210Sstevel@tonic-gate {
10220Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
10230Sstevel@tonic-gate 	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
10240Sstevel@tonic-gate 	uint64_t	ret;
10250Sstevel@tonic-gate 
10260Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);
10270Sstevel@tonic-gate 
10280Sstevel@tonic-gate 	if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
10290Sstevel@tonic-gate 	    msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
10300Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip, "px_lib_msi_init failed, ret 0x%lx\n",
10310Sstevel@tonic-gate 		    ret);
10320Sstevel@tonic-gate 		return (DDI_FAILURE);
10330Sstevel@tonic-gate 	}
10340Sstevel@tonic-gate 
10350Sstevel@tonic-gate 	return (DDI_SUCCESS);
10360Sstevel@tonic-gate }
10370Sstevel@tonic-gate 
10380Sstevel@tonic-gate /*ARGSUSED*/
10390Sstevel@tonic-gate int
10400Sstevel@tonic-gate px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
10410Sstevel@tonic-gate     msiqid_t *msiq_id)
10420Sstevel@tonic-gate {
10430Sstevel@tonic-gate 	uint64_t	ret;
10440Sstevel@tonic-gate 
10450Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
10460Sstevel@tonic-gate 	    dip, msi_num);
10470Sstevel@tonic-gate 
10480Sstevel@tonic-gate 	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
10490Sstevel@tonic-gate 	    msi_num, msiq_id)) != H_EOK) {
10500Sstevel@tonic-gate 		DBG(DBG_LIB_MSI, dip,
10510Sstevel@tonic-gate 		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
10520Sstevel@tonic-gate 		return (DDI_FAILURE);
10530Sstevel@tonic-gate 	}
10540Sstevel@tonic-gate 
10550Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
10560Sstevel@tonic-gate 	    *msiq_id);
10570Sstevel@tonic-gate 
10580Sstevel@tonic-gate 	return (DDI_SUCCESS);
10590Sstevel@tonic-gate }
10600Sstevel@tonic-gate 
10610Sstevel@tonic-gate /*ARGSUSED*/
10620Sstevel@tonic-gate int
10630Sstevel@tonic-gate px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
10640Sstevel@tonic-gate     msiqid_t msiq_id, msi_type_t msitype)
10650Sstevel@tonic-gate {
10660Sstevel@tonic-gate 	uint64_t	ret;
10670Sstevel@tonic-gate 
10680Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
10690Sstevel@tonic-gate 	    "msq_id 0x%x\n", dip, msi_num, msiq_id);
10700Sstevel@tonic-gate 
10710Sstevel@tonic-gate 	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
10720Sstevel@tonic-gate 	    msi_num, msiq_id)) != H_EOK) {
10730Sstevel@tonic-gate 		DBG(DBG_LIB_MSI, dip,
10740Sstevel@tonic-gate 		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
10750Sstevel@tonic-gate 		return (DDI_FAILURE);
10760Sstevel@tonic-gate 	}
10770Sstevel@tonic-gate 
10780Sstevel@tonic-gate 	return (DDI_SUCCESS);
10790Sstevel@tonic-gate }
10800Sstevel@tonic-gate 
10810Sstevel@tonic-gate /*ARGSUSED*/
10820Sstevel@tonic-gate int
10830Sstevel@tonic-gate px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
10840Sstevel@tonic-gate     pci_msi_valid_state_t *msi_valid_state)
10850Sstevel@tonic-gate {
10860Sstevel@tonic-gate 	uint64_t	ret;
10870Sstevel@tonic-gate 
10880Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
10890Sstevel@tonic-gate 	    dip, msi_num);
10900Sstevel@tonic-gate 
10910Sstevel@tonic-gate 	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
10920Sstevel@tonic-gate 	    msi_num, msi_valid_state)) != H_EOK) {
10930Sstevel@tonic-gate 		DBG(DBG_LIB_MSI, dip,
10940Sstevel@tonic-gate 		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
10950Sstevel@tonic-gate 		return (DDI_FAILURE);
10960Sstevel@tonic-gate 	}
10970Sstevel@tonic-gate 
10980Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msiq_id 0x%x\n",
10990Sstevel@tonic-gate 	    *msi_valid_state);
11000Sstevel@tonic-gate 
11010Sstevel@tonic-gate 	return (DDI_SUCCESS);
11020Sstevel@tonic-gate }
11030Sstevel@tonic-gate 
11040Sstevel@tonic-gate /*ARGSUSED*/
11050Sstevel@tonic-gate int
11060Sstevel@tonic-gate px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
11070Sstevel@tonic-gate     pci_msi_valid_state_t msi_valid_state)
11080Sstevel@tonic-gate {
11090Sstevel@tonic-gate 	uint64_t	ret;
11100Sstevel@tonic-gate 
11110Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
11120Sstevel@tonic-gate 	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);
11130Sstevel@tonic-gate 
11140Sstevel@tonic-gate 	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
11150Sstevel@tonic-gate 	    msi_num, msi_valid_state)) != H_EOK) {
11160Sstevel@tonic-gate 		DBG(DBG_LIB_MSI, dip,
11170Sstevel@tonic-gate 		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
11180Sstevel@tonic-gate 		return (DDI_FAILURE);
11190Sstevel@tonic-gate 	}
11200Sstevel@tonic-gate 
11210Sstevel@tonic-gate 	return (DDI_SUCCESS);
11220Sstevel@tonic-gate }
11230Sstevel@tonic-gate 
11240Sstevel@tonic-gate /*ARGSUSED*/
11250Sstevel@tonic-gate int
11260Sstevel@tonic-gate px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
11270Sstevel@tonic-gate     pci_msi_state_t *msi_state)
11280Sstevel@tonic-gate {
11290Sstevel@tonic-gate 	uint64_t	ret;
11300Sstevel@tonic-gate 
11310Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
11320Sstevel@tonic-gate 	    dip, msi_num);
11330Sstevel@tonic-gate 
11340Sstevel@tonic-gate 	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
11350Sstevel@tonic-gate 	    msi_num, msi_state)) != H_EOK) {
11360Sstevel@tonic-gate 		DBG(DBG_LIB_MSI, dip,
11370Sstevel@tonic-gate 		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
11380Sstevel@tonic-gate 		return (DDI_FAILURE);
11390Sstevel@tonic-gate 	}
11400Sstevel@tonic-gate 
11410Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
11420Sstevel@tonic-gate 	    *msi_state);
11430Sstevel@tonic-gate 
11440Sstevel@tonic-gate 	return (DDI_SUCCESS);
11450Sstevel@tonic-gate }
11460Sstevel@tonic-gate 
11470Sstevel@tonic-gate /*ARGSUSED*/
11480Sstevel@tonic-gate int
11490Sstevel@tonic-gate px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
11500Sstevel@tonic-gate     pci_msi_state_t msi_state)
11510Sstevel@tonic-gate {
11520Sstevel@tonic-gate 	uint64_t	ret;
11530Sstevel@tonic-gate 
11540Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
11550Sstevel@tonic-gate 	    "msi_state 0x%x\n", dip, msi_num, msi_state);
11560Sstevel@tonic-gate 
11570Sstevel@tonic-gate 	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
11580Sstevel@tonic-gate 	    msi_num, msi_state)) != H_EOK) {
11590Sstevel@tonic-gate 		DBG(DBG_LIB_MSI, dip,
11600Sstevel@tonic-gate 		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
11610Sstevel@tonic-gate 		return (DDI_FAILURE);
11620Sstevel@tonic-gate 	}
11630Sstevel@tonic-gate 
11640Sstevel@tonic-gate 	return (DDI_SUCCESS);
11650Sstevel@tonic-gate }
11660Sstevel@tonic-gate 
11670Sstevel@tonic-gate /*
11680Sstevel@tonic-gate  * MSG Functions:
11690Sstevel@tonic-gate  */
11700Sstevel@tonic-gate /*ARGSUSED*/
11710Sstevel@tonic-gate int
11720Sstevel@tonic-gate px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
11730Sstevel@tonic-gate     msiqid_t *msiq_id)
11740Sstevel@tonic-gate {
11750Sstevel@tonic-gate 	uint64_t	ret;
11760Sstevel@tonic-gate 
11770Sstevel@tonic-gate 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
11780Sstevel@tonic-gate 	    dip, msg_type);
11790Sstevel@tonic-gate 
11800Sstevel@tonic-gate 	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
11810Sstevel@tonic-gate 	    msg_type, msiq_id)) != H_EOK) {
11820Sstevel@tonic-gate 		DBG(DBG_LIB_MSG, dip,
11830Sstevel@tonic-gate 		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
11840Sstevel@tonic-gate 		return (DDI_FAILURE);
11850Sstevel@tonic-gate 	}
11860Sstevel@tonic-gate 
11870Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
11880Sstevel@tonic-gate 	    *msiq_id);
11890Sstevel@tonic-gate 
11900Sstevel@tonic-gate 	return (DDI_SUCCESS);
11910Sstevel@tonic-gate }
11920Sstevel@tonic-gate 
11930Sstevel@tonic-gate /*ARGSUSED*/
11940Sstevel@tonic-gate int
11950Sstevel@tonic-gate px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
11960Sstevel@tonic-gate     msiqid_t msiq_id)
11970Sstevel@tonic-gate {
11980Sstevel@tonic-gate 	uint64_t	ret;
11990Sstevel@tonic-gate 
12000Sstevel@tonic-gate 	DBG(DBG_LIB_MSG, dip, "px_lib_msi_setstate: dip 0x%p msg_type 0x%x "
12010Sstevel@tonic-gate 	    "msiq_id 0x%x\n", dip, msg_type, msiq_id);
12020Sstevel@tonic-gate 
12030Sstevel@tonic-gate 	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
12040Sstevel@tonic-gate 	    msg_type, msiq_id)) != H_EOK) {
12050Sstevel@tonic-gate 		DBG(DBG_LIB_MSG, dip,
12060Sstevel@tonic-gate 		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
12070Sstevel@tonic-gate 		return (DDI_FAILURE);
12080Sstevel@tonic-gate 	}
12090Sstevel@tonic-gate 
12100Sstevel@tonic-gate 	return (DDI_SUCCESS);
12110Sstevel@tonic-gate }
12120Sstevel@tonic-gate 
12130Sstevel@tonic-gate /*ARGSUSED*/
12140Sstevel@tonic-gate int
12150Sstevel@tonic-gate px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
12160Sstevel@tonic-gate     pcie_msg_valid_state_t *msg_valid_state)
12170Sstevel@tonic-gate {
12180Sstevel@tonic-gate 	uint64_t	ret;
12190Sstevel@tonic-gate 
12200Sstevel@tonic-gate 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
12210Sstevel@tonic-gate 	    dip, msg_type);
12220Sstevel@tonic-gate 
12230Sstevel@tonic-gate 	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
12240Sstevel@tonic-gate 	    msg_valid_state)) != H_EOK) {
12250Sstevel@tonic-gate 		DBG(DBG_LIB_MSG, dip,
12260Sstevel@tonic-gate 		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
12270Sstevel@tonic-gate 		return (DDI_FAILURE);
12280Sstevel@tonic-gate 	}
12290Sstevel@tonic-gate 
12300Sstevel@tonic-gate 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
12310Sstevel@tonic-gate 	    *msg_valid_state);
12320Sstevel@tonic-gate 
12330Sstevel@tonic-gate 	return (DDI_SUCCESS);
12340Sstevel@tonic-gate }
12350Sstevel@tonic-gate 
12360Sstevel@tonic-gate /*ARGSUSED*/
12370Sstevel@tonic-gate int
12380Sstevel@tonic-gate px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
12390Sstevel@tonic-gate     pcie_msg_valid_state_t msg_valid_state)
12400Sstevel@tonic-gate {
12410Sstevel@tonic-gate 	uint64_t	ret;
12420Sstevel@tonic-gate 
12430Sstevel@tonic-gate 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
12440Sstevel@tonic-gate 	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);
12450Sstevel@tonic-gate 
12460Sstevel@tonic-gate 	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
12470Sstevel@tonic-gate 	    msg_valid_state)) != H_EOK) {
12480Sstevel@tonic-gate 		DBG(DBG_LIB_MSG, dip,
12490Sstevel@tonic-gate 		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
12500Sstevel@tonic-gate 		return (DDI_FAILURE);
12510Sstevel@tonic-gate 	}
12520Sstevel@tonic-gate 
12530Sstevel@tonic-gate 	return (DDI_SUCCESS);
12540Sstevel@tonic-gate }
12550Sstevel@tonic-gate 
12560Sstevel@tonic-gate /*
12570Sstevel@tonic-gate  * Suspend/Resume Functions:
12580Sstevel@tonic-gate  * Currently unsupported by hypervisor
12590Sstevel@tonic-gate  */
12600Sstevel@tonic-gate int
12610Sstevel@tonic-gate px_lib_suspend(dev_info_t *dip)
12620Sstevel@tonic-gate {
12630Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
12640Sstevel@tonic-gate 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
12651648Sjchu 	px_cb_t		*cb_p = PX2CB(px_p);
12660Sstevel@tonic-gate 	devhandle_t	dev_hdl, xbus_dev_hdl;
12671648Sjchu 	uint64_t	ret = H_EOK;
12680Sstevel@tonic-gate 
12690Sstevel@tonic-gate 	DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);
12700Sstevel@tonic-gate 
127127Sjchu 	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
127227Sjchu 	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
12730Sstevel@tonic-gate 
12741648Sjchu 	if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK)
12751648Sjchu 		goto fail;
12761648Sjchu 
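	/*
	 * The common block (CB) is shared by multiple px leaves; suspend it
	 * only when the last attached leaf (attachcnt dropping to zero)
	 * suspends.
	 */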
12771648Sjchu 	if (--cb_p->attachcnt == 0) {
12781648Sjchu 		ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p);
12791648Sjchu 		if (ret != H_EOK)
12801648Sjchu 			cb_p->attachcnt++;
12810Sstevel@tonic-gate 	}
12820Sstevel@tonic-gate 
12831648Sjchu fail:
12840Sstevel@tonic-gate 	return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS);
12850Sstevel@tonic-gate }
12860Sstevel@tonic-gate 
12870Sstevel@tonic-gate void
12880Sstevel@tonic-gate px_lib_resume(dev_info_t *dip)
12890Sstevel@tonic-gate {
12900Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
12910Sstevel@tonic-gate 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
12921648Sjchu 	px_cb_t		*cb_p = PX2CB(px_p);
12930Sstevel@tonic-gate 	devhandle_t	dev_hdl, xbus_dev_hdl;
12940Sstevel@tonic-gate 	devino_t	pec_ino = px_p->px_inos[PX_INTR_PEC];
12950Sstevel@tonic-gate 	devino_t	xbc_ino = px_p->px_inos[PX_INTR_XBC];
12960Sstevel@tonic-gate 
12970Sstevel@tonic-gate 	DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);
12980Sstevel@tonic-gate 
129927Sjchu 	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
130027Sjchu 	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
13010Sstevel@tonic-gate 
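	/* The first leaf to resume also resumes the shared common block. */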
13021648Sjchu 	if (++cb_p->attachcnt == 1)
13030Sstevel@tonic-gate 		hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);
13040Sstevel@tonic-gate 
13051648Sjchu 	hvio_resume(dev_hdl, pec_ino, pxu_p);
13060Sstevel@tonic-gate }
13070Sstevel@tonic-gate 
1308*1772Sjl139090 /*
1309*1772Sjl139090  * Generate a unique Oberon UBC ID based on the Logical System Board and
1310*1772Sjl139090  * the IO Channel from the portid property field.
1311*1772Sjl139090  */
1312*1772Sjl139090 static uint64_t
1313*1772Sjl139090 oberon_get_ubc_id(dev_info_t *dip)
1314*1772Sjl139090 {
1315*1772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
1316*1772Sjl139090 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
1317*1772Sjl139090 	uint64_t	ubc_id;
1318*1772Sjl139090 
1319*1772Sjl139090 	/*
1320*1772Sjl139090 	 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and
1321*1772Sjl139090 	 * the 4 LSB_ID[3:0] bits from the Oberon's portid property.
1322*1772Sjl139090 	 */
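	/*
	 * Layout sketch (assuming OBERON_UBC_ID_LSB is 2): the two
	 * IO_Channel#[1:0] bits land in ubc_id[1:0] and the four LSB_ID[3:0]
	 * bits land in ubc_id[5:2], giving indices 0 .. OBERON_UBC_ID_MAX - 1.
	 */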
1323*1772Sjl139090 	ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) &
1324*1772Sjl139090 	    OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >>
1325*1772Sjl139090 	    OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK)
1326*1772Sjl139090 	    << OBERON_UBC_ID_LSB));
1327*1772Sjl139090 
1328*1772Sjl139090 	return (ubc_id);
1329*1772Sjl139090 }
1330*1772Sjl139090 
1331*1772Sjl139090 /*
1332*1772Sjl139090  * Oberon does not have a UBC scratch register, so allocate an array of
1333*1772Sjl139090  * scratch registers when needed and use the unique UBC ID as an index.
1334*1772Sjl139090  * This code could be simplified with a pre-allocated array; the array is
1335*1772Sjl139090  * allocated dynamically because it is only needed by the Oberon.
1336*1772Sjl139090  */
1337*1772Sjl139090 static void
1338*1772Sjl139090 oberon_set_cb(dev_info_t *dip, uint64_t val)
1339*1772Sjl139090 {
1340*1772Sjl139090 	uint64_t	ubc_id;
1341*1772Sjl139090 
1342*1772Sjl139090 	if (px_oberon_ubc_scratch_regs == NULL)
1343*1772Sjl139090 		px_oberon_ubc_scratch_regs =
1344*1772Sjl139090 		    (uint64_t *)kmem_zalloc(sizeof (uint64_t)*
1345*1772Sjl139090 		    OBERON_UBC_ID_MAX, KM_SLEEP);
1346*1772Sjl139090 
1347*1772Sjl139090 	ubc_id = oberon_get_ubc_id(dip);
1348*1772Sjl139090 
1349*1772Sjl139090 	px_oberon_ubc_scratch_regs[ubc_id] = val;
1350*1772Sjl139090 
1351*1772Sjl139090 	/*
1352*1772Sjl139090 	 * Check if any scratch registers are still in use. If all scratch
1353*1772Sjl139090 	 * registers are currently set to zero, then deallocate the scratch
1354*1772Sjl139090 	 * register array.
1355*1772Sjl139090 	 */
1356*1772Sjl139090 	for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) {
1357*1772Sjl139090 		if (px_oberon_ubc_scratch_regs[ubc_id] != 0)
1358*1772Sjl139090 			return;
1359*1772Sjl139090 	}
1360*1772Sjl139090 
1361*1772Sjl139090 	/*
1362*1772Sjl139090 	 * All scratch registers are set to zero so deallocate the scratch
1363*1772Sjl139090 	 * register array and set the pointer to NULL.
1364*1772Sjl139090 	 */
1365*1772Sjl139090 	kmem_free(px_oberon_ubc_scratch_regs,
1366*1772Sjl139090 	    (sizeof (uint64_t)*OBERON_UBC_ID_MAX));
1367*1772Sjl139090 
1368*1772Sjl139090 	px_oberon_ubc_scratch_regs = NULL;
1369*1772Sjl139090 }
1370*1772Sjl139090 
1371*1772Sjl139090 /*
1372*1772Sjl139090  * Oberon does not have a UBC scratch register, so use an allocated array of
1373*1772Sjl139090  * scratch registers and use the unique UBC ID as an index into that array.
1374*1772Sjl139090  */
1375*1772Sjl139090 static uint64_t
1376*1772Sjl139090 oberon_get_cb(dev_info_t *dip)
1377*1772Sjl139090 {
1378*1772Sjl139090 	uint64_t	ubc_id;
1379*1772Sjl139090 
1380*1772Sjl139090 	if (px_oberon_ubc_scratch_regs == NULL)
1381*1772Sjl139090 		return (0);
1382*1772Sjl139090 
1383*1772Sjl139090 	ubc_id = oberon_get_ubc_id(dip);
1384*1772Sjl139090 
1385*1772Sjl139090 	return (px_oberon_ubc_scratch_regs[ubc_id]);
1386*1772Sjl139090 }
1387*1772Sjl139090 
1388*1772Sjl139090 /*
1389*1772Sjl139090  * Misc Functions:
1390*1772Sjl139090  * Currently unsupported by hypervisor
1391*1772Sjl139090  */
1392*1772Sjl139090 static uint64_t
1393*1772Sjl139090 px_get_cb(dev_info_t *dip)
1394*1772Sjl139090 {
1395*1772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
1396*1772Sjl139090 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
1397*1772Sjl139090 
1398*1772Sjl139090 	/*
1399*1772Sjl139090 	 * Oberon does not currently have Scratchpad registers.
1400*1772Sjl139090 	 */
1401*1772Sjl139090 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
1402*1772Sjl139090 		return (oberon_get_cb(dip));
1403*1772Sjl139090 
1404*1772Sjl139090 	return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1));
1405*1772Sjl139090 }
1406*1772Sjl139090 
1407*1772Sjl139090 static void
1408*1772Sjl139090 px_set_cb(dev_info_t *dip, uint64_t val)
1409*1772Sjl139090 {
1410*1772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
1411*1772Sjl139090 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
1412*1772Sjl139090 
1413*1772Sjl139090 	/*
1414*1772Sjl139090 	 * Oberon does not currently have Scratchpad registers.
1415*1772Sjl139090 	 */
1416*1772Sjl139090 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1417*1772Sjl139090 		oberon_set_cb(dip, val);
1418*1772Sjl139090 		return;
1419*1772Sjl139090 	}
1420*1772Sjl139090 
1421*1772Sjl139090 	CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val);
1422*1772Sjl139090 }
1423*1772Sjl139090 
14240Sstevel@tonic-gate /*ARGSUSED*/
14250Sstevel@tonic-gate int
14260Sstevel@tonic-gate px_lib_map_vconfig(dev_info_t *dip,
14270Sstevel@tonic-gate 	ddi_map_req_t *mp, pci_config_offset_t off,
14280Sstevel@tonic-gate 		pci_regspec_t *rp, caddr_t *addrp)
14290Sstevel@tonic-gate {
14300Sstevel@tonic-gate 	/*
14310Sstevel@tonic-gate 	 * No special config space access services in this layer.
14320Sstevel@tonic-gate 	 */
14330Sstevel@tonic-gate 	return (DDI_FAILURE);
14340Sstevel@tonic-gate }
14350Sstevel@tonic-gate 
1436624Sschwartz void
1437677Sjchu px_lib_map_attr_check(ddi_map_req_t *mp)
1438677Sjchu {
1439677Sjchu 	ddi_acc_hdl_t *hp = mp->map_handlep;
1440677Sjchu 
1441677Sjchu 	/* fire does not accept byte masks from PIO store merge */
1442677Sjchu 	if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC)
1443677Sjchu 		hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1444677Sjchu }
1445677Sjchu 
1446677Sjchu void
1447624Sschwartz px_lib_clr_errs(px_t *px_p)
144827Sjchu {
1449624Sschwartz 	px_pec_t	*pec_p = px_p->px_pec_p;
145027Sjchu 	dev_info_t	*rpdip = px_p->px_dip;
145127Sjchu 	int		err = PX_OK, ret;
145227Sjchu 	int		acctype = pec_p->pec_safeacc_type;
145327Sjchu 	ddi_fm_error_t	derr;
145427Sjchu 
145527Sjchu 	/* Create the derr */
145627Sjchu 	bzero(&derr, sizeof (ddi_fm_error_t));
145727Sjchu 	derr.fme_version = DDI_FME_VERSION;
145827Sjchu 	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
145927Sjchu 	derr.fme_flag = acctype;
146027Sjchu 
146127Sjchu 	if (acctype == DDI_FM_ERR_EXPECTED) {
146227Sjchu 		derr.fme_status = DDI_FM_NONFATAL;
146327Sjchu 		ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr);
146427Sjchu 	}
146527Sjchu 
14661648Sjchu 	mutex_enter(&px_p->px_fm_mutex);
146727Sjchu 
146827Sjchu 	/* send ereport/handle/clear fire registers */
146927Sjchu 	err = px_err_handle(px_p, &derr, PX_LIB_CALL, B_TRUE);
147027Sjchu 
147127Sjchu 	/* Check all child devices for errors */
147227Sjchu 	ret = ndi_fm_handler_dispatch(rpdip, NULL, &derr);
147327Sjchu 
14741648Sjchu 	mutex_exit(&px_p->px_fm_mutex);
147527Sjchu 
147627Sjchu 	/*
147727Sjchu 	 * PX_FATAL_HW indicates a condition recovered from Fatal-Reset,
147827Sjchu 	 * and therefore does not cause a panic.
147927Sjchu 	 */
148027Sjchu 	if ((err & (PX_FATAL_GOS | PX_FATAL_SW)) || (ret == DDI_FM_FATAL))
1481677Sjchu 		PX_FM_PANIC("Fatal System Port Error has occurred\n");
148227Sjchu }
148327Sjchu 
14840Sstevel@tonic-gate #ifdef  DEBUG
14850Sstevel@tonic-gate int	px_peekfault_cnt = 0;
14860Sstevel@tonic-gate int	px_pokefault_cnt = 0;
14870Sstevel@tonic-gate #endif  /* DEBUG */
14880Sstevel@tonic-gate 
14890Sstevel@tonic-gate /*ARGSUSED*/
14900Sstevel@tonic-gate static int
14910Sstevel@tonic-gate px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
14920Sstevel@tonic-gate     peekpoke_ctlops_t *in_args)
14930Sstevel@tonic-gate {
14940Sstevel@tonic-gate 	px_t *px_p = DIP_TO_STATE(dip);
14950Sstevel@tonic-gate 	px_pec_t *pec_p = px_p->px_pec_p;
14960Sstevel@tonic-gate 	int err = DDI_SUCCESS;
14970Sstevel@tonic-gate 	on_trap_data_t otd;
14980Sstevel@tonic-gate 
14990Sstevel@tonic-gate 	mutex_enter(&pec_p->pec_pokefault_mutex);
15000Sstevel@tonic-gate 	pec_p->pec_ontrap_data = &otd;
150127Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;
15020Sstevel@tonic-gate 
15030Sstevel@tonic-gate 	/* Set up protected environment. */
15040Sstevel@tonic-gate 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
15050Sstevel@tonic-gate 		uintptr_t tramp = otd.ot_trampoline;
15060Sstevel@tonic-gate 
15070Sstevel@tonic-gate 		otd.ot_trampoline = (uintptr_t)&poke_fault;
15080Sstevel@tonic-gate 		err = do_poke(in_args->size, (void *)in_args->dev_addr,
15090Sstevel@tonic-gate 		    (void *)in_args->host_addr);
15100Sstevel@tonic-gate 		otd.ot_trampoline = tramp;
15110Sstevel@tonic-gate 	} else
15120Sstevel@tonic-gate 		err = DDI_FAILURE;
15130Sstevel@tonic-gate 
1514624Sschwartz 	px_lib_clr_errs(px_p);
151527Sjchu 
15160Sstevel@tonic-gate 	if (otd.ot_trap & OT_DATA_ACCESS)
15170Sstevel@tonic-gate 		err = DDI_FAILURE;
15180Sstevel@tonic-gate 
15190Sstevel@tonic-gate 	/* Take down protected environment. */
15200Sstevel@tonic-gate 	no_trap();
15210Sstevel@tonic-gate 
15220Sstevel@tonic-gate 	pec_p->pec_ontrap_data = NULL;
152327Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
15240Sstevel@tonic-gate 	mutex_exit(&pec_p->pec_pokefault_mutex);
15250Sstevel@tonic-gate 
15260Sstevel@tonic-gate #ifdef  DEBUG
15270Sstevel@tonic-gate 	if (err == DDI_FAILURE)
15280Sstevel@tonic-gate 		px_pokefault_cnt++;
15290Sstevel@tonic-gate #endif
15300Sstevel@tonic-gate 	return (err);
15310Sstevel@tonic-gate }
15320Sstevel@tonic-gate 
15330Sstevel@tonic-gate /*ARGSUSED*/
15340Sstevel@tonic-gate static int
15350Sstevel@tonic-gate px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
15360Sstevel@tonic-gate     peekpoke_ctlops_t *cautacc_ctlops_arg)
15370Sstevel@tonic-gate {
15380Sstevel@tonic-gate 	size_t size = cautacc_ctlops_arg->size;
15390Sstevel@tonic-gate 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
15400Sstevel@tonic-gate 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
15410Sstevel@tonic-gate 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
15420Sstevel@tonic-gate 	size_t repcount = cautacc_ctlops_arg->repcount;
15430Sstevel@tonic-gate 	uint_t flags = cautacc_ctlops_arg->flags;
15440Sstevel@tonic-gate 
15450Sstevel@tonic-gate 	px_t *px_p = DIP_TO_STATE(dip);
15460Sstevel@tonic-gate 	px_pec_t *pec_p = px_p->px_pec_p;
15470Sstevel@tonic-gate 	int err = DDI_SUCCESS;
15480Sstevel@tonic-gate 
154927Sjchu 	/*
155027Sjchu 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
155127Sjchu 	 * mutex.
155227Sjchu 	 */
15530Sstevel@tonic-gate 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
15540Sstevel@tonic-gate 
155527Sjchu 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
155627Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
155727Sjchu 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
15580Sstevel@tonic-gate 
15590Sstevel@tonic-gate 	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
15600Sstevel@tonic-gate 		for (; repcount; repcount--) {
15610Sstevel@tonic-gate 			switch (size) {
15620Sstevel@tonic-gate 
15630Sstevel@tonic-gate 			case sizeof (uint8_t):
15640Sstevel@tonic-gate 				i_ddi_put8(hp, (uint8_t *)dev_addr,
15650Sstevel@tonic-gate 				    *(uint8_t *)host_addr);
15660Sstevel@tonic-gate 				break;
15670Sstevel@tonic-gate 
15680Sstevel@tonic-gate 			case sizeof (uint16_t):
15690Sstevel@tonic-gate 				i_ddi_put16(hp, (uint16_t *)dev_addr,
15700Sstevel@tonic-gate 				    *(uint16_t *)host_addr);
15710Sstevel@tonic-gate 				break;
15720Sstevel@tonic-gate 
15730Sstevel@tonic-gate 			case sizeof (uint32_t):
15740Sstevel@tonic-gate 				i_ddi_put32(hp, (uint32_t *)dev_addr,
15750Sstevel@tonic-gate 				    *(uint32_t *)host_addr);
15760Sstevel@tonic-gate 				break;
15770Sstevel@tonic-gate 
15780Sstevel@tonic-gate 			case sizeof (uint64_t):
15790Sstevel@tonic-gate 				i_ddi_put64(hp, (uint64_t *)dev_addr,
15800Sstevel@tonic-gate 				    *(uint64_t *)host_addr);
15810Sstevel@tonic-gate 				break;
15820Sstevel@tonic-gate 			}
15830Sstevel@tonic-gate 
15840Sstevel@tonic-gate 			host_addr += size;
15850Sstevel@tonic-gate 
15860Sstevel@tonic-gate 			if (flags == DDI_DEV_AUTOINCR)
15870Sstevel@tonic-gate 				dev_addr += size;
15880Sstevel@tonic-gate 
1589624Sschwartz 			px_lib_clr_errs(px_p);
159027Sjchu 
15910Sstevel@tonic-gate 			if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) {
15920Sstevel@tonic-gate 				err = DDI_FAILURE;
15930Sstevel@tonic-gate #ifdef  DEBUG
15940Sstevel@tonic-gate 				px_pokefault_cnt++;
15950Sstevel@tonic-gate #endif
15960Sstevel@tonic-gate 				break;
15970Sstevel@tonic-gate 			}
15980Sstevel@tonic-gate 		}
15990Sstevel@tonic-gate 	}
16000Sstevel@tonic-gate 
16010Sstevel@tonic-gate 	i_ddi_notrap((ddi_acc_handle_t)hp);
16020Sstevel@tonic-gate 	pec_p->pec_ontrap_data = NULL;
160327Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
16040Sstevel@tonic-gate 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
16050Sstevel@tonic-gate 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
16060Sstevel@tonic-gate 
16070Sstevel@tonic-gate 	return (err);
16080Sstevel@tonic-gate }
16090Sstevel@tonic-gate 
16100Sstevel@tonic-gate 
16110Sstevel@tonic-gate int
16120Sstevel@tonic-gate px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
16130Sstevel@tonic-gate     peekpoke_ctlops_t *in_args)
16140Sstevel@tonic-gate {
16150Sstevel@tonic-gate 	return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
16160Sstevel@tonic-gate 	    px_lib_do_poke(dip, rdip, in_args));
16170Sstevel@tonic-gate }
16180Sstevel@tonic-gate 
16190Sstevel@tonic-gate 
16200Sstevel@tonic-gate /*ARGSUSED*/
16210Sstevel@tonic-gate static int
16220Sstevel@tonic-gate px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
16230Sstevel@tonic-gate {
162427Sjchu 	px_t *px_p = DIP_TO_STATE(dip);
162527Sjchu 	px_pec_t *pec_p = px_p->px_pec_p;
16260Sstevel@tonic-gate 	int err = DDI_SUCCESS;
16270Sstevel@tonic-gate 	on_trap_data_t otd;
16280Sstevel@tonic-gate 
162927Sjchu 	mutex_enter(&pec_p->pec_pokefault_mutex);
163027Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;
163127Sjchu 
16320Sstevel@tonic-gate 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
16330Sstevel@tonic-gate 		uintptr_t tramp = otd.ot_trampoline;
16340Sstevel@tonic-gate 
16350Sstevel@tonic-gate 		otd.ot_trampoline = (uintptr_t)&peek_fault;
16360Sstevel@tonic-gate 		err = do_peek(in_args->size, (void *)in_args->dev_addr,
16370Sstevel@tonic-gate 		    (void *)in_args->host_addr);
16380Sstevel@tonic-gate 		otd.ot_trampoline = tramp;
16390Sstevel@tonic-gate 	} else
16400Sstevel@tonic-gate 		err = DDI_FAILURE;
16410Sstevel@tonic-gate 
16420Sstevel@tonic-gate 	no_trap();
164327Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
164427Sjchu 	mutex_exit(&pec_p->pec_pokefault_mutex);
16450Sstevel@tonic-gate 
16460Sstevel@tonic-gate #ifdef  DEBUG
16470Sstevel@tonic-gate 	if (err == DDI_FAILURE)
16480Sstevel@tonic-gate 		px_peekfault_cnt++;
16490Sstevel@tonic-gate #endif
16500Sstevel@tonic-gate 	return (err);
16510Sstevel@tonic-gate }
16520Sstevel@tonic-gate 
16530Sstevel@tonic-gate 
16540Sstevel@tonic-gate static int
16550Sstevel@tonic-gate px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
16560Sstevel@tonic-gate {
16570Sstevel@tonic-gate 	size_t size = cautacc_ctlops_arg->size;
16580Sstevel@tonic-gate 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
16590Sstevel@tonic-gate 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
16600Sstevel@tonic-gate 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
16610Sstevel@tonic-gate 	size_t repcount = cautacc_ctlops_arg->repcount;
16620Sstevel@tonic-gate 	uint_t flags = cautacc_ctlops_arg->flags;
16630Sstevel@tonic-gate 
16640Sstevel@tonic-gate 	px_t *px_p = DIP_TO_STATE(dip);
16650Sstevel@tonic-gate 	px_pec_t *pec_p = px_p->px_pec_p;
16660Sstevel@tonic-gate 	int err = DDI_SUCCESS;
16670Sstevel@tonic-gate 
166827Sjchu 	/*
166927Sjchu 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
167027Sjchu 	 * mutex.
167127Sjchu 	 */
167227Sjchu 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
167327Sjchu 
167427Sjchu 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
167527Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
16760Sstevel@tonic-gate 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
16770Sstevel@tonic-gate 
16780Sstevel@tonic-gate 	if (repcount == 1) {
16790Sstevel@tonic-gate 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
16800Sstevel@tonic-gate 			i_ddi_caut_get(size, (void *)dev_addr,
16810Sstevel@tonic-gate 			    (void *)host_addr);
16820Sstevel@tonic-gate 		} else {
16830Sstevel@tonic-gate 			int i;
16840Sstevel@tonic-gate 			uint8_t *ff_addr = (uint8_t *)host_addr;
16850Sstevel@tonic-gate 			for (i = 0; i < size; i++)
16860Sstevel@tonic-gate 				*ff_addr++ = 0xff;
16870Sstevel@tonic-gate 
16880Sstevel@tonic-gate 			err = DDI_FAILURE;
16890Sstevel@tonic-gate #ifdef  DEBUG
16900Sstevel@tonic-gate 			px_peekfault_cnt++;
16910Sstevel@tonic-gate #endif
16920Sstevel@tonic-gate 		}
16930Sstevel@tonic-gate 	} else {
16940Sstevel@tonic-gate 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
16950Sstevel@tonic-gate 			for (; repcount; repcount--) {
16960Sstevel@tonic-gate 				i_ddi_caut_get(size, (void *)dev_addr,
16970Sstevel@tonic-gate 				    (void *)host_addr);
16980Sstevel@tonic-gate 
16990Sstevel@tonic-gate 				host_addr += size;
17000Sstevel@tonic-gate 
17010Sstevel@tonic-gate 				if (flags == DDI_DEV_AUTOINCR)
17020Sstevel@tonic-gate 					dev_addr += size;
17030Sstevel@tonic-gate 			}
17040Sstevel@tonic-gate 		} else {
17050Sstevel@tonic-gate 			err = DDI_FAILURE;
17060Sstevel@tonic-gate #ifdef  DEBUG
17070Sstevel@tonic-gate 			px_peekfault_cnt++;
17080Sstevel@tonic-gate #endif
17090Sstevel@tonic-gate 		}
17100Sstevel@tonic-gate 	}
17110Sstevel@tonic-gate 
17120Sstevel@tonic-gate 	i_ddi_notrap((ddi_acc_handle_t)hp);
17130Sstevel@tonic-gate 	pec_p->pec_ontrap_data = NULL;
171427Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
17150Sstevel@tonic-gate 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
17160Sstevel@tonic-gate 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
17170Sstevel@tonic-gate 
17180Sstevel@tonic-gate 	return (err);
17190Sstevel@tonic-gate }
17200Sstevel@tonic-gate 
17210Sstevel@tonic-gate /*ARGSUSED*/
17220Sstevel@tonic-gate int
17230Sstevel@tonic-gate px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
17240Sstevel@tonic-gate     peekpoke_ctlops_t *in_args, void *result)
17250Sstevel@tonic-gate {
17260Sstevel@tonic-gate 	result = (void *)in_args->host_addr;
17270Sstevel@tonic-gate 	return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
17280Sstevel@tonic-gate 	    px_lib_do_peek(dip, in_args));
17290Sstevel@tonic-gate }
1730118Sjchu 
17310Sstevel@tonic-gate /*
17320Sstevel@tonic-gate  * implements PPM interface
17330Sstevel@tonic-gate  */
17340Sstevel@tonic-gate int
17350Sstevel@tonic-gate px_lib_pmctl(int cmd, px_t *px_p)
17360Sstevel@tonic-gate {
17370Sstevel@tonic-gate 	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
17380Sstevel@tonic-gate 	switch (cmd) {
17390Sstevel@tonic-gate 	case PPMREQ_PRE_PWR_OFF:
17400Sstevel@tonic-gate 		/*
17410Sstevel@tonic-gate 		 * Currently there is no device power management for
17420Sstevel@tonic-gate 		 * the root complex (fire). When there is, we need to make
17430Sstevel@tonic-gate 		 * sure that it is at full power before trying to send the
17440Sstevel@tonic-gate 		 * PME_Turn_Off message.
17450Sstevel@tonic-gate 		 */
17460Sstevel@tonic-gate 		DBG(DBG_PWR, px_p->px_dip,
17470Sstevel@tonic-gate 		    "ioctl: request to send PME_Turn_Off\n");
17480Sstevel@tonic-gate 		return (px_goto_l23ready(px_p));
17490Sstevel@tonic-gate 
17500Sstevel@tonic-gate 	case PPMREQ_PRE_PWR_ON:
1751118Sjchu 		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
1752118Sjchu 		return (px_pre_pwron_check(px_p));
1753118Sjchu 
17540Sstevel@tonic-gate 	case PPMREQ_POST_PWR_ON:
1755118Sjchu 		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
1756118Sjchu 		return (px_goto_l0(px_p));
17570Sstevel@tonic-gate 
17580Sstevel@tonic-gate 	default:
17590Sstevel@tonic-gate 		return (DDI_FAILURE);
17600Sstevel@tonic-gate 	}
17610Sstevel@tonic-gate }
17620Sstevel@tonic-gate 
17630Sstevel@tonic-gate /*
17640Sstevel@tonic-gate  * Sends a PME_Turn_Off message to put the link in the L2/L3 Ready state.
17650Sstevel@tonic-gate  * Called by px_ioctl.
17660Sstevel@tonic-gate  * Returns DDI_SUCCESS or DDI_FAILURE.
17670Sstevel@tonic-gate  * 1. Wait for the link to be in the L1 state (link status reg).
17680Sstevel@tonic-gate  * 2. Write to the PME_Turn_Off reg to broadcast the message.
17690Sstevel@tonic-gate  * 3. Set a timeout.
17700Sstevel@tonic-gate  * 4. If the timeout expires, return failure.
17710Sstevel@tonic-gate  * 5. If PME_To_ACK is received, wait until the link is in L2/L3 Ready.
17720Sstevel@tonic-gate  */
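/*
 * The PME_To_ACK message is delivered by px_pmeq_intr() below, which (when a
 * turn-off is pending) sets PX_PMETOACK_RECVD under px_l23ready_lock and
 * broadcasts px_l23ready_cv; the cv_timedwait() loop here keys off that flag.
 */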
17730Sstevel@tonic-gate static int
17740Sstevel@tonic-gate px_goto_l23ready(px_t *px_p)
17750Sstevel@tonic-gate {
17760Sstevel@tonic-gate 	pcie_pwr_t	*pwr_p;
177727Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
177827Sjchu 	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
17790Sstevel@tonic-gate 	int		ret = DDI_SUCCESS;
17800Sstevel@tonic-gate 	clock_t		end, timeleft;
1781118Sjchu 	int		mutex_held = 1;
17820Sstevel@tonic-gate 
17830Sstevel@tonic-gate 	/* If no PM info, return failure */
17840Sstevel@tonic-gate 	if (!PCIE_PMINFO(px_p->px_dip) ||
17850Sstevel@tonic-gate 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
17860Sstevel@tonic-gate 		return (DDI_FAILURE);
17870Sstevel@tonic-gate 
17880Sstevel@tonic-gate 	mutex_enter(&pwr_p->pwr_lock);
1789118Sjchu 	mutex_enter(&px_p->px_l23ready_lock);
17900Sstevel@tonic-gate 	/* Clear the PME_To_ACK received flag */
1791118Sjchu 	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
1792287Smg140465 	/*
1793287Smg140465 	 * When P25 is the downstream device, after receiving
1794287Smg140465 	 * PME_To_ACK, fire will go to Detect state, which causes
1795287Smg140465 	 * the link down event. Inform FMA that this is expected.
1796287Smg140465 	 * In case of all other cards compliant with the PCI Express
1797287Smg140465 	 * spec, this will happen when the power is re-applied. FMA
1798287Smg140465 	 * code will clear this flag after one instance of LDN. Since
1799287Smg140465 	 * there will not be a LDN event for the spec compliant cards,
1800287Smg140465 	 * we need to clear the flag after receiving PME_To_ACK.
1801287Smg140465 	 */
1802287Smg140465 	px_p->px_pm_flags |= PX_LDN_EXPECTED;
18030Sstevel@tonic-gate 	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
18040Sstevel@tonic-gate 		ret = DDI_FAILURE;
18050Sstevel@tonic-gate 		goto l23ready_done;
18060Sstevel@tonic-gate 	}
1807118Sjchu 	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;
18080Sstevel@tonic-gate 
18090Sstevel@tonic-gate 	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
1810118Sjchu 	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1811118Sjchu 		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
1812118Sjchu 		    &px_p->px_l23ready_lock, end);
18130Sstevel@tonic-gate 		/*
18140Sstevel@tonic-gate 		 * If cv_timedwait returns -1, one of the following happened:
18150Sstevel@tonic-gate 		 * 1) it timed out, or
18160Sstevel@tonic-gate 		 * 2) there was a premature wakeup, but by the time
18170Sstevel@tonic-gate 		 * cv_timedwait is called again end < lbolt, i.e.
18180Sstevel@tonic-gate 		 * end is in the past, or
18190Sstevel@tonic-gate 		 * 3) end < lbolt was already true by the time the
18200Sstevel@tonic-gate 		 * first cv_timedwait call was made.
18210Sstevel@tonic-gate 		 */
18220Sstevel@tonic-gate 		if (timeleft == -1)
18230Sstevel@tonic-gate 			break;
18240Sstevel@tonic-gate 	}
1825118Sjchu 	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
18260Sstevel@tonic-gate 		/*
18270Sstevel@tonic-gate 		 * Either we timed out or the interrupt didn't get a
18280Sstevel@tonic-gate 		 * chance to grab the mutex and set the flag.
18290Sstevel@tonic-gate 		 * Release the mutex and delay for some time.
18300Sstevel@tonic-gate 		 * This will 1) give the interrupt a chance to
18310Sstevel@tonic-gate 		 * set the flag and 2) create a delay between two
18320Sstevel@tonic-gate 		 * consecutive requests.
18330Sstevel@tonic-gate 		 */
1834118Sjchu 		mutex_exit(&px_p->px_l23ready_lock);
18351147Sjchu 		delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
1836118Sjchu 		mutex_held = 0;
1837118Sjchu 		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
18380Sstevel@tonic-gate 			ret = DDI_FAILURE;
18390Sstevel@tonic-gate 			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
18400Sstevel@tonic-gate 			    " for PME_TO_ACK\n");
18410Sstevel@tonic-gate 		}
18420Sstevel@tonic-gate 	}
1843287Smg140465 	px_p->px_pm_flags &=
1844287Smg140465 	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);
18450Sstevel@tonic-gate 
18460Sstevel@tonic-gate l23ready_done:
1847118Sjchu 	if (mutex_held)
1848118Sjchu 		mutex_exit(&px_p->px_l23ready_lock);
1849118Sjchu 	/*
1850118Sjchu 	 * Wait until the link is in L1 idle, if sending PME_Turn_Off
1851118Sjchu 	 * was successful.
1852118Sjchu 	 */
1853118Sjchu 	if (ret == DDI_SUCCESS) {
1854118Sjchu 		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
1855118Sjchu 			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
1856287Smg140465 			    " even though we received PME_To_ACK.\n");
1857287Smg140465 			/*
1858287Smg140465 			 * Workaround for hardware bug with P25.
1859287Smg140465 			 * Due to a hardware bug in P25, the link state
1860287Smg140465 			 * will be Detect rather than L1 after the link
1861287Smg140465 			 * has transitioned to the L23Ready state. Since
1862287Smg140465 			 * we cannot tell whether the link is in L23Ready
1863287Smg140465 			 * without Fire's state being L1_idle, we delay
1864287Smg140465 			 * here to make sure that the link has had time
1865287Smg140465 			 * to transition to the L23Ready state.
1866287Smg140465 			 */
18671147Sjchu 			delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
1868287Smg140465 		}
1869287Smg140465 		pwr_p->pwr_link_lvl = PM_LEVEL_L3;
1870118Sjchu 
1871118Sjchu 	}
18720Sstevel@tonic-gate 	mutex_exit(&pwr_p->pwr_lock);
18730Sstevel@tonic-gate 	return (ret);
18740Sstevel@tonic-gate }
18750Sstevel@tonic-gate 
1876118Sjchu /*
1877118Sjchu  * Message interrupt handler intended to be shared for both
1878118Sjchu  * PME and PME_TO_ACK msg handling, currently only handles
1879118Sjchu  * PME_To_ACK message.
1880118Sjchu  */
1881118Sjchu uint_t
1882118Sjchu px_pmeq_intr(caddr_t arg)
1883118Sjchu {
1884118Sjchu 	px_t	*px_p = (px_t *)arg;
1885118Sjchu 
1886287Smg140465 	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
1887118Sjchu 	mutex_enter(&px_p->px_l23ready_lock);
1888118Sjchu 	cv_broadcast(&px_p->px_l23ready_cv);
1889118Sjchu 	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
1890118Sjchu 		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
1891118Sjchu 	} else {
1892118Sjchu 		/*
1893118Sjchu 		 * This may be the second ACK received. If so,
1894118Sjchu 		 * we should be receiving it during the wait-for-L1 stage.
1895118Sjchu 		 */
1896118Sjchu 		px_p->px_pmetoack_ignored++;
1897118Sjchu 	}
1898118Sjchu 	mutex_exit(&px_p->px_l23ready_lock);
1899118Sjchu 	return (DDI_INTR_CLAIMED);
1900118Sjchu }
1901118Sjchu 
1902118Sjchu static int
1903118Sjchu px_pre_pwron_check(px_t *px_p)
1904118Sjchu {
1905118Sjchu 	pcie_pwr_t	*pwr_p;
1906118Sjchu 
1907118Sjchu 	/* If no PM info, return failure */
1908118Sjchu 	if (!PCIE_PMINFO(px_p->px_dip) ||
1909118Sjchu 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1910118Sjchu 		return (DDI_FAILURE);
1911118Sjchu 
1912287Smg140465 	/*
1913287Smg140465 	 * For spec-compliant downstream cards, a link down event
1914287Smg140465 	 * is expected when the device is powered on.
1915287Smg140465 	 */
1916287Smg140465 	px_p->px_pm_flags |= PX_LDN_EXPECTED;
1917118Sjchu 	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
1918118Sjchu }
1919118Sjchu 
1920118Sjchu static int
1921118Sjchu px_goto_l0(px_t *px_p)
1922118Sjchu {
1923118Sjchu 	pcie_pwr_t	*pwr_p;
1924118Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1925118Sjchu 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
1926118Sjchu 	int		ret = DDI_SUCCESS;
19271147Sjchu 	uint64_t	time_spent = 0;
1928118Sjchu 
1929118Sjchu 	/* If no PM info, return failure */
1930118Sjchu 	if (!PCIE_PMINFO(px_p->px_dip) ||
1931118Sjchu 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1932118Sjchu 		return (DDI_FAILURE);
1933118Sjchu 
1934118Sjchu 	mutex_enter(&pwr_p->pwr_lock);
1935287Smg140465 	/*
19361147Sjchu 	 * The following link retrain activity will cause LDN and LUP events.
19371147Sjchu 	 * Receiving LDN prior to receiving LUP is expected, not an error, in
19381147Sjchu 	 * this case.  Receiving LUP indicates the link is fully up to support
19391147Sjchu 	 * powering up the downstream device; any further LDN or LUP outside
19401147Sjchu 	 * this context is an error.
1941287Smg140465 	 */
19421147Sjchu 	px_p->px_lup_pending = 1;
1943118Sjchu 	if (px_link_retrain(csr_base) != DDI_SUCCESS) {
1944118Sjchu 		ret = DDI_FAILURE;
1945118Sjchu 		goto l0_done;
1946118Sjchu 	}
1947118Sjchu 
19481147Sjchu 	/* The LUP event takes on the order of 15 ms to occur */
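	/*
	 * px_lup_poll_interval and px_lup_poll_to are driver tunables in
	 * microseconds (drv_usecwait() units); px_lup_pending is presumably
	 * cleared by the LUP message handler elsewhere in the driver.
	 */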
19491147Sjchu 	for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to);
19501147Sjchu 	    time_spent += px_lup_poll_interval)
19511147Sjchu 		drv_usecwait(px_lup_poll_interval);
19521147Sjchu 	if (px_p->px_lup_pending)
19531147Sjchu 		ret = DDI_FAILURE;
1954118Sjchu l0_done:
1955287Smg140465 	px_enable_detect_quiet(csr_base);
1956118Sjchu 	if (ret == DDI_SUCCESS)
1957287Smg140465 		pwr_p->pwr_link_lvl = PM_LEVEL_L0;
1958118Sjchu 	mutex_exit(&pwr_p->pwr_lock);
1959118Sjchu 	return (ret);
1960118Sjchu }
1961118Sjchu 
19620Sstevel@tonic-gate /*
19630Sstevel@tonic-gate  * Extract the driver's binding name to identify which chip we're binding to.
19640Sstevel@tonic-gate  * Whenever a new bus bridge is created, the driver alias entry should be
19650Sstevel@tonic-gate  * added here to identify the device if needed.  If a device isn't added,
19660Sstevel@tonic-gate  * the identity defaults to PX_CHIP_UNIDENTIFIED.
19670Sstevel@tonic-gate  */
19680Sstevel@tonic-gate static uint32_t
19690Sstevel@tonic-gate px_identity_chip(px_t *px_p)
19700Sstevel@tonic-gate {
19710Sstevel@tonic-gate 	dev_info_t	*dip = px_p->px_dip;
19720Sstevel@tonic-gate 	char		*name = ddi_binding_name(dip);
19730Sstevel@tonic-gate 	uint32_t	revision = 0;
19740Sstevel@tonic-gate 
19750Sstevel@tonic-gate 	revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
19760Sstevel@tonic-gate 	    "module-revision#", 0);
19770Sstevel@tonic-gate 
19780Sstevel@tonic-gate 	/* Check for Fire driver binding name */
1979226Set142600 	if ((strcmp(name, "pci108e,80f0") == 0) ||
1980226Set142600 	    (strcmp(name, "pciex108e,80f0") == 0)) {
19810Sstevel@tonic-gate 		DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: "
19820Sstevel@tonic-gate 		    "name %s module-revision %d\n", ddi_driver_name(dip),
19830Sstevel@tonic-gate 		    ddi_get_instance(dip), name, revision);
19840Sstevel@tonic-gate 
19850Sstevel@tonic-gate 		return (PX_CHIP_ID(PX_CHIP_FIRE, revision, 0x00));
19860Sstevel@tonic-gate 	}
19870Sstevel@tonic-gate 
1988*1772Sjl139090 	/* Check for Oberon driver binding name */
1989*1772Sjl139090 	if (strcmp(name, "pciex108e,80f8") == 0) {
1990*1772Sjl139090 		DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: "
1991*1772Sjl139090 		    "name %s module-revision %d\n", ddi_driver_name(dip),
1992*1772Sjl139090 		    ddi_get_instance(dip), name, revision);
1993*1772Sjl139090 
1994*1772Sjl139090 		return (PX_CHIP_ID(PX_CHIP_OBERON, revision, 0x00));
1995*1772Sjl139090 	}
1996*1772Sjl139090 
19970Sstevel@tonic-gate 	DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
19980Sstevel@tonic-gate 	    ddi_driver_name(dip), ddi_get_instance(dip), name, revision);
19990Sstevel@tonic-gate 
20000Sstevel@tonic-gate 	return (PX_CHIP_UNIDENTIFIED);
20010Sstevel@tonic-gate }
200227Sjchu 
200327Sjchu int
200427Sjchu px_err_add_intr(px_fault_t *px_fault_p)
200527Sjchu {
200627Sjchu 	dev_info_t	*dip = px_fault_p->px_fh_dip;
200727Sjchu 	px_t		*px_p = DIP_TO_STATE(dip);
200827Sjchu 
200927Sjchu 	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
201027Sjchu 		px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL) == 0);
201127Sjchu 
201227Sjchu 	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);
201327Sjchu 
201427Sjchu 	return (DDI_SUCCESS);
201527Sjchu }
201627Sjchu 
201727Sjchu void
201827Sjchu px_err_rem_intr(px_fault_t *px_fault_p)
201927Sjchu {
202027Sjchu 	dev_info_t	*dip = px_fault_p->px_fh_dip;
202127Sjchu 	px_t		*px_p = DIP_TO_STATE(dip);
202227Sjchu 
202327Sjchu 	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
202427Sjchu 		IB_INTR_WAIT);
2025965Sgovinda 
2026965Sgovinda 	rem_ivintr(px_fault_p->px_fh_sysino, NULL);
202727Sjchu }
202827Sjchu 
20291648Sjchu /*
20301648Sjchu  * px_cb_add_intr() - Called from attach(9E) to create the CB if it does not
20311648Sjchu  * yet exist, always add the CB interrupt vector, but enable it only once.
20321648Sjchu  */
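/*
 * All px leaves sharing a CB find it through px_get_cb()/px_set_cb()
 * (JBUS_SCRATCH_1 on Fire, the software scratch array on Oberon) and chain
 * themselves onto cb_p->pxl; the first leaf on the list acts as the proxy
 * with the CB interrupt enabled.
 */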
20331648Sjchu int
20341648Sjchu px_cb_add_intr(px_fault_t *fault_p)
20351648Sjchu {
20361648Sjchu 	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip);
20371648Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2038*1772Sjl139090 	px_cb_t		*cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip);
20391648Sjchu 	px_cb_list_t	*pxl, *pxl_new;
20401648Sjchu 	cpuid_t		cpuid;
20411648Sjchu 
20421648Sjchu 
20431648Sjchu 	if (cb_p == NULL) {
20441648Sjchu 		cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP);
20451648Sjchu 		mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER, NULL);
20461648Sjchu 		cb_p->px_cb_func = px_cb_intr;
20471648Sjchu 		pxu_p->px_cb_p = cb_p;
2048*1772Sjl139090 		px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p);
20491648Sjchu 	} else
20501648Sjchu 		pxu_p->px_cb_p = cb_p;
20511648Sjchu 
20521648Sjchu 	mutex_enter(&cb_p->cb_mutex);
20531648Sjchu 
20541648Sjchu 	VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL,
20551648Sjchu 	    cb_p->px_cb_func, (caddr_t)cb_p, NULL) == 0);
20561648Sjchu 
20571648Sjchu 	if (cb_p->pxl == NULL) {
20581648Sjchu 
20591648Sjchu 		cpuid = intr_dist_cpuid();
20601648Sjchu 		px_ib_intr_enable(px_p, cpuid, fault_p->px_intr_ino);
20611648Sjchu 
20621648Sjchu 		pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
20631648Sjchu 		pxl->pxp = px_p;
20641648Sjchu 
20651648Sjchu 		cb_p->pxl = pxl;
20661648Sjchu 		cb_p->sysino = fault_p->px_fh_sysino;
20671648Sjchu 		cb_p->cpuid = cpuid;
20681648Sjchu 
20691648Sjchu 	} else {
20701648Sjchu 		/*
20711648Sjchu 		 * Find the last pxl, or
20721648Sjchu 		 * stop short on encountering a redundant entry, or
20731648Sjchu 		 * both.
20741648Sjchu 		 */
20751648Sjchu 		pxl = cb_p->pxl;
20761648Sjchu 		for (; !(pxl->pxp == px_p) && pxl->next; pxl = pxl->next);
20771648Sjchu 		if (pxl->pxp == px_p) {
20781648Sjchu 			cmn_err(CE_WARN, "px_cb_add_intr: reregister sysino "
20791650Sjchu 			    "%lx by px_p 0x%p\n", cb_p->sysino, (void *)px_p);
			mutex_exit(&cb_p->cb_mutex);
20801648Sjchu 			return (DDI_FAILURE);
20811648Sjchu 		}
20821648Sjchu 
20831648Sjchu 		/* add to linked list */
20841648Sjchu 		pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
20851648Sjchu 		pxl_new->pxp = px_p;
20861648Sjchu 		pxl->next = pxl_new;
20871648Sjchu 	}
20881648Sjchu 	cb_p->attachcnt++;
20891648Sjchu 
20901648Sjchu 	mutex_exit(&cb_p->cb_mutex);
20911648Sjchu 
20921648Sjchu 	return (DDI_SUCCESS);
20931648Sjchu }
20941648Sjchu 
20951648Sjchu /*
20961648Sjchu  * px_cb_rem_intr() - Called from detach(9E) to remove its CB
20971648Sjchu  * interrupt vector, shift the proxy to the next available px,
20981648Sjchu  * or disable the CB interrupt when it is the last one.
20991648Sjchu  */
21001648Sjchu void
21011648Sjchu px_cb_rem_intr(px_fault_t *fault_p)
21021648Sjchu {
21031648Sjchu 	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp;
21041648Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
21051648Sjchu 	px_cb_t		*cb_p = PX2CB(px_p);
21061648Sjchu 	px_cb_list_t	*pxl, *prev;
21071648Sjchu 	px_fault_t	*f_p;
21081648Sjchu 
21091648Sjchu 	ASSERT(cb_p->pxl);
21101648Sjchu 
21111648Sjchu 	/* De-list the target px, move the next px up */
21121648Sjchu 
21131648Sjchu 	mutex_enter(&cb_p->cb_mutex);
21141648Sjchu 
21151648Sjchu 	pxl = cb_p->pxl;
21161648Sjchu 	if (pxl->pxp == px_p) {
21171648Sjchu 		cb_p->pxl = pxl->next;
21181648Sjchu 	} else {
21191648Sjchu 		prev = pxl;
21201648Sjchu 		pxl = pxl->next;
21211648Sjchu 		for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next);
21221648Sjchu 		if (!pxl) {
21231648Sjchu 			cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p "
21241650Sjchu 			    "in registered CB list.", (void *)px_p);
			mutex_exit(&cb_p->cb_mutex);
21251648Sjchu 			return;
21261648Sjchu 		}
21271648Sjchu 		prev->next = pxl->next;
21281648Sjchu 	}
21291648Sjchu 	kmem_free(pxl, sizeof (px_cb_list_t));
21301648Sjchu 
21311648Sjchu 	if (fault_p->px_fh_sysino == cb_p->sysino) {
21321648Sjchu 		px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino,
21331648Sjchu 		    IB_INTR_WAIT);
21341648Sjchu 
21351648Sjchu 		if (cb_p->pxl) {
21361648Sjchu 			pxp = cb_p->pxl->pxp;
21371648Sjchu 			f_p = &pxp->px_cb_fault;
21381648Sjchu 			cb_p->sysino = f_p->px_fh_sysino;
21391648Sjchu 
21401648Sjchu 			PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
21411650Sjchu 			(void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino,
21421648Sjchu 			    INTR_IDLE_STATE);
21431648Sjchu 		}
21441648Sjchu 	}
21451648Sjchu 
21461648Sjchu 	rem_ivintr(fault_p->px_fh_sysino, NULL);
21471648Sjchu 	pxu_p->px_cb_p = NULL;
21481648Sjchu 	cb_p->attachcnt--;
21491648Sjchu 	if (cb_p->pxl) {
21501648Sjchu 		mutex_exit(&cb_p->cb_mutex);
21511648Sjchu 		return;
21521648Sjchu 	}
21531648Sjchu 	mutex_exit(&cb_p->cb_mutex);
21541648Sjchu 
21551648Sjchu 	mutex_destroy(&cb_p->cb_mutex);
2156*1772Sjl139090 	px_set_cb(fault_p->px_fh_dip, 0ull);
21571648Sjchu 	kmem_free(cb_p, sizeof (px_cb_t));
21581648Sjchu }
21591648Sjchu 
21601648Sjchu /*
21611648Sjchu  * px_cb_intr() - sun4u only,  CB interrupt dispatcher
21621648Sjchu  */
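/*
 * Delegates to the px_cb_fault handler of the first attached px on the CB
 * list; returns DDI_INTR_UNCLAIMED if no attached px is found.
 */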
21631648Sjchu uint_t
21641648Sjchu px_cb_intr(caddr_t arg)
21651648Sjchu {
21661648Sjchu 	px_cb_t		*cb_p = (px_cb_t *)arg;
21671648Sjchu 	px_cb_list_t	*pxl = cb_p->pxl;
21681648Sjchu 	px_t		*pxp = pxl ? pxl->pxp : NULL;
21691648Sjchu 	px_fault_t	*fault_p;
21701648Sjchu 
21711648Sjchu 	while (pxl && pxp && (pxp->px_state != PX_ATTACHED)) {
21721648Sjchu 		pxl = pxl->next;
21731648Sjchu 		pxp = (pxl) ? pxl->pxp : NULL;
21741648Sjchu 	}
21751648Sjchu 
21761648Sjchu 	if (pxp) {
21771648Sjchu 		fault_p = &pxp->px_cb_fault;
21781648Sjchu 		return (fault_p->px_err_func((caddr_t)fault_p));
21791648Sjchu 	} else
21801648Sjchu 		return (DDI_INTR_UNCLAIMED);
21811648Sjchu }
21821648Sjchu 
21831648Sjchu /*
21841648Sjchu  * px_cb_intr_redist() - sun4u only, CB interrupt redistribution
21851648Sjchu  */
21861648Sjchu void
21871648Sjchu px_cb_intr_redist(px_t	*px_p)
21881648Sjchu {
21891648Sjchu 	px_fault_t	*f_p = &px_p->px_cb_fault;
21901648Sjchu 	px_cb_t		*cb_p = PX2CB(px_p);
21911648Sjchu 	devino_t	ino = px_p->px_inos[PX_INTR_XBC];
21921648Sjchu 	cpuid_t		cpuid;
21931648Sjchu 
21941648Sjchu 	mutex_enter(&cb_p->cb_mutex);
21951648Sjchu 
21961648Sjchu 	if (cb_p->sysino != f_p->px_fh_sysino) {
21971648Sjchu 		mutex_exit(&cb_p->cb_mutex);
21981648Sjchu 		return;
21991648Sjchu 	}
22001648Sjchu 
22011648Sjchu 	cb_p->cpuid = cpuid = intr_dist_cpuid();
22021648Sjchu 	px_ib_intr_dist_en(px_p->px_dip, cpuid, ino, B_FALSE);
22031648Sjchu 
22041648Sjchu 	mutex_exit(&cb_p->cb_mutex);
22051648Sjchu }
22061648Sjchu 
220727Sjchu #ifdef FMA
220827Sjchu void
220927Sjchu px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
221027Sjchu {
221127Sjchu 	/* populate the rc_status by reading the registers - TBD */
221227Sjchu }
221327Sjchu #endif /* FMA */
2214383Set142600 
2215383Set142600 /*
2216383Set142600  * Unprotected raw reads/writes of fabric device's config space.
2217383Set142600  * Only used for temporary PCI-E Fabric Error Handling.
2218383Set142600  */
2219383Set142600 uint32_t
22201648Sjchu px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
22211648Sjchu {
2222383Set142600 	px_ranges_t	*rp = px_p->px_ranges_p;
2223383Set142600 	uint64_t	range_prop, base_addr;
2224383Set142600 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2225383Set142600 	uint32_t	val;
2226383Set142600 
2227383Set142600 	/* Get Fire's Physical Base Address */
2228*1772Sjl139090 	range_prop = px_get_range_prop(px_p, rp, bank);
2229383Set142600 
2230383Set142600 	/* Get config space first. */
2231383Set142600 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2232383Set142600 
2233383Set142600 	val = ldphysio(base_addr);
2234383Set142600 
2235383Set142600 	return (LE_32(val));
2236383Set142600 }
2237383Set142600 
2238383Set142600 void
2239383Set142600 px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
2240383Set142600     uint32_t val) {
2241383Set142600 	px_ranges_t	*rp = px_p->px_ranges_p;
2242383Set142600 	uint64_t	range_prop, base_addr;
2243383Set142600 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2244383Set142600 
2245383Set142600 	/* Get Fire's Physical Base Address */
2246*1772Sjl139090 	range_prop = px_get_range_prop(px_p, rp, bank);
2247383Set142600 
2248383Set142600 	/* Get config space first. */
2249383Set142600 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2250383Set142600 
2251383Set142600 	stphysio(base_addr, LE_32(val));
2252383Set142600 }
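
/*
 * Illustrative usage (bdf value hypothetical): read the 32-bit Vendor
 * ID/Device ID word at config space offset 0 of a fabric device:
 *
 *	uint32_t id = px_fab_get(px_p, bdf, 0);
 */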
2253435Sjchu 
2254435Sjchu /*
2255435Sjchu  * cpr callback
2256435Sjchu  *
2257435Sjchu  * disable fabric error msg interrupt prior to suspending
2258435Sjchu  * all device drivers; re-enable fabric error msg interrupt
2259435Sjchu  * after all devices are resumed.
2260435Sjchu  */
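/*
 * The three fabric error message interrupts (correctable, non-fatal, fatal)
 * are identified by mapping their MSIQ IDs back to device inos via
 * px_msiqid_to_devino() before being disabled/re-enabled below.
 */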
2261435Sjchu static boolean_t
2262435Sjchu px_cpr_callb(void *arg, int code)
2263435Sjchu {
2264435Sjchu 	px_t		*px_p = (px_t *)arg;
2265435Sjchu 	px_ib_t		*ib_p = px_p->px_ib_p;
2266435Sjchu 	px_pec_t	*pec_p = px_p->px_pec_p;
2267435Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2268435Sjchu 	caddr_t		csr_base;
2269435Sjchu 	devino_t	ce_ino, nf_ino, f_ino;
2270435Sjchu 	px_ib_ino_info_t	*ce_ino_p, *nf_ino_p, *f_ino_p;
2271435Sjchu 	uint64_t	imu_log_enable, imu_intr_enable;
2272435Sjchu 	uint64_t	imu_log_mask, imu_intr_mask;
2273435Sjchu 
2274435Sjchu 	ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
2275435Sjchu 	nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
2276435Sjchu 	f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
2277435Sjchu 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2278435Sjchu 
2279435Sjchu 	imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
2280435Sjchu 	imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);
2281435Sjchu 
2282435Sjchu 	imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
2283435Sjchu 	    BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
2284435Sjchu 	    BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);
2285435Sjchu 
2286435Sjchu 	imu_intr_mask =
2287435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
2288435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
2289435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
2290435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
2291435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
2292435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);
2293435Sjchu 
2294435Sjchu 	switch (code) {
2295435Sjchu 	case CB_CODE_CPR_CHKPT:
2296435Sjchu 		/* disable imu rbne on corr/nonfatal/fatal errors */
2297435Sjchu 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
2298435Sjchu 		    imu_log_enable & (~imu_log_mask));
2299435Sjchu 
2300435Sjchu 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
2301435Sjchu 		    imu_intr_enable & (~imu_intr_mask));
2302435Sjchu 
2303435Sjchu 		/* disable CORR intr mapping */
2304435Sjchu 		px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);
2305435Sjchu 
2306435Sjchu 		/* disable NON FATAL intr mapping */
2307435Sjchu 		px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);
2308435Sjchu 
2309435Sjchu 		/* disable FATAL intr mapping */
2310435Sjchu 		px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);
2311435Sjchu 
2312435Sjchu 		break;
2313435Sjchu 
2314435Sjchu 	case CB_CODE_CPR_RESUME:
2315435Sjchu 		mutex_enter(&ib_p->ib_ino_lst_mutex);
2316435Sjchu 
2317435Sjchu 		ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
2318435Sjchu 		nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
2319435Sjchu 		f_ino_p = px_ib_locate_ino(ib_p, f_ino);
2320435Sjchu 
2321435Sjchu 		/* enable CORR intr mapping */
2322435Sjchu 		if (ce_ino_p)
2323435Sjchu 			px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
2324435Sjchu 		else
2325435Sjchu 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2326435Sjchu 			    "reenable PCIe Correctable msg intr.\n");
2327435Sjchu 
2328435Sjchu 		/* enable NON FATAL intr mapping */
2329435Sjchu 		if (nf_ino_p)
2330435Sjchu 			px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
2331435Sjchu 		else
2332435Sjchu 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2333435Sjchu 			    "reenable PCIe Non Fatal msg intr.\n");
2334435Sjchu 
2335435Sjchu 		/* enable FATAL intr mapping */
2336435Sjchu 		if (f_ino_p)
2337435Sjchu 			px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
2338435Sjchu 		else
2339435Sjchu 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2340435Sjchu 			    "reenable PCIe Fatal msg intr.\n");
2341435Sjchu 
2342435Sjchu 		mutex_exit(&ib_p->ib_ino_lst_mutex);
2343435Sjchu 
2344435Sjchu 		/* enable corr/nonfatal/fatal not enable error */
2345435Sjchu 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
2346435Sjchu 		    (imu_log_mask & px_imu_log_mask)));
2347435Sjchu 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
2348435Sjchu 		    (imu_intr_mask & px_imu_intr_mask)));
2349435Sjchu 
2350435Sjchu 		break;
2351435Sjchu 	}
2352435Sjchu 
2353435Sjchu 	return (B_TRUE);
2354435Sjchu }
2355435Sjchu 
2356435Sjchu /*
2357*1772Sjl139090  * Fetch the chip's range property value.
2358*1772Sjl139090  */
2359*1772Sjl139090 uint64_t
2360*1772Sjl139090 px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank)
2361*1772Sjl139090 {
2362*1772Sjl139090 	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2363*1772Sjl139090 	uint64_t mask, range_prop;
2364*1772Sjl139090 
2365*1772Sjl139090 	switch (PX_CHIP_TYPE(pxu_p)) {
2366*1772Sjl139090 	case PX_CHIP_OBERON:
2367*1772Sjl139090 		mask = OBERON_RANGE_PROP_MASK;
2368*1772Sjl139090 		break;
2369*1772Sjl139090 	case PX_CHIP_FIRE:
2370*1772Sjl139090 		mask = FIRE_RANGE_PROP_MASK;
2371*1772Sjl139090 		break;
2372*1772Sjl139090 	default:
2373*1772Sjl139090 		mask = FIRE_RANGE_PROP_MASK;
2374*1772Sjl139090 	}
2375*1772Sjl139090 	range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) |
2376*1772Sjl139090 		rp[bank].parent_low;
2377*1772Sjl139090 
2378*1772Sjl139090 	return (range_prop);
2379*1772Sjl139090 }
2380*1772Sjl139090 
2381*1772Sjl139090 /*
2382435Sjchu  * add cpr callback
2383435Sjchu  */
2384435Sjchu void
2385435Sjchu px_cpr_add_callb(px_t *px_p)
2386435Sjchu {
2387435Sjchu 	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
2388435Sjchu 	CB_CL_CPR_POST_USER, "px_cpr");
2389435Sjchu }
2390435Sjchu 
2391435Sjchu /*
2392435Sjchu  * remove cpr callback
2393435Sjchu  */
2394435Sjchu void
2395435Sjchu px_cpr_rem_callb(px_t *px_p)
2396435Sjchu {
2397435Sjchu 	(void) callb_delete(px_p->px_cprcb_id);
2398435Sjchu }
23991531Skini 
24001531Skini /*ARGSUSED*/
2401*1772Sjl139090 static uint_t
2402*1772Sjl139090 px_hp_intr(caddr_t arg1, caddr_t arg2)
2403*1772Sjl139090 {
2404*1772Sjl139090 	px_t *px_p = (px_t *)arg1;
2405*1772Sjl139090 	int rval;
2406*1772Sjl139090 
2407*1772Sjl139090 	rval = pciehpc_intr(px_p->px_dip);
2408*1772Sjl139090 
2409*1772Sjl139090 #ifdef  DEBUG
2410*1772Sjl139090 	if (rval == DDI_INTR_UNCLAIMED)
2411*1772Sjl139090 	    cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n",
2412*1772Sjl139090 		ddi_driver_name(px_p->px_dip),
2413*1772Sjl139090 		ddi_get_instance(px_p->px_dip));
2414*1772Sjl139090 #endif
2415*1772Sjl139090 
2416*1772Sjl139090 	return (rval);
2417*1772Sjl139090 }
2418*1772Sjl139090 
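/*
 * Hotplug support: px_lib_hotplug_init() initializes PCIe hotplug through
 * the hvio shim and, on success, wires the PX_INTR_HOTPLUG devino to
 * px_hp_intr(), which simply hands the interrupt off to pciehpc_intr().
 */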
24191531Skini int
24201531Skini px_lib_hotplug_init(dev_info_t *dip, void *arg)
24211531Skini {
2422*1772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
2423*1772Sjl139090 	uint64_t ret;
2424*1772Sjl139090 
2425*1772Sjl139090 	if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) {
2426*1772Sjl139090 		sysino_t sysino;
2427*1772Sjl139090 
2428*1772Sjl139090 		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
2429*1772Sjl139090 		    px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
2430*1772Sjl139090 		    DDI_SUCCESS) {
2431*1772Sjl139090 #ifdef	DEBUG
2432*1772Sjl139090 			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
2433*1772Sjl139090 			    ddi_driver_name(px_p->px_dip),
2434*1772Sjl139090 			    ddi_get_instance(px_p->px_dip));
2435*1772Sjl139090 #endif
2436*1772Sjl139090 			return (DDI_FAILURE);
2437*1772Sjl139090 		}
2438*1772Sjl139090 
2439*1772Sjl139090 		VERIFY(add_ivintr(sysino, PX_PCIEHP_PIL,
2440*1772Sjl139090 		    (intrfunc)px_hp_intr, (caddr_t)px_p, NULL) == 0);
2441*1772Sjl139090 	}
2442*1772Sjl139090 
2443*1772Sjl139090 	return (ret);
24441531Skini }
24451531Skini 
24461531Skini void
24471531Skini px_lib_hotplug_uninit(dev_info_t *dip)
24481531Skini {
2449*1772Sjl139090 	if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) {
2450*1772Sjl139090 		px_t	*px_p = DIP_TO_STATE(dip);
2451*1772Sjl139090 		sysino_t sysino;
2452*1772Sjl139090 
2453*1772Sjl139090 		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
2454*1772Sjl139090 		    px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
2455*1772Sjl139090 		    DDI_SUCCESS) {
2456*1772Sjl139090 #ifdef	DEBUG
2457*1772Sjl139090 			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
2458*1772Sjl139090 			    ddi_driver_name(px_p->px_dip),
2459*1772Sjl139090 			    ddi_get_instance(px_p->px_dip));
2460*1772Sjl139090 #endif
2461*1772Sjl139090 			return;
2462*1772Sjl139090 		}
2463*1772Sjl139090 
2464*1772Sjl139090 		rem_ivintr(sysino, NULL);
2465*1772Sjl139090 	}
24661531Skini }
2467