/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/modctl.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/ddi_impldefs.h>
#include <sys/vmem.h>
#include <sys/iommutsb.h>
#include <sys/cpuvar.h>
#include <sys/ivintr.h>
#include <sys/byteorder.h>
#include <px_obj.h>
#include <pcie_pwr.h>
#include <px_regs.h>
#include <px_csr.h>
#include <sys/machsystm.h>
#include "px_lib4u.h"
#include "px_err.h"

#pragma weak jbus_stst_order

extern void jbus_stst_order();

ulong_t px_mmu_dvma_end = 0xfffffffful;
uint_t px_ranges_phi_mask = 0xfffffffful;

static int px_goto_l23ready(px_t *px_p);
static int px_goto_l0(px_t *px_p);
static int px_pre_pwron_check(px_t *px_p);
static uint32_t px_identity_chip(px_t *px_p);
static boolean_t px_cpr_callb(void *arg, int code);

/*
 * px_lib_map_registers
 *
 * This function is called from the attach routine to map the registers
 * accessed by this driver.
 *
 * used by: px_attach()
 *
 * return value: DDI_FAILURE on failure
 */
int
px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip)
{
	ddi_device_acc_attr_t	attr;
	px_reg_bank_t		reg_bank = PX_REG_CSR;

	DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n",
		pxu_p, dip);

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;

	/*
	 * PCI CSR Base
	 */
	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
		goto fail;
	}

	reg_bank++;

	/*
	 * XBUS CSR Base
	 */
	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
		goto fail;
	}

	pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS;

done:
	for (; reg_bank >= PX_REG_CSR; reg_bank--) {
		DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n",
		    reg_bank, pxu_p->px_address[reg_bank]);
	}

	return (DDI_SUCCESS);

fail:
	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), reg_bank);

	for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) {
		pxu_p->px_address[reg_bank] = NULL;
		ddi_regs_map_free(&pxu_p->px_ac[reg_bank]);
	}

	return (DDI_FAILURE);
}

/*
 * px_lib_unmap_regs:
 *
 * This routine unmaps the registers mapped by px_lib_map_regs().
 *
 * used by: px_detach(), and error conditions in px_attach()
 *
 * return value: none
 */
void
px_lib_unmap_regs(pxu_t *pxu_p)
{
	int i;

	for (i = 0; i < PX_REG_MAX; i++) {
		if (pxu_p->px_ac[i])
			ddi_regs_map_free(&pxu_p->px_ac[i]);
	}
}
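
/*
 * Illustrative call sequence (a sketch, not the actual px_attach()/px_detach()
 * code) showing how the two routines above pair up around a pxu_t:
 *
 *	if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE)
 *		return (DDI_FAILURE);
 *	...use pxu_p->px_address[PX_REG_CSR] / pxu_p->px_address[PX_REG_XBC]...
 *	px_lib_unmap_regs(pxu_p);
 */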

int
px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	caddr_t		xbc_csr_base, csr_base;
	px_dvma_range_prop_t	px_dvma_range;
	uint32_t	chip_id;
	pxu_t		*pxu_p;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip);

	if ((chip_id = px_identity_chip(px_p)) == PX_CHIP_UNIDENTIFIED)
		return (DDI_FAILURE);

	switch (chip_id) {
	case FIRE_VER_10:
		cmn_err(CE_WARN, "FIRE Hardware Version 1.0 is not supported");
		return (DDI_FAILURE);
	case FIRE_VER_20:
		DBG(DBG_ATTACH, dip, "FIRE Hardware Version 2.0\n");
		break;
	default:
		cmn_err(CE_WARN, "%s%d: FIRE Hardware Version Unknown\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	/*
	 * Allocate platform specific structure and link it to
	 * the px state structure.
	 */
	pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
	pxu_p->chip_id = chip_id;
	pxu_p->portid  = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "portid", -1);

	/* Map in the registers */
	if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) {
		kmem_free(pxu_p, sizeof (pxu_t));

		return (DDI_FAILURE);
	}

	xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

	pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
	pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
	pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);

	/*
	 * Create "virtual-dma" property to support child devices
	 * needing to know DVMA range.
	 */
	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
	px_dvma_range.dvma_len = (uint32_t)
	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;

	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		"virtual-dma", (caddr_t)&px_dvma_range,
		sizeof (px_dvma_range_prop_t));
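	/*
	 * Worked example (illustrative only, assuming 8-byte TTEs and 8K
	 * IOMMU pages, i.e. MMU_PAGE_SHIFT == 13): a 128KB TSB holds
	 * 128K/8 = 16K entries, which map 16K * 8K = 128MB of DVMA space,
	 * so dvma_base would be 0x100000000 - 0x8000000 = 0xf8000000 and
	 * dvma_len would be 0x8000000.
	 */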
	/*
	 * Initialize all Fire hardware specific blocks.
	 */
	hvio_cb_init(xbc_csr_base, pxu_p);
	hvio_ib_init(csr_base, pxu_p);
	hvio_pec_init(csr_base, pxu_p);
	hvio_mmu_init(csr_base, pxu_p);

	px_p->px_plat_p = (void *)pxu_p;

	/*
	 * Initialize all the interrupt handlers
	 */
	px_err_reg_enable(px_p, PX_ERR_JBC);
	px_err_reg_enable(px_p, PX_ERR_MMU);
	px_err_reg_enable(px_p, PX_ERR_IMU);
	px_err_reg_enable(px_p, PX_ERR_TLU_UE);
	px_err_reg_enable(px_p, PX_ERR_TLU_CE);
	px_err_reg_enable(px_p, PX_ERR_TLU_OE);
	px_err_reg_enable(px_p, PX_ERR_ILU);
	px_err_reg_enable(px_p, PX_ERR_LPU_LINK);
	px_err_reg_enable(px_p, PX_ERR_LPU_PHY);
	px_err_reg_enable(px_p, PX_ERR_LPU_RX);
	px_err_reg_enable(px_p, PX_ERR_LPU_TX);
	px_err_reg_enable(px_p, PX_ERR_LPU_LTSSM);
	px_err_reg_enable(px_p, PX_ERR_LPU_GIGABLZ);

	/* Initialize device handle */
	*dev_hdl = (devhandle_t)csr_base;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);

	return (DDI_SUCCESS);
}

int
px_lib_dev_fini(dev_info_t *dip)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);

	/*
	 * Deinitialize all the interrupt handlers
	 */
	px_err_reg_disable(px_p, PX_ERR_JBC);
	px_err_reg_disable(px_p, PX_ERR_MMU);
	px_err_reg_disable(px_p, PX_ERR_IMU);
	px_err_reg_disable(px_p, PX_ERR_TLU_UE);
	px_err_reg_disable(px_p, PX_ERR_TLU_CE);
	px_err_reg_disable(px_p, PX_ERR_TLU_OE);
	px_err_reg_disable(px_p, PX_ERR_ILU);
	px_err_reg_disable(px_p, PX_ERR_LPU_LINK);
	px_err_reg_disable(px_p, PX_ERR_LPU_PHY);
	px_err_reg_disable(px_p, PX_ERR_LPU_RX);
	px_err_reg_disable(px_p, PX_ERR_LPU_TX);
	px_err_reg_disable(px_p, PX_ERR_LPU_LTSSM);
	px_err_reg_disable(px_p, PX_ERR_LPU_GIGABLZ);

	iommu_tsb_free(pxu_p->tsb_cookie);

	px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
	kmem_free(px_p->px_plat_p, sizeof (pxu_t));
	px_p->px_plat_p = NULL;

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
    sysino_t *sysino)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
	    "devino 0x%x\n", dip, devino);

	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
	    pxu_p, devino, sysino)) != H_EOK) {
		DBG(DBG_LIB_INT, dip,
		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
	    *sysino);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t *intr_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip),
	    sysino, intr_valid_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
	    *intr_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t intr_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
	    "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);

	if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip),
	    sysino, intr_valid_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t *intr_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip),
	    sysino, intr_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
	    *intr_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t intr_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
	    "intr_state 0x%x\n", dip, sysino, intr_state);

	if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip),
	    sysino, intr_state)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
	    dip, sysino);

	if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip),
	    sysino, cpuid)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
{
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
	    "cpuid 0x%x\n", dip, sysino, cpuid);

	if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip),
	    sysino, cpuid)) != H_EOK) {
		DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_reset(dev_info_t *dip)
{
	devino_t	ino;
	sysino_t	sysino;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);

	/* Reset all Interrupts */
	for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) {
		if (px_lib_intr_devino_to_sysino(dip, ino,
		    &sysino) != DDI_SUCCESS)
			return (BF_FATAL);

		if (px_lib_intr_setstate(dip, sysino,
		    INTR_IDLE_STATE) != DDI_SUCCESS)
			return (BF_FATAL);
	}

	return (BF_NONE);
}

/*ARGSUSED*/
int
px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
    io_attributes_t io_attributes, void *addr, size_t pfn_index,
    int flag)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx, flag 0x%x\n",
	    dip, tsbid, pages, io_attributes, addr, pfn_index, flag);

	if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
	    io_attributes, addr, pfn_index, flag)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "px_lib_iommu_map failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x\n", dip, tsbid, pages);

	if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
	    != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "px_lib_iommu_demap failed, ret 0x%lx\n", ret);

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid,
    io_attributes_t *attributes_p, r_addr_t *r_addr_p)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
	    dip, tsbid);

	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
	    attributes_p, r_addr_p)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);

		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE);
	}

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n",
	    *attributes_p, *r_addr_p);

	return (DDI_SUCCESS);
}


/*
 * Check DMA attributes against the system bypass range.
 * The bypass range is determined by the hardware.  Return it so the
 * common code can do generic checking against it.
 */
/*ARGSUSED*/
int
px_lib_dma_bypass_rngchk(ddi_dma_attr_t *attrp, uint64_t *lo_p, uint64_t *hi_p)
{
	*lo_p = MMU_BYPASS_BASE;
	*hi_p = MMU_BYPASS_END;

	return (DDI_SUCCESS);
}


/*ARGSUSED*/
int
px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra,
    io_attributes_t io_attributes, io_addr_t *io_addr_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
	    "attr 0x%x\n", dip, ra, io_attributes);

	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra,
	    io_attributes, io_addr_p)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
	    *io_addr_p);

	return (DDI_SUCCESS);
}

/*
 * bus dma sync entry point.
 */
/*ARGSUSED*/
int
px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	off_t off, size_t len, uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
	    dip, rdip, handle, off, len, cache_flags);

	/*
	 * jbus_stst_order is found only in certain cpu modules.
	 * Just return success if not present.
	 */
	if (&jbus_stst_order == NULL)
		return (DDI_SUCCESS);

	if (!(mp->dmai_flags & DMAI_FLAGS_INUSE)) {
		cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);

		return (DDI_FAILURE);
	}

	if (mp->dmai_flags & DMAI_FLAGS_NOSYNC)
		return (DDI_SUCCESS);

	/*
	 * No flush needed when sending data from memory to device.
	 * Nothing to do to "sync" memory to what the device already sees.
	 */
	if (!(mp->dmai_rflags & DDI_DMA_READ) ||
	    ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
		return (DDI_SUCCESS);

	/*
	 * Perform necessary cpu workaround to ensure jbus ordering.
	 * CPU's internal "invalidate FIFOs" are flushed.
	 */

#if !defined(lint)
	kpreempt_disable();
#endif
	jbus_stst_order();
#if !defined(lint)
	kpreempt_enable();
#endif
	return (DDI_SUCCESS);
}

/*
 * MSIQ Functions:
 */
/*ARGSUSED*/
int
px_lib_msiq_init(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	caddr_t		msiq_addr;
	px_dvma_addr_t	pg_index;
	size_t		size;
	int		ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);

	/*
	 * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
	 * and then initialize the base address register.
	 *
	 * Allocate entries from Fire IOMMU so that the resulting address
	 * is properly aligned.  Calculate the index of the first allocated
	 * entry.  Note: The size of the mapping is assumed to be a multiple
	 * of the page size.
	 */
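	/* Round the MSIQ buffer up to the next MMU page boundary. */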
	msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p +
	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);

	size = msiq_state_p->msiq_cnt *
	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

	pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
	    size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);

	if (pxu_p->msiq_mapped_p == NULL)
		return (DDI_FAILURE);

	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));

	if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
	    MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0,
	    MMU_MAP_BUF)) != DDI_SUCCESS) {
		DBG(DBG_LIB_MSIQ, dip,
		    "px_lib_iommu_map failed, ret 0x%lx\n", ret);

		(void) px_lib_msiq_fini(dip);
		return (DDI_FAILURE);
	}

	(void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_fini(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	px_dvma_addr_t	pg_index;
	size_t		size;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);

	/*
	 * Unmap and free the EQ memory that had been mapped
	 * into the Fire IOMMU.
	 */
	size = msiq_state_p->msiq_cnt *
	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));

	(void) px_lib_iommu_demap(px_p->px_dip,
	    PCI_TSBID(0, pg_index), MMU_BTOP(size));

	/* Free the entries from the Fire MMU */
	vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
	    (void *)pxu_p->msiq_mapped_p, size);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
    uint_t *msiq_rec_cnt_p)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	uint64_t	*msiq_addr;
	size_t		msiq_size;

	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p +
	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);
	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
	ra_p = (r_addr_t *)((caddr_t)msiq_addr + (msiq_id * msiq_size));

	*msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;

	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n",
	    ra_p, *msiq_rec_cnt_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t *msiq_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
	    *msiq_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t msiq_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);

	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t *msiq_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
	    *msiq_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t msiq_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);

	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t *msiq_head)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_head)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
	    *msiq_head);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t msiq_head)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);

	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_head)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
    msiqtail_t *msiq_tail)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_tail)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
	    *msiq_tail);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
void
px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p)
{
	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_p->msiq_curr;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
	    dip, eq_rec_p);

	if (!eq_rec_p->eq_rec_fmt_type) {
		/* Set msiq_rec_type to zero */
		msiq_rec_p->msiq_rec_type = 0;

		return;
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
	    "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
	    "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
	    "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
	    "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
	    eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
	    eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
	    eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);

	/*
	 * Only the upper 4 bits of eq_rec_fmt_type are used
	 * to identify the EQ record type.
	 */
	switch (eq_rec_p->eq_rec_fmt_type >> 3) {
	case EQ_REC_MSI32:
		msiq_rec_p->msiq_rec_type = MSI32_REC;

		msiq_rec_p->msiq_rec_data.msi.msi_data =
		    eq_rec_p->eq_rec_data0;
		break;
	case EQ_REC_MSI64:
		msiq_rec_p->msiq_rec_type = MSI64_REC;

		msiq_rec_p->msiq_rec_data.msi.msi_data =
		    eq_rec_p->eq_rec_data0;
		break;
	case EQ_REC_MSG:
		msiq_rec_p->msiq_rec_type = MSG_REC;

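		/*
		 * The low 3 bits of eq_rec_fmt_type carry the PCIe
		 * message routing code.
		 */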
		msiq_rec_p->msiq_rec_data.msg.msg_route =
		    eq_rec_p->eq_rec_fmt_type & 7;
		msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
		msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
		break;
	default:
		cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
		    "0x%lx is an unknown EQ record type",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    eq_rec_p->eq_rec_fmt_type);
		break;
	}

	msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
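	/*
	 * Reassemble the MSI address from the two EQ record fields:
	 * eq_rec_addr0 supplies the low-order bits (shifted up past the
	 * two implied zero LSBs) and eq_rec_addr1 bits 16 and above.
	 */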
	msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
	    (eq_rec_p->eq_rec_addr0 << 2));

	/* Zero out eq_rec_fmt_type field */
	eq_rec_p->eq_rec_fmt_type = 0;
}

/*
 * MSI Functions:
 */
/*ARGSUSED*/
int
px_lib_msi_init(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);

	if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
	    msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip, "px_lib_msi_init failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t *msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
	    msi_num, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
	    *msiq_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t msiq_id, msi_type_t msitype)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
	    "msiq_id 0x%x\n", dip, msi_num, msiq_id);

	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
	    msi_num, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t *msi_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
	    msi_num, msi_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
	    *msi_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t msi_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);

	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
	    msi_num, msi_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t *msi_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
	    msi_num, msi_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
	    *msi_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t msi_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
	    "msi_state 0x%x\n", dip, msi_num, msi_state);

	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
	    msi_num, msi_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * MSG Functions:
 */
/*ARGSUSED*/
int
px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t *msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
	    msg_type, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
	    *msiq_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
	    "msiq_id 0x%x\n", dip, msg_type, msiq_id);

	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
	    msg_type, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t *msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
	    *msg_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);

	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Suspend/Resume Functions:
 * Currently unsupported by hypervisor
 */
int
px_lib_suspend(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	devhandle_t	dev_hdl, xbus_dev_hdl;
	uint64_t	ret;

	DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);

	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];

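	/*
	 * The JBus common block is suspended only after the last
	 * attached leaf has suspended successfully.
	 */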
	if ((ret = hvio_suspend(dev_hdl, pxu_p)) == H_EOK) {
		px_p->px_cb_p->xbc_attachcnt--;
		if (px_p->px_cb_p->xbc_attachcnt == 0)
			if ((ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p))
			    != H_EOK)
				px_p->px_cb_p->xbc_attachcnt++;
	}

	return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS);
}

void
px_lib_resume(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	devhandle_t	dev_hdl, xbus_dev_hdl;
	devino_t	pec_ino = px_p->px_inos[PX_INTR_PEC];
	devino_t	xbc_ino = px_p->px_inos[PX_INTR_XBC];

	DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);

	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];

12350Sstevel@tonic-gate 	px_p->px_cb_p->xbc_attachcnt++;
12360Sstevel@tonic-gate 	if (px_p->px_cb_p->xbc_attachcnt == 1)
12370Sstevel@tonic-gate 		hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);
12380Sstevel@tonic-gate 	hvio_resume(dev_hdl, pec_ino, pxu_p);
12390Sstevel@tonic-gate }
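
/*
 * The xbc_attachcnt reference count above ensures that the shared
 * common block (XBC) state is saved by hvio_cb_suspend() only when the
 * last px leaf sharing it suspends, and restored by hvio_cb_resume()
 * only when the first leaf resumes; hvio_suspend()/hvio_resume() handle
 * the per-leaf (PEC) state on every call.
 */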
12400Sstevel@tonic-gate 
12410Sstevel@tonic-gate /*
12420Sstevel@tonic-gate  * Misc Functions:
12430Sstevel@tonic-gate  * Currently unsupported by hypervisor
12440Sstevel@tonic-gate  */
12450Sstevel@tonic-gate uint64_t
124627Sjchu px_lib_get_cb(dev_info_t *dip)
12470Sstevel@tonic-gate {
124827Sjchu 	px_t	*px_p = DIP_TO_STATE(dip);
124927Sjchu 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
125027Sjchu 
125127Sjchu 	return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1));
12520Sstevel@tonic-gate }
12530Sstevel@tonic-gate 
12540Sstevel@tonic-gate void
125527Sjchu px_lib_set_cb(dev_info_t *dip, uint64_t val)
12560Sstevel@tonic-gate {
125727Sjchu 	px_t	*px_p = DIP_TO_STATE(dip);
125827Sjchu 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
125927Sjchu 
126027Sjchu 	CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val);
12610Sstevel@tonic-gate }
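
/*
 * px_lib_get_cb()/px_lib_set_cb() simply read and write the 64-bit
 * JBUS_SCRATCH_1 CSR in the common block.  The scratch register is
 * presumably used by the common px code as a per-chip cookie (e.g. to
 * stash the shared common-block soft state pointer) that must survive
 * across individual leaf attach/detach cycles.
 */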
12620Sstevel@tonic-gate 
12630Sstevel@tonic-gate /*ARGSUSED*/
12640Sstevel@tonic-gate int
12650Sstevel@tonic-gate px_lib_map_vconfig(dev_info_t *dip,
12660Sstevel@tonic-gate 	ddi_map_req_t *mp, pci_config_offset_t off,
12670Sstevel@tonic-gate 		pci_regspec_t *rp, caddr_t *addrp)
12680Sstevel@tonic-gate {
12690Sstevel@tonic-gate 	/*
12700Sstevel@tonic-gate 	 * No special config space access services in this layer.
12710Sstevel@tonic-gate 	 */
12720Sstevel@tonic-gate 	return (DDI_FAILURE);
12730Sstevel@tonic-gate }
12740Sstevel@tonic-gate 
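/*
 * px_lib_clr_errs() builds a synthetic ddi_fm_error_t (marked as an
 * expected, non-fatal error when the pending safe access is a cautious
 * one), has px_err_handle() log and clear Fire's error registers under
 * the common-block FM mutex, dispatches the error to all child error
 * handlers, and panics only for PX_FATAL_GOS/PX_FATAL_SW or a fatal
 * response from a child.
 */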
1275*624Sschwartz void
1276*624Sschwartz px_lib_clr_errs(px_t *px_p)
127727Sjchu {
1278*624Sschwartz 	px_pec_t	*pec_p = px_p->px_pec_p;
127927Sjchu 	dev_info_t	*rpdip = px_p->px_dip;
128027Sjchu 	px_cb_t		*cb_p = px_p->px_cb_p;
128127Sjchu 	int		err = PX_OK, ret;
128227Sjchu 	int		acctype = pec_p->pec_safeacc_type;
128327Sjchu 	ddi_fm_error_t	derr;
128427Sjchu 
128527Sjchu 	/* Create the derr */
128627Sjchu 	bzero(&derr, sizeof (ddi_fm_error_t));
128727Sjchu 	derr.fme_version = DDI_FME_VERSION;
128827Sjchu 	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
128927Sjchu 	derr.fme_flag = acctype;
129027Sjchu 
129127Sjchu 	if (acctype == DDI_FM_ERR_EXPECTED) {
129227Sjchu 		derr.fme_status = DDI_FM_NONFATAL;
129327Sjchu 		ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr);
129427Sjchu 	}
129527Sjchu 
129627Sjchu 	mutex_enter(&cb_p->xbc_fm_mutex);
129727Sjchu 
129827Sjchu 	/* send ereport/handle/clear fire registers */
129927Sjchu 	err = px_err_handle(px_p, &derr, PX_LIB_CALL, B_TRUE);
130027Sjchu 
130127Sjchu 	/* Check all child devices for errors */
130227Sjchu 	ret = ndi_fm_handler_dispatch(rpdip, NULL, &derr);
130327Sjchu 
130427Sjchu 	mutex_exit(&cb_p->xbc_fm_mutex);
130527Sjchu 
130627Sjchu 	/*
130727Sjchu 	 * PX_FATAL_HW indicates a condition recovered from a fatal reset
130827Sjchu 	 * and therefore does not cause a panic.
130927Sjchu 	 */
131027Sjchu 	if ((err & (PX_FATAL_GOS | PX_FATAL_SW)) || (ret == DDI_FM_FATAL))
131127Sjchu 		fm_panic("Fatal System Port Error has occurred\n");
131227Sjchu }
131327Sjchu 
13140Sstevel@tonic-gate #ifdef  DEBUG
13150Sstevel@tonic-gate int	px_peekfault_cnt = 0;
13160Sstevel@tonic-gate int	px_pokefault_cnt = 0;
13170Sstevel@tonic-gate #endif  /* DEBUG */
13180Sstevel@tonic-gate 
13190Sstevel@tonic-gate /*ARGSUSED*/
13200Sstevel@tonic-gate static int
13210Sstevel@tonic-gate px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
13220Sstevel@tonic-gate     peekpoke_ctlops_t *in_args)
13230Sstevel@tonic-gate {
13240Sstevel@tonic-gate 	px_t *px_p = DIP_TO_STATE(dip);
13250Sstevel@tonic-gate 	px_pec_t *pec_p = px_p->px_pec_p;
13260Sstevel@tonic-gate 	int err = DDI_SUCCESS;
13270Sstevel@tonic-gate 	on_trap_data_t otd;
13280Sstevel@tonic-gate 
13290Sstevel@tonic-gate 	mutex_enter(&pec_p->pec_pokefault_mutex);
13300Sstevel@tonic-gate 	pec_p->pec_ontrap_data = &otd;
133127Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;
13320Sstevel@tonic-gate 
13330Sstevel@tonic-gate 	/* Set up protected environment. */
13340Sstevel@tonic-gate 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
13350Sstevel@tonic-gate 		uintptr_t tramp = otd.ot_trampoline;
13360Sstevel@tonic-gate 
13370Sstevel@tonic-gate 		otd.ot_trampoline = (uintptr_t)&poke_fault;
13380Sstevel@tonic-gate 		err = do_poke(in_args->size, (void *)in_args->dev_addr,
13390Sstevel@tonic-gate 		    (void *)in_args->host_addr);
13400Sstevel@tonic-gate 		otd.ot_trampoline = tramp;
13410Sstevel@tonic-gate 	} else
13420Sstevel@tonic-gate 		err = DDI_FAILURE;
13430Sstevel@tonic-gate 
1344*624Sschwartz 	px_lib_clr_errs(px_p);
134527Sjchu 
13460Sstevel@tonic-gate 	if (otd.ot_trap & OT_DATA_ACCESS)
13470Sstevel@tonic-gate 		err = DDI_FAILURE;
13480Sstevel@tonic-gate 
13490Sstevel@tonic-gate 	/* Take down protected environment. */
13500Sstevel@tonic-gate 	no_trap();
13510Sstevel@tonic-gate 
13520Sstevel@tonic-gate 	pec_p->pec_ontrap_data = NULL;
135327Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
13540Sstevel@tonic-gate 	mutex_exit(&pec_p->pec_pokefault_mutex);
13550Sstevel@tonic-gate 
13560Sstevel@tonic-gate #ifdef  DEBUG
13570Sstevel@tonic-gate 	if (err == DDI_FAILURE)
13580Sstevel@tonic-gate 		px_pokefault_cnt++;
13590Sstevel@tonic-gate #endif
13600Sstevel@tonic-gate 	return (err);
13610Sstevel@tonic-gate }
13620Sstevel@tonic-gate 
13630Sstevel@tonic-gate /*ARGSUSED*/
13640Sstevel@tonic-gate static int
13650Sstevel@tonic-gate px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
13660Sstevel@tonic-gate     peekpoke_ctlops_t *cautacc_ctlops_arg)
13670Sstevel@tonic-gate {
13680Sstevel@tonic-gate 	size_t size = cautacc_ctlops_arg->size;
13690Sstevel@tonic-gate 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
13700Sstevel@tonic-gate 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
13710Sstevel@tonic-gate 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
13720Sstevel@tonic-gate 	size_t repcount = cautacc_ctlops_arg->repcount;
13730Sstevel@tonic-gate 	uint_t flags = cautacc_ctlops_arg->flags;
13740Sstevel@tonic-gate 
13750Sstevel@tonic-gate 	px_t *px_p = DIP_TO_STATE(dip);
13760Sstevel@tonic-gate 	px_pec_t *pec_p = px_p->px_pec_p;
13770Sstevel@tonic-gate 	int err = DDI_SUCCESS;
13780Sstevel@tonic-gate 
137927Sjchu 	/*
138027Sjchu 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
138127Sjchu 	 * mutex.
138227Sjchu 	 */
13830Sstevel@tonic-gate 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
13840Sstevel@tonic-gate 
138527Sjchu 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
138627Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
138727Sjchu 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
13880Sstevel@tonic-gate 
13890Sstevel@tonic-gate 	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
13900Sstevel@tonic-gate 		for (; repcount; repcount--) {
13910Sstevel@tonic-gate 			switch (size) {
13920Sstevel@tonic-gate 
13930Sstevel@tonic-gate 			case sizeof (uint8_t):
13940Sstevel@tonic-gate 				i_ddi_put8(hp, (uint8_t *)dev_addr,
13950Sstevel@tonic-gate 				    *(uint8_t *)host_addr);
13960Sstevel@tonic-gate 				break;
13970Sstevel@tonic-gate 
13980Sstevel@tonic-gate 			case sizeof (uint16_t):
13990Sstevel@tonic-gate 				i_ddi_put16(hp, (uint16_t *)dev_addr,
14000Sstevel@tonic-gate 				    *(uint16_t *)host_addr);
14010Sstevel@tonic-gate 				break;
14020Sstevel@tonic-gate 
14030Sstevel@tonic-gate 			case sizeof (uint32_t):
14040Sstevel@tonic-gate 				i_ddi_put32(hp, (uint32_t *)dev_addr,
14050Sstevel@tonic-gate 				    *(uint32_t *)host_addr);
14060Sstevel@tonic-gate 				break;
14070Sstevel@tonic-gate 
14080Sstevel@tonic-gate 			case sizeof (uint64_t):
14090Sstevel@tonic-gate 				i_ddi_put64(hp, (uint64_t *)dev_addr,
14100Sstevel@tonic-gate 				    *(uint64_t *)host_addr);
14110Sstevel@tonic-gate 				break;
14120Sstevel@tonic-gate 			}
14130Sstevel@tonic-gate 
14140Sstevel@tonic-gate 			host_addr += size;
14150Sstevel@tonic-gate 
14160Sstevel@tonic-gate 			if (flags == DDI_DEV_AUTOINCR)
14170Sstevel@tonic-gate 				dev_addr += size;
14180Sstevel@tonic-gate 
1419*624Sschwartz 			px_lib_clr_errs(px_p);
142027Sjchu 
14210Sstevel@tonic-gate 			if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) {
14220Sstevel@tonic-gate 				err = DDI_FAILURE;
14230Sstevel@tonic-gate #ifdef  DEBUG
14240Sstevel@tonic-gate 				px_pokefault_cnt++;
14250Sstevel@tonic-gate #endif
14260Sstevel@tonic-gate 				break;
14270Sstevel@tonic-gate 			}
14280Sstevel@tonic-gate 		}
14290Sstevel@tonic-gate 	}
14300Sstevel@tonic-gate 
14310Sstevel@tonic-gate 	i_ddi_notrap((ddi_acc_handle_t)hp);
14320Sstevel@tonic-gate 	pec_p->pec_ontrap_data = NULL;
143327Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
14340Sstevel@tonic-gate 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
14350Sstevel@tonic-gate 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
14360Sstevel@tonic-gate 
14370Sstevel@tonic-gate 	return (err);
14380Sstevel@tonic-gate }
14390Sstevel@tonic-gate 
14400Sstevel@tonic-gate 
14410Sstevel@tonic-gate int
14420Sstevel@tonic-gate px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
14430Sstevel@tonic-gate     peekpoke_ctlops_t *in_args)
14440Sstevel@tonic-gate {
14450Sstevel@tonic-gate 	return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
14460Sstevel@tonic-gate 	    px_lib_do_poke(dip, rdip, in_args));
14470Sstevel@tonic-gate }
14480Sstevel@tonic-gate 
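/*
 * For reference, a leaf driver normally reaches the cautious path
 * (px_lib_do_caut_put() rather than px_lib_do_poke()) by mapping its
 * registers with a cautious access attribute, so that accesses arrive
 * here with in_args->handle set.  A minimal sketch, assuming a leaf
 * driver's attach code (register number and value are illustrative):
 *
 *	ddi_device_acc_attr_t attr;
 *	ddi_acc_handle_t hdl;
 *	caddr_t addr;
 *
 *	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
 *	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
 *	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
 *	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;
 *
 *	if (ddi_regs_map_setup(dip, 1, &addr, 0, 0, &attr, &hdl) ==
 *	    DDI_SUCCESS)
 *		ddi_put32(hdl, (uint32_t *)addr, 0x1);
 *
 * ddi_poke*() calls, by contrast, arrive with a NULL handle and take
 * the on_trap()-protected px_lib_do_poke() path.
 */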
14490Sstevel@tonic-gate 
14500Sstevel@tonic-gate /*ARGSUSED*/
14510Sstevel@tonic-gate static int
14520Sstevel@tonic-gate px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
14530Sstevel@tonic-gate {
145427Sjchu 	px_t *px_p = DIP_TO_STATE(dip);
145527Sjchu 	px_pec_t *pec_p = px_p->px_pec_p;
14560Sstevel@tonic-gate 	int err = DDI_SUCCESS;
14570Sstevel@tonic-gate 	on_trap_data_t otd;
14580Sstevel@tonic-gate 
145927Sjchu 	mutex_enter(&pec_p->pec_pokefault_mutex);
146027Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;
146127Sjchu 
14620Sstevel@tonic-gate 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
14630Sstevel@tonic-gate 		uintptr_t tramp = otd.ot_trampoline;
14640Sstevel@tonic-gate 
14650Sstevel@tonic-gate 		otd.ot_trampoline = (uintptr_t)&peek_fault;
14660Sstevel@tonic-gate 		err = do_peek(in_args->size, (void *)in_args->dev_addr,
14670Sstevel@tonic-gate 		    (void *)in_args->host_addr);
14680Sstevel@tonic-gate 		otd.ot_trampoline = tramp;
14690Sstevel@tonic-gate 	} else
14700Sstevel@tonic-gate 		err = DDI_FAILURE;
14710Sstevel@tonic-gate 
14720Sstevel@tonic-gate 	no_trap();
147327Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
147427Sjchu 	mutex_exit(&pec_p->pec_pokefault_mutex);
14750Sstevel@tonic-gate 
14760Sstevel@tonic-gate #ifdef  DEBUG
14770Sstevel@tonic-gate 	if (err == DDI_FAILURE)
14780Sstevel@tonic-gate 		px_peekfault_cnt++;
14790Sstevel@tonic-gate #endif
14800Sstevel@tonic-gate 	return (err);
14810Sstevel@tonic-gate }
14820Sstevel@tonic-gate 
14830Sstevel@tonic-gate 
14840Sstevel@tonic-gate static int
14850Sstevel@tonic-gate px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
14860Sstevel@tonic-gate {
14870Sstevel@tonic-gate 	size_t size = cautacc_ctlops_arg->size;
14880Sstevel@tonic-gate 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
14890Sstevel@tonic-gate 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
14900Sstevel@tonic-gate 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
14910Sstevel@tonic-gate 	size_t repcount = cautacc_ctlops_arg->repcount;
14920Sstevel@tonic-gate 	uint_t flags = cautacc_ctlops_arg->flags;
14930Sstevel@tonic-gate 
14940Sstevel@tonic-gate 	px_t *px_p = DIP_TO_STATE(dip);
14950Sstevel@tonic-gate 	px_pec_t *pec_p = px_p->px_pec_p;
14960Sstevel@tonic-gate 	int err = DDI_SUCCESS;
14970Sstevel@tonic-gate 
149827Sjchu 	/*
149927Sjchu 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
150027Sjchu 	 * mutex.
150127Sjchu 	 */
150227Sjchu 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
150327Sjchu 
150427Sjchu 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
150527Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
15060Sstevel@tonic-gate 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
15070Sstevel@tonic-gate 
15080Sstevel@tonic-gate 	if (repcount == 1) {
15090Sstevel@tonic-gate 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
15100Sstevel@tonic-gate 			i_ddi_caut_get(size, (void *)dev_addr,
15110Sstevel@tonic-gate 			    (void *)host_addr);
15120Sstevel@tonic-gate 		} else {
15130Sstevel@tonic-gate 			int i;
15140Sstevel@tonic-gate 			uint8_t *ff_addr = (uint8_t *)host_addr;
15150Sstevel@tonic-gate 			for (i = 0; i < size; i++)
15160Sstevel@tonic-gate 				*ff_addr++ = 0xff;
15170Sstevel@tonic-gate 
15180Sstevel@tonic-gate 			err = DDI_FAILURE;
15190Sstevel@tonic-gate #ifdef  DEBUG
15200Sstevel@tonic-gate 			px_peekfault_cnt++;
15210Sstevel@tonic-gate #endif
15220Sstevel@tonic-gate 		}
15230Sstevel@tonic-gate 	} else {
15240Sstevel@tonic-gate 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
15250Sstevel@tonic-gate 			for (; repcount; repcount--) {
15260Sstevel@tonic-gate 				i_ddi_caut_get(size, (void *)dev_addr,
15270Sstevel@tonic-gate 				    (void *)host_addr);
15280Sstevel@tonic-gate 
15290Sstevel@tonic-gate 				host_addr += size;
15300Sstevel@tonic-gate 
15310Sstevel@tonic-gate 				if (flags == DDI_DEV_AUTOINCR)
15320Sstevel@tonic-gate 					dev_addr += size;
15330Sstevel@tonic-gate 			}
15340Sstevel@tonic-gate 		} else {
15350Sstevel@tonic-gate 			err = DDI_FAILURE;
15360Sstevel@tonic-gate #ifdef  DEBUG
15370Sstevel@tonic-gate 			px_peekfault_cnt++;
15380Sstevel@tonic-gate #endif
15390Sstevel@tonic-gate 		}
15400Sstevel@tonic-gate 	}
15410Sstevel@tonic-gate 
15420Sstevel@tonic-gate 	i_ddi_notrap((ddi_acc_handle_t)hp);
15430Sstevel@tonic-gate 	pec_p->pec_ontrap_data = NULL;
154427Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
15450Sstevel@tonic-gate 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
15460Sstevel@tonic-gate 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
15470Sstevel@tonic-gate 
15480Sstevel@tonic-gate 	return (err);
15490Sstevel@tonic-gate }
15500Sstevel@tonic-gate 
15510Sstevel@tonic-gate /*ARGSUSED*/
15520Sstevel@tonic-gate int
15530Sstevel@tonic-gate px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
15540Sstevel@tonic-gate     peekpoke_ctlops_t *in_args, void *result)
15550Sstevel@tonic-gate {
15560Sstevel@tonic-gate 	result = (void *)in_args->host_addr;
15570Sstevel@tonic-gate 	return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
15580Sstevel@tonic-gate 	    px_lib_do_peek(dip, in_args));
15590Sstevel@tonic-gate }
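
/*
 * The peek side mirrors the poke side: a non-NULL in_args->handle
 * indicates a cautious (DDI_CAUTIOUS_ACC) access and selects
 * px_lib_do_caut_get(), which for a single access fills the caller's
 * buffer with 0xff bytes if the access traps; a NULL handle
 * (ddi_peek*()) selects the on_trap()-protected px_lib_do_peek() path.
 */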
1560118Sjchu 
15610Sstevel@tonic-gate /*
15620Sstevel@tonic-gate  * implements PPM interface
15630Sstevel@tonic-gate  */
15640Sstevel@tonic-gate int
15650Sstevel@tonic-gate px_lib_pmctl(int cmd, px_t *px_p)
15660Sstevel@tonic-gate {
15670Sstevel@tonic-gate 	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
15680Sstevel@tonic-gate 	switch (cmd) {
15690Sstevel@tonic-gate 	case PPMREQ_PRE_PWR_OFF:
15700Sstevel@tonic-gate 		/*
15710Sstevel@tonic-gate 		 * Currently there is no device power management for
15720Sstevel@tonic-gate 		 * the root complex (Fire). When there is, we need to make
15730Sstevel@tonic-gate 		 * sure that it is at full power before trying to send the
15740Sstevel@tonic-gate 		 * PME_Turn_Off message.
15750Sstevel@tonic-gate 		 */
15760Sstevel@tonic-gate 		DBG(DBG_PWR, px_p->px_dip,
15770Sstevel@tonic-gate 		    "ioctl: request to send PME_Turn_Off\n");
15780Sstevel@tonic-gate 		return (px_goto_l23ready(px_p));
15790Sstevel@tonic-gate 
15800Sstevel@tonic-gate 	case PPMREQ_PRE_PWR_ON:
1581118Sjchu 		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
1582118Sjchu 		return (px_pre_pwron_check(px_p));
1583118Sjchu 
15840Sstevel@tonic-gate 	case PPMREQ_POST_PWR_ON:
1585118Sjchu 		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
1586118Sjchu 		return (px_goto_l0(px_p));
15870Sstevel@tonic-gate 
15880Sstevel@tonic-gate 	default:
15890Sstevel@tonic-gate 		return (DDI_FAILURE);
15900Sstevel@tonic-gate 	}
15910Sstevel@tonic-gate }
15920Sstevel@tonic-gate 
1593287Smg140465 #define	MSEC_TO_USEC	1000
1594287Smg140465 
15950Sstevel@tonic-gate /*
15960Sstevel@tonic-gate  * Sends the PME_Turn_Off message to put the link into the L2/L3 Ready state.
15970Sstevel@tonic-gate  * Called by px_ioctl.
15980Sstevel@tonic-gate  * Returns DDI_SUCCESS or DDI_FAILURE.
15990Sstevel@tonic-gate  * 1. Wait for the link to be in the L1 state (link status reg).
16000Sstevel@tonic-gate  * 2. Write to the PME_Turn_Off reg to broadcast the message.
16010Sstevel@tonic-gate  * 3. Set a timeout.
16020Sstevel@tonic-gate  * 4. If the timeout expires, return failure.
16030Sstevel@tonic-gate  * 5. If PME_To_ACK is received, wait till the link is in L2/L3 Ready.
16040Sstevel@tonic-gate  */
16050Sstevel@tonic-gate static int
16060Sstevel@tonic-gate px_goto_l23ready(px_t *px_p)
16070Sstevel@tonic-gate {
16080Sstevel@tonic-gate 	pcie_pwr_t	*pwr_p;
160927Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
161027Sjchu 	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
16110Sstevel@tonic-gate 	int		ret = DDI_SUCCESS;
16120Sstevel@tonic-gate 	clock_t		end, timeleft;
1613118Sjchu 	int		mutex_held = 1;
16140Sstevel@tonic-gate 
16150Sstevel@tonic-gate 	/* If no PM info, return failure */
16160Sstevel@tonic-gate 	if (!PCIE_PMINFO(px_p->px_dip) ||
16170Sstevel@tonic-gate 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
16180Sstevel@tonic-gate 		return (DDI_FAILURE);
16190Sstevel@tonic-gate 
16200Sstevel@tonic-gate 	mutex_enter(&pwr_p->pwr_lock);
1621118Sjchu 	mutex_enter(&px_p->px_l23ready_lock);
12220Sstevel@tonic-gate 	/* Clear the PME_To_ACK received flag */
1623118Sjchu 	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
1624287Smg140465 	/*
1625287Smg140465 	 * When P25 is the downstream device, Fire will go to the Detect
1626287Smg140465 	 * state after receiving PME_To_ACK, which causes a link down
1627287Smg140465 	 * (LDN) event. Inform FMA that this is expected. For all other
1628287Smg140465 	 * cards compliant with the PCI Express spec, the LDN happens
1629287Smg140465 	 * when power is re-applied. FMA code will clear this flag after
1630287Smg140465 	 * one instance of LDN. Since there will not be an LDN event for
1631287Smg140465 	 * the spec-compliant cards, we need to clear the flag after
1632287Smg140465 	 * receiving PME_To_ACK.
1633287Smg140465 	 */
1634287Smg140465 	px_p->px_pm_flags |= PX_LDN_EXPECTED;
16350Sstevel@tonic-gate 	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
16360Sstevel@tonic-gate 		ret = DDI_FAILURE;
16370Sstevel@tonic-gate 		goto l23ready_done;
16380Sstevel@tonic-gate 	}
1639118Sjchu 	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;
16400Sstevel@tonic-gate 
16410Sstevel@tonic-gate 	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
1642118Sjchu 	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1643118Sjchu 		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
1644118Sjchu 		    &px_p->px_l23ready_lock, end);
16450Sstevel@tonic-gate 		/*
16460Sstevel@tonic-gate 		 * If cv_timedwait returns -1, one of the following is true:
16470Sstevel@tonic-gate 		 * 1) it timed out,
16480Sstevel@tonic-gate 		 * 2) there was a premature wakeup, but by the time
16490Sstevel@tonic-gate 		 * cv_timedwait is called again end < lbolt, i.e.
16500Sstevel@tonic-gate 		 * end is in the past, or
16510Sstevel@tonic-gate 		 * 3) by the time we make the first cv_timedwait call,
16520Sstevel@tonic-gate 		 * end < lbolt is already true.
16530Sstevel@tonic-gate 		 */
16540Sstevel@tonic-gate 		if (timeleft == -1)
16550Sstevel@tonic-gate 			break;
16560Sstevel@tonic-gate 	}
1657118Sjchu 	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
16580Sstevel@tonic-gate 		/*
16590Sstevel@tonic-gate 		 * Either we timed out or the interrupt didn't get a
16600Sstevel@tonic-gate 		 * chance to grab the mutex and set the flag.
16610Sstevel@tonic-gate 		 * Release the mutex and delay for some time.
16620Sstevel@tonic-gate 		 * This will 1) give the interrupt a chance to
16630Sstevel@tonic-gate 		 * set the flag and 2) create a delay between two
16640Sstevel@tonic-gate 		 * consecutive requests.
16650Sstevel@tonic-gate 		 */
1666118Sjchu 		mutex_exit(&px_p->px_l23ready_lock);
1667287Smg140465 		delay(drv_usectohz(50 * MSEC_TO_USEC));
1668118Sjchu 		mutex_held = 0;
1669118Sjchu 		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
16700Sstevel@tonic-gate 			ret = DDI_FAILURE;
16710Sstevel@tonic-gate 			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
16720Sstevel@tonic-gate 			    " for PME_TO_ACK\n");
16730Sstevel@tonic-gate 		}
16740Sstevel@tonic-gate 	}
1675287Smg140465 	px_p->px_pm_flags &=
1676287Smg140465 	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);
16770Sstevel@tonic-gate 
16780Sstevel@tonic-gate l23ready_done:
1679118Sjchu 	if (mutex_held)
1680118Sjchu 		mutex_exit(&px_p->px_l23ready_lock);
1681118Sjchu 	/*
1682118Sjchu 	 * Wait till the link is in L1 idle, if sending PME_Turn_Off
1683118Sjchu 	 * was successful.
1684118Sjchu 	 */
1685118Sjchu 	if (ret == DDI_SUCCESS) {
1686118Sjchu 		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
1687118Sjchu 			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
1688287Smg140465 			    " even though we received PME_To_ACK.\n");
1689287Smg140465 			/*
1690287Smg140465 			 * Workaround for a hardware bug with P25:
1691287Smg140465 			 * due to the bug, the link state will be
1692287Smg140465 			 * Detect rather than L1 after the link has
1693287Smg140465 			 * transitioned to L23Ready. Since we cannot
1694287Smg140465 			 * tell whether the link is in L23Ready without
1695287Smg140465 			 * Fire's state being L1_idle, we delay here
1696287Smg140465 			 * just to make sure that the link has had
1697287Smg140465 			 * time to transition to the L23Ready state.
1698287Smg140465 			 */
1699287Smg140465 			delay(drv_usectohz(100 * MSEC_TO_USEC));
1700287Smg140465 		}
1701287Smg140465 		pwr_p->pwr_link_lvl = PM_LEVEL_L3;
1702118Sjchu 
1703118Sjchu 	}
17040Sstevel@tonic-gate 	mutex_exit(&pwr_p->pwr_lock);
17050Sstevel@tonic-gate 	return (ret);
17060Sstevel@tonic-gate }
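
/*
 * px_goto_l23ready() above and px_pmeq_intr() below synchronize through
 * px_l23ready_lock and px_l23ready_cv: the waiter sets
 * PX_PME_TURNOFF_PENDING and cv_timedwait()s, while the message
 * interrupt does the cv_broadcast() and sets PX_PMETOACK_RECVD.  An ACK
 * that arrives when no turnoff is pending is only counted in
 * px_pmetoack_ignored.
 */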
17070Sstevel@tonic-gate 
1708118Sjchu /*
1709118Sjchu  * Message interrupt handler intended to be shared for both
1710118Sjchu  * PME and PME_To_ACK msg handling; currently it only handles
1711118Sjchu  * the PME_To_ACK message.
1712118Sjchu  */
1713118Sjchu uint_t
1714118Sjchu px_pmeq_intr(caddr_t arg)
1715118Sjchu {
1716118Sjchu 	px_t	*px_p = (px_t *)arg;
1717118Sjchu 
1718287Smg140465 	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
1719118Sjchu 	mutex_enter(&px_p->px_l23ready_lock);
1720118Sjchu 	cv_broadcast(&px_p->px_l23ready_cv);
1721118Sjchu 	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
1722118Sjchu 		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
1723118Sjchu 	} else {
1724118Sjchu 		/*
1725118Sjchu 		 * This may be the second ack received. If so, we
1726118Sjchu 		 * should be receiving it during the wait4L1 stage.
1727118Sjchu 		 */
1728118Sjchu 		px_p->px_pmetoack_ignored++;
1729118Sjchu 	}
1730118Sjchu 	mutex_exit(&px_p->px_l23ready_lock);
1731118Sjchu 	return (DDI_INTR_CLAIMED);
1732118Sjchu }
1733118Sjchu 
1734118Sjchu static int
1735118Sjchu px_pre_pwron_check(px_t *px_p)
1736118Sjchu {
1737118Sjchu 	pcie_pwr_t	*pwr_p;
1738118Sjchu 
1739118Sjchu 	/* If no PM info, return failure */
1740118Sjchu 	if (!PCIE_PMINFO(px_p->px_dip) ||
1741118Sjchu 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1742118Sjchu 		return (DDI_FAILURE);
1743118Sjchu 
1744287Smg140465 	/*
1745287Smg140465 	 * For the spec compliant downstream cards link down
1746287Smg140465 	 * is expected when the device is powered on.
1747287Smg140465 	 */
1748287Smg140465 	px_p->px_pm_flags |= PX_LDN_EXPECTED;
1749118Sjchu 	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
1750118Sjchu }
1751118Sjchu 
1752118Sjchu static int
1753118Sjchu px_goto_l0(px_t *px_p)
1754118Sjchu {
1755118Sjchu 	pcie_pwr_t	*pwr_p;
1756118Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1757118Sjchu 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
1758118Sjchu 	int		ret = DDI_SUCCESS;
1759118Sjchu 	clock_t		end, timeleft;
1760118Sjchu 	int		mutex_held = 1;
1761118Sjchu 
1762118Sjchu 	/* If no PM info, return failure */
1763118Sjchu 	if (!PCIE_PMINFO(px_p->px_dip) ||
1764118Sjchu 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1765118Sjchu 		return (DDI_FAILURE);
1766118Sjchu 
1767118Sjchu 	mutex_enter(&pwr_p->pwr_lock);
1768118Sjchu 	mutex_enter(&px_p->px_lupsoft_lock);
1769118Sjchu 	/* Clear the LINKUP_RECVD flag */
1770118Sjchu 	px_p->px_pm_flags &= ~PX_LINKUP_RECVD;
1771287Smg140465 	/*
1772287Smg140465 	 * Set the PX_LUP_EXPECTED flag to inform FMA code that a LUP is
1773287Smg140465 	 * expected as part of link training and that no ereports should
1774287Smg140465 	 * be posted for this event. FMA code will clear this flag after
1775287Smg140465 	 * one instance of the event. In the case of P25, there will not
1776287Smg140465 	 * be an LDN event, so clear the flag that was set at PRE_PWRON
1777287Smg140465 	 * time.
1778287Smg140465 	 */
1779287Smg140465 	px_p->px_pm_flags |=  PX_LUP_EXPECTED;
1780287Smg140465 	px_p->px_pm_flags &= ~PX_LDN_EXPECTED;
1781118Sjchu 	if (px_link_retrain(csr_base) != DDI_SUCCESS) {
1782118Sjchu 		ret = DDI_FAILURE;
1783118Sjchu 		goto l0_done;
1784118Sjchu 	}
1785118Sjchu 	px_p->px_pm_flags |= PX_LINKUP_PENDING;
1786118Sjchu 
1787118Sjchu 	end = ddi_get_lbolt() + drv_usectohz(px_linkup_timeout);
1788118Sjchu 	while (!(px_p->px_pm_flags & PX_LINKUP_RECVD)) {
1789118Sjchu 		timeleft = cv_timedwait(&px_p->px_lup_cv,
1790118Sjchu 		    &px_p->px_lupsoft_lock, end);
1791118Sjchu 		/*
1792118Sjchu 		 * If cv_timedwait returns -1, one of the following is true:
1793118Sjchu 		 * 1) it timed out,
1794118Sjchu 		 * 2) there was a premature wakeup, but by the time
1795118Sjchu 		 * cv_timedwait is called again end < lbolt, i.e.
1796118Sjchu 		 * end is in the past, or
1797118Sjchu 		 * 3) by the time we make the first cv_timedwait call,
1798118Sjchu 		 * end < lbolt is already true.
1799118Sjchu 		 */
1800118Sjchu 		if (timeleft == -1)
1801118Sjchu 			break;
1802118Sjchu 	}
1803118Sjchu 	if (!(px_p->px_pm_flags & PX_LINKUP_RECVD)) {
1804118Sjchu 		/*
1805118Sjchu 		 * Either we timed out or the interrupt didn't get a
1806118Sjchu 		 * chance to grab the mutex and set the flag.
1807118Sjchu 		 * Release the mutex and delay for some time.
1808118Sjchu 		 * This will 1) give the interrupt a chance to
1809118Sjchu 		 * set the flag and 2) create a delay between two
1810118Sjchu 		 * consecutive requests.
1811118Sjchu 		 */
1812118Sjchu 		mutex_exit(&px_p->px_lupsoft_lock);
1813118Sjchu 		mutex_held = 0;
1814287Smg140465 		delay(drv_usectohz(50 * MSEC_TO_USEC));
1815118Sjchu 		if (!(px_p->px_pm_flags & PX_LINKUP_RECVD)) {
1816118Sjchu 			ret = DDI_FAILURE;
1817118Sjchu 			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
1818118Sjchu 			    " for link up\n");
1819118Sjchu 		}
1820118Sjchu 	}
1821287Smg140465 	px_p->px_pm_flags &=
1822287Smg140465 	    ~(PX_LINKUP_PENDING | PX_LINKUP_RECVD | PX_LUP_EXPECTED);
1823118Sjchu 
1824118Sjchu l0_done:
1825118Sjchu 	if (mutex_held)
1826118Sjchu 		mutex_exit(&px_p->px_lupsoft_lock);
1827287Smg140465 	px_enable_detect_quiet(csr_base);
1828118Sjchu 	if (ret == DDI_SUCCESS)
1829287Smg140465 		pwr_p->pwr_link_lvl = PM_LEVEL_L0;
1830118Sjchu 	mutex_exit(&pwr_p->pwr_lock);
1831118Sjchu 	return (ret);
1832118Sjchu }
1833118Sjchu 
1834118Sjchu uint_t
1835118Sjchu px_lup_softintr(caddr_t arg)
1836118Sjchu {
1837118Sjchu 	px_t *px_p = (px_t *)arg;
1838118Sjchu 
1839287Smg140465 	DBG(DBG_PWR, px_p->px_dip, " Link up soft interrupt received \n");
1840118Sjchu 	mutex_enter(&px_p->px_lup_lock);
1841118Sjchu 	if (!(px_p->px_lupsoft_pending > 0)) {
1842118Sjchu 		/* Spurious */
1843118Sjchu 		mutex_exit(&px_p->px_lup_lock);
1844118Sjchu 		return (DDI_INTR_UNCLAIMED);
1845118Sjchu 	}
1846118Sjchu 	px_p->px_lupsoft_pending--;
1847118Sjchu 	if (px_p->px_lupsoft_pending > 0) {
1848118Sjchu 		/* More than one lup soft intr posted - unlikely */
1849118Sjchu 		mutex_exit(&px_p->px_lup_lock);
1850118Sjchu 		return (DDI_INTR_UNCLAIMED);
1851118Sjchu 	}
1852118Sjchu 	mutex_exit(&px_p->px_lup_lock);
1853118Sjchu 
1854118Sjchu 	mutex_enter(&px_p->px_lupsoft_lock);
1855118Sjchu 	cv_broadcast(&px_p->px_lup_cv);
1856118Sjchu 	if (px_p->px_pm_flags & PX_LINKUP_PENDING) {
1857118Sjchu 		px_p->px_pm_flags |= PX_LINKUP_RECVD;
1858118Sjchu 	} else {
1859118Sjchu 		/* Nobody waiting for this! */
1860118Sjchu 		px_p->px_lup_ignored++;
1861118Sjchu 	}
1862118Sjchu 	mutex_exit(&px_p->px_lupsoft_lock);
1863118Sjchu 	return (DDI_INTR_CLAIMED);
1864118Sjchu }
18650Sstevel@tonic-gate 
18660Sstevel@tonic-gate /*
18670Sstevel@tonic-gate  * Extract the driver's binding name to identify which chip we're binding to.
18680Sstevel@tonic-gate  * Whenever a new bus bridge is created, the driver alias entry should be
18690Sstevel@tonic-gate  * added here to identify the device if needed.  If a device isn't added,
18700Sstevel@tonic-gate  * the identity defaults to PX_CHIP_UNIDENTIFIED.
18710Sstevel@tonic-gate  */
18720Sstevel@tonic-gate static uint32_t
18730Sstevel@tonic-gate px_identity_chip(px_t *px_p)
18740Sstevel@tonic-gate {
18750Sstevel@tonic-gate 	dev_info_t	*dip = px_p->px_dip;
18760Sstevel@tonic-gate 	char		*name = ddi_binding_name(dip);
18770Sstevel@tonic-gate 	uint32_t	revision = 0;
18780Sstevel@tonic-gate 
18790Sstevel@tonic-gate 	revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
18800Sstevel@tonic-gate 	    "module-revision#", 0);
18810Sstevel@tonic-gate 
18820Sstevel@tonic-gate 	/* Check for Fire driver binding name */
1883226Set142600 	if ((strcmp(name, "pci108e,80f0") == 0) ||
1884226Set142600 	    (strcmp(name, "pciex108e,80f0") == 0)) {
18850Sstevel@tonic-gate 		DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: "
18860Sstevel@tonic-gate 		    "name %s module-revision %d\n", ddi_driver_name(dip),
18870Sstevel@tonic-gate 		    ddi_get_instance(dip), name, revision);
18880Sstevel@tonic-gate 
18890Sstevel@tonic-gate 		return (PX_CHIP_ID(PX_CHIP_FIRE, revision, 0x00));
18900Sstevel@tonic-gate 	}
18910Sstevel@tonic-gate 
18920Sstevel@tonic-gate 	DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
18930Sstevel@tonic-gate 	    ddi_driver_name(dip), ddi_get_instance(dip), name, revision);
18940Sstevel@tonic-gate 
18950Sstevel@tonic-gate 	return (PX_CHIP_UNIDENTIFIED);
18960Sstevel@tonic-gate }
189727Sjchu 
189827Sjchu int
189927Sjchu px_err_add_intr(px_fault_t *px_fault_p)
190027Sjchu {
190127Sjchu 	dev_info_t	*dip = px_fault_p->px_fh_dip;
190227Sjchu 	px_t		*px_p = DIP_TO_STATE(dip);
190327Sjchu 
190427Sjchu 	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
190527Sjchu 		px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL) == 0);
190627Sjchu 
190727Sjchu 	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);
190827Sjchu 
190927Sjchu 	return (DDI_SUCCESS);
191027Sjchu }
191127Sjchu 
191227Sjchu void
191327Sjchu px_err_rem_intr(px_fault_t *px_fault_p)
191427Sjchu {
191527Sjchu 	dev_info_t	*dip = px_fault_p->px_fh_dip;
191627Sjchu 	px_t		*px_p = DIP_TO_STATE(dip);
191727Sjchu 
191827Sjchu 	rem_ivintr(px_fault_p->px_fh_sysino, NULL);
191927Sjchu 
192027Sjchu 	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
192127Sjchu 		IB_INTR_WAIT);
192227Sjchu }
192327Sjchu 
192427Sjchu #ifdef FMA
192527Sjchu void
192627Sjchu px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
192727Sjchu {
192827Sjchu 	/* populate the rc_status by reading the registers - TBD */
192927Sjchu }
193027Sjchu #endif /* FMA */
1931383Set142600 
1932383Set142600 /*
1933383Set142600  * Unprotected raw reads/writes of a fabric device's config space.
1934383Set142600  * Only used for temporary PCI-E Fabric Error Handling.
1935383Set142600  */
1936383Set142600 uint32_t
1937383Set142600 px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset) {
1938383Set142600 	px_ranges_t	*rp = px_p->px_ranges_p;
1939383Set142600 	uint64_t	range_prop, base_addr;
1940383Set142600 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
1941383Set142600 	uint32_t	val;
1942383Set142600 
1943383Set142600 	/* Get Fire's Physical Base Address */
1944383Set142600 	range_prop = (((uint64_t)(rp[bank].parent_high & 0x7ff)) << 32) |
1945383Set142600 	    rp[bank].parent_low;
1946383Set142600 
1947383Set142600 	/* Get config space first. */
1948383Set142600 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
1949383Set142600 
1950383Set142600 	val = ldphysio(base_addr);
1951383Set142600 
1952383Set142600 	return (LE_32(val));
1953383Set142600 }
1954383Set142600 
1955383Set142600 void
1956383Set142600 px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
1957383Set142600     uint32_t val) {
1958383Set142600 	px_ranges_t	*rp = px_p->px_ranges_p;
1959383Set142600 	uint64_t	range_prop, base_addr;
1960383Set142600 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
1961383Set142600 
1962383Set142600 	/* Get Fire's Physical Base Address */
1963383Set142600 	range_prop = (((uint64_t)(rp[bank].parent_high & 0x7ff)) << 32) |
1964383Set142600 	    rp[bank].parent_low;
1965383Set142600 
1966383Set142600 	/* Get config space first. */
1967383Set142600 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
1968383Set142600 
1969383Set142600 	stphysio(base_addr, LE_32(val));
1970383Set142600 }
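
/*
 * Note on px_fab_get()/px_fab_set(): ldphysio()/stphysio() issue raw
 * physical-address loads and stores with no on_trap() protection, which
 * is why the block comment above flags these accesses as unprotected.
 * The LE_32() conversion (sys/byteorder.h) is needed because PCI config
 * space is little-endian while SPARC is big-endian, so values are
 * byte-swapped to and from host order around the access.
 */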
1971435Sjchu 
1972435Sjchu /*
1973435Sjchu  * cpr callback
1974435Sjchu  *
1975435Sjchu  * Disable the fabric error msg interrupt prior to suspending
1976435Sjchu  * all device drivers; re-enable the fabric error msg interrupt
1977435Sjchu  * after all devices have resumed.
1978435Sjchu  */
1979435Sjchu static boolean_t
1980435Sjchu px_cpr_callb(void *arg, int code)
1981435Sjchu {
1982435Sjchu 	px_t		*px_p = (px_t *)arg;
1983435Sjchu 	px_ib_t		*ib_p = px_p->px_ib_p;
1984435Sjchu 	px_pec_t	*pec_p = px_p->px_pec_p;
1985435Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
1986435Sjchu 	caddr_t		csr_base;
1987435Sjchu 	devino_t	ce_ino, nf_ino, f_ino;
1988435Sjchu 	px_ib_ino_info_t	*ce_ino_p, *nf_ino_p, *f_ino_p;
1989435Sjchu 	uint64_t	imu_log_enable, imu_intr_enable;
1990435Sjchu 	uint64_t	imu_log_mask, imu_intr_mask;
1991435Sjchu 
1992435Sjchu 	ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
1993435Sjchu 	nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
1994435Sjchu 	f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
1995435Sjchu 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
1996435Sjchu 
1997435Sjchu 	imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
1998435Sjchu 	imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);
1999435Sjchu 
2000435Sjchu 	imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
2001435Sjchu 	    BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
2002435Sjchu 	    BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);
2003435Sjchu 
2004435Sjchu 	imu_intr_mask =
2005435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
2006435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
2007435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
2008435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
2009435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
2010435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);
2011435Sjchu 
2012435Sjchu 	switch (code) {
2013435Sjchu 	case CB_CODE_CPR_CHKPT:
2014435Sjchu 		/* disable imu rbne on corr/nonfatal/fatal errors */
2015435Sjchu 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
2016435Sjchu 		    imu_log_enable & (~imu_log_mask));
2017435Sjchu 
2018435Sjchu 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
2019435Sjchu 		    imu_intr_enable & (~imu_intr_mask));
2020435Sjchu 
2021435Sjchu 		/* disable CORR intr mapping */
2022435Sjchu 		px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);
2023435Sjchu 
2024435Sjchu 		/* disable NON FATAL intr mapping */
2025435Sjchu 		px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);
2026435Sjchu 
2027435Sjchu 		/* disable FATAL intr mapping */
2028435Sjchu 		px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);
2029435Sjchu 
2030435Sjchu 		break;
2031435Sjchu 
2032435Sjchu 	case CB_CODE_CPR_RESUME:
2033435Sjchu 		mutex_enter(&ib_p->ib_ino_lst_mutex);
2034435Sjchu 
2035435Sjchu 		ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
2036435Sjchu 		nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
2037435Sjchu 		f_ino_p = px_ib_locate_ino(ib_p, f_ino);
2038435Sjchu 
2039435Sjchu 		/* enable CORR intr mapping */
2040435Sjchu 		if (ce_ino_p)
2041435Sjchu 			px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
2042435Sjchu 		else
2043435Sjchu 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2044435Sjchu 			    "reenable PCIe Correctable msg intr.\n");
2045435Sjchu 
2046435Sjchu 		/* enable NON FATAL intr mapping */
2047435Sjchu 		if (nf_ino_p)
2048435Sjchu 			px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
2049435Sjchu 		else
2050435Sjchu 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2051435Sjchu 			    "reenable PCIe Non Fatal msg intr.\n");
2052435Sjchu 
2053435Sjchu 		/* enable FATAL intr mapping */
2054435Sjchu 		if (f_ino_p)
2055435Sjchu 			px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
2056435Sjchu 		else
2057435Sjchu 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2058435Sjchu 			    "reenable PCIe Fatal msg intr.\n");
2059435Sjchu 
2060435Sjchu 		mutex_exit(&ib_p->ib_ino_lst_mutex);
2061435Sjchu 
2062435Sjchu 		/* enable corr/nonfatal/fatal not enable error */
2063435Sjchu 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
2064435Sjchu 		    (imu_log_mask & px_imu_log_mask)));
2065435Sjchu 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
2066435Sjchu 		    (imu_intr_mask & px_imu_intr_mask)));
2067435Sjchu 
2068435Sjchu 		break;
2069435Sjchu 	}
2070435Sjchu 
2071435Sjchu 	return (B_TRUE);
2072435Sjchu }
2073435Sjchu 
2074435Sjchu /*
2075435Sjchu  * add cpr callback
2076435Sjchu  */
2077435Sjchu void
2078435Sjchu px_cpr_add_callb(px_t *px_p)
2079435Sjchu {
2080435Sjchu 	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
2081435Sjchu 	    CB_CL_CPR_POST_USER, "px_cpr");
2082435Sjchu }
2083435Sjchu 
2084435Sjchu /*
2085435Sjchu  * remove cpr callback
2086435Sjchu  */
2087435Sjchu void
2088435Sjchu px_cpr_rem_callb(px_t *px_p)
2089435Sjchu {
2090435Sjchu 	(void) callb_delete(px_p->px_cprcb_id);
2091435Sjchu }
2092