xref: /onnv-gate/usr/src/uts/sun4u/io/px/px_lib4u.c (revision 10923:df470fd79c3c)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51540Skini  * Common Development and Distribution License (the "License").
61540Skini  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
228691SLida.Horn@Sun.COM  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #include <sys/types.h>
270Sstevel@tonic-gate #include <sys/kmem.h>
280Sstevel@tonic-gate #include <sys/conf.h>
290Sstevel@tonic-gate #include <sys/ddi.h>
300Sstevel@tonic-gate #include <sys/sunddi.h>
316313Skrishnae #include <sys/sunndi.h>
3227Sjchu #include <sys/fm/protocol.h>
3327Sjchu #include <sys/fm/util.h>
340Sstevel@tonic-gate #include <sys/modctl.h>
350Sstevel@tonic-gate #include <sys/disp.h>
360Sstevel@tonic-gate #include <sys/stat.h>
370Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
380Sstevel@tonic-gate #include <sys/vmem.h>
390Sstevel@tonic-gate #include <sys/iommutsb.h>
400Sstevel@tonic-gate #include <sys/cpuvar.h>
4127Sjchu #include <sys/ivintr.h>
42383Set142600 #include <sys/byteorder.h>
433623Sjchu #include <sys/spl.h>
440Sstevel@tonic-gate #include <px_obj.h>
4510187SKrishna.Elango@Sun.COM #include <sys/pcie_pwr.h>
461772Sjl139090 #include "px_tools_var.h"
470Sstevel@tonic-gate #include <px_regs.h>
480Sstevel@tonic-gate #include <px_csr.h>
4927Sjchu #include <sys/machsystm.h>
500Sstevel@tonic-gate #include "px_lib4u.h"
5127Sjchu #include "px_err.h"
521772Sjl139090 #include "oberon_regs.h"
53*10923SEvan.Yan@Sun.COM #include <sys/hotplug/pci/pcie_hp.h>
540Sstevel@tonic-gate 
550Sstevel@tonic-gate #pragma weak jbus_stst_order
560Sstevel@tonic-gate 
570Sstevel@tonic-gate extern void jbus_stst_order();
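/*
 * Note: jbus_stst_order() is declared with #pragma weak above, so its
 * address resolves to NULL when the running cpu module does not provide
 * the routine; px_lib_dma_sync() checks for that before calling it.
 */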
580Sstevel@tonic-gate 
590Sstevel@tonic-gate ulong_t px_mmu_dvma_end = 0xfffffffful;
600Sstevel@tonic-gate uint_t px_ranges_phi_mask = 0xfffffffful;
611772Sjl139090 uint64_t *px_oberon_ubc_scratch_regs;
622276Sschwartz uint64_t px_paddr_mask;
630Sstevel@tonic-gate 
640Sstevel@tonic-gate static int px_goto_l23ready(px_t *px_p);
65118Sjchu static int px_goto_l0(px_t *px_p);
66118Sjchu static int px_pre_pwron_check(px_t *px_p);
672426Sschwartz static uint32_t px_identity_init(px_t *px_p);
68435Sjchu static boolean_t px_cpr_callb(void *arg, int code);
691648Sjchu static uint_t px_cb_intr(caddr_t arg);
7027Sjchu 
7127Sjchu /*
727596SAlan.Adamson@Sun.COM  * ACKNAK Latency Threshold Table.
737596SAlan.Adamson@Sun.COM  * See Fire PRM 2.0 section 1.2.12.2, table 1-17.
747596SAlan.Adamson@Sun.COM  */
757596SAlan.Adamson@Sun.COM int px_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
767596SAlan.Adamson@Sun.COM 	{0xED,   0x49,  0x43,  0x30},
777596SAlan.Adamson@Sun.COM 	{0x1A0,  0x76,  0x6B,  0x48},
787596SAlan.Adamson@Sun.COM 	{0x22F,  0x9A,  0x56,  0x56},
797596SAlan.Adamson@Sun.COM 	{0x42F,  0x11A, 0x96,  0x96},
807596SAlan.Adamson@Sun.COM 	{0x82F,  0x21A, 0x116, 0x116},
817596SAlan.Adamson@Sun.COM 	{0x102F, 0x41A, 0x216, 0x216}
827596SAlan.Adamson@Sun.COM };
837596SAlan.Adamson@Sun.COM 
847596SAlan.Adamson@Sun.COM /*
857596SAlan.Adamson@Sun.COM  * TxLink Replay Timer Latency Table
867596SAlan.Adamson@Sun.COM  * See Fire PRM 2.0 section 1.2.12.3, table 1-18.
877596SAlan.Adamson@Sun.COM  */
887596SAlan.Adamson@Sun.COM int px_replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = {
897596SAlan.Adamson@Sun.COM 	{0x379,  0x112, 0xFC,  0xB4},
907596SAlan.Adamson@Sun.COM 	{0x618,  0x1BA, 0x192, 0x10E},
917596SAlan.Adamson@Sun.COM 	{0x831,  0x242, 0x143, 0x143},
927596SAlan.Adamson@Sun.COM 	{0xFB1,  0x422, 0x233, 0x233},
937596SAlan.Adamson@Sun.COM 	{0x1EB0, 0x7E1, 0x412, 0x412},
947596SAlan.Adamson@Sun.COM 	{0x3CB0, 0xF61, 0x7D2, 0x7D2}
957596SAlan.Adamson@Sun.COM };
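/*
 * For both tables above, the first index selects the maximum packet
 * (payload) size and the second selects the link width, as implied by
 * LINK_MAX_PKT_ARR_SIZE and LINK_WIDTH_ARR_SIZE; see the Fire PRM
 * tables cited above for the exact row/column encodings.
 */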
967596SAlan.Adamson@Sun.COM /*
9727Sjchu  * px_lib_map_regs
9827Sjchu  *
9927Sjchu  * This function is called from the attach routine to map the registers
10027Sjchu  * accessed by this driver.
10127Sjchu  *
10227Sjchu  * used by: px_attach()
10327Sjchu  *
10427Sjchu  * return value: DDI_FAILURE on failure
10527Sjchu  */
10627Sjchu int
10727Sjchu px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip)
10827Sjchu {
10927Sjchu 	ddi_device_acc_attr_t	attr;
11027Sjchu 	px_reg_bank_t		reg_bank = PX_REG_CSR;
11127Sjchu 
11227Sjchu 	DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n",
1136313Skrishnae 	    pxu_p, dip);
11427Sjchu 
11527Sjchu 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
11627Sjchu 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
11727Sjchu 	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
11827Sjchu 
11927Sjchu 	/*
12027Sjchu 	 * PCI CSR Base
12127Sjchu 	 */
12227Sjchu 	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
12327Sjchu 	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
12427Sjchu 		goto fail;
12527Sjchu 	}
12627Sjchu 
12727Sjchu 	reg_bank++;
12827Sjchu 
12927Sjchu 	/*
13027Sjchu 	 * XBUS CSR Base
13127Sjchu 	 */
13227Sjchu 	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
13327Sjchu 	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
13427Sjchu 		goto fail;
13527Sjchu 	}
13627Sjchu 
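	/*
	 * The XBC register offsets used by this driver are absolute, while
	 * this bank's mapping begins at FIRE_CONTROL_STATUS; rebase the
	 * virtual address so those absolute offsets can be applied
	 * directly (rationale inferred from the adjustment below).
	 */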
13727Sjchu 	pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS;
13827Sjchu 
13927Sjchu done:
14027Sjchu 	for (; reg_bank >= PX_REG_CSR; reg_bank--) {
14127Sjchu 		DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n",
14227Sjchu 		    reg_bank, pxu_p->px_address[reg_bank]);
14327Sjchu 	}
14427Sjchu 
14527Sjchu 	return (DDI_SUCCESS);
14627Sjchu 
14727Sjchu fail:
14827Sjchu 	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
14927Sjchu 	    ddi_driver_name(dip), ddi_get_instance(dip), reg_bank);
15027Sjchu 
15127Sjchu 	for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) {
15227Sjchu 		pxu_p->px_address[reg_bank] = NULL;
15327Sjchu 		ddi_regs_map_free(&pxu_p->px_ac[reg_bank]);
15427Sjchu 	}
15527Sjchu 
15627Sjchu 	return (DDI_FAILURE);
15727Sjchu }
15827Sjchu 
15927Sjchu /*
16027Sjchu  * px_lib_unmap_regs:
16127Sjchu  *
16227Sjchu  * This routine unmaps the registers mapped by px_lib_map_regs.
16327Sjchu  *
16427Sjchu  * used by: px_detach(), and error conditions in px_attach()
16527Sjchu  *
16627Sjchu  * return value: none
16727Sjchu  */
16827Sjchu void
16927Sjchu px_lib_unmap_regs(pxu_t *pxu_p)
17027Sjchu {
17127Sjchu 	int i;
17227Sjchu 
17327Sjchu 	for (i = 0; i < PX_REG_MAX; i++) {
17427Sjchu 		if (pxu_p->px_ac[i])
17527Sjchu 			ddi_regs_map_free(&pxu_p->px_ac[i]);
17627Sjchu 	}
17727Sjchu }
1780Sstevel@tonic-gate 
1790Sstevel@tonic-gate int
1800Sstevel@tonic-gate px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
1810Sstevel@tonic-gate {
1822509Sschwartz 
1832509Sschwartz 	caddr_t			xbc_csr_base, csr_base;
1840Sstevel@tonic-gate 	px_dvma_range_prop_t	px_dvma_range;
1852509Sschwartz 	pxu_t			*pxu_p;
1862509Sschwartz 	uint8_t			chip_mask;
1872509Sschwartz 	px_t			*px_p = DIP_TO_STATE(dip);
1882509Sschwartz 	px_chip_type_t		chip_type = px_identity_init(px_p);
1890Sstevel@tonic-gate 
1902426Sschwartz 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p", dip);
1912426Sschwartz 
1922426Sschwartz 	if (chip_type == PX_CHIP_UNIDENTIFIED) {
1932426Sschwartz 		cmn_err(CE_WARN, "%s%d: Unrecognized Hardware Version\n",
1942426Sschwartz 		    NAMEINST(dip));
1950Sstevel@tonic-gate 		return (DDI_FAILURE);
1960Sstevel@tonic-gate 	}
1970Sstevel@tonic-gate 
1982509Sschwartz 	chip_mask = BITMASK(chip_type);
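	/*
	 * px_paddr_mask restricts DMA physical addresses to the width
	 * implemented by this chip's IOMMU; Fire and Oberon implement
	 * different physical address widths, hence the per-chip choice.
	 */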
1992426Sschwartz 	px_paddr_mask = (chip_type == PX_CHIP_FIRE) ? MMU_FIRE_PADDR_MASK :
2002426Sschwartz 	    MMU_OBERON_PADDR_MASK;
2012426Sschwartz 
2020Sstevel@tonic-gate 	/*
2030Sstevel@tonic-gate 	 * Allocate platform specific structure and link it to
2040Sstevel@tonic-gate 	 * the px state structure.
2050Sstevel@tonic-gate 	 */
2060Sstevel@tonic-gate 	pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
2072426Sschwartz 	pxu_p->chip_type = chip_type;
2080Sstevel@tonic-gate 	pxu_p->portid  = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2090Sstevel@tonic-gate 	    "portid", -1);
2100Sstevel@tonic-gate 
21127Sjchu 	/* Map in the registers */
21227Sjchu 	if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) {
21327Sjchu 		kmem_free(pxu_p, sizeof (pxu_t));
21427Sjchu 
21527Sjchu 		return (DDI_FAILURE);
21627Sjchu 	}
21727Sjchu 
21827Sjchu 	xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
21927Sjchu 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
22027Sjchu 
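	/*
	 * Reserve this leaf's IOMMU TSB. iommu_tsb_alloc() returns a
	 * cookie for a TSB carved from the memory the iommutsb code sets
	 * aside at boot (see <sys/iommutsb.h>); the TSB size and kernel
	 * virtual address are then derived from that cookie.
	 */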
2210Sstevel@tonic-gate 	pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
2220Sstevel@tonic-gate 	pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
2230Sstevel@tonic-gate 	pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);
2240Sstevel@tonic-gate 
2251772Sjl139090 	pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr);
2261772Sjl139090 
2270Sstevel@tonic-gate 	/*
2280Sstevel@tonic-gate 	 * Create "virtual-dma" property to support child devices
2290Sstevel@tonic-gate 	 * needing to know DVMA range.
2300Sstevel@tonic-gate 	 */
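	/*
	 * Each 8-byte TSB entry maps one MMU page, so the DVMA window
	 * spans (tsb_size >> 3) pages ending at px_mmu_dvma_end; the base
	 * computed below is simply that many pages below the end of the
	 * 32-bit DVMA space.
	 */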
2310Sstevel@tonic-gate 	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
2320Sstevel@tonic-gate 	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
2330Sstevel@tonic-gate 	px_dvma_range.dvma_len = (uint32_t)
2340Sstevel@tonic-gate 	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;
2350Sstevel@tonic-gate 
2365328Sdanice 	(void) ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
2375328Sdanice 	    "virtual-dma", (int *)&px_dvma_range,
2385328Sdanice 	    sizeof (px_dvma_range_prop_t) / sizeof (int));
2390Sstevel@tonic-gate 	/*
2400Sstevel@tonic-gate 	 * Initialize all Fire hardware-specific blocks.
2410Sstevel@tonic-gate 	 */
2420Sstevel@tonic-gate 	hvio_cb_init(xbc_csr_base, pxu_p);
2430Sstevel@tonic-gate 	hvio_ib_init(csr_base, pxu_p);
2440Sstevel@tonic-gate 	hvio_pec_init(csr_base, pxu_p);
2450Sstevel@tonic-gate 	hvio_mmu_init(csr_base, pxu_p);
2460Sstevel@tonic-gate 
2470Sstevel@tonic-gate 	px_p->px_plat_p = (void *)pxu_p;
2480Sstevel@tonic-gate 
24927Sjchu 	/*
25027Sjchu 	 * Enable the chip specific error interrupt and logging registers
25127Sjchu 	 */
2521772Sjl139090 	switch (PX_CHIP_TYPE(pxu_p)) {
2531772Sjl139090 	case PX_CHIP_OBERON:
2542044Sjj156685 		/*
2552044Sjj156685 		 * Oberon hotplug uses the SPARE3 field in the ILU Error Log
2562044Sjj156685 		 * Enable register to indicate the status of leaf reset.
2572044Sjj156685 		 * Preserve the value of this bit and mirror it in
2582044Sjj156685 		 * px_ilu_log_mask so the mask reflects the bit's state.
2592044Sjj156685 		 */
2602044Sjj156685 		if (CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))
2612044Sjj156685 			px_ilu_log_mask |= (1ull <<
2622044Sjj156685 			    ILU_ERROR_LOG_ENABLE_SPARE3);
2632044Sjj156685 		else
2642044Sjj156685 			px_ilu_log_mask &= ~(1ull <<
2652044Sjj156685 			    ILU_ERROR_LOG_ENABLE_SPARE3);
2662509Sschwartz 
2672509Sschwartz 		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
2681772Sjl139090 		break;
2691772Sjl139090 
2701772Sjl139090 	case PX_CHIP_FIRE:
2712509Sschwartz 		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
2721772Sjl139090 		break;
2732509Sschwartz 
2741772Sjl139090 	default:
2751772Sjl139090 		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
2761772Sjl139090 		    ddi_driver_name(dip), ddi_get_instance(dip));
2771772Sjl139090 		return (DDI_FAILURE);
2781772Sjl139090 	}
27927Sjchu 
2800Sstevel@tonic-gate 	/* Initialize device handle */
2810Sstevel@tonic-gate 	*dev_hdl = (devhandle_t)csr_base;
2820Sstevel@tonic-gate 
2830Sstevel@tonic-gate 	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);
2840Sstevel@tonic-gate 
2850Sstevel@tonic-gate 	return (DDI_SUCCESS);
2860Sstevel@tonic-gate }
2870Sstevel@tonic-gate 
2880Sstevel@tonic-gate int
2890Sstevel@tonic-gate px_lib_dev_fini(dev_info_t *dip)
2900Sstevel@tonic-gate {
2912509Sschwartz 	caddr_t			csr_base;
2922509Sschwartz 	uint8_t			chip_mask;
2932509Sschwartz 	px_t			*px_p = DIP_TO_STATE(dip);
2942509Sschwartz 	pxu_t			*pxu_p = (pxu_t *)px_p->px_plat_p;
2950Sstevel@tonic-gate 
2960Sstevel@tonic-gate 	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);
2970Sstevel@tonic-gate 
29827Sjchu 	/*
29927Sjchu 	 * Disable the chip specific error interrupt and logging registers
30027Sjchu 	 */
3011772Sjl139090 	switch (PX_CHIP_TYPE(pxu_p)) {
3021772Sjl139090 	case PX_CHIP_OBERON:
3032509Sschwartz 	case PX_CHIP_FIRE:
3042509Sschwartz 		chip_mask = BITMASK(PX_CHIP_TYPE(pxu_p));
3052509Sschwartz 		csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
3062509Sschwartz 		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_DISABLE);
3071772Sjl139090 		break;
3082509Sschwartz 
3091772Sjl139090 	default:
3101772Sjl139090 		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
3111772Sjl139090 		    ddi_driver_name(dip), ddi_get_instance(dip));
3121772Sjl139090 		return (DDI_FAILURE);
3131772Sjl139090 	}
31427Sjchu 
3150Sstevel@tonic-gate 	iommu_tsb_free(pxu_p->tsb_cookie);
3160Sstevel@tonic-gate 
31727Sjchu 	px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
31827Sjchu 	kmem_free(px_p->px_plat_p, sizeof (pxu_t));
3190Sstevel@tonic-gate 	px_p->px_plat_p = NULL;
3205328Sdanice 	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "virtual-dma");
3210Sstevel@tonic-gate 
3220Sstevel@tonic-gate 	return (DDI_SUCCESS);
3230Sstevel@tonic-gate }
3240Sstevel@tonic-gate 
3250Sstevel@tonic-gate /*ARGSUSED*/
3260Sstevel@tonic-gate int
3270Sstevel@tonic-gate px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
3280Sstevel@tonic-gate     sysino_t *sysino)
3290Sstevel@tonic-gate {
3300Sstevel@tonic-gate 	px_t	*px_p = DIP_TO_STATE(dip);
3310Sstevel@tonic-gate 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
3320Sstevel@tonic-gate 	uint64_t	ret;
3330Sstevel@tonic-gate 
3340Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
3350Sstevel@tonic-gate 	    "devino 0x%x\n", dip, devino);
3360Sstevel@tonic-gate 
3370Sstevel@tonic-gate 	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
3380Sstevel@tonic-gate 	    pxu_p, devino, sysino)) != H_EOK) {
3390Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip,
3400Sstevel@tonic-gate 		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
3410Sstevel@tonic-gate 		return (DDI_FAILURE);
3420Sstevel@tonic-gate 	}
3430Sstevel@tonic-gate 
3440Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
3450Sstevel@tonic-gate 	    *sysino);
3460Sstevel@tonic-gate 
3470Sstevel@tonic-gate 	return (DDI_SUCCESS);
3480Sstevel@tonic-gate }
3490Sstevel@tonic-gate 
3500Sstevel@tonic-gate /*ARGSUSED*/
3510Sstevel@tonic-gate int
3520Sstevel@tonic-gate px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
3530Sstevel@tonic-gate     intr_valid_state_t *intr_valid_state)
3540Sstevel@tonic-gate {
3550Sstevel@tonic-gate 	uint64_t	ret;
3560Sstevel@tonic-gate 
3570Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
3580Sstevel@tonic-gate 	    dip, sysino);
3590Sstevel@tonic-gate 
3600Sstevel@tonic-gate 	if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip),
3610Sstevel@tonic-gate 	    sysino, intr_valid_state)) != H_EOK) {
3620Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
3630Sstevel@tonic-gate 		    ret);
3640Sstevel@tonic-gate 		return (DDI_FAILURE);
3650Sstevel@tonic-gate 	}
3660Sstevel@tonic-gate 
3670Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
3680Sstevel@tonic-gate 	    *intr_valid_state);
3690Sstevel@tonic-gate 
3700Sstevel@tonic-gate 	return (DDI_SUCCESS);
3710Sstevel@tonic-gate }
3720Sstevel@tonic-gate 
3730Sstevel@tonic-gate /*ARGSUSED*/
3740Sstevel@tonic-gate int
3750Sstevel@tonic-gate px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
3760Sstevel@tonic-gate     intr_valid_state_t intr_valid_state)
3770Sstevel@tonic-gate {
3780Sstevel@tonic-gate 	uint64_t	ret;
3790Sstevel@tonic-gate 
3800Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
3810Sstevel@tonic-gate 	    "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);
3820Sstevel@tonic-gate 
3830Sstevel@tonic-gate 	if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip),
3840Sstevel@tonic-gate 	    sysino, intr_valid_state)) != H_EOK) {
3850Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
3860Sstevel@tonic-gate 		    ret);
3870Sstevel@tonic-gate 		return (DDI_FAILURE);
3880Sstevel@tonic-gate 	}
3890Sstevel@tonic-gate 
3900Sstevel@tonic-gate 	return (DDI_SUCCESS);
3910Sstevel@tonic-gate }
3920Sstevel@tonic-gate 
3930Sstevel@tonic-gate /*ARGSUSED*/
3940Sstevel@tonic-gate int
3950Sstevel@tonic-gate px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
3960Sstevel@tonic-gate     intr_state_t *intr_state)
3970Sstevel@tonic-gate {
3980Sstevel@tonic-gate 	uint64_t	ret;
3990Sstevel@tonic-gate 
4000Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
4010Sstevel@tonic-gate 	    dip, sysino);
4020Sstevel@tonic-gate 
4030Sstevel@tonic-gate 	if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip),
4040Sstevel@tonic-gate 	    sysino, intr_state)) != H_EOK) {
4050Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
4060Sstevel@tonic-gate 		    ret);
4070Sstevel@tonic-gate 		return (DDI_FAILURE);
4080Sstevel@tonic-gate 	}
4090Sstevel@tonic-gate 
4100Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
4110Sstevel@tonic-gate 	    *intr_state);
4120Sstevel@tonic-gate 
4130Sstevel@tonic-gate 	return (DDI_SUCCESS);
4140Sstevel@tonic-gate }
4150Sstevel@tonic-gate 
4160Sstevel@tonic-gate /*ARGSUSED*/
4170Sstevel@tonic-gate int
4180Sstevel@tonic-gate px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
4190Sstevel@tonic-gate     intr_state_t intr_state)
4200Sstevel@tonic-gate {
4210Sstevel@tonic-gate 	uint64_t	ret;
4220Sstevel@tonic-gate 
4230Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
4240Sstevel@tonic-gate 	    "intr_state 0x%x\n", dip, sysino, intr_state);
4250Sstevel@tonic-gate 
4260Sstevel@tonic-gate 	if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip),
4270Sstevel@tonic-gate 	    sysino, intr_state)) != H_EOK) {
4280Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
4290Sstevel@tonic-gate 		    ret);
4300Sstevel@tonic-gate 		return (DDI_FAILURE);
4310Sstevel@tonic-gate 	}
4320Sstevel@tonic-gate 
4330Sstevel@tonic-gate 	return (DDI_SUCCESS);
4340Sstevel@tonic-gate }
4350Sstevel@tonic-gate 
4360Sstevel@tonic-gate /*ARGSUSED*/
4370Sstevel@tonic-gate int
4380Sstevel@tonic-gate px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
4390Sstevel@tonic-gate {
4401772Sjl139090 	px_t		*px_p = DIP_TO_STATE(dip);
4411772Sjl139090 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
4420Sstevel@tonic-gate 	uint64_t	ret;
4430Sstevel@tonic-gate 
4440Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
4450Sstevel@tonic-gate 	    dip, sysino);
4460Sstevel@tonic-gate 
4471772Sjl139090 	if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p,
4480Sstevel@tonic-gate 	    sysino, cpuid)) != H_EOK) {
4490Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n",
4500Sstevel@tonic-gate 		    ret);
4510Sstevel@tonic-gate 		return (DDI_FAILURE);
4520Sstevel@tonic-gate 	}
4530Sstevel@tonic-gate 
4540Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", cpuid);
4550Sstevel@tonic-gate 
4560Sstevel@tonic-gate 	return (DDI_SUCCESS);
4570Sstevel@tonic-gate }
4580Sstevel@tonic-gate 
4590Sstevel@tonic-gate /*ARGSUSED*/
4600Sstevel@tonic-gate int
4610Sstevel@tonic-gate px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
4620Sstevel@tonic-gate {
4631772Sjl139090 	px_t		*px_p = DIP_TO_STATE(dip);
4641772Sjl139090 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
4650Sstevel@tonic-gate 	uint64_t	ret;
4660Sstevel@tonic-gate 
4670Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
4680Sstevel@tonic-gate 	    "cpuid 0x%x\n", dip, sysino, cpuid);
4690Sstevel@tonic-gate 
4701772Sjl139090 	if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p,
4710Sstevel@tonic-gate 	    sysino, cpuid)) != H_EOK) {
4720Sstevel@tonic-gate 		DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n",
4730Sstevel@tonic-gate 		    ret);
4740Sstevel@tonic-gate 		return (DDI_FAILURE);
4750Sstevel@tonic-gate 	}
4760Sstevel@tonic-gate 
4770Sstevel@tonic-gate 	return (DDI_SUCCESS);
4780Sstevel@tonic-gate }
4790Sstevel@tonic-gate 
4800Sstevel@tonic-gate /*ARGSUSED*/
4810Sstevel@tonic-gate int
4820Sstevel@tonic-gate px_lib_intr_reset(dev_info_t *dip)
4830Sstevel@tonic-gate {
4840Sstevel@tonic-gate 	devino_t	ino;
4850Sstevel@tonic-gate 	sysino_t	sysino;
4860Sstevel@tonic-gate 
4870Sstevel@tonic-gate 	DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);
4880Sstevel@tonic-gate 
4890Sstevel@tonic-gate 	/* Reset all Interrupts */
4900Sstevel@tonic-gate 	for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) {
4910Sstevel@tonic-gate 		if (px_lib_intr_devino_to_sysino(dip, ino,
4920Sstevel@tonic-gate 		    &sysino) != DDI_SUCCESS)
4930Sstevel@tonic-gate 			return (BF_FATAL);
4940Sstevel@tonic-gate 
4950Sstevel@tonic-gate 		if (px_lib_intr_setstate(dip, sysino,
4960Sstevel@tonic-gate 		    INTR_IDLE_STATE) != DDI_SUCCESS)
4970Sstevel@tonic-gate 			return (BF_FATAL);
4980Sstevel@tonic-gate 	}
4990Sstevel@tonic-gate 
5000Sstevel@tonic-gate 	return (BF_NONE);
5010Sstevel@tonic-gate }
5020Sstevel@tonic-gate 
5030Sstevel@tonic-gate /*ARGSUSED*/
5040Sstevel@tonic-gate int
5050Sstevel@tonic-gate px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
5061617Sgovinda     io_attributes_t attr, void *addr, size_t pfn_index, int flags)
5070Sstevel@tonic-gate {
5080Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
5090Sstevel@tonic-gate 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
5100Sstevel@tonic-gate 	uint64_t	ret;
5110Sstevel@tonic-gate 
5120Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
5139707SDaniel.Ice@Sun.COM 	    "pages 0x%x attr 0x%llx addr 0x%p pfn_index 0x%llx flags 0x%x\n",
5141617Sgovinda 	    dip, tsbid, pages, attr, addr, pfn_index, flags);
5150Sstevel@tonic-gate 
5160Sstevel@tonic-gate 	if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
5171617Sgovinda 	    attr, addr, pfn_index, flags)) != H_EOK) {
5180Sstevel@tonic-gate 		DBG(DBG_LIB_DMA, dip,
5190Sstevel@tonic-gate 		    "px_lib_iommu_map failed, ret 0x%lx\n", ret);
5200Sstevel@tonic-gate 		return (DDI_FAILURE);
5210Sstevel@tonic-gate 	}
5220Sstevel@tonic-gate 
5230Sstevel@tonic-gate 	return (DDI_SUCCESS);
5240Sstevel@tonic-gate }
5250Sstevel@tonic-gate 
5260Sstevel@tonic-gate /*ARGSUSED*/
5270Sstevel@tonic-gate int
5280Sstevel@tonic-gate px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
5290Sstevel@tonic-gate {
5300Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
5310Sstevel@tonic-gate 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
5320Sstevel@tonic-gate 	uint64_t	ret;
5330Sstevel@tonic-gate 
5340Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
5350Sstevel@tonic-gate 	    "pages 0x%x\n", dip, tsbid, pages);
5360Sstevel@tonic-gate 
5370Sstevel@tonic-gate 	if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
5380Sstevel@tonic-gate 	    != H_EOK) {
5390Sstevel@tonic-gate 		DBG(DBG_LIB_DMA, dip,
5400Sstevel@tonic-gate 		    "px_lib_iommu_demap failed, ret 0x%lx\n", ret);
5410Sstevel@tonic-gate 
5420Sstevel@tonic-gate 		return (DDI_FAILURE);
5430Sstevel@tonic-gate 	}
5440Sstevel@tonic-gate 
5450Sstevel@tonic-gate 	return (DDI_SUCCESS);
5460Sstevel@tonic-gate }
5470Sstevel@tonic-gate 
5480Sstevel@tonic-gate /*ARGSUSED*/
5490Sstevel@tonic-gate int
5501617Sgovinda px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
5511617Sgovinda     r_addr_t *r_addr_p)
5520Sstevel@tonic-gate {
5530Sstevel@tonic-gate 	px_t	*px_p = DIP_TO_STATE(dip);
5540Sstevel@tonic-gate 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
5550Sstevel@tonic-gate 	uint64_t	ret;
5560Sstevel@tonic-gate 
5570Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
5580Sstevel@tonic-gate 	    dip, tsbid);
5590Sstevel@tonic-gate 
5600Sstevel@tonic-gate 	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
5611617Sgovinda 	    attr_p, r_addr_p)) != H_EOK) {
5620Sstevel@tonic-gate 		DBG(DBG_LIB_DMA, dip,
5630Sstevel@tonic-gate 		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);
5640Sstevel@tonic-gate 
5650Sstevel@tonic-gate 		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE);
5660Sstevel@tonic-gate 	}
5670Sstevel@tonic-gate 
5689707SDaniel.Ice@Sun.COM 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%llx "
5699707SDaniel.Ice@Sun.COM 	    "r_addr 0x%llx\n", *attr_p, *r_addr_p);
5700Sstevel@tonic-gate 
5710Sstevel@tonic-gate 	return (DDI_SUCCESS);
5720Sstevel@tonic-gate }
5730Sstevel@tonic-gate 
5740Sstevel@tonic-gate 
5750Sstevel@tonic-gate /*
5760Sstevel@tonic-gate  * Checks DMA attributes against the system bypass range.
5770Sstevel@tonic-gate  * The bypass range is determined by the hardware; return its bounds so
5780Sstevel@tonic-gate  * the common code can do generic checking against them.
5790Sstevel@tonic-gate  */
5800Sstevel@tonic-gate /*ARGSUSED*/
5810Sstevel@tonic-gate int
5821772Sjl139090 px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p,
5831772Sjl139090     uint64_t *lo_p, uint64_t *hi_p)
5840Sstevel@tonic-gate {
5851772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
5861772Sjl139090 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
5871772Sjl139090 
5881772Sjl139090 	*lo_p = hvio_get_bypass_base(pxu_p);
5891772Sjl139090 	*hi_p = hvio_get_bypass_end(pxu_p);
5900Sstevel@tonic-gate 
5910Sstevel@tonic-gate 	return (DDI_SUCCESS);
5920Sstevel@tonic-gate }
5930Sstevel@tonic-gate 
5940Sstevel@tonic-gate 
5950Sstevel@tonic-gate /*ARGSUSED*/
5960Sstevel@tonic-gate int
5971617Sgovinda px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr,
5981617Sgovinda     io_addr_t *io_addr_p)
5990Sstevel@tonic-gate {
6000Sstevel@tonic-gate 	uint64_t	ret;
6011772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
6021772Sjl139090 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
6030Sstevel@tonic-gate 
6040Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
6059707SDaniel.Ice@Sun.COM 	    "attr 0x%llx\n", dip, ra, attr);
6060Sstevel@tonic-gate 
6071772Sjl139090 	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra,
6081772Sjl139090 	    attr, io_addr_p)) != H_EOK) {
6090Sstevel@tonic-gate 		DBG(DBG_LIB_DMA, dip,
6100Sstevel@tonic-gate 		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
6110Sstevel@tonic-gate 		return (DDI_FAILURE);
6120Sstevel@tonic-gate 	}
6130Sstevel@tonic-gate 
6140Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
6150Sstevel@tonic-gate 	    *io_addr_p);
6160Sstevel@tonic-gate 
6170Sstevel@tonic-gate 	return (DDI_SUCCESS);
6180Sstevel@tonic-gate }
6190Sstevel@tonic-gate 
6200Sstevel@tonic-gate /*
6218691SLida.Horn@Sun.COM  * Returns any needed IO address bit(s) for relaxed ordering in IOMMU
6228691SLida.Horn@Sun.COM  * bypass mode.
6238691SLida.Horn@Sun.COM  */
6248691SLida.Horn@Sun.COM uint64_t
6258691SLida.Horn@Sun.COM px_lib_ro_bypass(dev_info_t *dip, io_attributes_t attr, uint64_t ioaddr)
6268691SLida.Horn@Sun.COM {
6278691SLida.Horn@Sun.COM 	px_t	*px_p = DIP_TO_STATE(dip);
6288691SLida.Horn@Sun.COM 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
6298691SLida.Horn@Sun.COM 
6308691SLida.Horn@Sun.COM 	if ((PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) && (attr & PCI_MAP_ATTR_RO))
6318691SLida.Horn@Sun.COM 		return (MMU_OBERON_BYPASS_RO | ioaddr);
6328691SLida.Horn@Sun.COM 	else
6338691SLida.Horn@Sun.COM 		return (ioaddr);
6348691SLida.Horn@Sun.COM }
6358691SLida.Horn@Sun.COM 
6368691SLida.Horn@Sun.COM /*
6370Sstevel@tonic-gate  * bus dma sync entry point.
6380Sstevel@tonic-gate  */
6390Sstevel@tonic-gate /*ARGSUSED*/
6400Sstevel@tonic-gate int
6410Sstevel@tonic-gate px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
6421617Sgovinda     off_t off, size_t len, uint_t cache_flags)
6430Sstevel@tonic-gate {
6440Sstevel@tonic-gate 	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
6451772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
6461772Sjl139090 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
6470Sstevel@tonic-gate 
6480Sstevel@tonic-gate 	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
6490Sstevel@tonic-gate 	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
6500Sstevel@tonic-gate 	    dip, rdip, handle, off, len, cache_flags);
6510Sstevel@tonic-gate 
6520Sstevel@tonic-gate 	/*
6531772Sjl139090 	 * No flush needed for Oberon
6541772Sjl139090 	 */
6551772Sjl139090 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
6561772Sjl139090 		return (DDI_SUCCESS);
6571772Sjl139090 
6581772Sjl139090 	/*
6590Sstevel@tonic-gate 	 * jbus_stst_order is found only in certain cpu modules.
6600Sstevel@tonic-gate 	 * Just return success if not present.
6610Sstevel@tonic-gate 	 */
6620Sstevel@tonic-gate 	if (&jbus_stst_order == NULL)
6630Sstevel@tonic-gate 		return (DDI_SUCCESS);
6640Sstevel@tonic-gate 
665909Segillett 	if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) {
66627Sjchu 		cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
66727Sjchu 		    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);
66827Sjchu 
6690Sstevel@tonic-gate 		return (DDI_FAILURE);
6700Sstevel@tonic-gate 	}
6710Sstevel@tonic-gate 
672909Segillett 	if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC)
6730Sstevel@tonic-gate 		return (DDI_SUCCESS);
6740Sstevel@tonic-gate 
6750Sstevel@tonic-gate 	/*
6760Sstevel@tonic-gate 	 * No flush needed when sending data from memory to device.
6770Sstevel@tonic-gate 	 * Nothing needs to be done to "sync" memory with what the device already sees.
6780Sstevel@tonic-gate 	 */
6790Sstevel@tonic-gate 	if (!(mp->dmai_rflags & DDI_DMA_READ) ||
6800Sstevel@tonic-gate 	    ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
6810Sstevel@tonic-gate 		return (DDI_SUCCESS);
6820Sstevel@tonic-gate 
6830Sstevel@tonic-gate 	/*
6840Sstevel@tonic-gate 	 * Perform necessary cpu workaround to ensure jbus ordering.
6850Sstevel@tonic-gate 	 * CPU's internal "invalidate FIFOs" are flushed.
6860Sstevel@tonic-gate 	 */
6870Sstevel@tonic-gate 
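	/*
	 * Preemption is disabled around the call (except under lint),
	 * presumably so the flush completes on the CPU whose store FIFOs
	 * need to be drained before the thread can migrate.
	 */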
6880Sstevel@tonic-gate #if !defined(lint)
6890Sstevel@tonic-gate 	kpreempt_disable();
6900Sstevel@tonic-gate #endif
6910Sstevel@tonic-gate 	jbus_stst_order();
6920Sstevel@tonic-gate #if !defined(lint)
6930Sstevel@tonic-gate 	kpreempt_enable();
6940Sstevel@tonic-gate #endif
6950Sstevel@tonic-gate 	return (DDI_SUCCESS);
6960Sstevel@tonic-gate }
6970Sstevel@tonic-gate 
6980Sstevel@tonic-gate /*
6990Sstevel@tonic-gate  * MSIQ Functions:
7000Sstevel@tonic-gate  */
7010Sstevel@tonic-gate /*ARGSUSED*/
7020Sstevel@tonic-gate int
7030Sstevel@tonic-gate px_lib_msiq_init(dev_info_t *dip)
7040Sstevel@tonic-gate {
7050Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
7060Sstevel@tonic-gate 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
7070Sstevel@tonic-gate 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
7080Sstevel@tonic-gate 	px_dvma_addr_t	pg_index;
7097403SAlan.Adamson@Sun.COM 	size_t		q_sz = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
7100Sstevel@tonic-gate 	size_t		size;
7117403SAlan.Adamson@Sun.COM 	int		i, ret;
7120Sstevel@tonic-gate 
7130Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);
7140Sstevel@tonic-gate 
7157403SAlan.Adamson@Sun.COM 	/* must be aligned on a q_sz boundary (q_sz happens to be exactly one 8K page) */
7167403SAlan.Adamson@Sun.COM 	ASSERT(q_sz == 8 * 1024);
7177403SAlan.Adamson@Sun.COM 
7180Sstevel@tonic-gate 	/*
7190Sstevel@tonic-gate 	 * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
7200Sstevel@tonic-gate 	 * and then initialize the base address register.
7210Sstevel@tonic-gate 	 *
7220Sstevel@tonic-gate 	 * Allocate entries from Fire IOMMU so that the resulting address
7230Sstevel@tonic-gate 	 * is properly aligned.  Calculate the index of the first allocated
7240Sstevel@tonic-gate 	 * entry.  Note: The size of the mapping is assumed to be a multiple
7250Sstevel@tonic-gate 	 * of the page size.
7260Sstevel@tonic-gate 	 */
7277403SAlan.Adamson@Sun.COM 	size = msiq_state_p->msiq_cnt * q_sz;
7287403SAlan.Adamson@Sun.COM 
7297403SAlan.Adamson@Sun.COM 	msiq_state_p->msiq_buf_p = kmem_zalloc(size, KM_SLEEP);
7307403SAlan.Adamson@Sun.COM 
7317403SAlan.Adamson@Sun.COM 	for (i = 0; i < msiq_state_p->msiq_cnt; i++)
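	/* Point each event queue at its q_sz-sized slice of the single buffer. */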
7327403SAlan.Adamson@Sun.COM 		msiq_state_p->msiq_p[i].msiq_base_p = (msiqhead_t *)
7337403SAlan.Adamson@Sun.COM 		    ((caddr_t)msiq_state_p->msiq_buf_p + (i * q_sz));
7340Sstevel@tonic-gate 
7350Sstevel@tonic-gate 	pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
7360Sstevel@tonic-gate 	    size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);
7370Sstevel@tonic-gate 
7380Sstevel@tonic-gate 	if (pxu_p->msiq_mapped_p == NULL)
7390Sstevel@tonic-gate 		return (DDI_FAILURE);
7400Sstevel@tonic-gate 
7410Sstevel@tonic-gate 	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
7420Sstevel@tonic-gate 	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
7430Sstevel@tonic-gate 
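	/*
	 * Map the whole EQ buffer with the write attribute so the chip
	 * can DMA event queue records into it at the DVMA address just
	 * reserved above.
	 */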
7440Sstevel@tonic-gate 	if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
7452755Segillett 	    MMU_BTOP(size), PCI_MAP_ATTR_WRITE, msiq_state_p->msiq_buf_p,
7462755Segillett 	    0, MMU_MAP_BUF)) != DDI_SUCCESS) {
7470Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
7486983Sanbui 		    "px_lib_msiq_init: px_lib_iommu_map failed, "
7496983Sanbui 		    "ret 0x%lx\n", ret);
7500Sstevel@tonic-gate 
7510Sstevel@tonic-gate 		(void) px_lib_msiq_fini(dip);
7520Sstevel@tonic-gate 		return (DDI_FAILURE);
7530Sstevel@tonic-gate 	}
7540Sstevel@tonic-gate 
7557124Sanbui 	if ((ret = hvio_msiq_init(DIP_TO_HANDLE(dip),
7567124Sanbui 	    pxu_p)) != H_EOK) {
7577124Sanbui 		DBG(DBG_LIB_MSIQ, dip,
7587124Sanbui 		    "hvio_msiq_init failed, ret 0x%lx\n", ret);
7597124Sanbui 
7607124Sanbui 		(void) px_lib_msiq_fini(dip);
7617124Sanbui 		return (DDI_FAILURE);
7627124Sanbui 	}
7630Sstevel@tonic-gate 
7640Sstevel@tonic-gate 	return (DDI_SUCCESS);
7650Sstevel@tonic-gate }
7660Sstevel@tonic-gate 
7670Sstevel@tonic-gate /*ARGSUSED*/
7680Sstevel@tonic-gate int
7690Sstevel@tonic-gate px_lib_msiq_fini(dev_info_t *dip)
7700Sstevel@tonic-gate {
7710Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
7720Sstevel@tonic-gate 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
7730Sstevel@tonic-gate 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
7740Sstevel@tonic-gate 	px_dvma_addr_t	pg_index;
7750Sstevel@tonic-gate 	size_t		size;
7760Sstevel@tonic-gate 
7770Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);
7780Sstevel@tonic-gate 
7790Sstevel@tonic-gate 	/*
7800Sstevel@tonic-gate 	 * Unmap and free the EQ memory that had been mapped
7810Sstevel@tonic-gate 	 * into the Fire IOMMU.
7820Sstevel@tonic-gate 	 */
7830Sstevel@tonic-gate 	size = msiq_state_p->msiq_cnt *
7840Sstevel@tonic-gate 	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
7850Sstevel@tonic-gate 
7860Sstevel@tonic-gate 	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
7870Sstevel@tonic-gate 	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));
7880Sstevel@tonic-gate 
7890Sstevel@tonic-gate 	(void) px_lib_iommu_demap(px_p->px_dip,
7900Sstevel@tonic-gate 	    PCI_TSBID(0, pg_index), MMU_BTOP(size));
7910Sstevel@tonic-gate 
7920Sstevel@tonic-gate 	/* Free the entries from the Fire MMU */
7930Sstevel@tonic-gate 	vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
7940Sstevel@tonic-gate 	    (void *)pxu_p->msiq_mapped_p, size);
7950Sstevel@tonic-gate 
7967403SAlan.Adamson@Sun.COM 	kmem_free(msiq_state_p->msiq_buf_p, msiq_state_p->msiq_cnt *
7977403SAlan.Adamson@Sun.COM 	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t));
7987403SAlan.Adamson@Sun.COM 
7990Sstevel@tonic-gate 	return (DDI_SUCCESS);
8000Sstevel@tonic-gate }
8010Sstevel@tonic-gate 
8020Sstevel@tonic-gate /*ARGSUSED*/
8030Sstevel@tonic-gate int
8040Sstevel@tonic-gate px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
8050Sstevel@tonic-gate     uint_t *msiq_rec_cnt_p)
8060Sstevel@tonic-gate {
8070Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
8080Sstevel@tonic-gate 	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
8090Sstevel@tonic-gate 	size_t		msiq_size;
8100Sstevel@tonic-gate 
8110Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
8120Sstevel@tonic-gate 	    dip, msiq_id);
8130Sstevel@tonic-gate 
8140Sstevel@tonic-gate 	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
8152755Segillett 	ra_p = (r_addr_t *)((caddr_t)msiq_state_p->msiq_buf_p +
8162755Segillett 	    (msiq_id * msiq_size));
8170Sstevel@tonic-gate 
8180Sstevel@tonic-gate 	*msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;
8190Sstevel@tonic-gate 
8200Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n",
8210Sstevel@tonic-gate 	    ra_p, *msiq_rec_cnt_p);
8220Sstevel@tonic-gate 
8230Sstevel@tonic-gate 	return (DDI_SUCCESS);
8240Sstevel@tonic-gate }
8250Sstevel@tonic-gate 
8260Sstevel@tonic-gate /*ARGSUSED*/
8270Sstevel@tonic-gate int
8280Sstevel@tonic-gate px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
8290Sstevel@tonic-gate     pci_msiq_valid_state_t *msiq_valid_state)
8300Sstevel@tonic-gate {
8310Sstevel@tonic-gate 	uint64_t	ret;
8320Sstevel@tonic-gate 
8330Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
8340Sstevel@tonic-gate 	    dip, msiq_id);
8350Sstevel@tonic-gate 
8360Sstevel@tonic-gate 	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
8370Sstevel@tonic-gate 	    msiq_id, msiq_valid_state)) != H_EOK) {
8380Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
8390Sstevel@tonic-gate 		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
8400Sstevel@tonic-gate 		return (DDI_FAILURE);
8410Sstevel@tonic-gate 	}
8420Sstevel@tonic-gate 
8430Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
8440Sstevel@tonic-gate 	    *msiq_valid_state);
8450Sstevel@tonic-gate 
8460Sstevel@tonic-gate 	return (DDI_SUCCESS);
8470Sstevel@tonic-gate }
8480Sstevel@tonic-gate 
8490Sstevel@tonic-gate /*ARGSUSED*/
8500Sstevel@tonic-gate int
8510Sstevel@tonic-gate px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
8520Sstevel@tonic-gate     pci_msiq_valid_state_t msiq_valid_state)
8530Sstevel@tonic-gate {
8540Sstevel@tonic-gate 	uint64_t	ret;
8550Sstevel@tonic-gate 
8560Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
8570Sstevel@tonic-gate 	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);
8580Sstevel@tonic-gate 
8590Sstevel@tonic-gate 	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
8600Sstevel@tonic-gate 	    msiq_id, msiq_valid_state)) != H_EOK) {
8610Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
8620Sstevel@tonic-gate 		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
8630Sstevel@tonic-gate 		return (DDI_FAILURE);
8640Sstevel@tonic-gate 	}
8650Sstevel@tonic-gate 
8660Sstevel@tonic-gate 	return (DDI_SUCCESS);
8670Sstevel@tonic-gate }
8680Sstevel@tonic-gate 
8690Sstevel@tonic-gate /*ARGSUSED*/
8700Sstevel@tonic-gate int
8710Sstevel@tonic-gate px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
8720Sstevel@tonic-gate     pci_msiq_state_t *msiq_state)
8730Sstevel@tonic-gate {
8740Sstevel@tonic-gate 	uint64_t	ret;
8750Sstevel@tonic-gate 
8760Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
8770Sstevel@tonic-gate 	    dip, msiq_id);
8780Sstevel@tonic-gate 
8790Sstevel@tonic-gate 	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
8800Sstevel@tonic-gate 	    msiq_id, msiq_state)) != H_EOK) {
8810Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
8820Sstevel@tonic-gate 		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
8830Sstevel@tonic-gate 		return (DDI_FAILURE);
8840Sstevel@tonic-gate 	}
8850Sstevel@tonic-gate 
8860Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
8870Sstevel@tonic-gate 	    *msiq_state);
8880Sstevel@tonic-gate 
8890Sstevel@tonic-gate 	return (DDI_SUCCESS);
8900Sstevel@tonic-gate }
8910Sstevel@tonic-gate 
8920Sstevel@tonic-gate /*ARGSUSED*/
8930Sstevel@tonic-gate int
8940Sstevel@tonic-gate px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
8950Sstevel@tonic-gate     pci_msiq_state_t msiq_state)
8960Sstevel@tonic-gate {
8970Sstevel@tonic-gate 	uint64_t	ret;
8980Sstevel@tonic-gate 
8990Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
9000Sstevel@tonic-gate 	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);
9010Sstevel@tonic-gate 
9020Sstevel@tonic-gate 	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
9030Sstevel@tonic-gate 	    msiq_id, msiq_state)) != H_EOK) {
9040Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
9050Sstevel@tonic-gate 		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
9060Sstevel@tonic-gate 		return (DDI_FAILURE);
9070Sstevel@tonic-gate 	}
9080Sstevel@tonic-gate 
9090Sstevel@tonic-gate 	return (DDI_SUCCESS);
9100Sstevel@tonic-gate }
9110Sstevel@tonic-gate 
9120Sstevel@tonic-gate /*ARGSUSED*/
9130Sstevel@tonic-gate int
9140Sstevel@tonic-gate px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
9150Sstevel@tonic-gate     msiqhead_t *msiq_head)
9160Sstevel@tonic-gate {
9170Sstevel@tonic-gate 	uint64_t	ret;
9180Sstevel@tonic-gate 
9190Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
9200Sstevel@tonic-gate 	    dip, msiq_id);
9210Sstevel@tonic-gate 
9220Sstevel@tonic-gate 	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
9230Sstevel@tonic-gate 	    msiq_id, msiq_head)) != H_EOK) {
9240Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
9250Sstevel@tonic-gate 		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
9260Sstevel@tonic-gate 		return (DDI_FAILURE);
9270Sstevel@tonic-gate 	}
9280Sstevel@tonic-gate 
9290Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
9300Sstevel@tonic-gate 	    *msiq_head);
9310Sstevel@tonic-gate 
9320Sstevel@tonic-gate 	return (DDI_SUCCESS);
9330Sstevel@tonic-gate }
9340Sstevel@tonic-gate 
9350Sstevel@tonic-gate /*ARGSUSED*/
9360Sstevel@tonic-gate int
9370Sstevel@tonic-gate px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
9380Sstevel@tonic-gate     msiqhead_t msiq_head)
9390Sstevel@tonic-gate {
9400Sstevel@tonic-gate 	uint64_t	ret;
9410Sstevel@tonic-gate 
9420Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
9430Sstevel@tonic-gate 	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);
9440Sstevel@tonic-gate 
9450Sstevel@tonic-gate 	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
9460Sstevel@tonic-gate 	    msiq_id, msiq_head)) != H_EOK) {
9470Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
9480Sstevel@tonic-gate 		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
9490Sstevel@tonic-gate 		return (DDI_FAILURE);
9500Sstevel@tonic-gate 	}
9510Sstevel@tonic-gate 
9520Sstevel@tonic-gate 	return (DDI_SUCCESS);
9530Sstevel@tonic-gate }
9540Sstevel@tonic-gate 
9550Sstevel@tonic-gate /*ARGSUSED*/
9560Sstevel@tonic-gate int
9570Sstevel@tonic-gate px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
9580Sstevel@tonic-gate     msiqtail_t *msiq_tail)
9590Sstevel@tonic-gate {
9600Sstevel@tonic-gate 	uint64_t	ret;
9610Sstevel@tonic-gate 
9620Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
9630Sstevel@tonic-gate 	    dip, msiq_id);
9640Sstevel@tonic-gate 
9650Sstevel@tonic-gate 	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
9660Sstevel@tonic-gate 	    msiq_id, msiq_tail)) != H_EOK) {
9670Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip,
9680Sstevel@tonic-gate 		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
9690Sstevel@tonic-gate 		return (DDI_FAILURE);
9700Sstevel@tonic-gate 	}
9710Sstevel@tonic-gate 
9720Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
9730Sstevel@tonic-gate 	    *msiq_tail);
9740Sstevel@tonic-gate 
9750Sstevel@tonic-gate 	return (DDI_SUCCESS);
9760Sstevel@tonic-gate }
9770Sstevel@tonic-gate 
9780Sstevel@tonic-gate /*ARGSUSED*/
9790Sstevel@tonic-gate void
9802588Segillett px_lib_get_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p,
9812588Segillett     msiq_rec_t *msiq_rec_p)
9820Sstevel@tonic-gate {
9832588Segillett 	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_head_p;
9840Sstevel@tonic-gate 
9850Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
9860Sstevel@tonic-gate 	    dip, eq_rec_p);
9870Sstevel@tonic-gate 
988287Smg140465 	if (!eq_rec_p->eq_rec_fmt_type) {
989287Smg140465 		/* Set msiq_rec_type to zero */
990287Smg140465 		msiq_rec_p->msiq_rec_type = 0;
9910Sstevel@tonic-gate 
9920Sstevel@tonic-gate 		return;
9930Sstevel@tonic-gate 	}
9940Sstevel@tonic-gate 
9950Sstevel@tonic-gate 	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
9960Sstevel@tonic-gate 	    "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
9970Sstevel@tonic-gate 	    "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
9980Sstevel@tonic-gate 	    "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
9990Sstevel@tonic-gate 	    "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
10000Sstevel@tonic-gate 	    eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
10010Sstevel@tonic-gate 	    eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
10020Sstevel@tonic-gate 	    eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);
10030Sstevel@tonic-gate 
10040Sstevel@tonic-gate 	/*
10050Sstevel@tonic-gate 	 * Only the upper 4 bits of eq_rec_fmt_type are used
10060Sstevel@tonic-gate 	 * to identify the EQ record type.
10070Sstevel@tonic-gate 	 */
10080Sstevel@tonic-gate 	switch (eq_rec_p->eq_rec_fmt_type >> 3) {
10090Sstevel@tonic-gate 	case EQ_REC_MSI32:
10100Sstevel@tonic-gate 		msiq_rec_p->msiq_rec_type = MSI32_REC;
10110Sstevel@tonic-gate 
1012225Sess 		msiq_rec_p->msiq_rec_data.msi.msi_data =
1013225Sess 		    eq_rec_p->eq_rec_data0;
10140Sstevel@tonic-gate 		break;
10150Sstevel@tonic-gate 	case EQ_REC_MSI64:
10160Sstevel@tonic-gate 		msiq_rec_p->msiq_rec_type = MSI64_REC;
10170Sstevel@tonic-gate 
1018225Sess 		msiq_rec_p->msiq_rec_data.msi.msi_data =
1019225Sess 		    eq_rec_p->eq_rec_data0;
10200Sstevel@tonic-gate 		break;
10210Sstevel@tonic-gate 	case EQ_REC_MSG:
10220Sstevel@tonic-gate 		msiq_rec_p->msiq_rec_type = MSG_REC;
10230Sstevel@tonic-gate 
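		/* The low 3 bits of eq_rec_fmt_type carry the message routing code. */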
10240Sstevel@tonic-gate 		msiq_rec_p->msiq_rec_data.msg.msg_route =
10250Sstevel@tonic-gate 		    eq_rec_p->eq_rec_fmt_type & 7;
10260Sstevel@tonic-gate 		msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
10270Sstevel@tonic-gate 		msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
10280Sstevel@tonic-gate 		break;
10290Sstevel@tonic-gate 	default:
10300Sstevel@tonic-gate 		cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
1031671Skrishnae 		    "0x%x is an unknown EQ record type",
10320Sstevel@tonic-gate 		    ddi_driver_name(dip), ddi_get_instance(dip),
1033671Skrishnae 		    (int)eq_rec_p->eq_rec_fmt_type);
10340Sstevel@tonic-gate 		break;
10350Sstevel@tonic-gate 	}
10360Sstevel@tonic-gate 
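	/*
	 * Reassemble the MSI/MSG address from the split record fields: as
	 * the shifts below imply, eq_rec_addr1 supplies the bits above
	 * bit 15 and eq_rec_addr0, shifted left by 2, supplies bits 15:2.
	 */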
10370Sstevel@tonic-gate 	msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
10380Sstevel@tonic-gate 	msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
10390Sstevel@tonic-gate 	    (eq_rec_p->eq_rec_addr0 << 2));
10402973Sgovinda }
10412973Sgovinda 
10422973Sgovinda /*ARGSUSED*/
10432973Sgovinda void
10442973Sgovinda px_lib_clr_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p)
10452973Sgovinda {
10462973Sgovinda 	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_head_p;
10472973Sgovinda 
10482973Sgovinda 	DBG(DBG_LIB_MSIQ, dip, "px_lib_clr_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
10492973Sgovinda 	    dip, eq_rec_p);
10502973Sgovinda 
10512973Sgovinda 	if (eq_rec_p->eq_rec_fmt_type) {
10522973Sgovinda 		/* Zero out eq_rec_fmt_type field */
10532973Sgovinda 		eq_rec_p->eq_rec_fmt_type = 0;
10542973Sgovinda 	}
10550Sstevel@tonic-gate }
10560Sstevel@tonic-gate 
10570Sstevel@tonic-gate /*
10580Sstevel@tonic-gate  * MSI Functions:
10590Sstevel@tonic-gate  */
10600Sstevel@tonic-gate /*ARGSUSED*/
10610Sstevel@tonic-gate int
10620Sstevel@tonic-gate px_lib_msi_init(dev_info_t *dip)
10630Sstevel@tonic-gate {
10640Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
10650Sstevel@tonic-gate 	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
10660Sstevel@tonic-gate 	uint64_t	ret;
10670Sstevel@tonic-gate 
10680Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);
10690Sstevel@tonic-gate 
10700Sstevel@tonic-gate 	if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
10710Sstevel@tonic-gate 	    msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
10720Sstevel@tonic-gate 		DBG(DBG_LIB_MSIQ, dip, "px_lib_msi_init failed, ret 0x%lx\n",
10730Sstevel@tonic-gate 		    ret);
10740Sstevel@tonic-gate 		return (DDI_FAILURE);
10750Sstevel@tonic-gate 	}
10760Sstevel@tonic-gate 
10770Sstevel@tonic-gate 	return (DDI_SUCCESS);
10780Sstevel@tonic-gate }
10790Sstevel@tonic-gate 
10800Sstevel@tonic-gate /*ARGSUSED*/
10810Sstevel@tonic-gate int
10820Sstevel@tonic-gate px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
10830Sstevel@tonic-gate     msiqid_t *msiq_id)
10840Sstevel@tonic-gate {
10850Sstevel@tonic-gate 	uint64_t	ret;
10860Sstevel@tonic-gate 
10870Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
10880Sstevel@tonic-gate 	    dip, msi_num);
10890Sstevel@tonic-gate 
10900Sstevel@tonic-gate 	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
10910Sstevel@tonic-gate 	    msi_num, msiq_id)) != H_EOK) {
10920Sstevel@tonic-gate 		DBG(DBG_LIB_MSI, dip,
10930Sstevel@tonic-gate 		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
10940Sstevel@tonic-gate 		return (DDI_FAILURE);
10950Sstevel@tonic-gate 	}
10960Sstevel@tonic-gate 
10970Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
10980Sstevel@tonic-gate 	    *msiq_id);
10990Sstevel@tonic-gate 
11000Sstevel@tonic-gate 	return (DDI_SUCCESS);
11010Sstevel@tonic-gate }
11020Sstevel@tonic-gate 
11030Sstevel@tonic-gate /*ARGSUSED*/
11040Sstevel@tonic-gate int
11050Sstevel@tonic-gate px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
11060Sstevel@tonic-gate     msiqid_t msiq_id, msi_type_t msitype)
11070Sstevel@tonic-gate {
11080Sstevel@tonic-gate 	uint64_t	ret;
11090Sstevel@tonic-gate 
11100Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
11110Sstevel@tonic-gate 	    "msq_id 0x%x\n", dip, msi_num, msiq_id);
11120Sstevel@tonic-gate 
11130Sstevel@tonic-gate 	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
11140Sstevel@tonic-gate 	    msi_num, msiq_id)) != H_EOK) {
11150Sstevel@tonic-gate 		DBG(DBG_LIB_MSI, dip,
11160Sstevel@tonic-gate 		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
11170Sstevel@tonic-gate 		return (DDI_FAILURE);
11180Sstevel@tonic-gate 	}
11190Sstevel@tonic-gate 
11200Sstevel@tonic-gate 	return (DDI_SUCCESS);
11210Sstevel@tonic-gate }
11220Sstevel@tonic-gate 
11230Sstevel@tonic-gate /*ARGSUSED*/
11240Sstevel@tonic-gate int
11250Sstevel@tonic-gate px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
11260Sstevel@tonic-gate     pci_msi_valid_state_t *msi_valid_state)
11270Sstevel@tonic-gate {
11280Sstevel@tonic-gate 	uint64_t	ret;
11290Sstevel@tonic-gate 
11300Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
11310Sstevel@tonic-gate 	    dip, msi_num);
11320Sstevel@tonic-gate 
11330Sstevel@tonic-gate 	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
11340Sstevel@tonic-gate 	    msi_num, msi_valid_state)) != H_EOK) {
11350Sstevel@tonic-gate 		DBG(DBG_LIB_MSI, dip,
11360Sstevel@tonic-gate 		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
11370Sstevel@tonic-gate 		return (DDI_FAILURE);
11380Sstevel@tonic-gate 	}
11390Sstevel@tonic-gate 
11400Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msiq_id 0x%x\n",
11410Sstevel@tonic-gate 	    *msi_valid_state);
11420Sstevel@tonic-gate 
11430Sstevel@tonic-gate 	return (DDI_SUCCESS);
11440Sstevel@tonic-gate }
11450Sstevel@tonic-gate 
11460Sstevel@tonic-gate /*ARGSUSED*/
11470Sstevel@tonic-gate int
11480Sstevel@tonic-gate px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num,
11490Sstevel@tonic-gate     pci_msi_valid_state_t msi_valid_state)
11500Sstevel@tonic-gate {
11510Sstevel@tonic-gate 	uint64_t	ret;
11520Sstevel@tonic-gate 
11530Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
11540Sstevel@tonic-gate 	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);
11550Sstevel@tonic-gate 
11560Sstevel@tonic-gate 	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
11570Sstevel@tonic-gate 	    msi_num, msi_valid_state)) != H_EOK) {
11580Sstevel@tonic-gate 		DBG(DBG_LIB_MSI, dip,
11590Sstevel@tonic-gate 		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
11600Sstevel@tonic-gate 		return (DDI_FAILURE);
11610Sstevel@tonic-gate 	}
11620Sstevel@tonic-gate 
11630Sstevel@tonic-gate 	return (DDI_SUCCESS);
11640Sstevel@tonic-gate }
11650Sstevel@tonic-gate 
11660Sstevel@tonic-gate /*ARGSUSED*/
11670Sstevel@tonic-gate int
11680Sstevel@tonic-gate px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
11690Sstevel@tonic-gate     pci_msi_state_t *msi_state)
11700Sstevel@tonic-gate {
11710Sstevel@tonic-gate 	uint64_t	ret;
11720Sstevel@tonic-gate 
11730Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
11740Sstevel@tonic-gate 	    dip, msi_num);
11750Sstevel@tonic-gate 
11760Sstevel@tonic-gate 	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
11770Sstevel@tonic-gate 	    msi_num, msi_state)) != H_EOK) {
11780Sstevel@tonic-gate 		DBG(DBG_LIB_MSI, dip,
11790Sstevel@tonic-gate 		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
11800Sstevel@tonic-gate 		return (DDI_FAILURE);
11810Sstevel@tonic-gate 	}
11820Sstevel@tonic-gate 
11830Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
11840Sstevel@tonic-gate 	    *msi_state);
11850Sstevel@tonic-gate 
11860Sstevel@tonic-gate 	return (DDI_SUCCESS);
11870Sstevel@tonic-gate }
11880Sstevel@tonic-gate 
11890Sstevel@tonic-gate /*ARGSUSED*/
11900Sstevel@tonic-gate int
11910Sstevel@tonic-gate px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
11920Sstevel@tonic-gate     pci_msi_state_t msi_state)
11930Sstevel@tonic-gate {
11940Sstevel@tonic-gate 	uint64_t	ret;
11950Sstevel@tonic-gate 
11960Sstevel@tonic-gate 	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
11970Sstevel@tonic-gate 	    "msi_state 0x%x\n", dip, msi_num, msi_state);
11980Sstevel@tonic-gate 
11990Sstevel@tonic-gate 	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
12000Sstevel@tonic-gate 	    msi_num, msi_state)) != H_EOK) {
12010Sstevel@tonic-gate 		DBG(DBG_LIB_MSI, dip,
12020Sstevel@tonic-gate 		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
12030Sstevel@tonic-gate 		return (DDI_FAILURE);
12040Sstevel@tonic-gate 	}
12050Sstevel@tonic-gate 
12060Sstevel@tonic-gate 	return (DDI_SUCCESS);
12070Sstevel@tonic-gate }
12080Sstevel@tonic-gate 
12090Sstevel@tonic-gate /*
12100Sstevel@tonic-gate  * MSG Functions:
12110Sstevel@tonic-gate  */
12120Sstevel@tonic-gate /*ARGSUSED*/
12130Sstevel@tonic-gate int
12140Sstevel@tonic-gate px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
12150Sstevel@tonic-gate     msiqid_t *msiq_id)
12160Sstevel@tonic-gate {
12170Sstevel@tonic-gate 	uint64_t	ret;
12180Sstevel@tonic-gate 
12190Sstevel@tonic-gate 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
12200Sstevel@tonic-gate 	    dip, msg_type);
12210Sstevel@tonic-gate 
12220Sstevel@tonic-gate 	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
12230Sstevel@tonic-gate 	    msg_type, msiq_id)) != H_EOK) {
12240Sstevel@tonic-gate 		DBG(DBG_LIB_MSG, dip,
12250Sstevel@tonic-gate 		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
12260Sstevel@tonic-gate 		return (DDI_FAILURE);
12270Sstevel@tonic-gate 	}
12280Sstevel@tonic-gate 
12290Sstevel@tonic-gate 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
12300Sstevel@tonic-gate 	    *msiq_id);
12310Sstevel@tonic-gate 
12320Sstevel@tonic-gate 	return (DDI_SUCCESS);
12330Sstevel@tonic-gate }
12340Sstevel@tonic-gate 
12350Sstevel@tonic-gate /*ARGSUSED*/
12360Sstevel@tonic-gate int
12370Sstevel@tonic-gate px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
12380Sstevel@tonic-gate     msiqid_t msiq_id)
12390Sstevel@tonic-gate {
12400Sstevel@tonic-gate 	uint64_t	ret;
12410Sstevel@tonic-gate 
12420Sstevel@tonic-gate 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
12430Sstevel@tonic-gate 	    "msiq_id 0x%x\n", dip, msg_type, msiq_id);
12440Sstevel@tonic-gate 
12450Sstevel@tonic-gate 	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
12460Sstevel@tonic-gate 	    msg_type, msiq_id)) != H_EOK) {
12470Sstevel@tonic-gate 		DBG(DBG_LIB_MSG, dip,
12480Sstevel@tonic-gate 		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
12490Sstevel@tonic-gate 		return (DDI_FAILURE);
12500Sstevel@tonic-gate 	}
12510Sstevel@tonic-gate 
12520Sstevel@tonic-gate 	return (DDI_SUCCESS);
12530Sstevel@tonic-gate }
12540Sstevel@tonic-gate 
12550Sstevel@tonic-gate /*ARGSUSED*/
12560Sstevel@tonic-gate int
12570Sstevel@tonic-gate px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
12580Sstevel@tonic-gate     pcie_msg_valid_state_t *msg_valid_state)
12590Sstevel@tonic-gate {
12600Sstevel@tonic-gate 	uint64_t	ret;
12610Sstevel@tonic-gate 
12620Sstevel@tonic-gate 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
12630Sstevel@tonic-gate 	    dip, msg_type);
12640Sstevel@tonic-gate 
12650Sstevel@tonic-gate 	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
12660Sstevel@tonic-gate 	    msg_valid_state)) != H_EOK) {
12670Sstevel@tonic-gate 		DBG(DBG_LIB_MSG, dip,
12680Sstevel@tonic-gate 		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
12690Sstevel@tonic-gate 		return (DDI_FAILURE);
12700Sstevel@tonic-gate 	}
12710Sstevel@tonic-gate 
12720Sstevel@tonic-gate 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
12730Sstevel@tonic-gate 	    *msg_valid_state);
12740Sstevel@tonic-gate 
12750Sstevel@tonic-gate 	return (DDI_SUCCESS);
12760Sstevel@tonic-gate }
12770Sstevel@tonic-gate 
12780Sstevel@tonic-gate /*ARGSUSED*/
12790Sstevel@tonic-gate int
12800Sstevel@tonic-gate px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
12810Sstevel@tonic-gate     pcie_msg_valid_state_t msg_valid_state)
12820Sstevel@tonic-gate {
12830Sstevel@tonic-gate 	uint64_t	ret;
12840Sstevel@tonic-gate 
12850Sstevel@tonic-gate 	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
12860Sstevel@tonic-gate 	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);
12870Sstevel@tonic-gate 
12880Sstevel@tonic-gate 	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
12890Sstevel@tonic-gate 	    msg_valid_state)) != H_EOK) {
12900Sstevel@tonic-gate 		DBG(DBG_LIB_MSG, dip,
12910Sstevel@tonic-gate 		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
12920Sstevel@tonic-gate 		return (DDI_FAILURE);
12930Sstevel@tonic-gate 	}
12940Sstevel@tonic-gate 
12950Sstevel@tonic-gate 	return (DDI_SUCCESS);
12960Sstevel@tonic-gate }
12970Sstevel@tonic-gate 
12980Sstevel@tonic-gate /*
12990Sstevel@tonic-gate  * Suspend/Resume Functions:
13000Sstevel@tonic-gate  * Currently unsupported by hypervisor
13010Sstevel@tonic-gate  */
13020Sstevel@tonic-gate int
13030Sstevel@tonic-gate px_lib_suspend(dev_info_t *dip)
13040Sstevel@tonic-gate {
13050Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
13060Sstevel@tonic-gate 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
13071648Sjchu 	px_cb_t		*cb_p = PX2CB(px_p);
13080Sstevel@tonic-gate 	devhandle_t	dev_hdl, xbus_dev_hdl;
13091648Sjchu 	uint64_t	ret = H_EOK;
13100Sstevel@tonic-gate 
13110Sstevel@tonic-gate 	DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);
13120Sstevel@tonic-gate 
131327Sjchu 	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
131427Sjchu 	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
13150Sstevel@tonic-gate 
13161648Sjchu 	if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK)
13171648Sjchu 		goto fail;
13181648Sjchu 
13191648Sjchu 	if (--cb_p->attachcnt == 0) {
13201648Sjchu 		ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p);
13211648Sjchu 		if (ret != H_EOK)
13221648Sjchu 			cb_p->attachcnt++;
13230Sstevel@tonic-gate 	}
13243274Set142600 	pxu_p->cpr_flag = PX_ENTERED_CPR;
13250Sstevel@tonic-gate 
13261648Sjchu fail:
13270Sstevel@tonic-gate 	return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS);
13280Sstevel@tonic-gate }
13290Sstevel@tonic-gate 
13300Sstevel@tonic-gate void
13310Sstevel@tonic-gate px_lib_resume(dev_info_t *dip)
13320Sstevel@tonic-gate {
13330Sstevel@tonic-gate 	px_t		*px_p = DIP_TO_STATE(dip);
13340Sstevel@tonic-gate 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
13351648Sjchu 	px_cb_t		*cb_p = PX2CB(px_p);
13360Sstevel@tonic-gate 	devhandle_t	dev_hdl, xbus_dev_hdl;
13370Sstevel@tonic-gate 	devino_t	pec_ino = px_p->px_inos[PX_INTR_PEC];
13380Sstevel@tonic-gate 	devino_t	xbc_ino = px_p->px_inos[PX_INTR_XBC];
13390Sstevel@tonic-gate 
13400Sstevel@tonic-gate 	DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);
13410Sstevel@tonic-gate 
134227Sjchu 	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
134327Sjchu 	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];
13440Sstevel@tonic-gate 
13451648Sjchu 	if (++cb_p->attachcnt == 1)
13460Sstevel@tonic-gate 		hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);
13470Sstevel@tonic-gate 
13481648Sjchu 	hvio_resume(dev_hdl, pec_ino, pxu_p);
13490Sstevel@tonic-gate }
13500Sstevel@tonic-gate 
13511772Sjl139090 /*
13521772Sjl139090  * Generate a unique Oberon UBC ID based on the Logical System Board and
13531772Sjl139090  * the IO Channel from the portid property field.
13541772Sjl139090  */
13551772Sjl139090 static uint64_t
13561772Sjl139090 oberon_get_ubc_id(dev_info_t *dip)
13571772Sjl139090 {
13581772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
13591772Sjl139090 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
13601772Sjl139090 	uint64_t	ubc_id;
13611772Sjl139090 
13621772Sjl139090 	/*
13631772Sjl139090 	 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and
13641772Sjl139090 	 * the 4 LSB_ID[3:0] bits from the Oberon's portid property.
13651772Sjl139090 	 */
13661772Sjl139090 	ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) &
13671772Sjl139090 	    OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >>
13681772Sjl139090 	    OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK)
13691772Sjl139090 	    << OBERON_UBC_ID_LSB));
13701772Sjl139090 
13711772Sjl139090 	return (ubc_id);
13721772Sjl139090 }
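
/*
 * Illustrative note (added commentary, not from the original source): with
 * the 6-bit layout described above, the UBC ID is presumably composed as
 *
 *	ubc_id[1:0] = portid IO_Channel#[1:0]
 *	ubc_id[5:2] = portid LSB_ID[3:0]
 *
 * assuming OBERON_UBC_ID_LSB == 2; e.g. a hypothetical portid with
 * LSB_ID = 0x3 and IO_Channel# = 0x1 would yield ubc_id == 0xd.
 */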
13731772Sjl139090 
13741772Sjl139090 /*
13751772Sjl139090  * Oberon does not have a UBC scratch register, so allocate an array of
13761772Sjl139090  * scratch registers when needed and use the unique UBC ID as an index.
13771772Sjl139090  * This code could be simplified with a pre-allocated array; the array is
13781772Sjl139090  * currently allocated dynamically because it is only needed by the Oberon.
13791772Sjl139090  */
13801772Sjl139090 static void
13811772Sjl139090 oberon_set_cb(dev_info_t *dip, uint64_t val)
13821772Sjl139090 {
13831772Sjl139090 	uint64_t	ubc_id;
13841772Sjl139090 
13851772Sjl139090 	if (px_oberon_ubc_scratch_regs == NULL)
13861772Sjl139090 		px_oberon_ubc_scratch_regs =
13871772Sjl139090 		    (uint64_t *)kmem_zalloc(sizeof (uint64_t)*
13881772Sjl139090 		    OBERON_UBC_ID_MAX, KM_SLEEP);
13891772Sjl139090 
13901772Sjl139090 	ubc_id = oberon_get_ubc_id(dip);
13911772Sjl139090 
13921772Sjl139090 	px_oberon_ubc_scratch_regs[ubc_id] = val;
13931772Sjl139090 
13941772Sjl139090 	/*
13951772Sjl139090 	 * Check if any scratch registers are still in use. If all scratch
13961772Sjl139090 	 * registers are currently set to zero, then deallocate the scratch
13971772Sjl139090 	 * register array.
13981772Sjl139090 	 */
13991772Sjl139090 	for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) {
14001772Sjl139090 		if (px_oberon_ubc_scratch_regs[ubc_id] != 0)
14011772Sjl139090 			return;
14021772Sjl139090 	}
14031772Sjl139090 
14041772Sjl139090 	/*
14051772Sjl139090 	 * All scratch registers are set to zero so deallocate the scratch
14061772Sjl139090 	 * register array and set the pointer to NULL.
14071772Sjl139090 	 */
14081772Sjl139090 	kmem_free(px_oberon_ubc_scratch_regs,
14091772Sjl139090 	    (sizeof (uint64_t)*OBERON_UBC_ID_MAX));
14101772Sjl139090 
14111772Sjl139090 	px_oberon_ubc_scratch_regs = NULL;
14121772Sjl139090 }
14131772Sjl139090 
14141772Sjl139090 /*
14151772Sjl139090  * Oberon does not have a UBC scratch register, so use an allocated array of
14161772Sjl139090  * scratch registers and use the unique UBC ID as an index into that array.
14171772Sjl139090  */
14181772Sjl139090 static uint64_t
14191772Sjl139090 oberon_get_cb(dev_info_t *dip)
14201772Sjl139090 {
14211772Sjl139090 	uint64_t	ubc_id;
14221772Sjl139090 
14231772Sjl139090 	if (px_oberon_ubc_scratch_regs == NULL)
14241772Sjl139090 		return (0);
14251772Sjl139090 
14261772Sjl139090 	ubc_id = oberon_get_ubc_id(dip);
14271772Sjl139090 
14281772Sjl139090 	return (px_oberon_ubc_scratch_regs[ubc_id]);
14291772Sjl139090 }
14301772Sjl139090 
14311772Sjl139090 /*
14321772Sjl139090  * Misc Functions:
14331772Sjl139090  * Currently unsupported by hypervisor
14341772Sjl139090  */
14351772Sjl139090 static uint64_t
14361772Sjl139090 px_get_cb(dev_info_t *dip)
14371772Sjl139090 {
14381772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
14391772Sjl139090 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
14401772Sjl139090 
14411772Sjl139090 	/*
14421772Sjl139090 	 * Oberon does not currently have Scratchpad registers.
14431772Sjl139090 	 */
14441772Sjl139090 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
14451772Sjl139090 		return (oberon_get_cb(dip));
14461772Sjl139090 
14471772Sjl139090 	return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1));
14481772Sjl139090 }
14491772Sjl139090 
14501772Sjl139090 static void
14511772Sjl139090 px_set_cb(dev_info_t *dip, uint64_t val)
14521772Sjl139090 {
14531772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
14541772Sjl139090 	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
14551772Sjl139090 
14561772Sjl139090 	/*
14571772Sjl139090 	 * Oberon does not currently have Scratchpad registers.
14581772Sjl139090 	 */
14591772Sjl139090 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
14601772Sjl139090 		oberon_set_cb(dip, val);
14611772Sjl139090 		return;
14621772Sjl139090 	}
14631772Sjl139090 
14641772Sjl139090 	CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val);
14651772Sjl139090 }
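
/*
 * Added commentary (not from the original source): px_get_cb()/px_set_cb()
 * stash the shared px_cb_t pointer either in the JBUS_SCRATCH_1 hardware
 * register (Fire) or in the software scratch array above (Oberon), so every
 * px leaf hanging off the same JBC/UBC block can locate the common control
 * block from px_cb_add_intr()/px_cb_rem_intr() further below.
 */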
14661772Sjl139090 
14670Sstevel@tonic-gate /*ARGSUSED*/
14680Sstevel@tonic-gate int
14690Sstevel@tonic-gate px_lib_map_vconfig(dev_info_t *dip,
14700Sstevel@tonic-gate 	ddi_map_req_t *mp, pci_config_offset_t off,
14710Sstevel@tonic-gate 		pci_regspec_t *rp, caddr_t *addrp)
14720Sstevel@tonic-gate {
14730Sstevel@tonic-gate 	/*
14740Sstevel@tonic-gate 	 * No special config space access services in this layer.
14750Sstevel@tonic-gate 	 */
14760Sstevel@tonic-gate 	return (DDI_FAILURE);
14770Sstevel@tonic-gate }
14780Sstevel@tonic-gate 
1479624Sschwartz void
1480677Sjchu px_lib_map_attr_check(ddi_map_req_t *mp)
1481677Sjchu {
1482677Sjchu 	ddi_acc_hdl_t *hp = mp->map_handlep;
1483677Sjchu 
1484677Sjchu 	/* fire does not accept byte masks from PIO store merge */
1485677Sjchu 	if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC)
1486677Sjchu 		hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1487677Sjchu }
1488677Sjchu 
14893274Set142600 /* This function is called only by poke, caut put and pxtool poke. */
1490677Sjchu void
14913274Set142600 px_lib_clr_errs(px_t *px_p, dev_info_t *rdip, uint64_t addr)
149227Sjchu {
1493624Sschwartz 	px_pec_t	*pec_p = px_p->px_pec_p;
149427Sjchu 	dev_info_t	*rpdip = px_p->px_dip;
14953274Set142600 	int		rc_err, fab_err, i;
149627Sjchu 	int		acctype = pec_p->pec_safeacc_type;
149727Sjchu 	ddi_fm_error_t	derr;
1498*10923SEvan.Yan@Sun.COM 	pci_ranges_t	*ranges_p;
14993274Set142600 	int		range_len;
15003274Set142600 	uint32_t	addr_high, addr_low;
15019921SKrishna.Elango@Sun.COM 	pcie_req_id_t	bdf = PCIE_INVALID_BDF;
150227Sjchu 
150327Sjchu 	/* Create the derr */
150427Sjchu 	bzero(&derr, sizeof (ddi_fm_error_t));
150527Sjchu 	derr.fme_version = DDI_FME_VERSION;
150627Sjchu 	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
150727Sjchu 	derr.fme_flag = acctype;
150827Sjchu 
150927Sjchu 	if (acctype == DDI_FM_ERR_EXPECTED) {
151027Sjchu 		derr.fme_status = DDI_FM_NONFATAL;
151127Sjchu 		ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr);
151227Sjchu 	}
151327Sjchu 
15146313Skrishnae 	if (px_fm_enter(px_p) != DDI_SUCCESS)
15156313Skrishnae 		return;
151627Sjchu 
151727Sjchu 	/* send ereport/handle/clear fire registers */
15183274Set142600 	rc_err = px_err_cmn_intr(px_p, &derr, PX_LIB_CALL, PX_FM_BLOCK_ALL);
15193274Set142600 
15203274Set142600 	/* Figure out if this is a cfg or mem32 access */
15213274Set142600 	addr_high = (uint32_t)(addr >> 32);
15223274Set142600 	addr_low = (uint32_t)addr;
1523*10923SEvan.Yan@Sun.COM 	range_len = px_p->px_ranges_length / sizeof (pci_ranges_t);
15243274Set142600 	i = 0;
15253274Set142600 	for (ranges_p = px_p->px_ranges_p; i < range_len; i++, ranges_p++) {
15263274Set142600 		if (ranges_p->parent_high == addr_high) {
15273274Set142600 			switch (ranges_p->child_high & PCI_ADDR_MASK) {
15283274Set142600 			case PCI_ADDR_CONFIG:
15293274Set142600 				bdf = (pcie_req_id_t)(addr_low >> 12);
15303274Set142600 				addr_low = 0;
15313274Set142600 				break;
15323274Set142600 			case PCI_ADDR_MEM32:
15333274Set142600 				if (rdip)
15346313Skrishnae 					bdf = PCI_GET_BDF(rdip);
15353274Set142600 				else
15369921SKrishna.Elango@Sun.COM 					bdf = PCIE_INVALID_BDF;
15373274Set142600 				break;
15383274Set142600 			}
15393274Set142600 			break;
15403274Set142600 		}
15413274Set142600 	}
15423274Set142600 
15433274Set142600 	px_rp_en_q(px_p, bdf, addr_low, NULL);
15443274Set142600 
15453274Set142600 	/*
15463274Set142600 	 * XXX - Current code scans the fabric for all px_tool accesses.
15473274Set142600 	 * In future, do not scan fabric for px_tool access to IO Root Nexus
15483274Set142600 	 */
15496313Skrishnae 	fab_err = px_scan_fabric(px_p, rpdip, &derr);
15506313Skrishnae 
15516313Skrishnae 	px_err_panic(rc_err, PX_RC, fab_err, B_TRUE);
15526313Skrishnae 	px_fm_exit(px_p);
15536313Skrishnae 	px_err_panic(rc_err, PX_RC, fab_err, B_FALSE);
155427Sjchu }
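
/*
 * Illustrative note (added commentary, not from the original source): for a
 * config-space access the faulting address is the config base plus an
 * ECAM-style offset of the form (bdf << 12) | reg, which is why
 * "addr_low >> 12" above recovers the requester BDF.  For example, a
 * hypothetical fault at config offset 0x0820010 decodes to bdf 0x0820,
 * i.e. bus 8, device 4, function 0, register offset 0x10.
 */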
155527Sjchu 
15560Sstevel@tonic-gate #ifdef  DEBUG
15570Sstevel@tonic-gate int	px_peekfault_cnt = 0;
15580Sstevel@tonic-gate int	px_pokefault_cnt = 0;
15590Sstevel@tonic-gate #endif  /* DEBUG */
15600Sstevel@tonic-gate 
15610Sstevel@tonic-gate /*ARGSUSED*/
15620Sstevel@tonic-gate static int
15630Sstevel@tonic-gate px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
15640Sstevel@tonic-gate     peekpoke_ctlops_t *in_args)
15650Sstevel@tonic-gate {
15660Sstevel@tonic-gate 	px_t *px_p = DIP_TO_STATE(dip);
15670Sstevel@tonic-gate 	px_pec_t *pec_p = px_p->px_pec_p;
15680Sstevel@tonic-gate 	int err = DDI_SUCCESS;
15690Sstevel@tonic-gate 	on_trap_data_t otd;
15700Sstevel@tonic-gate 
15710Sstevel@tonic-gate 	mutex_enter(&pec_p->pec_pokefault_mutex);
15720Sstevel@tonic-gate 	pec_p->pec_ontrap_data = &otd;
157327Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;
15740Sstevel@tonic-gate 
15750Sstevel@tonic-gate 	/* Set up protected environment. */
15760Sstevel@tonic-gate 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
15770Sstevel@tonic-gate 		uintptr_t tramp = otd.ot_trampoline;
15780Sstevel@tonic-gate 
15790Sstevel@tonic-gate 		otd.ot_trampoline = (uintptr_t)&poke_fault;
15800Sstevel@tonic-gate 		err = do_poke(in_args->size, (void *)in_args->dev_addr,
15810Sstevel@tonic-gate 		    (void *)in_args->host_addr);
15820Sstevel@tonic-gate 		otd.ot_trampoline = tramp;
15830Sstevel@tonic-gate 	} else
15840Sstevel@tonic-gate 		err = DDI_FAILURE;
15850Sstevel@tonic-gate 
15863274Set142600 	px_lib_clr_errs(px_p, rdip, in_args->dev_addr);
158727Sjchu 
15880Sstevel@tonic-gate 	if (otd.ot_trap & OT_DATA_ACCESS)
15890Sstevel@tonic-gate 		err = DDI_FAILURE;
15900Sstevel@tonic-gate 
15910Sstevel@tonic-gate 	/* Take down protected environment. */
15920Sstevel@tonic-gate 	no_trap();
15930Sstevel@tonic-gate 
15940Sstevel@tonic-gate 	pec_p->pec_ontrap_data = NULL;
159527Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
15960Sstevel@tonic-gate 	mutex_exit(&pec_p->pec_pokefault_mutex);
15970Sstevel@tonic-gate 
15980Sstevel@tonic-gate #ifdef  DEBUG
15990Sstevel@tonic-gate 	if (err == DDI_FAILURE)
16000Sstevel@tonic-gate 		px_pokefault_cnt++;
16010Sstevel@tonic-gate #endif
16020Sstevel@tonic-gate 	return (err);
16030Sstevel@tonic-gate }
16040Sstevel@tonic-gate 
16050Sstevel@tonic-gate /*ARGSUSED*/
16060Sstevel@tonic-gate static int
16070Sstevel@tonic-gate px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
16080Sstevel@tonic-gate     peekpoke_ctlops_t *cautacc_ctlops_arg)
16090Sstevel@tonic-gate {
16100Sstevel@tonic-gate 	size_t size = cautacc_ctlops_arg->size;
16110Sstevel@tonic-gate 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
16120Sstevel@tonic-gate 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
16130Sstevel@tonic-gate 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
16140Sstevel@tonic-gate 	size_t repcount = cautacc_ctlops_arg->repcount;
16150Sstevel@tonic-gate 	uint_t flags = cautacc_ctlops_arg->flags;
16160Sstevel@tonic-gate 
16170Sstevel@tonic-gate 	px_t *px_p = DIP_TO_STATE(dip);
16180Sstevel@tonic-gate 	px_pec_t *pec_p = px_p->px_pec_p;
16190Sstevel@tonic-gate 	int err = DDI_SUCCESS;
16200Sstevel@tonic-gate 
162127Sjchu 	/*
162227Sjchu 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
162327Sjchu 	 * mutex.
162427Sjchu 	 */
16250Sstevel@tonic-gate 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
16260Sstevel@tonic-gate 
162727Sjchu 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
162827Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
162927Sjchu 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
16300Sstevel@tonic-gate 
16310Sstevel@tonic-gate 	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
16320Sstevel@tonic-gate 		for (; repcount; repcount--) {
16330Sstevel@tonic-gate 			switch (size) {
16340Sstevel@tonic-gate 
16350Sstevel@tonic-gate 			case sizeof (uint8_t):
16360Sstevel@tonic-gate 				i_ddi_put8(hp, (uint8_t *)dev_addr,
16370Sstevel@tonic-gate 				    *(uint8_t *)host_addr);
16380Sstevel@tonic-gate 				break;
16390Sstevel@tonic-gate 
16400Sstevel@tonic-gate 			case sizeof (uint16_t):
16410Sstevel@tonic-gate 				i_ddi_put16(hp, (uint16_t *)dev_addr,
16420Sstevel@tonic-gate 				    *(uint16_t *)host_addr);
16430Sstevel@tonic-gate 				break;
16440Sstevel@tonic-gate 
16450Sstevel@tonic-gate 			case sizeof (uint32_t):
16460Sstevel@tonic-gate 				i_ddi_put32(hp, (uint32_t *)dev_addr,
16470Sstevel@tonic-gate 				    *(uint32_t *)host_addr);
16480Sstevel@tonic-gate 				break;
16490Sstevel@tonic-gate 
16500Sstevel@tonic-gate 			case sizeof (uint64_t):
16510Sstevel@tonic-gate 				i_ddi_put64(hp, (uint64_t *)dev_addr,
16520Sstevel@tonic-gate 				    *(uint64_t *)host_addr);
16530Sstevel@tonic-gate 				break;
16540Sstevel@tonic-gate 			}
16550Sstevel@tonic-gate 
16560Sstevel@tonic-gate 			host_addr += size;
16570Sstevel@tonic-gate 
16580Sstevel@tonic-gate 			if (flags == DDI_DEV_AUTOINCR)
16590Sstevel@tonic-gate 				dev_addr += size;
16600Sstevel@tonic-gate 
16613274Set142600 			px_lib_clr_errs(px_p, rdip, dev_addr);
166227Sjchu 
16630Sstevel@tonic-gate 			if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) {
16640Sstevel@tonic-gate 				err = DDI_FAILURE;
16650Sstevel@tonic-gate #ifdef  DEBUG
16660Sstevel@tonic-gate 				px_pokefault_cnt++;
16670Sstevel@tonic-gate #endif
16680Sstevel@tonic-gate 				break;
16690Sstevel@tonic-gate 			}
16700Sstevel@tonic-gate 		}
16710Sstevel@tonic-gate 	}
16720Sstevel@tonic-gate 
16730Sstevel@tonic-gate 	i_ddi_notrap((ddi_acc_handle_t)hp);
16740Sstevel@tonic-gate 	pec_p->pec_ontrap_data = NULL;
167527Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
16760Sstevel@tonic-gate 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
16770Sstevel@tonic-gate 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
16780Sstevel@tonic-gate 
16790Sstevel@tonic-gate 	return (err);
16800Sstevel@tonic-gate }
16810Sstevel@tonic-gate 
16820Sstevel@tonic-gate 
16830Sstevel@tonic-gate int
16840Sstevel@tonic-gate px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
16850Sstevel@tonic-gate     peekpoke_ctlops_t *in_args)
16860Sstevel@tonic-gate {
16870Sstevel@tonic-gate 	return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
16880Sstevel@tonic-gate 	    px_lib_do_poke(dip, rdip, in_args));
16890Sstevel@tonic-gate }
16900Sstevel@tonic-gate 
16910Sstevel@tonic-gate 
16920Sstevel@tonic-gate /*ARGSUSED*/
16930Sstevel@tonic-gate static int
16940Sstevel@tonic-gate px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
16950Sstevel@tonic-gate {
169627Sjchu 	px_t *px_p = DIP_TO_STATE(dip);
169727Sjchu 	px_pec_t *pec_p = px_p->px_pec_p;
16980Sstevel@tonic-gate 	int err = DDI_SUCCESS;
16990Sstevel@tonic-gate 	on_trap_data_t otd;
17000Sstevel@tonic-gate 
170127Sjchu 	mutex_enter(&pec_p->pec_pokefault_mutex);
17026313Skrishnae 	if (px_fm_enter(px_p) != DDI_SUCCESS)
17036313Skrishnae 		return (DDI_FAILURE);
170427Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;
17056313Skrishnae 	px_fm_exit(px_p);
170627Sjchu 
17070Sstevel@tonic-gate 	if (!on_trap(&otd, OT_DATA_ACCESS)) {
17080Sstevel@tonic-gate 		uintptr_t tramp = otd.ot_trampoline;
17090Sstevel@tonic-gate 
17100Sstevel@tonic-gate 		otd.ot_trampoline = (uintptr_t)&peek_fault;
17110Sstevel@tonic-gate 		err = do_peek(in_args->size, (void *)in_args->dev_addr,
17120Sstevel@tonic-gate 		    (void *)in_args->host_addr);
17130Sstevel@tonic-gate 		otd.ot_trampoline = tramp;
17140Sstevel@tonic-gate 	} else
17150Sstevel@tonic-gate 		err = DDI_FAILURE;
17160Sstevel@tonic-gate 
17170Sstevel@tonic-gate 	no_trap();
171827Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
171927Sjchu 	mutex_exit(&pec_p->pec_pokefault_mutex);
17200Sstevel@tonic-gate 
17210Sstevel@tonic-gate #ifdef  DEBUG
17220Sstevel@tonic-gate 	if (err == DDI_FAILURE)
17230Sstevel@tonic-gate 		px_peekfault_cnt++;
17240Sstevel@tonic-gate #endif
17250Sstevel@tonic-gate 	return (err);
17260Sstevel@tonic-gate }
17270Sstevel@tonic-gate 
17280Sstevel@tonic-gate 
17290Sstevel@tonic-gate static int
17300Sstevel@tonic-gate px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
17310Sstevel@tonic-gate {
17320Sstevel@tonic-gate 	size_t size = cautacc_ctlops_arg->size;
17330Sstevel@tonic-gate 	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
17340Sstevel@tonic-gate 	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
17350Sstevel@tonic-gate 	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
17360Sstevel@tonic-gate 	size_t repcount = cautacc_ctlops_arg->repcount;
17370Sstevel@tonic-gate 	uint_t flags = cautacc_ctlops_arg->flags;
17380Sstevel@tonic-gate 
17390Sstevel@tonic-gate 	px_t *px_p = DIP_TO_STATE(dip);
17400Sstevel@tonic-gate 	px_pec_t *pec_p = px_p->px_pec_p;
17410Sstevel@tonic-gate 	int err = DDI_SUCCESS;
17420Sstevel@tonic-gate 
174327Sjchu 	/*
174427Sjchu 	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
174527Sjchu 	 * mutex.
174627Sjchu 	 */
174727Sjchu 	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
174827Sjchu 
174927Sjchu 	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
175027Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
17510Sstevel@tonic-gate 	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
17520Sstevel@tonic-gate 
17530Sstevel@tonic-gate 	if (repcount == 1) {
17540Sstevel@tonic-gate 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
17550Sstevel@tonic-gate 			i_ddi_caut_get(size, (void *)dev_addr,
17560Sstevel@tonic-gate 			    (void *)host_addr);
17570Sstevel@tonic-gate 		} else {
17580Sstevel@tonic-gate 			int i;
17590Sstevel@tonic-gate 			uint8_t *ff_addr = (uint8_t *)host_addr;
17600Sstevel@tonic-gate 			for (i = 0; i < size; i++)
17610Sstevel@tonic-gate 				*ff_addr++ = 0xff;
17620Sstevel@tonic-gate 
17630Sstevel@tonic-gate 			err = DDI_FAILURE;
17640Sstevel@tonic-gate #ifdef  DEBUG
17650Sstevel@tonic-gate 			px_peekfault_cnt++;
17660Sstevel@tonic-gate #endif
17670Sstevel@tonic-gate 		}
17680Sstevel@tonic-gate 	} else {
17690Sstevel@tonic-gate 		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
17700Sstevel@tonic-gate 			for (; repcount; repcount--) {
17710Sstevel@tonic-gate 				i_ddi_caut_get(size, (void *)dev_addr,
17720Sstevel@tonic-gate 				    (void *)host_addr);
17730Sstevel@tonic-gate 
17740Sstevel@tonic-gate 				host_addr += size;
17750Sstevel@tonic-gate 
17760Sstevel@tonic-gate 				if (flags == DDI_DEV_AUTOINCR)
17770Sstevel@tonic-gate 					dev_addr += size;
17780Sstevel@tonic-gate 			}
17790Sstevel@tonic-gate 		} else {
17800Sstevel@tonic-gate 			err = DDI_FAILURE;
17810Sstevel@tonic-gate #ifdef  DEBUG
17820Sstevel@tonic-gate 			px_peekfault_cnt++;
17830Sstevel@tonic-gate #endif
17840Sstevel@tonic-gate 		}
17850Sstevel@tonic-gate 	}
17860Sstevel@tonic-gate 
17870Sstevel@tonic-gate 	i_ddi_notrap((ddi_acc_handle_t)hp);
17880Sstevel@tonic-gate 	pec_p->pec_ontrap_data = NULL;
178927Sjchu 	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
17900Sstevel@tonic-gate 	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
17910Sstevel@tonic-gate 	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;
17920Sstevel@tonic-gate 
17930Sstevel@tonic-gate 	return (err);
17940Sstevel@tonic-gate }
17950Sstevel@tonic-gate 
17960Sstevel@tonic-gate /*ARGSUSED*/
17970Sstevel@tonic-gate int
17980Sstevel@tonic-gate px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
17990Sstevel@tonic-gate     peekpoke_ctlops_t *in_args, void *result)
18000Sstevel@tonic-gate {
18010Sstevel@tonic-gate 	result = (void *)in_args->host_addr;
18020Sstevel@tonic-gate 	return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
18030Sstevel@tonic-gate 	    px_lib_do_peek(dip, in_args));
18040Sstevel@tonic-gate }
1805118Sjchu 
18060Sstevel@tonic-gate /*
18070Sstevel@tonic-gate  * implements PPM interface
18080Sstevel@tonic-gate  */
18090Sstevel@tonic-gate int
18100Sstevel@tonic-gate px_lib_pmctl(int cmd, px_t *px_p)
18110Sstevel@tonic-gate {
18120Sstevel@tonic-gate 	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
18130Sstevel@tonic-gate 	switch (cmd) {
18140Sstevel@tonic-gate 	case PPMREQ_PRE_PWR_OFF:
18150Sstevel@tonic-gate 		/*
18160Sstevel@tonic-gate 		 * Currently there is no device power management for
18170Sstevel@tonic-gate 		 * the root complex (fire). When there is, we need to make
18180Sstevel@tonic-gate 		 * sure that it is at full power before trying to send the
18190Sstevel@tonic-gate 		 * PME_Turn_Off message.
18200Sstevel@tonic-gate 		 */
18210Sstevel@tonic-gate 		DBG(DBG_PWR, px_p->px_dip,
18220Sstevel@tonic-gate 		    "ioctl: request to send PME_Turn_Off\n");
18230Sstevel@tonic-gate 		return (px_goto_l23ready(px_p));
18240Sstevel@tonic-gate 
18250Sstevel@tonic-gate 	case PPMREQ_PRE_PWR_ON:
1826118Sjchu 		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
1827118Sjchu 		return (px_pre_pwron_check(px_p));
1828118Sjchu 
18290Sstevel@tonic-gate 	case PPMREQ_POST_PWR_ON:
1830118Sjchu 		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
1831118Sjchu 		return (px_goto_l0(px_p));
18320Sstevel@tonic-gate 
18330Sstevel@tonic-gate 	default:
18340Sstevel@tonic-gate 		return (DDI_FAILURE);
18350Sstevel@tonic-gate 	}
18360Sstevel@tonic-gate }
18370Sstevel@tonic-gate 
18380Sstevel@tonic-gate /*
18390Sstevel@tonic-gate  * Sends a PME_Turn_Off message to put the link in the L2/L3 ready state.
18400Sstevel@tonic-gate  * Called by px_ioctl.
18410Sstevel@tonic-gate  * Returns DDI_SUCCESS or DDI_FAILURE.
18420Sstevel@tonic-gate  * 1. Wait for the link to be in the L1 state (link status reg).
18430Sstevel@tonic-gate  * 2. Write to the PME_Turn_Off reg to broadcast the message.
18440Sstevel@tonic-gate  * 3. Set a timeout.
18450Sstevel@tonic-gate  * 4. If it times out, return failure.
18460Sstevel@tonic-gate  * 5. If PME_TO_Ack is received, wait till the link is in L2/L3 ready.
18470Sstevel@tonic-gate  */
18480Sstevel@tonic-gate static int
18490Sstevel@tonic-gate px_goto_l23ready(px_t *px_p)
18500Sstevel@tonic-gate {
18510Sstevel@tonic-gate 	pcie_pwr_t	*pwr_p;
185227Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
185327Sjchu 	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
18540Sstevel@tonic-gate 	int		ret = DDI_SUCCESS;
18550Sstevel@tonic-gate 	clock_t		end, timeleft;
1856118Sjchu 	int		mutex_held = 1;
18570Sstevel@tonic-gate 
18580Sstevel@tonic-gate 	/* If no PM info, return failure */
18590Sstevel@tonic-gate 	if (!PCIE_PMINFO(px_p->px_dip) ||
18600Sstevel@tonic-gate 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
18610Sstevel@tonic-gate 		return (DDI_FAILURE);
18620Sstevel@tonic-gate 
18630Sstevel@tonic-gate 	mutex_enter(&pwr_p->pwr_lock);
1864118Sjchu 	mutex_enter(&px_p->px_l23ready_lock);
18650Sstevel@tonic-gate 	/* Clear the PME_To_ACK received flag */
1866118Sjchu 	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
1867287Smg140465 	/*
1868287Smg140465 	 * When P25 is the downstream device, after receiving
1869287Smg140465 	 * PME_To_ACK, fire will go to Detect state, which causes
1870287Smg140465 	 * the link down event. Inform FMA that this is expected.
1871287Smg140465 	 * For all other cards compliant with the PCI Express
1872287Smg140465 	 * spec, this will happen when the power is re-applied. FMA
1873287Smg140465 	 * code will clear this flag after one instance of LDN. Since
1874287Smg140465 	 * there will not be an LDN event for the spec-compliant cards,
1875287Smg140465 	 * we need to clear the flag after receiving PME_To_ACK.
1876287Smg140465 	 */
1877287Smg140465 	px_p->px_pm_flags |= PX_LDN_EXPECTED;
18780Sstevel@tonic-gate 	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
18790Sstevel@tonic-gate 		ret = DDI_FAILURE;
18800Sstevel@tonic-gate 		goto l23ready_done;
18810Sstevel@tonic-gate 	}
1882118Sjchu 	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;
18830Sstevel@tonic-gate 
18840Sstevel@tonic-gate 	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
1885118Sjchu 	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
1886118Sjchu 		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
1887118Sjchu 		    &px_p->px_l23ready_lock, end);
18880Sstevel@tonic-gate 		/*
18890Sstevel@tonic-gate 		 * If cv_timedwait returns -1, then either
18900Sstevel@tonic-gate 		 * 1) it timed out, or
18910Sstevel@tonic-gate 		 * 2) there was a premature wakeup, but by the time
18920Sstevel@tonic-gate 		 * cv_timedwait is called again end < lbolt, i.e.
18930Sstevel@tonic-gate 		 * end is in the past, or
18940Sstevel@tonic-gate 		 * 3) by the time we make the first cv_timedwait call,
18950Sstevel@tonic-gate 		 * end < lbolt is already true.
18960Sstevel@tonic-gate 		 */
18970Sstevel@tonic-gate 		if (timeleft == -1)
18980Sstevel@tonic-gate 			break;
18990Sstevel@tonic-gate 	}
1900118Sjchu 	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
19010Sstevel@tonic-gate 		/*
19020Sstevel@tonic-gate 		 * Either we timed out or the interrupt didn't get a
19030Sstevel@tonic-gate 		 * chance to grab the mutex and set the flag.
19040Sstevel@tonic-gate 		 * Release the mutex and delay for some time.
19050Sstevel@tonic-gate 		 * This will 1) give the interrupt a chance to
19060Sstevel@tonic-gate 		 * set the flag and 2) create a delay between two
19070Sstevel@tonic-gate 		 * consecutive requests.
19080Sstevel@tonic-gate 		 */
1909118Sjchu 		mutex_exit(&px_p->px_l23ready_lock);
19101147Sjchu 		delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
1911118Sjchu 		mutex_held = 0;
1912118Sjchu 		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
19130Sstevel@tonic-gate 			ret = DDI_FAILURE;
19140Sstevel@tonic-gate 			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
19150Sstevel@tonic-gate 			    " for PME_TO_ACK\n");
19160Sstevel@tonic-gate 		}
19170Sstevel@tonic-gate 	}
1918287Smg140465 	px_p->px_pm_flags &=
1919287Smg140465 	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);
19200Sstevel@tonic-gate 
19210Sstevel@tonic-gate l23ready_done:
1922118Sjchu 	if (mutex_held)
1923118Sjchu 		mutex_exit(&px_p->px_l23ready_lock);
1924118Sjchu 	/*
1925118Sjchu 	 * Wait till link is in L1 idle, if sending PME_Turn_Off
1926118Sjchu 	 * was successful.
1927118Sjchu 	 */
1928118Sjchu 	if (ret == DDI_SUCCESS) {
1929118Sjchu 		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
1930118Sjchu 			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
1931287Smg140465 			    " even though we received PME_To_ACK.\n");
1932287Smg140465 			/*
1933287Smg140465 			 * Workaround for hardware bug with P25.
1934287Smg140465 			 * Due to a hardware bug with P25, link state
1935287Smg140465 			 * will be Detect state rather than L1 after
1936287Smg140465 			 * link is transitioned to L23Ready state. Since
1937287Smg140465 			 * we cannot tell whether the link is in L23Ready
1938287Smg140465 			 * without Fire's state being L1_idle, we delay
1939287Smg140465 			 * here just to make sure that the link has
1940287Smg140465 			 * transitioned to the L23Ready state.
1941287Smg140465 			 */
19421147Sjchu 			delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
1943287Smg140465 		}
1944287Smg140465 		pwr_p->pwr_link_lvl = PM_LEVEL_L3;
1945118Sjchu 
1946118Sjchu 	}
19470Sstevel@tonic-gate 	mutex_exit(&pwr_p->pwr_lock);
19480Sstevel@tonic-gate 	return (ret);
19490Sstevel@tonic-gate }
19500Sstevel@tonic-gate 
1951118Sjchu /*
1952118Sjchu  * Message interrupt handler intended to be shared for both
1953118Sjchu  * PME and PME_TO_ACK msg handling; currently it only handles
1954118Sjchu  * the PME_To_ACK message.
1955118Sjchu  */
1956118Sjchu uint_t
1957118Sjchu px_pmeq_intr(caddr_t arg)
1958118Sjchu {
1959118Sjchu 	px_t	*px_p = (px_t *)arg;
1960118Sjchu 
1961287Smg140465 	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
1962118Sjchu 	mutex_enter(&px_p->px_l23ready_lock);
1963118Sjchu 	cv_broadcast(&px_p->px_l23ready_cv);
1964118Sjchu 	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
1965118Sjchu 		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
1966118Sjchu 	} else {
1967118Sjchu 		/*
1968118Sjchu 		 * This may be the second ack received. If so, then
1969118Sjchu 		 * we should be receiving it during the wait4L1 stage.
1970118Sjchu 		 */
1971118Sjchu 		px_p->px_pmetoack_ignored++;
1972118Sjchu 	}
1973118Sjchu 	mutex_exit(&px_p->px_l23ready_lock);
1974118Sjchu 	return (DDI_INTR_CLAIMED);
1975118Sjchu }
1976118Sjchu 
1977118Sjchu static int
1978118Sjchu px_pre_pwron_check(px_t *px_p)
1979118Sjchu {
1980118Sjchu 	pcie_pwr_t	*pwr_p;
1981118Sjchu 
1982118Sjchu 	/* If no PM info, return failure */
1983118Sjchu 	if (!PCIE_PMINFO(px_p->px_dip) ||
1984118Sjchu 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
1985118Sjchu 		return (DDI_FAILURE);
1986118Sjchu 
1987287Smg140465 	/*
1988287Smg140465 	 * For spec-compliant downstream cards, link down
1989287Smg140465 	 * is expected when the device is powered on.
1990287Smg140465 	 */
1991287Smg140465 	px_p->px_pm_flags |= PX_LDN_EXPECTED;
1992118Sjchu 	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
1993118Sjchu }
1994118Sjchu 
1995118Sjchu static int
1996118Sjchu px_goto_l0(px_t *px_p)
1997118Sjchu {
1998118Sjchu 	pcie_pwr_t	*pwr_p;
1999118Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2000118Sjchu 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2001118Sjchu 	int		ret = DDI_SUCCESS;
20021147Sjchu 	uint64_t	time_spent = 0;
2003118Sjchu 
2004118Sjchu 	/* If no PM info, return failure */
2005118Sjchu 	if (!PCIE_PMINFO(px_p->px_dip) ||
2006118Sjchu 	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
2007118Sjchu 		return (DDI_FAILURE);
2008118Sjchu 
2009118Sjchu 	mutex_enter(&pwr_p->pwr_lock);
2010287Smg140465 	/*
20111147Sjchu 	 * The following link retrain activity will cause LDN and LUP events.
20121147Sjchu 	 * Receiving an LDN prior to receiving an LUP is expected, not an error,
20131147Sjchu 	 * in this case.  Receiving an LUP indicates the link is fully up and
20141147Sjchu 	 * can support powering up the downstream device; any further LDN or
20151147Sjchu 	 * LUP outside this context is an error.
2016287Smg140465 	 */
20171147Sjchu 	px_p->px_lup_pending = 1;
2018118Sjchu 	if (px_link_retrain(csr_base) != DDI_SUCCESS) {
2019118Sjchu 		ret = DDI_FAILURE;
2020118Sjchu 		goto l0_done;
2021118Sjchu 	}
2022118Sjchu 
20231147Sjchu 	/* The LUP event takes on the order of 15ms to occur */
20241147Sjchu 	for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to);
20251147Sjchu 	    time_spent += px_lup_poll_interval)
20261147Sjchu 		drv_usecwait(px_lup_poll_interval);
20271147Sjchu 	if (px_p->px_lup_pending)
20281147Sjchu 		ret = DDI_FAILURE;
2029118Sjchu l0_done:
2030287Smg140465 	px_enable_detect_quiet(csr_base);
2031118Sjchu 	if (ret == DDI_SUCCESS)
2032287Smg140465 		pwr_p->pwr_link_lvl = PM_LEVEL_L0;
2033118Sjchu 	mutex_exit(&pwr_p->pwr_lock);
2034118Sjchu 	return (ret);
2035118Sjchu }
2036118Sjchu 
20370Sstevel@tonic-gate /*
20380Sstevel@tonic-gate  * Extract the driver's binding name to identify which chip we're binding to.
20390Sstevel@tonic-gate  * Whenever a new bus bridge is created, the driver alias entry should be
20400Sstevel@tonic-gate  * added here to identify the device if needed.  If a device isn't added,
20410Sstevel@tonic-gate  * the identity defaults to PX_CHIP_UNIDENTIFIED.
20420Sstevel@tonic-gate  */
20430Sstevel@tonic-gate static uint32_t
20442426Sschwartz px_identity_init(px_t *px_p)
20450Sstevel@tonic-gate {
20460Sstevel@tonic-gate 	dev_info_t	*dip = px_p->px_dip;
20470Sstevel@tonic-gate 	char		*name = ddi_binding_name(dip);
20480Sstevel@tonic-gate 	uint32_t	revision = 0;
20490Sstevel@tonic-gate 
20500Sstevel@tonic-gate 	revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
20510Sstevel@tonic-gate 	    "module-revision#", 0);
20520Sstevel@tonic-gate 
20530Sstevel@tonic-gate 	/* Check for Fire driver binding name */
20542426Sschwartz 	if (strcmp(name, "pciex108e,80f0") == 0) {
20552426Sschwartz 		DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
20562426Sschwartz 		    "(FIRE), module-revision %d\n", NAMEINST(dip),
20572426Sschwartz 		    revision);
20582426Sschwartz 
20592426Sschwartz 		return ((revision >= FIRE_MOD_REV_20) ?
20602426Sschwartz 		    PX_CHIP_FIRE : PX_CHIP_UNIDENTIFIED);
20610Sstevel@tonic-gate 	}
20620Sstevel@tonic-gate 
20631772Sjl139090 	/* Check for Oberon driver binding name */
20641772Sjl139090 	if (strcmp(name, "pciex108e,80f8") == 0) {
20652426Sschwartz 		DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
20662426Sschwartz 		    "(OBERON), module-revision %d\n", NAMEINST(dip),
20672426Sschwartz 		    revision);
20682426Sschwartz 
20692426Sschwartz 		return (PX_CHIP_OBERON);
20701772Sjl139090 	}
20711772Sjl139090 
20720Sstevel@tonic-gate 	DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
20730Sstevel@tonic-gate 	    ddi_driver_name(dip), ddi_get_instance(dip), name, revision);
20740Sstevel@tonic-gate 
20750Sstevel@tonic-gate 	return (PX_CHIP_UNIDENTIFIED);
20760Sstevel@tonic-gate }
207727Sjchu 
207827Sjchu int
207927Sjchu px_err_add_intr(px_fault_t *px_fault_p)
208027Sjchu {
208127Sjchu 	dev_info_t	*dip = px_fault_p->px_fh_dip;
208227Sjchu 	px_t		*px_p = DIP_TO_STATE(dip);
208327Sjchu 
208427Sjchu 	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
20852973Sgovinda 	    (intrfunc)px_fault_p->px_err_func, (caddr_t)px_fault_p,
20862973Sgovinda 	    NULL, NULL) == 0);
208727Sjchu 
208827Sjchu 	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);
208927Sjchu 
209027Sjchu 	return (DDI_SUCCESS);
209127Sjchu }
209227Sjchu 
209327Sjchu void
209427Sjchu px_err_rem_intr(px_fault_t *px_fault_p)
209527Sjchu {
209627Sjchu 	dev_info_t	*dip = px_fault_p->px_fh_dip;
209727Sjchu 	px_t		*px_p = DIP_TO_STATE(dip);
209827Sjchu 
209927Sjchu 	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
21006313Skrishnae 	    IB_INTR_WAIT);
2101965Sgovinda 
21022973Sgovinda 	VERIFY(rem_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
210327Sjchu }
210427Sjchu 
21051648Sjchu /*
21063623Sjchu  * px_cb_intr_redist() - sun4u only, CB interrupt redistribution
21073623Sjchu  */
21083623Sjchu void
21093623Sjchu px_cb_intr_redist(void *arg)
21103623Sjchu {
21113623Sjchu 	px_cb_t		*cb_p = (px_cb_t *)arg;
21123623Sjchu 	px_cb_list_t	*pxl;
21133623Sjchu 	px_t		*pxp = NULL;
21143623Sjchu 	px_fault_t	*f_p = NULL;
21153623Sjchu 	uint32_t	new_cpuid;
21163623Sjchu 	intr_valid_state_t	enabled = 0;
21173623Sjchu 
21183623Sjchu 	mutex_enter(&cb_p->cb_mutex);
21193623Sjchu 
21203623Sjchu 	pxl = cb_p->pxl;
21213623Sjchu 	if (!pxl)
21223623Sjchu 		goto cb_done;
21233623Sjchu 
21243623Sjchu 	pxp = pxl->pxp;
21253623Sjchu 	f_p = &pxp->px_cb_fault;
21263623Sjchu 	while ((f_p->px_fh_sysino != cb_p->sysino) &&
21273623Sjchu 	    ((pxl = pxl->next) != NULL)) {
21283623Sjchu 		pxp = pxl->pxp;
21293623Sjchu 		f_p = &pxp->px_cb_fault;
21303623Sjchu 	}
21313623Sjchu 	if (pxl == NULL)
21323623Sjchu 		goto cb_done;
21333623Sjchu 
21343623Sjchu 	new_cpuid = intr_dist_cpuid();
21353623Sjchu 	if (new_cpuid == cb_p->cpuid)
21363623Sjchu 		goto cb_done;
21373623Sjchu 
21383623Sjchu 	if ((px_lib_intr_getvalid(pxp->px_dip, f_p->px_fh_sysino, &enabled)
21393623Sjchu 	    != DDI_SUCCESS) || !enabled) {
21403623Sjchu 		DBG(DBG_IB, pxp->px_dip, "px_cb_intr_redist: CB not enabled, "
21413623Sjchu 		    "sysino(0x%x)\n", f_p->px_fh_sysino);
21423623Sjchu 		goto cb_done;
21433623Sjchu 	}
21443623Sjchu 
21453623Sjchu 	PX_INTR_DISABLE(pxp->px_dip, f_p->px_fh_sysino);
21463623Sjchu 
21473623Sjchu 	cb_p->cpuid = new_cpuid;
21483623Sjchu 	cb_p->sysino = f_p->px_fh_sysino;
21493623Sjchu 	PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
21503623Sjchu 
21513623Sjchu cb_done:
21523623Sjchu 	mutex_exit(&cb_p->cb_mutex);
21533623Sjchu }
21543623Sjchu 
21553623Sjchu /*
21561648Sjchu  * px_cb_add_intr() - Called from attach(9E) to create the CB if not yet
21571648Sjchu  * created, always add the CB interrupt vector, but enable it only once.
21581648Sjchu  */
21591648Sjchu int
21601648Sjchu px_cb_add_intr(px_fault_t *fault_p)
21611648Sjchu {
21621648Sjchu 	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip);
21631648Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
21641772Sjl139090 	px_cb_t		*cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip);
21651648Sjchu 	px_cb_list_t	*pxl, *pxl_new;
21663623Sjchu 	boolean_t	is_proxy = B_FALSE;
21673623Sjchu 
21683623Sjchu 	/* create cb */
21691648Sjchu 	if (cb_p == NULL) {
21701648Sjchu 		cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP);
21713623Sjchu 
21723623Sjchu 		mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER,
21733623Sjchu 		    (void *) ipltospl(FM_ERR_PIL));
21743623Sjchu 
21751648Sjchu 		cb_p->px_cb_func = px_cb_intr;
21761648Sjchu 		pxu_p->px_cb_p = cb_p;
21771772Sjl139090 		px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p);
21782509Sschwartz 
21792509Sschwartz 		/* px_lib_dev_init allows only FIRE and OBERON */
21802509Sschwartz 		px_err_reg_enable(
21812509Sschwartz 		    (pxu_p->chip_type == PX_CHIP_FIRE) ?
21826313Skrishnae 		    PX_ERR_JBC : PX_ERR_UBC,
21832509Sschwartz 		    pxu_p->px_address[PX_REG_XBC]);
21841648Sjchu 	} else
21851648Sjchu 		pxu_p->px_cb_p = cb_p;
21861648Sjchu 
21873623Sjchu 	/* register cb interrupt */
21881648Sjchu 	VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL,
21892973Sgovinda 	    (intrfunc)cb_p->px_cb_func, (caddr_t)cb_p, NULL, NULL) == 0);
21901648Sjchu 
21913623Sjchu 
21923623Sjchu 	/* update cb list */
21933623Sjchu 	mutex_enter(&cb_p->cb_mutex);
21941648Sjchu 	if (cb_p->pxl == NULL) {
21953623Sjchu 		is_proxy = B_TRUE;
21961648Sjchu 		pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
21971648Sjchu 		pxl->pxp = px_p;
21981648Sjchu 		cb_p->pxl = pxl;
21991648Sjchu 		cb_p->sysino = fault_p->px_fh_sysino;
22003623Sjchu 		cb_p->cpuid = intr_dist_cpuid();
22011648Sjchu 	} else {
22021648Sjchu 		/*
22031648Sjchu 		 * Find the last pxl or
22043623Sjchu 		 * stop short at encountering a redundant entry, or
22051648Sjchu 		 * both.
22061648Sjchu 		 */
22071648Sjchu 		pxl = cb_p->pxl;
22086313Skrishnae 		for (; !(pxl->pxp == px_p) && pxl->next; pxl = pxl->next) {};
22093623Sjchu 		ASSERT(pxl->pxp != px_p);
22101648Sjchu 
22111648Sjchu 		/* add to linked list */
22121648Sjchu 		pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
22131648Sjchu 		pxl_new->pxp = px_p;
22141648Sjchu 		pxl->next = pxl_new;
22151648Sjchu 	}
22161648Sjchu 	cb_p->attachcnt++;
22171648Sjchu 	mutex_exit(&cb_p->cb_mutex);
22181648Sjchu 
22193623Sjchu 	if (is_proxy) {
22203623Sjchu 		/* add to interrupt redistribution list */
22213623Sjchu 		intr_dist_add(px_cb_intr_redist, cb_p);
22223623Sjchu 
22233623Sjchu 		/* enable cb hw interrupt */
22243623Sjchu 		px_ib_intr_enable(px_p, cb_p->cpuid, fault_p->px_intr_ino);
22253623Sjchu 	}
22263623Sjchu 
22271648Sjchu 	return (DDI_SUCCESS);
22281648Sjchu }
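
/*
 * Summary note (added commentary, not from the original source): every px
 * leaf sharing a JBC/UBC block registers the same px_cb_intr() vector, but
 * only the first entry on cb_p->pxl (the "proxy") has the hardware
 * interrupt enabled; px_cb_intr_redist() above and px_cb_rem_intr() below
 * move that proxy role as CPUs are redistributed or leaves detach.
 */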
22291648Sjchu 
22301648Sjchu /*
22311648Sjchu  * px_cb_rem_intr() - Called from detach(9E) to remove its CB
22321648Sjchu  * interrupt vector, shift the proxy role to the next available px,
22331648Sjchu  * or disable the CB interrupt when it is the last one.
22341648Sjchu  */
22351648Sjchu void
22361648Sjchu px_cb_rem_intr(px_fault_t *fault_p)
22371648Sjchu {
22381648Sjchu 	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp;
22391648Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
22401648Sjchu 	px_cb_t		*cb_p = PX2CB(px_p);
22411648Sjchu 	px_cb_list_t	*pxl, *prev;
22421648Sjchu 	px_fault_t	*f_p;
22431648Sjchu 
22441648Sjchu 	ASSERT(cb_p->pxl);
22451648Sjchu 
22463623Sjchu 	/* find and remove this px, and update cb list */
22471648Sjchu 	mutex_enter(&cb_p->cb_mutex);
22481648Sjchu 
22491648Sjchu 	pxl = cb_p->pxl;
22501648Sjchu 	if (pxl->pxp == px_p) {
22511648Sjchu 		cb_p->pxl = pxl->next;
22521648Sjchu 	} else {
22531648Sjchu 		prev = pxl;
22541648Sjchu 		pxl = pxl->next;
22556313Skrishnae 		for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next) {
22566313Skrishnae 		};
22571648Sjchu 		if (!pxl) {
22581648Sjchu 			cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p "
22591650Sjchu 			    "in registered CB list.", (void *)px_p);
22603623Sjchu 			mutex_exit(&cb_p->cb_mutex);
22611648Sjchu 			return;
22621648Sjchu 		}
22631648Sjchu 		prev->next = pxl->next;
22641648Sjchu 	}
22653623Sjchu 	pxu_p->px_cb_p = NULL;
22663623Sjchu 	cb_p->attachcnt--;
22671648Sjchu 	kmem_free(pxl, sizeof (px_cb_list_t));
22683623Sjchu 	mutex_exit(&cb_p->cb_mutex);
22693623Sjchu 
22703623Sjchu 	/* disable cb hw interrupt */
22713623Sjchu 	if (fault_p->px_fh_sysino == cb_p->sysino)
22721648Sjchu 		px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino,
22731648Sjchu 		    IB_INTR_WAIT);
22741648Sjchu 
22753623Sjchu 	/* if last px, remove from interrupt redistribution list */
22763623Sjchu 	if (cb_p->pxl == NULL)
22773623Sjchu 		intr_dist_rem(px_cb_intr_redist, cb_p);
22783623Sjchu 
22793623Sjchu 	/* de-register interrupt */
22803623Sjchu 	VERIFY(rem_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
22813623Sjchu 
22823623Sjchu 	/* if not last px, assign next px to manage cb */
22833623Sjchu 	mutex_enter(&cb_p->cb_mutex);
22843623Sjchu 	if (cb_p->pxl) {
22853623Sjchu 		if (fault_p->px_fh_sysino == cb_p->sysino) {
22861648Sjchu 			pxp = cb_p->pxl->pxp;
22871648Sjchu 			f_p = &pxp->px_cb_fault;
22881648Sjchu 			cb_p->sysino = f_p->px_fh_sysino;
22891648Sjchu 
22901648Sjchu 			PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
22911650Sjchu 			(void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino,
22921648Sjchu 			    INTR_IDLE_STATE);
22931648Sjchu 		}
22941648Sjchu 		mutex_exit(&cb_p->cb_mutex);
22951648Sjchu 		return;
22961648Sjchu 	}
22973623Sjchu 
22983623Sjchu 	/* clean up after the last px */
22991648Sjchu 	mutex_exit(&cb_p->cb_mutex);
23001648Sjchu 
23012509Sschwartz 	/* px_lib_dev_init allows only FIRE and OBERON */
23022509Sschwartz 	px_err_reg_disable(
23032509Sschwartz 	    (pxu_p->chip_type == PX_CHIP_FIRE) ? PX_ERR_JBC : PX_ERR_UBC,
23042509Sschwartz 	    pxu_p->px_address[PX_REG_XBC]);
23052509Sschwartz 
23061648Sjchu 	mutex_destroy(&cb_p->cb_mutex);
23071772Sjl139090 	px_set_cb(fault_p->px_fh_dip, 0ull);
23081648Sjchu 	kmem_free(cb_p, sizeof (px_cb_t));
23091648Sjchu }
23101648Sjchu 
23111648Sjchu /*
23121648Sjchu  * px_cb_intr() - sun4u only,  CB interrupt dispatcher
23131648Sjchu  */
23141648Sjchu uint_t
23151648Sjchu px_cb_intr(caddr_t arg)
23161648Sjchu {
23171648Sjchu 	px_cb_t		*cb_p = (px_cb_t *)arg;
23183623Sjchu 	px_t		*pxp;
23193623Sjchu 	px_fault_t	*f_p;
23203623Sjchu 	int		ret;
23213354Sjl139090 
23221648Sjchu 	mutex_enter(&cb_p->cb_mutex);
23231648Sjchu 
23243623Sjchu 	if (!cb_p->pxl) {
23251648Sjchu 		mutex_exit(&cb_p->cb_mutex);
23263623Sjchu 		return (DDI_INTR_UNCLAIMED);
23271648Sjchu 	}
23281648Sjchu 
23293623Sjchu 	pxp = cb_p->pxl->pxp;
23303623Sjchu 	f_p = &pxp->px_cb_fault;
23313623Sjchu 
23323623Sjchu 	ret = f_p->px_err_func((caddr_t)f_p);
23331648Sjchu 
23341648Sjchu 	mutex_exit(&cb_p->cb_mutex);
23353623Sjchu 	return (ret);
23361648Sjchu }
23371648Sjchu 
23383623Sjchu #ifdef	FMA
233927Sjchu void
234027Sjchu px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
234127Sjchu {
234227Sjchu 	/* populate the rc_status by reading the registers - TBD */
234327Sjchu }
234427Sjchu #endif /* FMA */
2345383Set142600 
2346383Set142600 /*
2347383Set142600  * Unprotected raw reads/writes of a fabric device's config space.
2348383Set142600  * Only used for temporary PCI-E Fabric Error Handling.
2349383Set142600  */
2350383Set142600 uint32_t
23511648Sjchu px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
23521648Sjchu {
2353*10923SEvan.Yan@Sun.COM 	pci_ranges_t	*rp = px_p->px_ranges_p;
2354383Set142600 	uint64_t	range_prop, base_addr;
2355383Set142600 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2356383Set142600 	uint32_t	val;
2357383Set142600 
2358383Set142600 	/* Get Fire's Physical Base Address */
23591772Sjl139090 	range_prop = px_get_range_prop(px_p, rp, bank);
2360383Set142600 
2361383Set142600 	/* Get config space first. */
2362383Set142600 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2363383Set142600 
2364383Set142600 	val = ldphysio(base_addr);
2365383Set142600 
2366383Set142600 	return (LE_32(val));
2367383Set142600 }
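
/*
 * Illustrative (hypothetical) use of the raw accessor above, e.g. from
 * temporary fabric error handling code:
 *
 *	uint32_t id = px_fab_get(px_p, bdf, PCI_CONF_VENID);
 *	uint16_t vendor_id = id & 0xffff;
 *	uint16_t device_id = (id >> 16) & 0xffff;
 *
 * PCI_CONF_VENID (offset 0x0) is the standard <sys/pci.h> config offset;
 * the snippet itself is not part of this driver.
 */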
2368383Set142600 
2369383Set142600 void
2370383Set142600 px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
2371383Set142600     uint32_t val) {
2372*10923SEvan.Yan@Sun.COM 	pci_ranges_t	*rp = px_p->px_ranges_p;
2373383Set142600 	uint64_t	range_prop, base_addr;
2374383Set142600 	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2375383Set142600 
2376383Set142600 	/* Get Fire's Physical Base Address */
23771772Sjl139090 	range_prop = px_get_range_prop(px_p, rp, bank);
2378383Set142600 
2379383Set142600 	/* Get config space first. */
2380383Set142600 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2381383Set142600 
2382383Set142600 	stphysio(base_addr, LE_32(val));
2383383Set142600 }
2384435Sjchu 
2385435Sjchu /*
2386435Sjchu  * cpr callback
2387435Sjchu  *
2388435Sjchu  * disable fabric error msg interrupt prior to suspending
2389435Sjchu  * all device drivers; re-enable fabric error msg interrupt
2390435Sjchu  * after all devices are resumed.
2391435Sjchu  */
2392435Sjchu static boolean_t
2393435Sjchu px_cpr_callb(void *arg, int code)
2394435Sjchu {
2395435Sjchu 	px_t		*px_p = (px_t *)arg;
2396435Sjchu 	px_ib_t		*ib_p = px_p->px_ib_p;
2397435Sjchu 	px_pec_t	*pec_p = px_p->px_pec_p;
2398435Sjchu 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2399435Sjchu 	caddr_t		csr_base;
2400435Sjchu 	devino_t	ce_ino, nf_ino, f_ino;
24012973Sgovinda 	px_ino_t	*ce_ino_p, *nf_ino_p, *f_ino_p;
2402435Sjchu 	uint64_t	imu_log_enable, imu_intr_enable;
2403435Sjchu 	uint64_t	imu_log_mask, imu_intr_mask;
2404435Sjchu 
2405435Sjchu 	ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
2406435Sjchu 	nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
2407435Sjchu 	f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
2408435Sjchu 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2409435Sjchu 
2410435Sjchu 	imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
2411435Sjchu 	imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);
2412435Sjchu 
2413435Sjchu 	imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
2414435Sjchu 	    BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
2415435Sjchu 	    BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);
2416435Sjchu 
2417435Sjchu 	imu_intr_mask =
2418435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
2419435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
2420435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
2421435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
2422435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
2423435Sjchu 	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);
2424435Sjchu 
2425435Sjchu 	switch (code) {
2426435Sjchu 	case CB_CODE_CPR_CHKPT:
2427435Sjchu 		/* disable imu rbne on corr/nonfatal/fatal errors */
2428435Sjchu 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
2429435Sjchu 		    imu_log_enable & (~imu_log_mask));
2430435Sjchu 
2431435Sjchu 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
2432435Sjchu 		    imu_intr_enable & (~imu_intr_mask));
2433435Sjchu 
2434435Sjchu 		/* disable CORR intr mapping */
2435435Sjchu 		px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);
2436435Sjchu 
2437435Sjchu 		/* disable NON FATAL intr mapping */
2438435Sjchu 		px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);
2439435Sjchu 
2440435Sjchu 		/* disable FATAL intr mapping */
2441435Sjchu 		px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);
2442435Sjchu 
2443435Sjchu 		break;
2444435Sjchu 
2445435Sjchu 	case CB_CODE_CPR_RESUME:
24463274Set142600 		pxu_p->cpr_flag = PX_NOT_CPR;
2447435Sjchu 		mutex_enter(&ib_p->ib_ino_lst_mutex);
2448435Sjchu 
2449435Sjchu 		ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
2450435Sjchu 		nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
2451435Sjchu 		f_ino_p = px_ib_locate_ino(ib_p, f_ino);
2452435Sjchu 
2453435Sjchu 		/* enable CORR intr mapping */
2454435Sjchu 		if (ce_ino_p)
2455435Sjchu 			px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
2456435Sjchu 		else
2457435Sjchu 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2458435Sjchu 			    "reenable PCIe Correctable msg intr.\n");
2459435Sjchu 
2460435Sjchu 		/* enable NON FATAL intr mapping */
2461435Sjchu 		if (nf_ino_p)
2462435Sjchu 			px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
2463435Sjchu 		else
2464435Sjchu 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2465435Sjchu 			    "reenable PCIe Non Fatal msg intr.\n");
2466435Sjchu 
2467435Sjchu 		/* enable FATAL intr mapping */
2468435Sjchu 		if (f_ino_p)
2469435Sjchu 			px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
2470435Sjchu 		else
2471435Sjchu 			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
2472435Sjchu 			    "reenable PCIe Fatal msg intr.\n");
2473435Sjchu 
2474435Sjchu 		mutex_exit(&ib_p->ib_ino_lst_mutex);
2475435Sjchu 
2476435Sjchu 		/* enable corr/nonfatal/fatal not enable error */
2477435Sjchu 		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
2478435Sjchu 		    (imu_log_mask & px_imu_log_mask)));
2479435Sjchu 		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
2480435Sjchu 		    (imu_intr_mask & px_imu_intr_mask)));
2481435Sjchu 
2482435Sjchu 		break;
2483435Sjchu 	}
2484435Sjchu 
2485435Sjchu 	return (B_TRUE);
2486435Sjchu }
2487435Sjchu 
24882053Sschwartz uint64_t
24892053Sschwartz px_get_rng_parent_hi_mask(px_t *px_p)
24902053Sschwartz {
24912053Sschwartz 	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
24922053Sschwartz 	uint64_t mask;
24932053Sschwartz 
24942053Sschwartz 	switch (PX_CHIP_TYPE(pxu_p)) {
24952053Sschwartz 	case PX_CHIP_OBERON:
24962053Sschwartz 		mask = OBERON_RANGE_PROP_MASK;
24972053Sschwartz 		break;
24982053Sschwartz 	case PX_CHIP_FIRE:
24992053Sschwartz 		mask = PX_RANGE_PROP_MASK;
25002053Sschwartz 		break;
25012053Sschwartz 	default:
25022053Sschwartz 		mask = PX_RANGE_PROP_MASK;
25032053Sschwartz 	}
25042053Sschwartz 
25052053Sschwartz 	return (mask);
25062053Sschwartz }
25072053Sschwartz 
2508435Sjchu /*
25091772Sjl139090  * fetch the chip's range property's value
25101772Sjl139090  */
25111772Sjl139090 uint64_t
2512*10923SEvan.Yan@Sun.COM px_get_range_prop(px_t *px_p, pci_ranges_t *rp, int bank)
25131772Sjl139090 {
25141772Sjl139090 	uint64_t mask, range_prop;
25151772Sjl139090 
25162053Sschwartz 	mask = px_get_rng_parent_hi_mask(px_p);
25171772Sjl139090 	range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) |
25186313Skrishnae 	    rp[bank].parent_low;
25191772Sjl139090 
25201772Sjl139090 	return (range_prop);
25211772Sjl139090 }
25221772Sjl139090 
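/*
 * Illustrative example (added for clarity; not part of the original
 * source, and the values are hypothetical): for a ranges entry with
 * parent_high == 0x000007ff and parent_low == 0x80000000, an Oberon
 * root complex returns
 *
 *	(((uint64_t)(0x000007ff & OBERON_RANGE_PROP_MASK)) << 32) | 0x80000000
 *
 * while Fire applies PX_RANGE_PROP_MASK instead, as selected by
 * px_get_rng_parent_hi_mask() above.
 */
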
25231772Sjl139090 /*
2524435Sjchu  * add cpr callback
2525435Sjchu  */
2526435Sjchu void
2527435Sjchu px_cpr_add_callb(px_t *px_p)
2528435Sjchu {
2529435Sjchu 	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
25306313Skrishnae 	    CB_CL_CPR_POST_USER, "px_cpr");
2531435Sjchu }
2532435Sjchu 
2533435Sjchu /*
2534435Sjchu  * remove cpr callback
2535435Sjchu  */
2536435Sjchu void
2537435Sjchu px_cpr_rem_callb(px_t *px_p)
2538435Sjchu {
2539435Sjchu 	(void) callb_delete(px_p->px_cprcb_id);
2540435Sjchu }
25411531Skini 
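/*
 * px_hp_intr() - PCIe hotplug interrupt handler.  (Descriptive comment
 * added for clarity; not part of the original source.)  The PCIe
 * framework handles the hotplug event via pcie_intr(); the hotplug
 * sysino is then set back to the idle state so further hotplug
 * interrupts can be delivered.
 */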
25421531Skini /*ARGSUSED*/
25431772Sjl139090 static uint_t
25441772Sjl139090 px_hp_intr(caddr_t arg1, caddr_t arg2)
25451772Sjl139090 {
2546*10923SEvan.Yan@Sun.COM 	px_t		*px_p = (px_t *)arg1;
2547*10923SEvan.Yan@Sun.COM 	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
2548*10923SEvan.Yan@Sun.COM 	int		rval;
2549*10923SEvan.Yan@Sun.COM 
2550*10923SEvan.Yan@Sun.COM 	rval = pcie_intr(px_p->px_dip);
25511772Sjl139090 
25521772Sjl139090 #ifdef  DEBUG
25531772Sjl139090 	if (rval == DDI_INTR_UNCLAIMED)
25546313Skrishnae 		cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n",
25556313Skrishnae 		    ddi_driver_name(px_p->px_dip),
25566313Skrishnae 		    ddi_get_instance(px_p->px_dip));
25571772Sjl139090 #endif
25581772Sjl139090 
25594701Sgovinda 	/* Set the interrupt state to idle */
25604701Sgovinda 	if (px_lib_intr_setstate(px_p->px_dip,
25614701Sgovinda 	    pxu_p->hp_sysino, INTR_IDLE_STATE) != DDI_SUCCESS)
25624701Sgovinda 		return (DDI_INTR_UNCLAIMED);
25634701Sgovinda 
25641772Sjl139090 	return (rval);
25651772Sjl139090 }
25661772Sjl139090 
25671531Skini int
25681531Skini px_lib_hotplug_init(dev_info_t *dip, void *arg)
25691531Skini {
25701772Sjl139090 	px_t	*px_p = DIP_TO_STATE(dip);
25714701Sgovinda 	pxu_t 	*pxu_p = (pxu_t *)px_p->px_plat_p;
25721772Sjl139090 	uint64_t ret;
25731772Sjl139090 
2574*10923SEvan.Yan@Sun.COM 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2575*10923SEvan.Yan@Sun.COM 	    "hotplug-capable") == 0)
2576*10923SEvan.Yan@Sun.COM 		return (DDI_FAILURE);
2577*10923SEvan.Yan@Sun.COM 
25781772Sjl139090 	if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) {
25791772Sjl139090 		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
25804701Sgovinda 		    px_p->px_inos[PX_INTR_HOTPLUG], &pxu_p->hp_sysino) !=
25811772Sjl139090 		    DDI_SUCCESS) {
25821772Sjl139090 #ifdef	DEBUG
25831772Sjl139090 			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
25841772Sjl139090 			    ddi_driver_name(px_p->px_dip),
25851772Sjl139090 			    ddi_get_instance(px_p->px_dip));
25861772Sjl139090 #endif
25871772Sjl139090 			return (DDI_FAILURE);
25881772Sjl139090 		}
25891772Sjl139090 
2590*10923SEvan.Yan@Sun.COM 		VERIFY(add_ivintr(pxu_p->hp_sysino, PCIE_INTR_PRI,
25912973Sgovinda 		    (intrfunc)px_hp_intr, (caddr_t)px_p, NULL, NULL) == 0);
25923953Sscarter 
25933953Sscarter 		px_ib_intr_enable(px_p, intr_dist_cpuid(),
25943953Sscarter 		    px_p->px_inos[PX_INTR_HOTPLUG]);
25951772Sjl139090 	}
25961772Sjl139090 
25971772Sjl139090 	return (ret);
25981531Skini }
25991531Skini 
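/*
 * Note (added for clarity; not part of the original source):
 * px_lib_hotplug_init() above requires a "hotplug-capable" property on
 * the nexus node, lets hvio_hotplug_init() set up the hardware,
 * converts the hotplug devino to a system interrupt number, registers
 * px_hp_intr() at PCIE_INTR_PRI and targets it at the CPU chosen by
 * intr_dist_cpuid().  px_lib_hotplug_uninit() below tears the interrupt
 * registration back down once hvio_hotplug_uninit() succeeds.
 */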
26001531Skini void
26011531Skini px_lib_hotplug_uninit(dev_info_t *dip)
26021531Skini {
26031772Sjl139090 	if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) {
26041772Sjl139090 		px_t	*px_p = DIP_TO_STATE(dip);
26054701Sgovinda 		pxu_t 	*pxu_p = (pxu_t *)px_p->px_plat_p;
26061772Sjl139090 
26073953Sscarter 		px_ib_intr_disable(px_p->px_ib_p,
26083953Sscarter 		    px_p->px_inos[PX_INTR_HOTPLUG], IB_INTR_WAIT);
26093953Sscarter 
2610*10923SEvan.Yan@Sun.COM 		VERIFY(rem_ivintr(pxu_p->hp_sysino, PCIE_INTR_PRI) == 0);
26111772Sjl139090 	}
26121531Skini }
26132476Sdwoods 
26143953Sscarter /*
26153953Sscarter  * px_hp_intr_redist() - sun4u only, HP interrupt redistribution
26163953Sscarter  */
26173953Sscarter void
26183953Sscarter px_hp_intr_redist(px_t *px_p)
26193953Sscarter {
2620*10923SEvan.Yan@Sun.COM 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(px_p->px_dip);
2621*10923SEvan.Yan@Sun.COM 
2622*10923SEvan.Yan@Sun.COM 	if (px_p && PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
26233953Sscarter 		px_ib_intr_dist_en(px_p->px_dip, intr_dist_cpuid(),
26243953Sscarter 		    px_p->px_inos[PX_INTR_HOTPLUG], B_FALSE);
26253953Sscarter 	}
26263953Sscarter }
26273953Sscarter 
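/*
 * px_lib_is_in_drain_state() - report whether the root complex is in
 * the drain state.  (Descriptive comment added for clarity; not part
 * of the original source.)  Oberon exposes the DRAIN bit in
 * DRAIN_CONTROL_STATUS; Fire exposes it in TLU_STATUS.
 */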
26282476Sdwoods boolean_t
26292476Sdwoods px_lib_is_in_drain_state(px_t *px_p)
26302476Sdwoods {
26312476Sdwoods 	pxu_t 	*pxu_p = (pxu_t *)px_p->px_plat_p;
26322476Sdwoods 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
26332476Sdwoods 	uint64_t drain_status;
26342476Sdwoods 
26352476Sdwoods 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
26362476Sdwoods 		drain_status = CSR_BR(csr_base, DRAIN_CONTROL_STATUS, DRAIN);
26372476Sdwoods 	} else {
26382476Sdwoods 		drain_status = CSR_BR(csr_base, TLU_STATUS, DRAIN);
26392476Sdwoods 	}
26402476Sdwoods 
26412476Sdwoods 	return (drain_status);
26422476Sdwoods }
26433613Set142600 
26443613Set142600 pcie_req_id_t
26453613Set142600 px_lib_get_bdf(px_t *px_p)
26463613Set142600 {
26473613Set142600 	pxu_t 	*pxu_p = (pxu_t *)px_p->px_plat_p;
26483613Set142600 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
26493613Set142600 	pcie_req_id_t bdf;
26503613Set142600 
26513613Set142600 	bdf = CSR_BR(csr_base, DMC_PCI_EXPRESS_CONFIGURATION, REQ_ID);
26523613Set142600 
26533613Set142600 	return (bdf);
26543613Set142600 }
26557596SAlan.Adamson@Sun.COM 
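/*
 * Illustrative note (added for clarity; not part of the original
 * source): pcie_req_id_t carries the requester ID in the standard PCIe
 * layout, so a returned bdf can be decomposed as, for example:
 *
 *	bus  = (bdf >> 8) & 0xff;
 *	dev  = (bdf >> 3) & 0x1f;
 *	func = bdf & 0x7;
 */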
26567596SAlan.Adamson@Sun.COM /*ARGSUSED*/
26577596SAlan.Adamson@Sun.COM int
26587596SAlan.Adamson@Sun.COM px_lib_get_root_complex_mps(px_t *px_p, dev_info_t *dip, int *mps)
26597596SAlan.Adamson@Sun.COM {
26607596SAlan.Adamson@Sun.COM 	pxu_t	*pxu_p;
26617596SAlan.Adamson@Sun.COM 	caddr_t csr_base;
26627596SAlan.Adamson@Sun.COM 
26637596SAlan.Adamson@Sun.COM 	pxu_p = (pxu_t *)px_p->px_plat_p;
26647596SAlan.Adamson@Sun.COM 
26657596SAlan.Adamson@Sun.COM 	if (pxu_p == NULL)
26667596SAlan.Adamson@Sun.COM 		return (DDI_FAILURE);
26677596SAlan.Adamson@Sun.COM 
26687596SAlan.Adamson@Sun.COM 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
26697596SAlan.Adamson@Sun.COM 
26717596SAlan.Adamson@Sun.COM 	*mps = CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES) &
26727596SAlan.Adamson@Sun.COM 	    TLU_DEVICE_CAPABILITIES_MPS_MASK;
26737596SAlan.Adamson@Sun.COM 
26747596SAlan.Adamson@Sun.COM 	return (DDI_SUCCESS);
26757596SAlan.Adamson@Sun.COM }
26767596SAlan.Adamson@Sun.COM 
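/*
 * Illustrative note (added for clarity; not part of the original
 * source, and assuming the TLU register mirrors the standard PCIe
 * Device Capabilities layout): *mps holds the raw Max_Payload_Size
 * Supported field, so the supported payload in bytes is 128 << *mps
 * (0 -> 128, 1 -> 256, ... 5 -> 4096).
 */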
26777596SAlan.Adamson@Sun.COM /*ARGSUSED*/
26787596SAlan.Adamson@Sun.COM int
26797596SAlan.Adamson@Sun.COM px_lib_set_root_complex_mps(px_t *px_p, dev_info_t *dip, int mps)
26807596SAlan.Adamson@Sun.COM {
26817596SAlan.Adamson@Sun.COM 	pxu_t	*pxu_p;
26827596SAlan.Adamson@Sun.COM 	caddr_t csr_base;
26837596SAlan.Adamson@Sun.COM 	uint64_t dev_ctrl;
26847596SAlan.Adamson@Sun.COM 	int link_width, val;
26857596SAlan.Adamson@Sun.COM 	px_chip_type_t chip_type = px_identity_init(px_p);
26867596SAlan.Adamson@Sun.COM 
26877596SAlan.Adamson@Sun.COM 	pxu_p = (pxu_t *)px_p->px_plat_p;
26887596SAlan.Adamson@Sun.COM 
26897596SAlan.Adamson@Sun.COM 	if (pxu_p == NULL)
26907596SAlan.Adamson@Sun.COM 		return (DDI_FAILURE);
26917596SAlan.Adamson@Sun.COM 
26927596SAlan.Adamson@Sun.COM 	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
26937596SAlan.Adamson@Sun.COM 
26947596SAlan.Adamson@Sun.COM 	dev_ctrl = CSR_XR(csr_base, TLU_DEVICE_CONTROL);
26957596SAlan.Adamson@Sun.COM 	dev_ctrl |= (mps << TLU_DEVICE_CONTROL_MPS);
26967596SAlan.Adamson@Sun.COM 
26977596SAlan.Adamson@Sun.COM 	CSR_XS(csr_base, TLU_DEVICE_CONTROL, dev_ctrl);
26987596SAlan.Adamson@Sun.COM 
26997596SAlan.Adamson@Sun.COM 	link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
27007596SAlan.Adamson@Sun.COM 
27017596SAlan.Adamson@Sun.COM 	/*
27027596SAlan.Adamson@Sun.COM 	 * Convert link_width to match timer array configuration.
27037596SAlan.Adamson@Sun.COM 	 */
27047596SAlan.Adamson@Sun.COM 	switch (link_width) {
27057596SAlan.Adamson@Sun.COM 	case 1:
27067596SAlan.Adamson@Sun.COM 		link_width = 0;
27077596SAlan.Adamson@Sun.COM 		break;
27087596SAlan.Adamson@Sun.COM 	case 4:
27097596SAlan.Adamson@Sun.COM 		link_width = 1;
27107596SAlan.Adamson@Sun.COM 		break;
27117596SAlan.Adamson@Sun.COM 	case 8:
27127596SAlan.Adamson@Sun.COM 		link_width = 2;
27137596SAlan.Adamson@Sun.COM 		break;
27147596SAlan.Adamson@Sun.COM 	case 16:
27157596SAlan.Adamson@Sun.COM 		link_width = 3;
27167596SAlan.Adamson@Sun.COM 		break;
27177596SAlan.Adamson@Sun.COM 	default:
27187596SAlan.Adamson@Sun.COM 		link_width = 0;
27197596SAlan.Adamson@Sun.COM 	}
27207596SAlan.Adamson@Sun.COM 
27217596SAlan.Adamson@Sun.COM 	val = px_replay_timer_table[mps][link_width];
27227596SAlan.Adamson@Sun.COM 	CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);
27237596SAlan.Adamson@Sun.COM 
27247596SAlan.Adamson@Sun.COM 	if (chip_type == PX_CHIP_OBERON)
27257596SAlan.Adamson@Sun.COM 		return (DDI_SUCCESS);
27267596SAlan.Adamson@Sun.COM 
27277596SAlan.Adamson@Sun.COM 	val = px_acknak_timer_table[mps][link_width];
27287596SAlan.Adamson@Sun.COM 	CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);
27297596SAlan.Adamson@Sun.COM 
27307596SAlan.Adamson@Sun.COM 	return (DDI_SUCCESS);
27317596SAlan.Adamson@Sun.COM }
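
/*
 * Worked example (added for clarity; not part of the original source,
 * and the table contents themselves are not shown here): on an x8 link
 * the WIDTH field maps to index 2, so setting mps = n programs the
 * replay timer from px_replay_timer_table[n][2] and, on Fire, the
 * ACKNAK latency timer from px_acknak_timer_table[n][2]; Oberon skips
 * the ACKNAK programming.
 */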
2732