xref: /onnv-gate/usr/src/uts/common/io/nxge/nxge_main.c (revision 5770:d3e555c4449c)
13859Sml29623 /*
23859Sml29623  * CDDL HEADER START
33859Sml29623  *
43859Sml29623  * The contents of this file are subject to the terms of the
53859Sml29623  * Common Development and Distribution License (the "License").
63859Sml29623  * You may not use this file except in compliance with the License.
73859Sml29623  *
83859Sml29623  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
93859Sml29623  * or http://www.opensolaris.org/os/licensing.
103859Sml29623  * See the License for the specific language governing permissions
113859Sml29623  * and limitations under the License.
123859Sml29623  *
133859Sml29623  * When distributing Covered Code, include this CDDL HEADER in each
143859Sml29623  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
153859Sml29623  * If applicable, add the following below this CDDL HEADER, with the
163859Sml29623  * fields enclosed by brackets "[]" replaced with your own identifying
173859Sml29623  * information: Portions Copyright [yyyy] [name of copyright owner]
183859Sml29623  *
193859Sml29623  * CDDL HEADER END
203859Sml29623  */
213859Sml29623 /*
22*5770Sml29623  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
233859Sml29623  * Use is subject to license terms.
243859Sml29623  */
253859Sml29623 
263859Sml29623 #pragma ident	"%Z%%M%	%I%	%E% SMI"
273859Sml29623 
283859Sml29623 /*
293859Sml29623  * SunOS MT STREAMS NIU/Neptune 10Gb Ethernet Device Driver.
303859Sml29623  */
313859Sml29623 #include	<sys/nxge/nxge_impl.h>
323859Sml29623 #include	<sys/pcie.h>
333859Sml29623 
343859Sml29623 uint32_t 	nxge_use_partition = 0;		/* debug partition flag */
353859Sml29623 uint32_t 	nxge_dma_obp_props_only = 1;	/* use obp published props */
363859Sml29623 uint32_t 	nxge_use_rdc_intr = 1;		/* debug to assign rdc intr */
373859Sml29623 /*
385013Sml29623  * PSARC/2007/453 MSI-X interrupt limit override
395013Sml29623  * (This PSARC case is limited to MSI-X vectors
405013Sml29623  *  and SPARC platforms only).
413859Sml29623  */
425013Sml29623 #if defined(_BIG_ENDIAN)
435013Sml29623 uint32_t	nxge_msi_enable = 2;
445013Sml29623 #else
455013Sml29623 uint32_t	nxge_msi_enable = 1;
465013Sml29623 #endif
473859Sml29623 
483859Sml29623 /*
493859Sml29623  * Globals: tunable parameters (/etc/system or adb)
503859Sml29623  *
513859Sml29623  */
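/*
 * For example, a tunable such as nxge_jumbo_enable below can be
 * overridden at boot time with an /etc/system entry of the form:
 *
 *	set nxge:nxge_jumbo_enable = 1
 */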
523859Sml29623 uint32_t 	nxge_rbr_size = NXGE_RBR_RBB_DEFAULT;
533859Sml29623 uint32_t 	nxge_rbr_spare_size = 0;
543859Sml29623 uint32_t 	nxge_rcr_size = NXGE_RCR_DEFAULT;
553859Sml29623 uint32_t 	nxge_tx_ring_size = NXGE_TX_RING_DEFAULT;
564193Sspeer boolean_t 	nxge_no_msg = B_TRUE;		/* control message display */
573859Sml29623 uint32_t 	nxge_no_link_notify = 0;	/* control DL_NOTIFY */
583859Sml29623 uint32_t 	nxge_bcopy_thresh = TX_BCOPY_MAX;
593859Sml29623 uint32_t 	nxge_dvma_thresh = TX_FASTDVMA_MIN;
603859Sml29623 uint32_t 	nxge_dma_stream_thresh = TX_STREAM_MIN;
613859Sml29623 uint32_t	nxge_jumbo_mtu	= TX_JUMBO_MTU;
623859Sml29623 boolean_t	nxge_jumbo_enable = B_FALSE;
633859Sml29623 uint16_t	nxge_rcr_timeout = NXGE_RDC_RCR_TIMEOUT;
643859Sml29623 uint16_t	nxge_rcr_threshold = NXGE_RDC_RCR_THRESHOLD;
653952Sml29623 nxge_tx_mode_t	nxge_tx_scheme = NXGE_USE_SERIAL;
663859Sml29623 
67*5770Sml29623 /* MAX LSO size */
68*5770Sml29623 #define		NXGE_LSO_MAXLEN	65535
69*5770Sml29623 /* Enable Software LSO flag */
70*5770Sml29623 uint32_t	nxge_lso_enable = 1;
71*5770Sml29623 uint32_t	nxge_lso_max = NXGE_LSO_MAXLEN;
72*5770Sml29623 
733859Sml29623 /*
743859Sml29623  * Debugging flags:
753859Sml29623  *		nxge_no_tx_lb : transmit load balancing
763859Sml29623  *		nxge_tx_lb_policy: 0 - TCP port (default)
773859Sml29623  *				   3 - DEST MAC
783859Sml29623  */
793859Sml29623 uint32_t 	nxge_no_tx_lb = 0;
803859Sml29623 uint32_t 	nxge_tx_lb_policy = NXGE_TX_LB_TCPUDP;
813859Sml29623 
823859Sml29623 /*
833859Sml29623  * Tunable to reduce the amount of time spent in the
843859Sml29623  * ISR doing Rx processing.
853859Sml29623  */
863859Sml29623 uint32_t nxge_max_rx_pkts = 1024;
873859Sml29623 
883859Sml29623 /*
893859Sml29623  * Tunables to manage the receive buffer blocks.
903859Sml29623  *
913859Sml29623  * nxge_rx_threshold_hi: copy all buffers.
923859Sml29623  * nxge_rx_buf_size_type: receive buffer block size type.
933859Sml29623  * nxge_rx_threshold_lo: copy only up to tunable block size type.
943859Sml29623  */
953859Sml29623 nxge_rxbuf_threshold_t nxge_rx_threshold_hi = NXGE_RX_COPY_6;
963859Sml29623 nxge_rxbuf_type_t nxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
973859Sml29623 nxge_rxbuf_threshold_t nxge_rx_threshold_lo = NXGE_RX_COPY_3;
983859Sml29623 
993859Sml29623 rtrace_t npi_rtracebuf;
1003859Sml29623 
1013859Sml29623 #if	defined(sun4v)
1023859Sml29623 /*
1033859Sml29623  * Hypervisor N2/NIU services information.
1043859Sml29623  */
1053859Sml29623 static hsvc_info_t niu_hsvc = {
1063859Sml29623 	HSVC_REV_1, NULL, HSVC_GROUP_NIU, NIU_MAJOR_VER,
1073859Sml29623 	NIU_MINOR_VER, "nxge"
1083859Sml29623 };
1093859Sml29623 #endif
1103859Sml29623 
1113859Sml29623 /*
1123859Sml29623  * Function Prototypes
1133859Sml29623  */
1143859Sml29623 static int nxge_attach(dev_info_t *, ddi_attach_cmd_t);
1153859Sml29623 static int nxge_detach(dev_info_t *, ddi_detach_cmd_t);
1163859Sml29623 static void nxge_unattach(p_nxge_t);
1173859Sml29623 
1183859Sml29623 #if NXGE_PROPERTY
1193859Sml29623 static void nxge_remove_hard_properties(p_nxge_t);
1203859Sml29623 #endif
1213859Sml29623 
1223859Sml29623 static nxge_status_t nxge_setup_system_dma_pages(p_nxge_t);
1233859Sml29623 
1243859Sml29623 static nxge_status_t nxge_setup_mutexes(p_nxge_t);
1253859Sml29623 static void nxge_destroy_mutexes(p_nxge_t);
1263859Sml29623 
1273859Sml29623 static nxge_status_t nxge_map_regs(p_nxge_t nxgep);
1283859Sml29623 static void nxge_unmap_regs(p_nxge_t nxgep);
1293859Sml29623 #ifdef	NXGE_DEBUG
1303859Sml29623 static void nxge_test_map_regs(p_nxge_t nxgep);
1313859Sml29623 #endif
1323859Sml29623 
1333859Sml29623 static nxge_status_t nxge_add_intrs(p_nxge_t nxgep);
1343859Sml29623 static nxge_status_t nxge_add_soft_intrs(p_nxge_t nxgep);
1353859Sml29623 static void nxge_remove_intrs(p_nxge_t nxgep);
1363859Sml29623 static void nxge_remove_soft_intrs(p_nxge_t nxgep);
1373859Sml29623 
1383859Sml29623 static nxge_status_t nxge_add_intrs_adv(p_nxge_t nxgep);
1393859Sml29623 static nxge_status_t nxge_add_intrs_adv_type(p_nxge_t, uint32_t);
1403859Sml29623 static nxge_status_t nxge_add_intrs_adv_type_fix(p_nxge_t, uint32_t);
1413859Sml29623 static void nxge_intrs_enable(p_nxge_t nxgep);
1423859Sml29623 static void nxge_intrs_disable(p_nxge_t nxgep);
1433859Sml29623 
1443859Sml29623 static void nxge_suspend(p_nxge_t);
1453859Sml29623 static nxge_status_t nxge_resume(p_nxge_t);
1463859Sml29623 
1473859Sml29623 static nxge_status_t nxge_setup_dev(p_nxge_t);
1483859Sml29623 static void nxge_destroy_dev(p_nxge_t);
1493859Sml29623 
1503859Sml29623 static nxge_status_t nxge_alloc_mem_pool(p_nxge_t);
1513859Sml29623 static void nxge_free_mem_pool(p_nxge_t);
1523859Sml29623 
1533859Sml29623 static nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
1543859Sml29623 static void nxge_free_rx_mem_pool(p_nxge_t);
1553859Sml29623 
1563859Sml29623 static nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
1573859Sml29623 static void nxge_free_tx_mem_pool(p_nxge_t);
1583859Sml29623 
1593859Sml29623 static nxge_status_t nxge_dma_mem_alloc(p_nxge_t, dma_method_t,
1603859Sml29623 	struct ddi_dma_attr *,
1613859Sml29623 	size_t, ddi_device_acc_attr_t *, uint_t,
1623859Sml29623 	p_nxge_dma_common_t);
1633859Sml29623 
1643859Sml29623 static void nxge_dma_mem_free(p_nxge_dma_common_t);
1653859Sml29623 
1663859Sml29623 static nxge_status_t nxge_alloc_rx_buf_dma(p_nxge_t, uint16_t,
1673859Sml29623 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
1683859Sml29623 static void nxge_free_rx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
1693859Sml29623 
1703859Sml29623 static nxge_status_t nxge_alloc_rx_cntl_dma(p_nxge_t, uint16_t,
1713859Sml29623 	p_nxge_dma_common_t *, size_t);
1723859Sml29623 static void nxge_free_rx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
1733859Sml29623 
1743859Sml29623 static nxge_status_t nxge_alloc_tx_buf_dma(p_nxge_t, uint16_t,
1753859Sml29623 	p_nxge_dma_common_t *, size_t, size_t, uint32_t *);
1763859Sml29623 static void nxge_free_tx_buf_dma(p_nxge_t, p_nxge_dma_common_t, uint32_t);
1773859Sml29623 
1783859Sml29623 static nxge_status_t nxge_alloc_tx_cntl_dma(p_nxge_t, uint16_t,
1793859Sml29623 	p_nxge_dma_common_t *,
1803859Sml29623 	size_t);
1813859Sml29623 static void nxge_free_tx_cntl_dma(p_nxge_t, p_nxge_dma_common_t);
1823859Sml29623 
1833859Sml29623 static int nxge_init_common_dev(p_nxge_t);
1843859Sml29623 static void nxge_uninit_common_dev(p_nxge_t);
1853859Sml29623 
1863859Sml29623 /*
1873859Sml29623  * The next declarations are for the GLDv3 interface.
1883859Sml29623  */
1893859Sml29623 static int nxge_m_start(void *);
1903859Sml29623 static void nxge_m_stop(void *);
1913859Sml29623 static int nxge_m_unicst(void *, const uint8_t *);
1923859Sml29623 static int nxge_m_multicst(void *, boolean_t, const uint8_t *);
1933859Sml29623 static int nxge_m_promisc(void *, boolean_t);
1943859Sml29623 static void nxge_m_ioctl(void *, queue_t *, mblk_t *);
1953859Sml29623 static void nxge_m_resources(void *);
1963859Sml29623 mblk_t *nxge_m_tx(void *arg, mblk_t *);
1973859Sml29623 static nxge_status_t nxge_mac_register(p_nxge_t);
1983859Sml29623 static int nxge_altmac_set(p_nxge_t nxgep, uint8_t *mac_addr,
1993859Sml29623 	mac_addr_slot_t slot);
2003859Sml29623 static void nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot,
2013859Sml29623 	boolean_t factory);
2023859Sml29623 static int nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
2033859Sml29623 static int nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr);
2043859Sml29623 static int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
2053859Sml29623 static int nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
2063859Sml29623 static int nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
2073859Sml29623 
2083859Sml29623 #define	NXGE_NEPTUNE_MAGIC	0x4E584745UL
2093859Sml29623 #define	MAX_DUMP_SZ 256
2103859Sml29623 
2113859Sml29623 #define	NXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
2123859Sml29623 
2133859Sml29623 static	boolean_t	nxge_m_getcapab(void *, mac_capab_t, void *);
2143859Sml29623 static mac_callbacks_t nxge_m_callbacks = {
2153859Sml29623 	NXGE_M_CALLBACK_FLAGS,
2163859Sml29623 	nxge_m_stat,
2173859Sml29623 	nxge_m_start,
2183859Sml29623 	nxge_m_stop,
2193859Sml29623 	nxge_m_promisc,
2203859Sml29623 	nxge_m_multicst,
2213859Sml29623 	nxge_m_unicst,
2223859Sml29623 	nxge_m_tx,
2233859Sml29623 	nxge_m_resources,
2243859Sml29623 	nxge_m_ioctl,
2253859Sml29623 	nxge_m_getcapab
2263859Sml29623 };
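/*
 * A minimal sketch of how a callback table like nxge_m_callbacks above
 * is typically handed to the GLDv3 framework, assuming the standard
 * mac_alloc()/mac_register() interfaces; the MAC-address and SDU values
 * shown are illustrative placeholders, and the driver's real
 * registration is done by nxge_mac_register(), defined later in this
 * file:
 *
 *	mac_register_t	*macp = mac_alloc(MAC_VERSION);
 *
 *	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
 *	macp->m_driver = nxgep;			(per-instance soft state)
 *	macp->m_dip = nxgep->dip;
 *	macp->m_src_addr = mac_addr;		(current MAC address)
 *	macp->m_callbacks = &nxge_m_callbacks;
 *	macp->m_min_sdu = 0;
 *	macp->m_max_sdu = mtu;			(current MTU)
 *	status = mac_register(macp, &nxgep->mach);
 *	mac_free(macp);
 */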
2273859Sml29623 
2283859Sml29623 void
2293859Sml29623 nxge_err_inject(p_nxge_t, queue_t *, mblk_t *);
2303859Sml29623 
2315013Sml29623 /* PSARC/2007/453 MSI-X interrupt limit override. */
2325013Sml29623 #define	NXGE_MSIX_REQUEST_10G	8
2335013Sml29623 #define	NXGE_MSIX_REQUEST_1G	2
2345013Sml29623 static int nxge_create_msi_property(p_nxge_t);
2355013Sml29623 
2363859Sml29623 /*
2373859Sml29623  * These global variables control the message
2383859Sml29623  * output.
2393859Sml29623  */
2403859Sml29623 out_dbgmsg_t nxge_dbgmsg_out = DBG_CONSOLE | STR_LOG;
2413859Sml29623 uint64_t nxge_debug_level = 0;
2423859Sml29623 
2433859Sml29623 /*
2443859Sml29623  * This list contains the instance structures for the Neptune
2453859Sml29623  * devices present in the system. The lock exists to guarantee
2463859Sml29623  * mutually exclusive access to the list.
2473859Sml29623  */
2483859Sml29623 void 			*nxge_list = NULL;
2493859Sml29623 
2503859Sml29623 void			*nxge_hw_list = NULL;
2513859Sml29623 nxge_os_mutex_t 	nxge_common_lock;
2523859Sml29623 
2533859Sml29623 nxge_os_mutex_t		nxge_mii_lock;
2543859Sml29623 static uint32_t		nxge_mii_lock_init = 0;
2553859Sml29623 nxge_os_mutex_t		nxge_mdio_lock;
2563859Sml29623 static uint32_t		nxge_mdio_lock_init = 0;
2573859Sml29623 
2583859Sml29623 extern uint64_t 	npi_debug_level;
2593859Sml29623 
2603859Sml29623 extern nxge_status_t	nxge_ldgv_init(p_nxge_t, int *, int *);
2613859Sml29623 extern nxge_status_t	nxge_ldgv_init_n2(p_nxge_t, int *, int *);
2623859Sml29623 extern nxge_status_t	nxge_ldgv_uninit(p_nxge_t);
2633859Sml29623 extern nxge_status_t	nxge_intr_ldgv_init(p_nxge_t);
2643859Sml29623 extern void		nxge_fm_init(p_nxge_t,
2653859Sml29623 					ddi_device_acc_attr_t *,
2663859Sml29623 					ddi_device_acc_attr_t *,
2673859Sml29623 					ddi_dma_attr_t *);
2683859Sml29623 extern void		nxge_fm_fini(p_nxge_t);
2693859Sml29623 extern npi_status_t	npi_mac_altaddr_disable(npi_handle_t, uint8_t, uint8_t);
2703859Sml29623 
2713859Sml29623 /*
2723859Sml29623  * Count used to maintain the number of buffers being used
2733859Sml29623  * by Neptune instances and loaned up to the upper layers.
2743859Sml29623  */
2753859Sml29623 uint32_t nxge_mblks_pending = 0;
2763859Sml29623 
2773859Sml29623 /*
2783859Sml29623  * Device register access attributes for PIO.
2793859Sml29623  */
2803859Sml29623 static ddi_device_acc_attr_t nxge_dev_reg_acc_attr = {
2813859Sml29623 	DDI_DEVICE_ATTR_V0,
2823859Sml29623 	DDI_STRUCTURE_LE_ACC,
2833859Sml29623 	DDI_STRICTORDER_ACC,
2843859Sml29623 };
2853859Sml29623 
2863859Sml29623 /*
2873859Sml29623  * Device descriptor access attributes for DMA.
2883859Sml29623  */
2893859Sml29623 static ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr = {
2903859Sml29623 	DDI_DEVICE_ATTR_V0,
2913859Sml29623 	DDI_STRUCTURE_LE_ACC,
2923859Sml29623 	DDI_STRICTORDER_ACC
2933859Sml29623 };
2943859Sml29623 
2953859Sml29623 /*
2963859Sml29623  * Device buffer access attributes for DMA.
2973859Sml29623  */
2983859Sml29623 static ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr = {
2993859Sml29623 	DDI_DEVICE_ATTR_V0,
3003859Sml29623 	DDI_STRUCTURE_BE_ACC,
3013859Sml29623 	DDI_STRICTORDER_ACC
3023859Sml29623 };
3033859Sml29623 
3043859Sml29623 ddi_dma_attr_t nxge_desc_dma_attr = {
3053859Sml29623 	DMA_ATTR_V0,		/* version number. */
3063859Sml29623 	0,			/* low address */
3073859Sml29623 	0xffffffffffffffff,	/* high address */
3083859Sml29623 	0xffffffffffffffff,	/* address counter max */
3093859Sml29623 #ifndef NIU_PA_WORKAROUND
3103859Sml29623 	0x100000,		/* alignment */
3113859Sml29623 #else
3123859Sml29623 	0x2000,
3133859Sml29623 #endif
3143859Sml29623 	0xfc00fc,		/* dlim_burstsizes */
3153859Sml29623 	0x1,			/* minimum transfer size */
3163859Sml29623 	0xffffffffffffffff,	/* maximum transfer size */
3173859Sml29623 	0xffffffffffffffff,	/* maximum segment size */
3183859Sml29623 	1,			/* scatter/gather list length */
3193859Sml29623 	(unsigned int) 1,	/* granularity */
3203859Sml29623 	0			/* attribute flags */
3213859Sml29623 };
3223859Sml29623 
3233859Sml29623 ddi_dma_attr_t nxge_tx_dma_attr = {
3243859Sml29623 	DMA_ATTR_V0,		/* version number. */
3253859Sml29623 	0,			/* low address */
3263859Sml29623 	0xffffffffffffffff,	/* high address */
3273859Sml29623 	0xffffffffffffffff,	/* address counter max */
3283859Sml29623 #if defined(_BIG_ENDIAN)
3293859Sml29623 	0x2000,			/* alignment */
3303859Sml29623 #else
3313859Sml29623 	0x1000,			/* alignment */
3323859Sml29623 #endif
3333859Sml29623 	0xfc00fc,		/* dlim_burstsizes */
3343859Sml29623 	0x1,			/* minimum transfer size */
3353859Sml29623 	0xffffffffffffffff,	/* maximum transfer size */
3363859Sml29623 	0xffffffffffffffff,	/* maximum segment size */
3373859Sml29623 	5,			/* scatter/gather list length */
3383859Sml29623 	(unsigned int) 1,	/* granularity */
3393859Sml29623 	0			/* attribute flags */
3403859Sml29623 };
3413859Sml29623 
3423859Sml29623 ddi_dma_attr_t nxge_rx_dma_attr = {
3433859Sml29623 	DMA_ATTR_V0,		/* version number. */
3443859Sml29623 	0,			/* low address */
3453859Sml29623 	0xffffffffffffffff,	/* high address */
3463859Sml29623 	0xffffffffffffffff,	/* address counter max */
3473859Sml29623 	0x2000,			/* alignment */
3483859Sml29623 	0xfc00fc,		/* dlim_burstsizes */
3493859Sml29623 	0x1,			/* minimum transfer size */
3503859Sml29623 	0xffffffffffffffff,	/* maximum transfer size */
3513859Sml29623 	0xffffffffffffffff,	/* maximum segment size */
3523859Sml29623 	1,			/* scatter/gather list length */
3533859Sml29623 	(unsigned int) 1,	/* granularity */
3544781Ssbehera 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
3553859Sml29623 };
3563859Sml29623 
3573859Sml29623 ddi_dma_lim_t nxge_dma_limits = {
3583859Sml29623 	(uint_t)0,		/* dlim_addr_lo */
3593859Sml29623 	(uint_t)0xffffffff,	/* dlim_addr_hi */
3603859Sml29623 	(uint_t)0xffffffff,	/* dlim_cntr_max */
3613859Sml29623 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
3623859Sml29623 	0x1,			/* dlim_minxfer */
3633859Sml29623 	1024			/* dlim_speed */
3643859Sml29623 };
3653859Sml29623 
3663859Sml29623 dma_method_t nxge_force_dma = DVMA;
3673859Sml29623 
3683859Sml29623 /*
3693859Sml29623  * dma chunk sizes.
3703859Sml29623  *
3713859Sml29623  * Try to allocate the largest possible size
3723859Sml29623  * so that fewer dma chunks need to be managed (see the example below).
3733859Sml29623  */
3743859Sml29623 #ifdef NIU_PA_WORKAROUND
3753859Sml29623 size_t alloc_sizes [] = {0x2000};
3763859Sml29623 #else
3773859Sml29623 size_t alloc_sizes [] = {0x1000, 0x2000, 0x4000, 0x8000,
3783859Sml29623 		0x10000, 0x20000, 0x40000, 0x80000,
379*5770Sml29623 		0x100000, 0x200000, 0x400000, 0x800000,
380*5770Sml29623 		0x1000000, 0x2000000, 0x4000000};
3813859Sml29623 #endif
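/*
 * For example, with the larger sizes in the table above, a 1 MB
 * (0x100000) buffer area can be carved from a single chunk rather
 * than 256 separate 4 KB (0x1000) allocations, so far fewer chunks
 * need to be tracked.
 */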
3823859Sml29623 
3833859Sml29623 /*
3843859Sml29623  * nxge_attach: device attach/resume entry point (DDI_ATTACH, DDI_RESUME, DDI_PM_RESUME).
3853859Sml29623  */
3863859Sml29623 
3873859Sml29623 static int
3883859Sml29623 nxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3893859Sml29623 {
3903859Sml29623 	p_nxge_t	nxgep = NULL;
3913859Sml29623 	int		instance;
3923859Sml29623 	int		status = DDI_SUCCESS;
3933859Sml29623 	uint8_t		portn;
3943859Sml29623 	nxge_mmac_t	*mmac_info;
3953859Sml29623 
3963859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_attach"));
3973859Sml29623 
3983859Sml29623 	/*
3993859Sml29623 	 * Get the device instance since we'll need to set up
4003859Sml29623 	 * or retrieve the soft state for this instance.
4013859Sml29623 	 */
4023859Sml29623 	instance = ddi_get_instance(dip);
4033859Sml29623 
4043859Sml29623 	switch (cmd) {
4053859Sml29623 	case DDI_ATTACH:
4063859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_ATTACH"));
4073859Sml29623 		break;
4083859Sml29623 
4093859Sml29623 	case DDI_RESUME:
4103859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_RESUME"));
4113859Sml29623 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
4123859Sml29623 		if (nxgep == NULL) {
4133859Sml29623 			status = DDI_FAILURE;
4143859Sml29623 			break;
4153859Sml29623 		}
4163859Sml29623 		if (nxgep->dip != dip) {
4173859Sml29623 			status = DDI_FAILURE;
4183859Sml29623 			break;
4193859Sml29623 		}
4203859Sml29623 		if (nxgep->suspended == DDI_PM_SUSPEND) {
4213859Sml29623 			status = ddi_dev_is_needed(nxgep->dip, 0, 1);
4223859Sml29623 		} else {
4234185Sspeer 			status = nxge_resume(nxgep);
4243859Sml29623 		}
4253859Sml29623 		goto nxge_attach_exit;
4263859Sml29623 
4273859Sml29623 	case DDI_PM_RESUME:
4283859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_RESUME"));
4293859Sml29623 		nxgep = (p_nxge_t)ddi_get_soft_state(nxge_list, instance);
4303859Sml29623 		if (nxgep == NULL) {
4313859Sml29623 			status = DDI_FAILURE;
4323859Sml29623 			break;
4333859Sml29623 		}
4343859Sml29623 		if (nxgep->dip != dip) {
4353859Sml29623 			status = DDI_FAILURE;
4363859Sml29623 			break;
4373859Sml29623 		}
4384185Sspeer 		status = nxge_resume(nxgep);
4393859Sml29623 		goto nxge_attach_exit;
4403859Sml29623 
4413859Sml29623 	default:
4423859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing unknown"));
4433859Sml29623 		status = DDI_FAILURE;
4443859Sml29623 		goto nxge_attach_exit;
4453859Sml29623 	}
4463859Sml29623 
4473859Sml29623 
4483859Sml29623 	if (ddi_soft_state_zalloc(nxge_list, instance) == DDI_FAILURE) {
4493859Sml29623 		status = DDI_FAILURE;
4503859Sml29623 		goto nxge_attach_exit;
4513859Sml29623 	}
4523859Sml29623 
4533859Sml29623 	nxgep = ddi_get_soft_state(nxge_list, instance);
4543859Sml29623 	if (nxgep == NULL) {
4554977Sraghus 		status = NXGE_ERROR;
4564977Sraghus 		goto nxge_attach_fail2;
4573859Sml29623 	}
4583859Sml29623 
4594693Stm144005 	nxgep->nxge_magic = NXGE_MAGIC;
4604693Stm144005 
4613859Sml29623 	nxgep->drv_state = 0;
4623859Sml29623 	nxgep->dip = dip;
4633859Sml29623 	nxgep->instance = instance;
4643859Sml29623 	nxgep->p_dip = ddi_get_parent(dip);
4653859Sml29623 	nxgep->nxge_debug_level = nxge_debug_level;
4663859Sml29623 	npi_debug_level = nxge_debug_level;
4673859Sml29623 
4683859Sml29623 	nxge_fm_init(nxgep, &nxge_dev_reg_acc_attr, &nxge_dev_desc_dma_acc_attr,
4693859Sml29623 				&nxge_rx_dma_attr);
4703859Sml29623 
4713859Sml29623 	status = nxge_map_regs(nxgep);
4723859Sml29623 	if (status != NXGE_OK) {
4733859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_map_regs failed"));
4744977Sraghus 		goto nxge_attach_fail3;
4753859Sml29623 	}
4763859Sml29623 
4773859Sml29623 	status = nxge_init_common_dev(nxgep);
4783859Sml29623 	if (status != NXGE_OK) {
4793859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4803859Sml29623 			"nxge_init_common_dev failed"));
4814977Sraghus 		goto nxge_attach_fail4;
4823859Sml29623 	}
4833859Sml29623 
4844732Sdavemq 	if (nxgep->niu_type == NEPTUNE_2_10GF) {
4854732Sdavemq 		if (nxgep->function_num > 1) {
4864732Sdavemq 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Unsupported"
4874732Sdavemq 			    " function %d. Only functions 0 and 1 are "
4884732Sdavemq 			    "supported for this card.", nxgep->function_num));
4894732Sdavemq 			status = NXGE_ERROR;
4904977Sraghus 			goto nxge_attach_fail4;
4914732Sdavemq 		}
4924732Sdavemq 	}
4934732Sdavemq 
4943859Sml29623 	portn = NXGE_GET_PORT_NUM(nxgep->function_num);
4953859Sml29623 	nxgep->mac.portnum = portn;
4963859Sml29623 	if ((portn == 0) || (portn == 1))
4973859Sml29623 		nxgep->mac.porttype = PORT_TYPE_XMAC;
4983859Sml29623 	else
4993859Sml29623 		nxgep->mac.porttype = PORT_TYPE_BMAC;
5003859Sml29623 	/*
5013859Sml29623 	 * Neptune has 4 ports: the first 2 ports use XMAC (10G MAC)
5023859Sml29623 	 * internally, and the other 2 ports use BMAC (1G "Big" MAC).
5033859Sml29623 	 * The two types of MACs have different characteristics.
5043859Sml29623 	 */
5053859Sml29623 	mmac_info = &nxgep->nxge_mmac_info;
5063859Sml29623 	if (nxgep->function_num < 2) {
5073859Sml29623 		mmac_info->num_mmac = XMAC_MAX_ALT_ADDR_ENTRY;
5083859Sml29623 		mmac_info->naddrfree = XMAC_MAX_ALT_ADDR_ENTRY;
5093859Sml29623 	} else {
5103859Sml29623 		mmac_info->num_mmac = BMAC_MAX_ALT_ADDR_ENTRY;
5113859Sml29623 		mmac_info->naddrfree = BMAC_MAX_ALT_ADDR_ENTRY;
5123859Sml29623 	}
5133859Sml29623 	/*
5143859Sml29623 	 * Set up the NDD parameters for this instance.
5153859Sml29623 	 */
5163859Sml29623 	nxge_init_param(nxgep);
5173859Sml29623 
5183859Sml29623 	/*
5193859Sml29623 	 * Setup Register Tracing Buffer.
5203859Sml29623 	 */
5213859Sml29623 	npi_rtrace_buf_init((rtrace_t *)&npi_rtracebuf);
5223859Sml29623 
5233859Sml29623 	/* init stats ptr */
5243859Sml29623 	nxge_init_statsp(nxgep);
5254185Sspeer 
5264977Sraghus 	/*
5274977Sraghus 	 * Read the VPD info from the EEPROM into a local data
5284977Sraghus 	 * structure and check the VPD info for validity.
5294977Sraghus 	 */
5304977Sraghus 	nxge_vpd_info_get(nxgep);
5314977Sraghus 
5324977Sraghus 	status = nxge_xcvr_find(nxgep);
5333859Sml29623 
5343859Sml29623 	if (status != NXGE_OK) {
5354185Sspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_attach: "
5363859Sml29623 				    " Couldn't determine card type"
5373859Sml29623 				    " .... exit "));
5384977Sraghus 		goto nxge_attach_fail5;
5393859Sml29623 	}
5403859Sml29623 
5413859Sml29623 	status = nxge_get_config_properties(nxgep);
5423859Sml29623 
5433859Sml29623 	if (status != NXGE_OK) {
5443859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "get_hw create failed"));
5453859Sml29623 		goto nxge_attach_fail;
5463859Sml29623 	}
5473859Sml29623 
5483859Sml29623 	/*
5493859Sml29623 	 * Setup the Kstats for the driver.
5503859Sml29623 	 */
5513859Sml29623 	nxge_setup_kstats(nxgep);
5523859Sml29623 
5533859Sml29623 	nxge_setup_param(nxgep);
5543859Sml29623 
5553859Sml29623 	status = nxge_setup_system_dma_pages(nxgep);
5563859Sml29623 	if (status != NXGE_OK) {
5573859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "set dma page failed"));
5583859Sml29623 		goto nxge_attach_fail;
5593859Sml29623 	}
5603859Sml29623 
5613859Sml29623 #if	defined(sun4v)
5623859Sml29623 	if (nxgep->niu_type == N2_NIU) {
5633859Sml29623 		nxgep->niu_hsvc_available = B_FALSE;
5643859Sml29623 		bcopy(&niu_hsvc, &nxgep->niu_hsvc, sizeof (hsvc_info_t));
5653859Sml29623 		if ((status =
5663859Sml29623 			hsvc_register(&nxgep->niu_hsvc,
5673859Sml29623 					&nxgep->niu_min_ver)) != 0) {
5683859Sml29623 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
5693859Sml29623 					"nxge_attach: "
5703859Sml29623 					"%s: cannot negotiate "
5713859Sml29623 					"hypervisor services "
5723859Sml29623 					"revision %d "
5733859Sml29623 					"group: 0x%lx "
5743859Sml29623 					"major: 0x%lx minor: 0x%lx "
5753859Sml29623 					"errno: %d",
5763859Sml29623 					niu_hsvc.hsvc_modname,
5773859Sml29623 					niu_hsvc.hsvc_rev,
5783859Sml29623 					niu_hsvc.hsvc_group,
5793859Sml29623 					niu_hsvc.hsvc_major,
5803859Sml29623 					niu_hsvc.hsvc_minor,
5813859Sml29623 					status));
5823859Sml29623 				status = DDI_FAILURE;
5833859Sml29623 				goto nxge_attach_fail;
5843859Sml29623 		}
5853859Sml29623 
5863859Sml29623 		nxgep->niu_hsvc_available = B_TRUE;
5873859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
5883859Sml29623 			"NIU Hypervisor service enabled"));
5893859Sml29623 	}
5903859Sml29623 #endif
5913859Sml29623 
5923859Sml29623 	nxge_hw_id_init(nxgep);
5933859Sml29623 	nxge_hw_init_niu_common(nxgep);
5943859Sml29623 
5953859Sml29623 	status = nxge_setup_mutexes(nxgep);
5963859Sml29623 	if (status != NXGE_OK) {
5973859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set mutex failed"));
5983859Sml29623 		goto nxge_attach_fail;
5993859Sml29623 	}
6003859Sml29623 
6013859Sml29623 	status = nxge_setup_dev(nxgep);
6023859Sml29623 	if (status != DDI_SUCCESS) {
6033859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "set dev failed"));
6043859Sml29623 		goto nxge_attach_fail;
6053859Sml29623 	}
6063859Sml29623 
6073859Sml29623 	status = nxge_add_intrs(nxgep);
6083859Sml29623 	if (status != DDI_SUCCESS) {
6093859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "add_intr failed"));
6103859Sml29623 		goto nxge_attach_fail;
6113859Sml29623 	}
6123859Sml29623 	status = nxge_add_soft_intrs(nxgep);
6133859Sml29623 	if (status != DDI_SUCCESS) {
6143859Sml29623 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "add_soft_intr failed"));
6153859Sml29623 		goto nxge_attach_fail;
6163859Sml29623 	}
6173859Sml29623 
6183859Sml29623 	/*
6193859Sml29623 	 * Enable interrupts.
6203859Sml29623 	 */
6213859Sml29623 	nxge_intrs_enable(nxgep);
6223859Sml29623 
6234977Sraghus 	if ((status = nxge_mac_register(nxgep)) != NXGE_OK) {
6243859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
6253859Sml29623 			"unable to register to mac layer (%d)", status));
6263859Sml29623 		goto nxge_attach_fail;
6273859Sml29623 	}
6283859Sml29623 
6293859Sml29623 	mac_link_update(nxgep->mach, LINK_STATE_UNKNOWN);
6303859Sml29623 
6313859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "registered to mac (instance %d)",
6323859Sml29623 		instance));
6333859Sml29623 
6343859Sml29623 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
6353859Sml29623 
6363859Sml29623 	goto nxge_attach_exit;
6373859Sml29623 
6383859Sml29623 nxge_attach_fail:
6393859Sml29623 	nxge_unattach(nxgep);
6404977Sraghus 	goto nxge_attach_fail1;
6414977Sraghus 
6424977Sraghus nxge_attach_fail5:
6434977Sraghus 	/*
6444977Sraghus 	 * Tear down the ndd parameters setup.
6454977Sraghus 	 */
6464977Sraghus 	nxge_destroy_param(nxgep);
6474977Sraghus 
6484977Sraghus 	/*
6494977Sraghus 	 * Tear down the kstat setup.
6504977Sraghus 	 */
6514977Sraghus 	nxge_destroy_kstats(nxgep);
6524977Sraghus 
6534977Sraghus nxge_attach_fail4:
6544977Sraghus 	if (nxgep->nxge_hw_p) {
6554977Sraghus 		nxge_uninit_common_dev(nxgep);
6564977Sraghus 		nxgep->nxge_hw_p = NULL;
6574977Sraghus 	}
6584977Sraghus 
6594977Sraghus nxge_attach_fail3:
6604977Sraghus 	/*
6614977Sraghus 	 * Unmap the register setup.
6624977Sraghus 	 */
6634977Sraghus 	nxge_unmap_regs(nxgep);
6644977Sraghus 
6654977Sraghus 	nxge_fm_fini(nxgep);
6664977Sraghus 
6674977Sraghus nxge_attach_fail2:
6684977Sraghus 	ddi_soft_state_free(nxge_list, nxgep->instance);
6694977Sraghus 
6704977Sraghus nxge_attach_fail1:
6714185Sspeer 	if (status != NXGE_OK)
6724185Sspeer 		status = (NXGE_ERROR | NXGE_DDI_FAILED);
6733859Sml29623 	nxgep = NULL;
6743859Sml29623 
6753859Sml29623 nxge_attach_exit:
6763859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_attach status = 0x%08x",
6773859Sml29623 		status));
6783859Sml29623 
6793859Sml29623 	return (status);
6803859Sml29623 }
6813859Sml29623 
6823859Sml29623 static int
6833859Sml29623 nxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
6843859Sml29623 {
6853859Sml29623 	int 		status = DDI_SUCCESS;
6863859Sml29623 	int 		instance;
6873859Sml29623 	p_nxge_t 	nxgep = NULL;
6883859Sml29623 
6893859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_detach"));
6903859Sml29623 	instance = ddi_get_instance(dip);
6913859Sml29623 	nxgep = ddi_get_soft_state(nxge_list, instance);
6923859Sml29623 	if (nxgep == NULL) {
6933859Sml29623 		status = DDI_FAILURE;
6943859Sml29623 		goto nxge_detach_exit;
6953859Sml29623 	}
6963859Sml29623 
6973859Sml29623 	switch (cmd) {
6983859Sml29623 	case DDI_DETACH:
6993859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_DETACH"));
7003859Sml29623 		break;
7013859Sml29623 
7023859Sml29623 	case DDI_PM_SUSPEND:
7033859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
7043859Sml29623 		nxgep->suspended = DDI_PM_SUSPEND;
7053859Sml29623 		nxge_suspend(nxgep);
7063859Sml29623 		break;
7073859Sml29623 
7083859Sml29623 	case DDI_SUSPEND:
7093859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "doing DDI_SUSPEND"));
7103859Sml29623 		if (nxgep->suspended != DDI_PM_SUSPEND) {
7113859Sml29623 			nxgep->suspended = DDI_SUSPEND;
7123859Sml29623 			nxge_suspend(nxgep);
7133859Sml29623 		}
7143859Sml29623 		break;
7153859Sml29623 
7163859Sml29623 	default:
7173859Sml29623 		status = DDI_FAILURE;
7183859Sml29623 	}
7193859Sml29623 
7203859Sml29623 	if (cmd != DDI_DETACH)
7213859Sml29623 		goto nxge_detach_exit;
7223859Sml29623 
7233859Sml29623 	/*
7243859Sml29623 	 * Stop the xcvr polling.
7253859Sml29623 	 */
7263859Sml29623 	nxgep->suspended = cmd;
7273859Sml29623 
7283859Sml29623 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
7293859Sml29623 
7303859Sml29623 	if (nxgep->mach && (status = mac_unregister(nxgep->mach)) != 0) {
7313859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
7323859Sml29623 			"<== nxge_detach status = 0x%08X", status));
7333859Sml29623 		return (DDI_FAILURE);
7343859Sml29623 	}
7353859Sml29623 
7363859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
7373859Sml29623 		"<== nxge_detach (mac_unregister) status = 0x%08X", status));
7383859Sml29623 
7393859Sml29623 	nxge_unattach(nxgep);
7403859Sml29623 	nxgep = NULL;
7413859Sml29623 
7423859Sml29623 nxge_detach_exit:
7433859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_detach status = 0x%08X",
7443859Sml29623 		status));
7453859Sml29623 
7463859Sml29623 	return (status);
7473859Sml29623 }
7483859Sml29623 
7493859Sml29623 static void
7503859Sml29623 nxge_unattach(p_nxge_t nxgep)
7513859Sml29623 {
7523859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unattach"));
7533859Sml29623 
7543859Sml29623 	if (nxgep == NULL || nxgep->dev_regs == NULL) {
7553859Sml29623 		return;
7563859Sml29623 	}
7573859Sml29623 
7584693Stm144005 	nxgep->nxge_magic = 0;
7594693Stm144005 
7603859Sml29623 	if (nxgep->nxge_hw_p) {
7613859Sml29623 		nxge_uninit_common_dev(nxgep);
7623859Sml29623 		nxgep->nxge_hw_p = NULL;
7633859Sml29623 	}
7643859Sml29623 
7653859Sml29623 	if (nxgep->nxge_timerid) {
7663859Sml29623 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
7673859Sml29623 		nxgep->nxge_timerid = 0;
7683859Sml29623 	}
7693859Sml29623 
7703859Sml29623 #if	defined(sun4v)
7713859Sml29623 	if (nxgep->niu_type == N2_NIU && nxgep->niu_hsvc_available == B_TRUE) {
7723859Sml29623 		(void) hsvc_unregister(&nxgep->niu_hsvc);
7733859Sml29623 		nxgep->niu_hsvc_available = B_FALSE;
7743859Sml29623 	}
7753859Sml29623 #endif
7763859Sml29623 	/*
7773859Sml29623 	 * Stop any further interrupts.
7783859Sml29623 	 */
7793859Sml29623 	nxge_remove_intrs(nxgep);
7803859Sml29623 
7813859Sml29623 	/* remove soft interrupts */
7823859Sml29623 	nxge_remove_soft_intrs(nxgep);
7833859Sml29623 
7843859Sml29623 	/*
7853859Sml29623 	 * Stop the device and free resources.
7863859Sml29623 	 */
7873859Sml29623 	nxge_destroy_dev(nxgep);
7883859Sml29623 
7893859Sml29623 	/*
7903859Sml29623 	 * Tear down the ndd parameters setup.
7913859Sml29623 	 */
7923859Sml29623 	nxge_destroy_param(nxgep);
7933859Sml29623 
7943859Sml29623 	/*
7953859Sml29623 	 * Tear down the kstat setup.
7963859Sml29623 	 */
7973859Sml29623 	nxge_destroy_kstats(nxgep);
7983859Sml29623 
7993859Sml29623 	/*
8003859Sml29623 	 * Destroy all mutexes.
8013859Sml29623 	 */
8023859Sml29623 	nxge_destroy_mutexes(nxgep);
8033859Sml29623 
8043859Sml29623 	/*
8053859Sml29623 	 * Remove the list of ndd parameters which
8063859Sml29623 	 * were set up during attach.
8073859Sml29623 	 */
8083859Sml29623 	if (nxgep->dip) {
8093859Sml29623 		NXGE_DEBUG_MSG((nxgep, OBP_CTL,
8103859Sml29623 				    " nxge_unattach: remove all properties"));
8113859Sml29623 
8123859Sml29623 		(void) ddi_prop_remove_all(nxgep->dip);
8133859Sml29623 	}
8143859Sml29623 
8153859Sml29623 #if NXGE_PROPERTY
8163859Sml29623 	nxge_remove_hard_properties(nxgep);
8173859Sml29623 #endif
8183859Sml29623 
8193859Sml29623 	/*
8203859Sml29623 	 * Unmap the register setup.
8213859Sml29623 	 */
8223859Sml29623 	nxge_unmap_regs(nxgep);
8233859Sml29623 
8243859Sml29623 	nxge_fm_fini(nxgep);
8253859Sml29623 
8263859Sml29623 	ddi_soft_state_free(nxge_list, nxgep->instance);
8273859Sml29623 
8283859Sml29623 	NXGE_DEBUG_MSG((NULL, DDI_CTL, "<== nxge_unattach"));
8293859Sml29623 }
8303859Sml29623 
8313859Sml29623 static char n2_siu_name[] = "niu";
8323859Sml29623 
8333859Sml29623 static nxge_status_t
8343859Sml29623 nxge_map_regs(p_nxge_t nxgep)
8353859Sml29623 {
8363859Sml29623 	int		ddi_status = DDI_SUCCESS;
8373859Sml29623 	p_dev_regs_t 	dev_regs;
8383859Sml29623 	char		buf[MAXPATHLEN + 1];
8393859Sml29623 	char 		*devname;
8403859Sml29623 #ifdef	NXGE_DEBUG
8413859Sml29623 	char 		*sysname;
8423859Sml29623 #endif
8433859Sml29623 	off_t		regsize;
8443859Sml29623 	nxge_status_t	status = NXGE_OK;
8453859Sml29623 #if !defined(_BIG_ENDIAN)
8463859Sml29623 	off_t pci_offset;
8473859Sml29623 	uint16_t pcie_devctl;
8483859Sml29623 #endif
8493859Sml29623 
8503859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_map_regs"));
8513859Sml29623 	nxgep->dev_regs = NULL;
8523859Sml29623 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
8533859Sml29623 	dev_regs->nxge_regh = NULL;
8543859Sml29623 	dev_regs->nxge_pciregh = NULL;
8553859Sml29623 	dev_regs->nxge_msix_regh = NULL;
8563859Sml29623 	dev_regs->nxge_vir_regh = NULL;
8573859Sml29623 	dev_regs->nxge_vir2_regh = NULL;
8584732Sdavemq 	nxgep->niu_type = NIU_TYPE_NONE;
8593859Sml29623 
8603859Sml29623 	devname = ddi_pathname(nxgep->dip, buf);
8613859Sml29623 	ASSERT(strlen(devname) > 0);
8623859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
8633859Sml29623 		"nxge_map_regs: pathname devname %s", devname));
8643859Sml29623 
8653859Sml29623 	if (strstr(devname, n2_siu_name)) {
8663859Sml29623 		/* N2/NIU */
8673859Sml29623 		nxgep->niu_type = N2_NIU;
8683859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
8693859Sml29623 			"nxge_map_regs: N2/NIU devname %s", devname));
8703859Sml29623 		/* get function number */
8713859Sml29623 		nxgep->function_num =
8723859Sml29623 			(devname[strlen(devname) -1] == '1' ? 1 : 0);
8733859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
8743859Sml29623 			"nxge_map_regs: N2/NIU function number %d",
8753859Sml29623 			nxgep->function_num));
8763859Sml29623 	} else {
8773859Sml29623 		int		*prop_val;
8783859Sml29623 		uint_t 		prop_len;
8793859Sml29623 		uint8_t 	func_num;
8803859Sml29623 
8813859Sml29623 		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxgep->dip,
8823859Sml29623 				0, "reg",
8833859Sml29623 				&prop_val, &prop_len) != DDI_PROP_SUCCESS) {
8843859Sml29623 			NXGE_DEBUG_MSG((nxgep, VPD_CTL,
8853859Sml29623 				"Reg property not found"));
8863859Sml29623 			ddi_status = DDI_FAILURE;
8873859Sml29623 			goto nxge_map_regs_fail0;
8883859Sml29623 
8893859Sml29623 		} else {
8903859Sml29623 			func_num = (prop_val[0] >> 8) & 0x7;
8913859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
8923859Sml29623 				"Reg property found: fun # %d",
8933859Sml29623 				func_num));
8943859Sml29623 			nxgep->function_num = func_num;
8953859Sml29623 			ddi_prop_free(prop_val);
8963859Sml29623 		}
8973859Sml29623 	}
8983859Sml29623 
8993859Sml29623 	switch (nxgep->niu_type) {
9003859Sml29623 	default:
9013859Sml29623 		(void) ddi_dev_regsize(nxgep->dip, 0, &regsize);
9023859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
9033859Sml29623 			"nxge_map_regs: pci config size 0x%x", regsize));
9043859Sml29623 
9053859Sml29623 		ddi_status = ddi_regs_map_setup(nxgep->dip, 0,
9063859Sml29623 			(caddr_t *)&(dev_regs->nxge_pciregp), 0, 0,
9073859Sml29623 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_pciregh);
9083859Sml29623 		if (ddi_status != DDI_SUCCESS) {
9093859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
9103859Sml29623 				"ddi_map_regs, nxge bus config regs failed"));
9113859Sml29623 			goto nxge_map_regs_fail0;
9123859Sml29623 		}
9133859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
9143859Sml29623 			"nxge_map_reg: PCI config addr 0x%0llx "
9153859Sml29623 			" handle 0x%0llx", dev_regs->nxge_pciregp,
9163859Sml29623 			dev_regs->nxge_pciregh));
9173859Sml29623 			/*
9183859Sml29623 			 * IMPORTANT:
9193859Sml29623 			 * Workaround for a bit-swapping bug in HW
9203859Sml29623 			 * which ends up with no-snoop = yes,
9213859Sml29623 			 * resulting in DMA not being synched properly.
9223859Sml29623 			 */
9233859Sml29623 #if !defined(_BIG_ENDIAN)
9243859Sml29623 		/* workarounds for x86 systems */
9253859Sml29623 		pci_offset = 0x80 + PCIE_DEVCTL;
9263859Sml29623 		pcie_devctl = 0x0;
9273859Sml29623 		pcie_devctl &= PCIE_DEVCTL_ENABLE_NO_SNOOP;
9283859Sml29623 		pcie_devctl |= PCIE_DEVCTL_RO_EN;
9293859Sml29623 		pci_config_put16(dev_regs->nxge_pciregh, pci_offset,
9303859Sml29623 				    pcie_devctl);
9313859Sml29623 #endif
9323859Sml29623 
9333859Sml29623 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
9343859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
9353859Sml29623 			"nxge_map_regs: pio size 0x%x", regsize));
9363859Sml29623 		/* set up the device mapped register */
9373859Sml29623 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
9383859Sml29623 			(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
9393859Sml29623 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
9403859Sml29623 		if (ddi_status != DDI_SUCCESS) {
9413859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
9423859Sml29623 				"ddi_map_regs for Neptune global reg failed"));
9433859Sml29623 			goto nxge_map_regs_fail1;
9443859Sml29623 		}
9453859Sml29623 
9463859Sml29623 		/* set up the msi/msi-x mapped register */
9473859Sml29623 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
9483859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
9493859Sml29623 			"nxge_map_regs: msix size 0x%x", regsize));
9503859Sml29623 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
9513859Sml29623 			(caddr_t *)&(dev_regs->nxge_msix_regp), 0, 0,
9523859Sml29623 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_msix_regh);
9533859Sml29623 		if (ddi_status != DDI_SUCCESS) {
9543859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
9553859Sml29623 				"ddi_map_regs for msi reg failed"));
9563859Sml29623 			goto nxge_map_regs_fail2;
9573859Sml29623 		}
9583859Sml29623 
9593859Sml29623 		/* set up the vio region mapped register */
9603859Sml29623 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
9613859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
9623859Sml29623 			"nxge_map_regs: vio size 0x%x", regsize));
9633859Sml29623 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
9643859Sml29623 			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
9653859Sml29623 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
9663859Sml29623 
9673859Sml29623 		if (ddi_status != DDI_SUCCESS) {
9683859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
9693859Sml29623 				"ddi_map_regs for nxge vio reg failed"));
9703859Sml29623 			goto nxge_map_regs_fail3;
9713859Sml29623 		}
9723859Sml29623 		nxgep->dev_regs = dev_regs;
9733859Sml29623 
9743859Sml29623 		NPI_PCI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_pciregh);
9753859Sml29623 		NPI_PCI_ADD_HANDLE_SET(nxgep,
9763859Sml29623 			(npi_reg_ptr_t)dev_regs->nxge_pciregp);
9773859Sml29623 		NPI_MSI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_msix_regh);
9783859Sml29623 		NPI_MSI_ADD_HANDLE_SET(nxgep,
9793859Sml29623 			(npi_reg_ptr_t)dev_regs->nxge_msix_regp);
9803859Sml29623 
9813859Sml29623 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
9823859Sml29623 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
9833859Sml29623 
9843859Sml29623 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
9853859Sml29623 		NPI_REG_ADD_HANDLE_SET(nxgep,
9863859Sml29623 			(npi_reg_ptr_t)dev_regs->nxge_regp);
9873859Sml29623 
9883859Sml29623 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
9893859Sml29623 		NPI_VREG_ADD_HANDLE_SET(nxgep,
9903859Sml29623 			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
9913859Sml29623 
9923859Sml29623 		break;
9933859Sml29623 
9943859Sml29623 	case N2_NIU:
9953859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL, "ddi_map_regs, NIU"));
9963859Sml29623 		/*
9973859Sml29623 		 * Set up the device mapped register (FWARC 2006/556)
9983859Sml29623 		 * (changed back to 1: reg starts at 1!)
9993859Sml29623 		 */
10003859Sml29623 		(void) ddi_dev_regsize(nxgep->dip, 1, &regsize);
10013859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
10023859Sml29623 			"nxge_map_regs: dev size 0x%x", regsize));
10033859Sml29623 		ddi_status = ddi_regs_map_setup(nxgep->dip, 1,
10043859Sml29623 				(caddr_t *)&(dev_regs->nxge_regp), 0, 0,
10053859Sml29623 				&nxge_dev_reg_acc_attr, &dev_regs->nxge_regh);
10063859Sml29623 
10073859Sml29623 		if (ddi_status != DDI_SUCCESS) {
10083859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
10093859Sml29623 				"ddi_map_regs for N2/NIU, global reg failed "));
10103859Sml29623 			goto nxge_map_regs_fail1;
10113859Sml29623 		}
10123859Sml29623 
10133859Sml29623 		/* set up the vio region mapped register */
10143859Sml29623 		(void) ddi_dev_regsize(nxgep->dip, 2, &regsize);
10153859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
10163859Sml29623 			"nxge_map_regs: vio (1) size 0x%x", regsize));
10173859Sml29623 		ddi_status = ddi_regs_map_setup(nxgep->dip, 2,
10183859Sml29623 			(caddr_t *)&(dev_regs->nxge_vir_regp), 0, 0,
10193859Sml29623 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir_regh);
10203859Sml29623 
10213859Sml29623 		if (ddi_status != DDI_SUCCESS) {
10223859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
10233859Sml29623 				"ddi_map_regs for nxge vio reg failed"));
10243859Sml29623 			goto nxge_map_regs_fail2;
10253859Sml29623 		}
10263859Sml29623 		/* set up the vio region mapped register */
10273859Sml29623 		(void) ddi_dev_regsize(nxgep->dip, 3, &regsize);
10283859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
10293859Sml29623 			"nxge_map_regs: vio (3) size 0x%x", regsize));
10303859Sml29623 		ddi_status = ddi_regs_map_setup(nxgep->dip, 3,
10313859Sml29623 			(caddr_t *)&(dev_regs->nxge_vir2_regp), 0, 0,
10323859Sml29623 			&nxge_dev_reg_acc_attr, &dev_regs->nxge_vir2_regh);
10333859Sml29623 
10343859Sml29623 		if (ddi_status != DDI_SUCCESS) {
10353859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
10363859Sml29623 				"ddi_map_regs for nxge vio2 reg failed"));
10373859Sml29623 			goto nxge_map_regs_fail3;
10383859Sml29623 		}
10393859Sml29623 		nxgep->dev_regs = dev_regs;
10403859Sml29623 
10413859Sml29623 		NPI_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
10423859Sml29623 		NPI_ADD_HANDLE_SET(nxgep, (npi_reg_ptr_t)dev_regs->nxge_regp);
10433859Sml29623 
10443859Sml29623 		NPI_REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_regh);
10453859Sml29623 		NPI_REG_ADD_HANDLE_SET(nxgep,
10463859Sml29623 			(npi_reg_ptr_t)dev_regs->nxge_regp);
10473859Sml29623 
10483859Sml29623 		NPI_VREG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir_regh);
10493859Sml29623 		NPI_VREG_ADD_HANDLE_SET(nxgep,
10503859Sml29623 			(npi_reg_ptr_t)dev_regs->nxge_vir_regp);
10513859Sml29623 
10523859Sml29623 		NPI_V2REG_ACC_HANDLE_SET(nxgep, dev_regs->nxge_vir2_regh);
10533859Sml29623 		NPI_V2REG_ADD_HANDLE_SET(nxgep,
10543859Sml29623 			(npi_reg_ptr_t)dev_regs->nxge_vir2_regp);
10553859Sml29623 
10563859Sml29623 		break;
10573859Sml29623 	}
10583859Sml29623 
10593859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "nxge_map_reg: hardware addr 0x%0llx "
10603859Sml29623 		" handle 0x%0llx", dev_regs->nxge_regp, dev_regs->nxge_regh));
10613859Sml29623 
10623859Sml29623 	goto nxge_map_regs_exit;
10633859Sml29623 nxge_map_regs_fail3:
10643859Sml29623 	if (dev_regs->nxge_msix_regh) {
10653859Sml29623 		ddi_regs_map_free(&dev_regs->nxge_msix_regh);
10663859Sml29623 	}
10673859Sml29623 	if (dev_regs->nxge_vir_regh) {
10683859Sml29623 		ddi_regs_map_free(&dev_regs->nxge_vir_regh);
10693859Sml29623 	}
10703859Sml29623 nxge_map_regs_fail2:
10713859Sml29623 	if (dev_regs->nxge_regh) {
10723859Sml29623 		ddi_regs_map_free(&dev_regs->nxge_regh);
10733859Sml29623 	}
10743859Sml29623 nxge_map_regs_fail1:
10753859Sml29623 	if (dev_regs->nxge_pciregh) {
10763859Sml29623 		ddi_regs_map_free(&dev_regs->nxge_pciregh);
10773859Sml29623 	}
10783859Sml29623 nxge_map_regs_fail0:
10793859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "Freeing register set memory"));
10803859Sml29623 	kmem_free(dev_regs, sizeof (dev_regs_t));
10813859Sml29623 
10823859Sml29623 nxge_map_regs_exit:
10833859Sml29623 	if (ddi_status != DDI_SUCCESS)
10843859Sml29623 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
10853859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_map_regs"));
10863859Sml29623 	return (status);
10873859Sml29623 }
10883859Sml29623 
10893859Sml29623 static void
10903859Sml29623 nxge_unmap_regs(p_nxge_t nxgep)
10913859Sml29623 {
10923859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_unmap_regs"));
10933859Sml29623 	if (nxgep->dev_regs) {
10943859Sml29623 		if (nxgep->dev_regs->nxge_pciregh) {
10953859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
10963859Sml29623 				"==> nxge_unmap_regs: bus"));
10973859Sml29623 			ddi_regs_map_free(&nxgep->dev_regs->nxge_pciregh);
10983859Sml29623 			nxgep->dev_regs->nxge_pciregh = NULL;
10993859Sml29623 		}
11003859Sml29623 		if (nxgep->dev_regs->nxge_regh) {
11013859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
11023859Sml29623 				"==> nxge_unmap_regs: device registers"));
11033859Sml29623 			ddi_regs_map_free(&nxgep->dev_regs->nxge_regh);
11043859Sml29623 			nxgep->dev_regs->nxge_regh = NULL;
11053859Sml29623 		}
11063859Sml29623 		if (nxgep->dev_regs->nxge_msix_regh) {
11073859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
11083859Sml29623 				"==> nxge_unmap_regs: device interrupts"));
11093859Sml29623 			ddi_regs_map_free(&nxgep->dev_regs->nxge_msix_regh);
11103859Sml29623 			nxgep->dev_regs->nxge_msix_regh = NULL;
11113859Sml29623 		}
11123859Sml29623 		if (nxgep->dev_regs->nxge_vir_regh) {
11133859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
11143859Sml29623 				"==> nxge_unmap_regs: vio region"));
11153859Sml29623 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir_regh);
11163859Sml29623 			nxgep->dev_regs->nxge_vir_regh = NULL;
11173859Sml29623 		}
11183859Sml29623 		if (nxgep->dev_regs->nxge_vir2_regh) {
11193859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
11203859Sml29623 				"==> nxge_unmap_regs: vio2 region"));
11213859Sml29623 			ddi_regs_map_free(&nxgep->dev_regs->nxge_vir2_regh);
11223859Sml29623 			nxgep->dev_regs->nxge_vir2_regh = NULL;
11233859Sml29623 		}
11243859Sml29623 
11253859Sml29623 		kmem_free(nxgep->dev_regs, sizeof (dev_regs_t));
11263859Sml29623 		nxgep->dev_regs = NULL;
11273859Sml29623 	}
11283859Sml29623 
11293859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_unmap_regs"));
11303859Sml29623 }
11313859Sml29623 
11323859Sml29623 static nxge_status_t
11333859Sml29623 nxge_setup_mutexes(p_nxge_t nxgep)
11343859Sml29623 {
11353859Sml29623 	int ddi_status = DDI_SUCCESS;
11363859Sml29623 	nxge_status_t status = NXGE_OK;
11373859Sml29623 	nxge_classify_t *classify_ptr;
11383859Sml29623 	int partition;
11393859Sml29623 
11403859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_mutexes"));
11413859Sml29623 
11423859Sml29623 	/*
11433859Sml29623 	 * Get the interrupt cookie so the mutexes can be
11443859Sml29623 	 * initialized.
11453859Sml29623 	 */
11463859Sml29623 	ddi_status = ddi_get_iblock_cookie(nxgep->dip, 0,
11473859Sml29623 					&nxgep->interrupt_cookie);
11483859Sml29623 	if (ddi_status != DDI_SUCCESS) {
11493859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
11503859Sml29623 			"<== nxge_setup_mutexes: failed 0x%x", ddi_status));
11513859Sml29623 		goto nxge_setup_mutexes_exit;
11523859Sml29623 	}
11533859Sml29623 
11543859Sml29623 	/* Initialize global mutex */
11553859Sml29623 
11563859Sml29623 	if (nxge_mdio_lock_init == 0) {
11573859Sml29623 		MUTEX_INIT(&nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
11583859Sml29623 	}
11593859Sml29623 	atomic_add_32(&nxge_mdio_lock_init, 1);
11603859Sml29623 
11613859Sml29623 	if (nxge_mii_lock_init == 0) {
11623859Sml29623 		MUTEX_INIT(&nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
11633859Sml29623 	}
11643859Sml29623 	atomic_add_32(&nxge_mii_lock_init, 1);
11653859Sml29623 
11663859Sml29623 	nxgep->drv_state |= STATE_MDIO_LOCK_INIT;
11673859Sml29623 	nxgep->drv_state |= STATE_MII_LOCK_INIT;
11683859Sml29623 
11694693Stm144005 	cv_init(&nxgep->poll_cv, NULL, CV_DRIVER, NULL);
11704693Stm144005 	MUTEX_INIT(&nxgep->poll_lock, NULL,
11714693Stm144005 	    MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
11724693Stm144005 
11733859Sml29623 	/*
11744693Stm144005 	 * Initialize mutexes for this device.
11753859Sml29623 	 */
11763859Sml29623 	MUTEX_INIT(nxgep->genlock, NULL,
11773859Sml29623 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
11783859Sml29623 	MUTEX_INIT(&nxgep->ouraddr_lock, NULL,
11793859Sml29623 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
11803859Sml29623 	MUTEX_INIT(&nxgep->mif_lock, NULL,
11813859Sml29623 		MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
11823859Sml29623 	RW_INIT(&nxgep->filter_lock, NULL,
11833859Sml29623 		RW_DRIVER, (void *)nxgep->interrupt_cookie);
11843859Sml29623 
11853859Sml29623 	classify_ptr = &nxgep->classifier;
11863859Sml29623 		/*
11873859Sml29623 		 * FFLP mutexes are never used in interrupt context,
11883859Sml29623 		 * as FFLP operations can take a very long time to
11893859Sml29623 		 * complete and hence are not suitable to invoke from
11903859Sml29623 		 * interrupt handlers.
11913859Sml29623 		 */
11923859Sml29623 	MUTEX_INIT(&classify_ptr->tcam_lock, NULL,
11934732Sdavemq 	    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
11944977Sraghus 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
11953859Sml29623 		MUTEX_INIT(&classify_ptr->fcram_lock, NULL,
11964732Sdavemq 		    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
11973859Sml29623 		for (partition = 0; partition < MAX_PARTITION; partition++) {
11983859Sml29623 			MUTEX_INIT(&classify_ptr->hash_lock[partition], NULL,
11993859Sml29623 			    NXGE_MUTEX_DRIVER, (void *)nxgep->interrupt_cookie);
12003859Sml29623 		}
12013859Sml29623 	}
12023859Sml29623 
12033859Sml29623 nxge_setup_mutexes_exit:
12043859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
12054732Sdavemq 	    "<== nxge_setup_mutexes status = %x", status));
12063859Sml29623 
12073859Sml29623 	if (ddi_status != DDI_SUCCESS)
12083859Sml29623 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
12093859Sml29623 
12103859Sml29623 	return (status);
12113859Sml29623 }
12123859Sml29623 
12133859Sml29623 static void
12143859Sml29623 nxge_destroy_mutexes(p_nxge_t nxgep)
12153859Sml29623 {
12163859Sml29623 	int partition;
12173859Sml29623 	nxge_classify_t *classify_ptr;
12183859Sml29623 
12193859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_mutexes"));
12203859Sml29623 	RW_DESTROY(&nxgep->filter_lock);
12213859Sml29623 	MUTEX_DESTROY(&nxgep->mif_lock);
12223859Sml29623 	MUTEX_DESTROY(&nxgep->ouraddr_lock);
12233859Sml29623 	MUTEX_DESTROY(nxgep->genlock);
12243859Sml29623 
12253859Sml29623 	classify_ptr = &nxgep->classifier;
12263859Sml29623 	MUTEX_DESTROY(&classify_ptr->tcam_lock);
12273859Sml29623 
12284693Stm144005 	/* Destroy all polling resources. */
12294693Stm144005 	MUTEX_DESTROY(&nxgep->poll_lock);
12304693Stm144005 	cv_destroy(&nxgep->poll_cv);
12314693Stm144005 
12324693Stm144005 	/* free data structures, based on HW type */
12334977Sraghus 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
12343859Sml29623 		MUTEX_DESTROY(&classify_ptr->fcram_lock);
12353859Sml29623 		for (partition = 0; partition < MAX_PARTITION; partition++) {
12363859Sml29623 			MUTEX_DESTROY(&classify_ptr->hash_lock[partition]);
12373859Sml29623 		}
12383859Sml29623 	}
12393859Sml29623 	if (nxgep->drv_state & STATE_MDIO_LOCK_INIT) {
12403859Sml29623 		if (nxge_mdio_lock_init == 1) {
12413859Sml29623 			MUTEX_DESTROY(&nxge_mdio_lock);
12423859Sml29623 		}
12433859Sml29623 		atomic_add_32(&nxge_mdio_lock_init, -1);
12443859Sml29623 	}
12453859Sml29623 	if (nxgep->drv_state & STATE_MII_LOCK_INIT) {
12463859Sml29623 		if (nxge_mii_lock_init == 1) {
12473859Sml29623 			MUTEX_DESTROY(&nxge_mii_lock);
12483859Sml29623 		}
12493859Sml29623 		atomic_add_32(&nxge_mii_lock_init, -1);
12503859Sml29623 	}
12513859Sml29623 
12523859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_mutexes"));
12533859Sml29623 }
12543859Sml29623 
12553859Sml29623 nxge_status_t
12563859Sml29623 nxge_init(p_nxge_t nxgep)
12573859Sml29623 {
12583859Sml29623 	nxge_status_t	status = NXGE_OK;
12593859Sml29623 
12603859Sml29623 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_init"));
12613859Sml29623 
12623859Sml29623 	if (nxgep->drv_state & STATE_HW_INITIALIZED) {
12633859Sml29623 		return (status);
12643859Sml29623 	}
12653859Sml29623 
12663859Sml29623 	/*
12673859Sml29623 	 * Allocate system memory for the receive/transmit buffer blocks
12683859Sml29623 	 * and receive/transmit descriptor rings.
12693859Sml29623 	 */
12703859Sml29623 	status = nxge_alloc_mem_pool(nxgep);
12713859Sml29623 	if (status != NXGE_OK) {
12723859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "alloc mem failed\n"));
12733859Sml29623 		goto nxge_init_fail1;
12743859Sml29623 	}
12753859Sml29623 
12763859Sml29623 	/*
12773859Sml29623 	 * Initialize and enable TXC registers
12783859Sml29623 	 * (Globally enable TX controller,
12793859Sml29623 	 *  enable a port, configure dma channel bitmap,
12803859Sml29623 	 *  configure the max burst size).
12813859Sml29623 	 */
12823859Sml29623 	status = nxge_txc_init(nxgep);
12833859Sml29623 	if (status != NXGE_OK) {
12843859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txc failed\n"));
12853859Sml29623 		goto nxge_init_fail2;
12863859Sml29623 	}
12873859Sml29623 
12883859Sml29623 	/*
12893859Sml29623 	 * Initialize and enable TXDMA channels.
12903859Sml29623 	 */
12913859Sml29623 	status = nxge_init_txdma_channels(nxgep);
12923859Sml29623 	if (status != NXGE_OK) {
12933859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init txdma failed\n"));
12943859Sml29623 		goto nxge_init_fail3;
12953859Sml29623 	}
12963859Sml29623 
12973859Sml29623 	/*
12983859Sml29623 	 * Initialize and enable RXDMA channels.
12993859Sml29623 	 */
13003859Sml29623 	status = nxge_init_rxdma_channels(nxgep);
13013859Sml29623 	if (status != NXGE_OK) {
13023859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init rxdma failed\n"));
13033859Sml29623 		goto nxge_init_fail4;
13043859Sml29623 	}
13053859Sml29623 
13063859Sml29623 	/*
13073859Sml29623 	 * Initialize TCAM and FCRAM (Neptune).
13083859Sml29623 	 */
13093859Sml29623 	status = nxge_classify_init(nxgep);
13103859Sml29623 	if (status != NXGE_OK) {
13113859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init classify failed\n"));
13123859Sml29623 		goto nxge_init_fail5;
13133859Sml29623 	}
13143859Sml29623 
13153859Sml29623 	/*
13163859Sml29623 	 * Initialize ZCP
13173859Sml29623 	 */
13183859Sml29623 	status = nxge_zcp_init(nxgep);
13193859Sml29623 	if (status != NXGE_OK) {
13203859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init ZCP failed\n"));
13213859Sml29623 		goto nxge_init_fail5;
13223859Sml29623 	}
13233859Sml29623 
13243859Sml29623 	/*
13253859Sml29623 	 * Initialize IPP.
13263859Sml29623 	 */
13273859Sml29623 	status = nxge_ipp_init(nxgep);
13283859Sml29623 	if (status != NXGE_OK) {
13293859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init IPP failed\n"));
13303859Sml29623 		goto nxge_init_fail5;
13313859Sml29623 	}
13323859Sml29623 
13333859Sml29623 	/*
13343859Sml29623 	 * Initialize the MAC block.
13353859Sml29623 	 */
13363859Sml29623 	status = nxge_mac_init(nxgep);
13373859Sml29623 	if (status != NXGE_OK) {
13383859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "init MAC failed\n"));
13393859Sml29623 		goto nxge_init_fail5;
13403859Sml29623 	}
13413859Sml29623 
13423859Sml29623 	nxge_intrs_enable(nxgep);
13433859Sml29623 
13443859Sml29623 	/*
13453859Sml29623 	 * Enable hardware interrupts.
13463859Sml29623 	 */
13473859Sml29623 	nxge_intr_hw_enable(nxgep);
13483859Sml29623 	nxgep->drv_state |= STATE_HW_INITIALIZED;
13493859Sml29623 
13503859Sml29623 	goto nxge_init_exit;
13513859Sml29623 
13523859Sml29623 nxge_init_fail5:
13533859Sml29623 	nxge_uninit_rxdma_channels(nxgep);
13543859Sml29623 nxge_init_fail4:
13553859Sml29623 	nxge_uninit_txdma_channels(nxgep);
13563859Sml29623 nxge_init_fail3:
13573859Sml29623 	(void) nxge_txc_uninit(nxgep);
13583859Sml29623 nxge_init_fail2:
13593859Sml29623 	nxge_free_mem_pool(nxgep);
13603859Sml29623 nxge_init_fail1:
13613859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
13623859Sml29623 		"<== nxge_init status (failed) = 0x%08x", status));
13633859Sml29623 	return (status);
13643859Sml29623 
13653859Sml29623 nxge_init_exit:
13663859Sml29623 
13673859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_init status = 0x%08x",
13683859Sml29623 		status));
13693859Sml29623 	return (status);
13703859Sml29623 }
13713859Sml29623 
13723859Sml29623 
13733859Sml29623 timeout_id_t
13743859Sml29623 nxge_start_timer(p_nxge_t nxgep, fptrv_t func, int msec)
13753859Sml29623 {
13763859Sml29623 	if ((nxgep->suspended == 0) ||
13773859Sml29623 			(nxgep->suspended == DDI_RESUME)) {
13783859Sml29623 		return (timeout(func, (caddr_t)nxgep,
13793859Sml29623 			drv_usectohz(1000 * msec)));
13803859Sml29623 	}
13813859Sml29623 	return (NULL);
13823859Sml29623 }
13833859Sml29623 
13843859Sml29623 /*ARGSUSED*/
13853859Sml29623 void
13863859Sml29623 nxge_stop_timer(p_nxge_t nxgep, timeout_id_t timerid)
13873859Sml29623 {
13883859Sml29623 	if (timerid) {
13893859Sml29623 		(void) untimeout(timerid);
13903859Sml29623 	}
13913859Sml29623 }
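
/*
 * Usage sketch for the two timer helpers above (the callback name below is
 * purely illustrative): nxge_start_timer() converts the millisecond interval
 * to clock ticks with drv_usectohz(1000 * msec) and returns NULL while the
 * device is suspended (other than DDI_RESUME); nxge_stop_timer() cancels an
 * outstanding timeout if one is pending.
 *
 *	nxgep->nxge_timerid = nxge_start_timer(nxgep, my_poll_cb, 1000);
 *	...
 *	nxge_stop_timer(nxgep, nxgep->nxge_timerid);
 *	nxgep->nxge_timerid = 0;
 */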
13923859Sml29623 
13933859Sml29623 void
13943859Sml29623 nxge_uninit(p_nxge_t nxgep)
13953859Sml29623 {
13963859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_uninit"));
13973859Sml29623 
13983859Sml29623 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
13993859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
14003859Sml29623 			"==> nxge_uninit: not initialized"));
14013859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
14023859Sml29623 			"<== nxge_uninit"));
14033859Sml29623 		return;
14043859Sml29623 	}
14053859Sml29623 
14063859Sml29623 	/* stop timer */
14073859Sml29623 	if (nxgep->nxge_timerid) {
14083859Sml29623 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
14093859Sml29623 		nxgep->nxge_timerid = 0;
14103859Sml29623 	}
14113859Sml29623 
14123859Sml29623 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
14133859Sml29623 	(void) nxge_intr_hw_disable(nxgep);
14143859Sml29623 
14153859Sml29623 	/*
14163859Sml29623 	 * Reset the receive MAC side.
14173859Sml29623 	 */
14183859Sml29623 	(void) nxge_rx_mac_disable(nxgep);
14193859Sml29623 
14203859Sml29623 	/* Disable and soft reset the IPP */
14213859Sml29623 	(void) nxge_ipp_disable(nxgep);
14223859Sml29623 
14233859Sml29623 	/* Free classification resources */
14243859Sml29623 	(void) nxge_classify_uninit(nxgep);
14253859Sml29623 
14263859Sml29623 	/*
14273859Sml29623 	 * Reset the transmit/receive DMA side.
14283859Sml29623 	 */
14293859Sml29623 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
14303859Sml29623 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
14313859Sml29623 
14323859Sml29623 	nxge_uninit_txdma_channels(nxgep);
14333859Sml29623 	nxge_uninit_rxdma_channels(nxgep);
14343859Sml29623 
14353859Sml29623 	/*
14363859Sml29623 	 * Reset the transmit MAC side.
14373859Sml29623 	 */
14383859Sml29623 	(void) nxge_tx_mac_disable(nxgep);
14393859Sml29623 
14403859Sml29623 	nxge_free_mem_pool(nxgep);
14413859Sml29623 
14423859Sml29623 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
14433859Sml29623 
14443859Sml29623 	nxgep->drv_state &= ~STATE_HW_INITIALIZED;
14453859Sml29623 
14463859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_uninit: "
14473859Sml29623 		"nxge_mblks_pending %d", nxge_mblks_pending));
14483859Sml29623 }
14493859Sml29623 
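/*
 * nxge_get64()/nxge_put64() are simple register peek/poke helpers,
 * presumably driven by a diagnostic path elsewhere in the driver: the first
 * 8 bytes of the mblk carry a 64-bit register offset, and nxge_put64() takes
 * a second 64-bit word holding the value to write.  nxge_get64() reads the
 * register and copies the result back over the offset in the same mblk.
 */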
14503859Sml29623 void
14513859Sml29623 nxge_get64(p_nxge_t nxgep, p_mblk_t mp)
14523859Sml29623 {
14535125Sjoycey #if defined(__i386)
14545125Sjoycey 	size_t		reg;
14555125Sjoycey #else
14563859Sml29623 	uint64_t	reg;
14575125Sjoycey #endif
14583859Sml29623 	uint64_t	regdata;
14593859Sml29623 	int		i, retry;
14603859Sml29623 
14613859Sml29623 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
14623859Sml29623 	regdata = 0;
14633859Sml29623 	retry = 1;
14643859Sml29623 
14653859Sml29623 	for (i = 0; i < retry; i++) {
14663859Sml29623 		NXGE_REG_RD64(nxgep->npi_handle, reg, &regdata);
14673859Sml29623 	}
14683859Sml29623 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
14693859Sml29623 }
14703859Sml29623 
14713859Sml29623 void
14723859Sml29623 nxge_put64(p_nxge_t nxgep, p_mblk_t mp)
14733859Sml29623 {
14745125Sjoycey #if defined(__i386)
14755125Sjoycey 	size_t		reg;
14765125Sjoycey #else
14773859Sml29623 	uint64_t	reg;
14785125Sjoycey #endif
14793859Sml29623 	uint64_t	buf[2];
14803859Sml29623 
14813859Sml29623 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
14825133Sjoycey #if defined(__i386)
14835133Sjoycey 	reg = (size_t)buf[0];
14845133Sjoycey #else
14853859Sml29623 	reg = buf[0];
14865133Sjoycey #endif
14873859Sml29623 
14883859Sml29623 	NXGE_NPI_PIO_WRITE64(nxgep->npi_handle, reg, buf[1]);
14893859Sml29623 }
14903859Sml29623 
14913859Sml29623 
14923859Sml29623 nxge_os_mutex_t nxgedebuglock;
14933859Sml29623 int nxge_debug_init = 0;
14943859Sml29623 
14953859Sml29623 /*ARGSUSED*/
14963859Sml29623 /*VARARGS*/
14973859Sml29623 void
14983859Sml29623 nxge_debug_msg(p_nxge_t nxgep, uint64_t level, char *fmt, ...)
14993859Sml29623 {
15003859Sml29623 	char msg_buffer[1048];
15013859Sml29623 	char prefix_buffer[32];
15023859Sml29623 	int instance;
15033859Sml29623 	uint64_t debug_level;
15043859Sml29623 	int cmn_level = CE_CONT;
15053859Sml29623 	va_list ap;
15063859Sml29623 
15073859Sml29623 	debug_level = (nxgep == NULL) ? nxge_debug_level :
15083859Sml29623 		nxgep->nxge_debug_level;
15093859Sml29623 
15103859Sml29623 	if ((level & debug_level) ||
15113859Sml29623 		(level == NXGE_NOTE) ||
15123859Sml29623 		(level == NXGE_ERR_CTL)) {
15133859Sml29623 		/* do the msg processing */
15143859Sml29623 		if (nxge_debug_init == 0) {
15153859Sml29623 			MUTEX_INIT(&nxgedebuglock, NULL, MUTEX_DRIVER, NULL);
15163859Sml29623 			nxge_debug_init = 1;
15173859Sml29623 		}
15183859Sml29623 
15193859Sml29623 		MUTEX_ENTER(&nxgedebuglock);
15203859Sml29623 
15213859Sml29623 		if ((level & NXGE_NOTE)) {
15223859Sml29623 			cmn_level = CE_NOTE;
15233859Sml29623 		}
15243859Sml29623 
15253859Sml29623 		if (level & NXGE_ERR_CTL) {
15263859Sml29623 			cmn_level = CE_WARN;
15273859Sml29623 		}
15283859Sml29623 
15293859Sml29623 		va_start(ap, fmt);
15303859Sml29623 		(void) vsprintf(msg_buffer, fmt, ap);
15313859Sml29623 		va_end(ap);
15323859Sml29623 		if (nxgep == NULL) {
15333859Sml29623 			instance = -1;
15343859Sml29623 			(void) sprintf(prefix_buffer, "%s :", "nxge");
15353859Sml29623 		} else {
15363859Sml29623 			instance = nxgep->instance;
15373859Sml29623 			(void) sprintf(prefix_buffer,
15383859Sml29623 						    "%s%d :", "nxge", instance);
15393859Sml29623 		}
15403859Sml29623 
15413859Sml29623 		MUTEX_EXIT(&nxgedebuglock);
15423859Sml29623 		cmn_err(cmn_level, "!%s %s\n",
15433859Sml29623 				prefix_buffer, msg_buffer);
15443859Sml29623 
15453859Sml29623 	}
15463859Sml29623 }
15473859Sml29623 
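/*
 * nxge_dump_packet() renders packet bytes as colon-separated hex with
 * leading zero nibbles suppressed, e.g. "0:1:2b:ff".  A size of 0 defaults
 * to 60 bytes; anything larger than MAX_DUMP_SZ is shown as the first and
 * last MAX_DUMP_SZ/2 bytes separated by a run of 20 dots.  The result lives
 * in a static buffer, so the caller must consume it before the next call.
 */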
15483859Sml29623 char *
15493859Sml29623 nxge_dump_packet(char *addr, int size)
15503859Sml29623 {
15513859Sml29623 	uchar_t *ap = (uchar_t *)addr;
15523859Sml29623 	int i;
15533859Sml29623 	static char etherbuf[1024];
15543859Sml29623 	char *cp = etherbuf;
15553859Sml29623 	char digits[] = "0123456789abcdef";
15563859Sml29623 
15573859Sml29623 	if (!size)
15583859Sml29623 		size = 60;
15593859Sml29623 
15603859Sml29623 	if (size > MAX_DUMP_SZ) {
15613859Sml29623 		/* Dump the leading bytes */
15623859Sml29623 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
15633859Sml29623 			if (*ap > 0x0f)
15643859Sml29623 				*cp++ = digits[*ap >> 4];
15653859Sml29623 			*cp++ = digits[*ap++ & 0xf];
15663859Sml29623 			*cp++ = ':';
15673859Sml29623 		}
15683859Sml29623 		for (i = 0; i < 20; i++)
15693859Sml29623 			*cp++ = '.';
15703859Sml29623 		/* Dump the last MAX_DUMP_SZ/2 bytes */
15713859Sml29623 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ/2));
15723859Sml29623 		for (i = 0; i < MAX_DUMP_SZ/2; i++) {
15733859Sml29623 			if (*ap > 0x0f)
15743859Sml29623 				*cp++ = digits[*ap >> 4];
15753859Sml29623 			*cp++ = digits[*ap++ & 0xf];
15763859Sml29623 			*cp++ = ':';
15773859Sml29623 		}
15783859Sml29623 	} else {
15793859Sml29623 		for (i = 0; i < size; i++) {
15803859Sml29623 			if (*ap > 0x0f)
15813859Sml29623 				*cp++ = digits[*ap >> 4];
15823859Sml29623 			*cp++ = digits[*ap++ & 0xf];
15833859Sml29623 			*cp++ = ':';
15843859Sml29623 		}
15853859Sml29623 	}
15863859Sml29623 	*--cp = 0;
15873859Sml29623 	return (etherbuf);
15883859Sml29623 }
15893859Sml29623 
15903859Sml29623 #ifdef	NXGE_DEBUG
15913859Sml29623 static void
15923859Sml29623 nxge_test_map_regs(p_nxge_t nxgep)
15933859Sml29623 {
15943859Sml29623 	ddi_acc_handle_t cfg_handle;
15953859Sml29623 	p_pci_cfg_t	cfg_ptr;
15963859Sml29623 	ddi_acc_handle_t dev_handle;
15973859Sml29623 	char		*dev_ptr;
15983859Sml29623 	ddi_acc_handle_t pci_config_handle;
15993859Sml29623 	uint32_t	regval;
16003859Sml29623 	int		i;
16013859Sml29623 
16023859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_test_map_regs"));
16033859Sml29623 
16043859Sml29623 	dev_handle = nxgep->dev_regs->nxge_regh;
16053859Sml29623 	dev_ptr = (char *)nxgep->dev_regs->nxge_regp;
16063859Sml29623 
16074977Sraghus 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
16083859Sml29623 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
16093859Sml29623 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
16103859Sml29623 
16113859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
16124732Sdavemq 		    "Neptune PCI regp cfg_ptr 0x%llx", (char *)cfg_ptr));
16133859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
16144732Sdavemq 		    "Neptune PCI cfg_ptr vendor id ptr 0x%llx",
16154732Sdavemq 		    &cfg_ptr->vendorid));
16163859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
16174732Sdavemq 		    "\tvendorid 0x%x devid 0x%x",
16184732Sdavemq 		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->vendorid, 0),
16194732Sdavemq 		    NXGE_PIO_READ16(cfg_handle, &cfg_ptr->devid,    0)));
16203859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
16214732Sdavemq 		    "PCI BAR: base 0x%x base14 0x%x base 18 0x%x "
16224732Sdavemq 		    "bar1c 0x%x",
16234732Sdavemq 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base,   0),
16244732Sdavemq 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base14, 0),
16254732Sdavemq 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base18, 0),
16264732Sdavemq 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base1c, 0)));
16273859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
16284732Sdavemq 		    "\nNeptune PCI BAR: base20 0x%x base24 0x%x "
16294732Sdavemq 		    "base 28 0x%x bar2c 0x%x\n",
16304732Sdavemq 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base20, 0),
16314732Sdavemq 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base24, 0),
16324732Sdavemq 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base28, 0),
16334732Sdavemq 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base2c, 0)));
16343859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
16354732Sdavemq 		    "\nNeptune PCI BAR: base30 0x%x\n",
16364732Sdavemq 		    NXGE_PIO_READ32(cfg_handle, &cfg_ptr->base30, 0)));
16373859Sml29623 
16383859Sml29623 		cfg_handle = nxgep->dev_regs->nxge_pciregh;
16393859Sml29623 		cfg_ptr = (void *)nxgep->dev_regs->nxge_pciregp;
16403859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
16414732Sdavemq 		    "first  0x%llx second 0x%llx third 0x%llx "
16424732Sdavemq 		    "last 0x%llx ",
16434732Sdavemq 		    NXGE_PIO_READ64(dev_handle,
16444732Sdavemq 		    (uint64_t *)(dev_ptr + 0),  0),
16454732Sdavemq 		    NXGE_PIO_READ64(dev_handle,
16464732Sdavemq 		    (uint64_t *)(dev_ptr + 8),  0),
16474732Sdavemq 		    NXGE_PIO_READ64(dev_handle,
16484732Sdavemq 		    (uint64_t *)(dev_ptr + 16), 0),
16494732Sdavemq 		    NXGE_PIO_READ64(cfg_handle,
16504732Sdavemq 		    (uint64_t *)(dev_ptr + 24), 0)));
16513859Sml29623 	}
16523859Sml29623 }
16533859Sml29623 
16543859Sml29623 #endif
16553859Sml29623 
16563859Sml29623 static void
16573859Sml29623 nxge_suspend(p_nxge_t nxgep)
16583859Sml29623 {
16593859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_suspend"));
16603859Sml29623 
16613859Sml29623 	nxge_intrs_disable(nxgep);
16623859Sml29623 	nxge_destroy_dev(nxgep);
16633859Sml29623 
16643859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_suspend"));
16653859Sml29623 }
16663859Sml29623 
16673859Sml29623 static nxge_status_t
16683859Sml29623 nxge_resume(p_nxge_t nxgep)
16693859Sml29623 {
16703859Sml29623 	nxge_status_t status = NXGE_OK;
16713859Sml29623 
16723859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_resume"));
16734587Sjoycey 
16743859Sml29623 	nxgep->suspended = DDI_RESUME;
16754587Sjoycey 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
16764587Sjoycey 	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
16774587Sjoycey 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
16784587Sjoycey 	(void) nxge_rx_mac_enable(nxgep);
16794587Sjoycey 	(void) nxge_tx_mac_enable(nxgep);
16804587Sjoycey 	nxge_intrs_enable(nxgep);
16813859Sml29623 	nxgep->suspended = 0;
16823859Sml29623 
16833859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
16843859Sml29623 			"<== nxge_resume status = 0x%x", status));
16853859Sml29623 	return (status);
16863859Sml29623 }
16873859Sml29623 
16883859Sml29623 static nxge_status_t
16893859Sml29623 nxge_setup_dev(p_nxge_t nxgep)
16903859Sml29623 {
16913859Sml29623 	nxge_status_t	status = NXGE_OK;
16923859Sml29623 
16933859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_dev port %d",
16944732Sdavemq 	    nxgep->mac.portnum));
16953859Sml29623 
16963859Sml29623 	status = nxge_link_init(nxgep);
16973859Sml29623 
16983859Sml29623 	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
16993859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
17003859Sml29623 			"port%d Bad register acc handle", nxgep->mac.portnum));
17013859Sml29623 		status = NXGE_ERROR;
17023859Sml29623 	}
17033859Sml29623 
17043859Sml29623 	if (status != NXGE_OK) {
17053859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
17063859Sml29623 			    " nxge_setup_dev status "
17073859Sml29623 			    "(xcvr init 0x%08x)", status));
17083859Sml29623 		goto nxge_setup_dev_exit;
17093859Sml29623 	}
17103859Sml29623 
17113859Sml29623 nxge_setup_dev_exit:
17123859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
17133859Sml29623 		"<== nxge_setup_dev port %d status = 0x%08x",
17143859Sml29623 		nxgep->mac.portnum, status));
17153859Sml29623 
17163859Sml29623 	return (status);
17173859Sml29623 }
17183859Sml29623 
17193859Sml29623 static void
17203859Sml29623 nxge_destroy_dev(p_nxge_t nxgep)
17213859Sml29623 {
17223859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_destroy_dev"));
17233859Sml29623 
17243859Sml29623 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
17253859Sml29623 
17263859Sml29623 	(void) nxge_hw_stop(nxgep);
17273859Sml29623 
17283859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_destroy_dev"));
17293859Sml29623 }
17303859Sml29623 
17313859Sml29623 static nxge_status_t
17323859Sml29623 nxge_setup_system_dma_pages(p_nxge_t nxgep)
17333859Sml29623 {
17343859Sml29623 	int 			ddi_status = DDI_SUCCESS;
17353859Sml29623 	uint_t 			count;
17363859Sml29623 	ddi_dma_cookie_t 	cookie;
17373859Sml29623 	uint_t 			iommu_pagesize;
17383859Sml29623 	nxge_status_t		status = NXGE_OK;
17393859Sml29623 
17403859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_setup_system_dma_pages"));
17413859Sml29623 	nxgep->sys_page_sz = ddi_ptob(nxgep->dip, (ulong_t)1);
17423859Sml29623 	if (nxgep->niu_type != N2_NIU) {
17433859Sml29623 		iommu_pagesize = dvma_pagesize(nxgep->dip);
17443859Sml29623 		NXGE_DEBUG_MSG((nxgep, DDI_CTL,
17453859Sml29623 			" nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
17463859Sml29623 			" default_block_size %d iommu_pagesize %d",
17473859Sml29623 			nxgep->sys_page_sz,
17483859Sml29623 			ddi_ptob(nxgep->dip, (ulong_t)1),
17493859Sml29623 			nxgep->rx_default_block_size,
17503859Sml29623 			iommu_pagesize));
17513859Sml29623 
17523859Sml29623 		if (iommu_pagesize != 0) {
17533859Sml29623 			if (nxgep->sys_page_sz == iommu_pagesize) {
17543859Sml29623 				if (iommu_pagesize > 0x4000)
17553859Sml29623 					nxgep->sys_page_sz = 0x4000;
17563859Sml29623 			} else {
17573859Sml29623 				if (nxgep->sys_page_sz > iommu_pagesize)
17583859Sml29623 					nxgep->sys_page_sz = iommu_pagesize;
17593859Sml29623 			}
17603859Sml29623 		}
17613859Sml29623 	}
17623859Sml29623 	nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
17633859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
17643859Sml29623 		"==> nxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
17653859Sml29623 		"default_block_size %d page mask %d",
17663859Sml29623 		nxgep->sys_page_sz,
17673859Sml29623 		ddi_ptob(nxgep->dip, (ulong_t)1),
17683859Sml29623 		nxgep->rx_default_block_size,
17693859Sml29623 		nxgep->sys_page_mask));
17703859Sml29623 
17713859Sml29623 
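	/*
	 * Map the (possibly clamped) system page size to the receive block
	 * size and the corresponding RBR block-size code; any size other
	 * than 4K/8K/16K/32K falls back to 4K.
	 */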
17723859Sml29623 	switch (nxgep->sys_page_sz) {
17733859Sml29623 	default:
17743859Sml29623 		nxgep->sys_page_sz = 0x1000;
17753859Sml29623 		nxgep->sys_page_mask = ~(nxgep->sys_page_sz - 1);
17763859Sml29623 		nxgep->rx_default_block_size = 0x1000;
17773859Sml29623 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
17783859Sml29623 		break;
17793859Sml29623 	case 0x1000:
17803859Sml29623 		nxgep->rx_default_block_size = 0x1000;
17813859Sml29623 		nxgep->rx_bksize_code = RBR_BKSIZE_4K;
17823859Sml29623 		break;
17833859Sml29623 	case 0x2000:
17843859Sml29623 		nxgep->rx_default_block_size = 0x2000;
17853859Sml29623 		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
17863859Sml29623 		break;
17873859Sml29623 	case 0x4000:
17883859Sml29623 		nxgep->rx_default_block_size = 0x4000;
17893859Sml29623 		nxgep->rx_bksize_code = RBR_BKSIZE_16K;
17903859Sml29623 		break;
17913859Sml29623 	case 0x8000:
17923859Sml29623 		nxgep->rx_default_block_size = 0x8000;
17933859Sml29623 		nxgep->rx_bksize_code = RBR_BKSIZE_32K;
17943859Sml29623 		break;
17953859Sml29623 	}
17963859Sml29623 
17973859Sml29623 #ifndef USE_RX_BIG_BUF
17983859Sml29623 	nxge_rx_dma_attr.dma_attr_align = nxgep->sys_page_sz;
17993859Sml29623 #else
18003859Sml29623 		nxgep->rx_default_block_size = 0x2000;
18013859Sml29623 		nxgep->rx_bksize_code = RBR_BKSIZE_8K;
18023859Sml29623 #endif
18033859Sml29623 	/*
18043859Sml29623 	 * Get the system DMA burst size.
18053859Sml29623 	 */
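	/*
	 * A throwaway ("spare") DMA handle is allocated and bound to an
	 * arbitrary kernel address (the handle itself) solely so that
	 * ddi_dma_burstsizes() can report the burst sizes the platform
	 * supports for this device; the binding and the handle are released
	 * again immediately below.
	 */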
18063859Sml29623 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
18073859Sml29623 			DDI_DMA_DONTWAIT, 0,
18083859Sml29623 			&nxgep->dmasparehandle);
18093859Sml29623 	if (ddi_status != DDI_SUCCESS) {
18103859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
18113859Sml29623 			"ddi_dma_alloc_handle: failed "
18123859Sml29623 			" status 0x%x", ddi_status));
18133859Sml29623 		goto nxge_get_soft_properties_exit;
18143859Sml29623 	}
18153859Sml29623 
18163859Sml29623 	ddi_status = ddi_dma_addr_bind_handle(nxgep->dmasparehandle, NULL,
18173859Sml29623 				(caddr_t)nxgep->dmasparehandle,
18183859Sml29623 				sizeof (nxgep->dmasparehandle),
18193859Sml29623 				DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
18203859Sml29623 				DDI_DMA_DONTWAIT, 0,
18213859Sml29623 				&cookie, &count);
18223859Sml29623 	if (ddi_status != DDI_DMA_MAPPED) {
18233859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
18243859Sml29623 			"Binding spare handle to find system"
18253859Sml29623 			" burstsize failed."));
18263859Sml29623 		ddi_status = DDI_FAILURE;
18273859Sml29623 		goto nxge_get_soft_properties_fail1;
18283859Sml29623 	}
18293859Sml29623 
18303859Sml29623 	nxgep->sys_burst_sz = ddi_dma_burstsizes(nxgep->dmasparehandle);
18313859Sml29623 	(void) ddi_dma_unbind_handle(nxgep->dmasparehandle);
18323859Sml29623 
18333859Sml29623 nxge_get_soft_properties_fail1:
18343859Sml29623 	ddi_dma_free_handle(&nxgep->dmasparehandle);
18353859Sml29623 
18363859Sml29623 nxge_get_soft_properties_exit:
18373859Sml29623 
18383859Sml29623 	if (ddi_status != DDI_SUCCESS)
18393859Sml29623 		status |= (NXGE_ERROR | NXGE_DDI_FAILED);
18403859Sml29623 
18413859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
18423859Sml29623 		"<== nxge_setup_system_dma_pages status = 0x%08x", status));
18433859Sml29623 	return (status);
18443859Sml29623 }
18453859Sml29623 
18463859Sml29623 static nxge_status_t
18473859Sml29623 nxge_alloc_mem_pool(p_nxge_t nxgep)
18483859Sml29623 {
18493859Sml29623 	nxge_status_t	status = NXGE_OK;
18503859Sml29623 
18513859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_alloc_mem_pool"));
18523859Sml29623 
18533859Sml29623 	status = nxge_alloc_rx_mem_pool(nxgep);
18543859Sml29623 	if (status != NXGE_OK) {
18553859Sml29623 		return (NXGE_ERROR);
18563859Sml29623 	}
18573859Sml29623 
18583859Sml29623 	status = nxge_alloc_tx_mem_pool(nxgep);
18593859Sml29623 	if (status != NXGE_OK) {
18603859Sml29623 		nxge_free_rx_mem_pool(nxgep);
18613859Sml29623 		return (NXGE_ERROR);
18623859Sml29623 	}
18633859Sml29623 
18643859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_alloc_mem_pool"));
18653859Sml29623 	return (NXGE_OK);
18663859Sml29623 }
18673859Sml29623 
18683859Sml29623 static void
18693859Sml29623 nxge_free_mem_pool(p_nxge_t nxgep)
18703859Sml29623 {
18713859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_mem_pool"));
18723859Sml29623 
18733859Sml29623 	nxge_free_rx_mem_pool(nxgep);
18743859Sml29623 	nxge_free_tx_mem_pool(nxgep);
18753859Sml29623 
18763859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_mem_pool"));
18773859Sml29623 }
18783859Sml29623 
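/*
 * nxge_alloc_rx_mem_pool() builds two pools for the port's receive DMA
 * channels: a data-buffer pool (one or more chunks per RDC, sized from the
 * RBR block count and the default block size) and a control pool holding
 * each RDC's descriptor ring, completion ring and mailbox.  The RBR/RCR
 * sizes are first rounded and clamped to the supported limits; on any
 * allocation failure everything allocated so far is freed before returning.
 */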
18793859Sml29623 static nxge_status_t
18803859Sml29623 nxge_alloc_rx_mem_pool(p_nxge_t nxgep)
18813859Sml29623 {
18823859Sml29623 	int			i, j;
18833859Sml29623 	uint32_t		ndmas, st_rdc;
18843859Sml29623 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
18853859Sml29623 	p_nxge_hw_pt_cfg_t	p_cfgp;
18863859Sml29623 	p_nxge_dma_pool_t	dma_poolp;
18873859Sml29623 	p_nxge_dma_common_t	*dma_buf_p;
18883859Sml29623 	p_nxge_dma_pool_t	dma_cntl_poolp;
18893859Sml29623 	p_nxge_dma_common_t	*dma_cntl_p;
18903859Sml29623 	size_t			rx_buf_alloc_size;
18913859Sml29623 	size_t			rx_cntl_alloc_size;
18923859Sml29623 	uint32_t 		*num_chunks; /* per dma */
18933859Sml29623 	nxge_status_t		status = NXGE_OK;
18943859Sml29623 
18953859Sml29623 	uint32_t		nxge_port_rbr_size;
18963859Sml29623 	uint32_t		nxge_port_rbr_spare_size;
18973859Sml29623 	uint32_t		nxge_port_rcr_size;
18983859Sml29623 
18993859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_mem_pool"));
19003859Sml29623 
19013859Sml29623 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
19023859Sml29623 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
19033859Sml29623 	st_rdc = p_cfgp->start_rdc;
19043859Sml29623 	ndmas = p_cfgp->max_rdcs;
19053859Sml29623 
19063859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
19073859Sml29623 		" nxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
19083859Sml29623 
19093859Sml29623 	/*
19103859Sml29623 	 * Allocate memory for each receive DMA channel.
19113859Sml29623 	 */
19123859Sml29623 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
19133859Sml29623 			KM_SLEEP);
19143859Sml29623 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
19153859Sml29623 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
19163859Sml29623 
19173859Sml29623 	dma_cntl_poolp = (p_nxge_dma_pool_t)
19183859Sml29623 				KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
19193859Sml29623 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
19203859Sml29623 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
19213859Sml29623 
19223859Sml29623 	num_chunks = (uint32_t *)KMEM_ZALLOC(
19233859Sml29623 			sizeof (uint32_t) * ndmas, KM_SLEEP);
19243859Sml29623 
19253859Sml29623 	/*
19263859Sml29623 	 * Assume that each DMA channel will be configured with default
19273859Sml29623 	 * block size.
19283859Sml29623 	 * RBR block counts must be a multiple of the batch count (16).
19293859Sml29623 	 */
19303859Sml29623 	nxge_port_rbr_size = p_all_cfgp->rbr_size;
19313859Sml29623 	nxge_port_rcr_size = p_all_cfgp->rcr_size;
19323859Sml29623 
19333859Sml29623 	if (!nxge_port_rbr_size) {
19343859Sml29623 		nxge_port_rbr_size = NXGE_RBR_RBB_DEFAULT;
19353859Sml29623 	}
19363859Sml29623 	if (nxge_port_rbr_size % NXGE_RXDMA_POST_BATCH) {
19373859Sml29623 		nxge_port_rbr_size = (NXGE_RXDMA_POST_BATCH *
19383859Sml29623 			(nxge_port_rbr_size / NXGE_RXDMA_POST_BATCH + 1));
19393859Sml29623 	}
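	/*
	 * Example (assuming NXGE_RXDMA_POST_BATCH is the batch count of 16
	 * noted above): a requested RBR size of 1000 is not a multiple of
	 * 16 and is therefore rounded up to 16 * (1000 / 16 + 1) = 1008
	 * blocks.
	 */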
19403859Sml29623 
19413859Sml29623 	p_all_cfgp->rbr_size = nxge_port_rbr_size;
19423859Sml29623 	nxge_port_rbr_spare_size = nxge_rbr_spare_size;
19433859Sml29623 
19443859Sml29623 	if (nxge_port_rbr_spare_size % NXGE_RXDMA_POST_BATCH) {
19453859Sml29623 		nxge_port_rbr_spare_size = (NXGE_RXDMA_POST_BATCH *
19463859Sml29623 			(nxge_port_rbr_spare_size / NXGE_RXDMA_POST_BATCH + 1));
19473859Sml29623 	}
1948*5770Sml29623 	if (nxge_port_rbr_size > RBR_DEFAULT_MAX_BLKS) {
1949*5770Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
1950*5770Sml29623 		    "nxge_alloc_rx_mem_pool: RBR size too high %d, "
1951*5770Sml29623 		    "set to default %d",
1952*5770Sml29623 		    nxge_port_rbr_size, RBR_DEFAULT_MAX_BLKS));
1953*5770Sml29623 		nxge_port_rbr_size = RBR_DEFAULT_MAX_BLKS;
1954*5770Sml29623 	}
1955*5770Sml29623 	if (nxge_port_rcr_size > RCR_DEFAULT_MAX) {
1956*5770Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
1957*5770Sml29623 		    "nxge_alloc_rx_mem_pool: RCR too high %d, "
1958*5770Sml29623 		    "set to default %d",
1959*5770Sml29623 		    nxge_port_rcr_size, RCR_DEFAULT_MAX));
1960*5770Sml29623 		nxge_port_rcr_size = RCR_DEFAULT_MAX;
1961*5770Sml29623 	}
19623859Sml29623 
19633859Sml29623 	/*
19643859Sml29623 	 * N2/NIU limits the descriptor setup: contiguous memory allocation
19653859Sml29623 	 * for data buffers is capped at 4M (contig_mem_alloc), and the
19663859Sml29623 	 * control buffers must be little endian and allocated with the
19673859Sml29623 	 * ddi/dki memory allocation functions.
19683859Sml29623 	 */
19693859Sml29623 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
19703859Sml29623 	if (nxgep->niu_type == N2_NIU) {
19713859Sml29623 		nxge_port_rbr_spare_size = 0;
19723859Sml29623 		if ((nxge_port_rbr_size > NXGE_NIU_CONTIG_RBR_MAX) ||
19733859Sml29623 				(!ISP2(nxge_port_rbr_size))) {
19743859Sml29623 			nxge_port_rbr_size = NXGE_NIU_CONTIG_RBR_MAX;
19753859Sml29623 		}
19763859Sml29623 		if ((nxge_port_rcr_size > NXGE_NIU_CONTIG_RCR_MAX) ||
19773859Sml29623 				(!ISP2(nxge_port_rcr_size))) {
19783859Sml29623 			nxge_port_rcr_size = NXGE_NIU_CONTIG_RCR_MAX;
19793859Sml29623 		}
19803859Sml29623 	}
19813859Sml29623 #endif
19823859Sml29623 
19833859Sml29623 	rx_buf_alloc_size = (nxgep->rx_default_block_size *
19843859Sml29623 		(nxge_port_rbr_size + nxge_port_rbr_spare_size));
19853859Sml29623 
19863859Sml29623 	/*
19873859Sml29623 	 * Addresses of receive block ring, receive completion ring and the
19883859Sml29623 	 * mailbox must be all cache-aligned (64 bytes).
19893859Sml29623 	 */
19903859Sml29623 	rx_cntl_alloc_size = nxge_port_rbr_size + nxge_port_rbr_spare_size;
19913859Sml29623 	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
19923859Sml29623 	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * nxge_port_rcr_size);
19933859Sml29623 	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
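	/*
	 * I.e. the per-channel control area holds one rx_desc_t per RBR
	 * block (spares included), one rcr_entry_t per RCR entry and a
	 * single rxdma_mailbox_t, and is later allocated as one
	 * cache-aligned region.
	 */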
19943859Sml29623 
19953859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_alloc_rx_mem_pool: "
19963859Sml29623 		"nxge_port_rbr_size = %d nxge_port_rbr_spare_size = %d "
19973859Sml29623 		"nxge_port_rcr_size = %d "
19983859Sml29623 		"rx_cntl_alloc_size = %d",
19993859Sml29623 		nxge_port_rbr_size, nxge_port_rbr_spare_size,
20003859Sml29623 		nxge_port_rcr_size,
20013859Sml29623 		rx_cntl_alloc_size));
20023859Sml29623 
20033859Sml29623 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
20043859Sml29623 	if (nxgep->niu_type == N2_NIU) {
20053859Sml29623 		if (!ISP2(rx_buf_alloc_size)) {
20063859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
20073859Sml29623 				"==> nxge_alloc_rx_mem_pool: "
20083859Sml29623 				" must be a power of 2"));
20093859Sml29623 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
20103859Sml29623 			goto nxge_alloc_rx_mem_pool_exit;
20113859Sml29623 		}
20123859Sml29623 
20133859Sml29623 		if (rx_buf_alloc_size > (1 << 22)) {
20143859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
20153859Sml29623 				"==> nxge_alloc_rx_mem_pool: "
20163859Sml29623 				" limit size to 4M"));
20173859Sml29623 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
20183859Sml29623 			goto nxge_alloc_rx_mem_pool_exit;
20193859Sml29623 		}
20203859Sml29623 
20213859Sml29623 		if (rx_cntl_alloc_size < 0x2000) {
20223859Sml29623 			rx_cntl_alloc_size = 0x2000;
20233859Sml29623 		}
20243859Sml29623 	}
20253859Sml29623 #endif
20263859Sml29623 	nxgep->nxge_port_rbr_size = nxge_port_rbr_size;
20273859Sml29623 	nxgep->nxge_port_rcr_size = nxge_port_rcr_size;
20283859Sml29623 
20293859Sml29623 	/*
20303859Sml29623 	 * Allocate memory for receive buffers and descriptor rings.
20313859Sml29623 	 * Replace allocation functions with interface functions provided
20323859Sml29623 	 * by the partition manager when it is available.
20333859Sml29623 	 */
20343859Sml29623 	/*
20353859Sml29623 	 * Allocate memory for the receive buffer blocks.
20363859Sml29623 	 */
20373859Sml29623 	for (i = 0; i < ndmas; i++) {
20383859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
20393859Sml29623 			" nxge_alloc_rx_mem_pool to alloc mem: "
20403859Sml29623 			" dma %d dma_buf_p %llx &dma_buf_p %llx",
20413859Sml29623 			i, dma_buf_p[i], &dma_buf_p[i]));
20423859Sml29623 		num_chunks[i] = 0;
20433859Sml29623 		status = nxge_alloc_rx_buf_dma(nxgep, st_rdc, &dma_buf_p[i],
20443859Sml29623 				rx_buf_alloc_size,
20453859Sml29623 				nxgep->rx_default_block_size, &num_chunks[i]);
20463859Sml29623 		if (status != NXGE_OK) {
20473859Sml29623 			break;
20483859Sml29623 		}
20493859Sml29623 		st_rdc++;
20503859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
20513859Sml29623 			" nxge_alloc_rx_mem_pool DONE  alloc mem: "
20523859Sml29623 			"dma %d dma_buf_p %llx &dma_buf_p %llx", i,
20533859Sml29623 			dma_buf_p[i], &dma_buf_p[i]));
20543859Sml29623 	}
20553859Sml29623 	if (i < ndmas) {
20563859Sml29623 		goto nxge_alloc_rx_mem_fail1;
20573859Sml29623 	}
20583859Sml29623 	/*
20593859Sml29623 	 * Allocate memory for descriptor rings and mailbox.
20603859Sml29623 	 */
20613859Sml29623 	st_rdc = p_cfgp->start_rdc;
20623859Sml29623 	for (j = 0; j < ndmas; j++) {
20633859Sml29623 		status = nxge_alloc_rx_cntl_dma(nxgep, st_rdc, &dma_cntl_p[j],
20643859Sml29623 					rx_cntl_alloc_size);
20653859Sml29623 		if (status != NXGE_OK) {
20663859Sml29623 			break;
20673859Sml29623 		}
20683859Sml29623 		st_rdc++;
20693859Sml29623 	}
20703859Sml29623 	if (j < ndmas) {
20713859Sml29623 		goto nxge_alloc_rx_mem_fail2;
20723859Sml29623 	}
20733859Sml29623 
20743859Sml29623 	dma_poolp->ndmas = ndmas;
20753859Sml29623 	dma_poolp->num_chunks = num_chunks;
20763859Sml29623 	dma_poolp->buf_allocated = B_TRUE;
20773859Sml29623 	nxgep->rx_buf_pool_p = dma_poolp;
20783859Sml29623 	dma_poolp->dma_buf_pool_p = dma_buf_p;
20793859Sml29623 
20803859Sml29623 	dma_cntl_poolp->ndmas = ndmas;
20813859Sml29623 	dma_cntl_poolp->buf_allocated = B_TRUE;
20823859Sml29623 	nxgep->rx_cntl_pool_p = dma_cntl_poolp;
20833859Sml29623 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
20843859Sml29623 
20853859Sml29623 	goto nxge_alloc_rx_mem_pool_exit;
20863859Sml29623 
20873859Sml29623 nxge_alloc_rx_mem_fail2:
20883859Sml29623 	/* Free control buffers */
20893859Sml29623 	j--;
20903859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
20913859Sml29623 		"==> nxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
20923859Sml29623 	for (; j >= 0; j--) {
20933859Sml29623 		nxge_free_rx_cntl_dma(nxgep,
20944185Sspeer 			(p_nxge_dma_common_t)dma_cntl_p[j]);
20953859Sml29623 		NXGE_DEBUG_MSG((nxgep, DMA_CTL,
20963859Sml29623 			"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)",
20973859Sml29623 			j));
20983859Sml29623 	}
20993859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
21003859Sml29623 		"==> nxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
21013859Sml29623 
21023859Sml29623 nxge_alloc_rx_mem_fail1:
21033859Sml29623 	/* Free data buffers */
21043859Sml29623 	i--;
21053859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
21063859Sml29623 		"==> nxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
21073859Sml29623 	for (; i >= 0; i--) {
21083859Sml29623 		nxge_free_rx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
21093859Sml29623 			num_chunks[i]);
21103859Sml29623 	}
21113859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
21123859Sml29623 		"==> nxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
21133859Sml29623 
21143859Sml29623 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
21153859Sml29623 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
21163859Sml29623 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
21173859Sml29623 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
21183859Sml29623 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
21193859Sml29623 
21203859Sml29623 nxge_alloc_rx_mem_pool_exit:
21213859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
21223859Sml29623 		"<== nxge_alloc_rx_mem_pool:status 0x%08x", status));
21233859Sml29623 
21243859Sml29623 	return (status);
21253859Sml29623 }
21263859Sml29623 
21273859Sml29623 static void
21283859Sml29623 nxge_free_rx_mem_pool(p_nxge_t nxgep)
21293859Sml29623 {
21303859Sml29623 	uint32_t		i, ndmas;
21313859Sml29623 	p_nxge_dma_pool_t	dma_poolp;
21323859Sml29623 	p_nxge_dma_common_t	*dma_buf_p;
21333859Sml29623 	p_nxge_dma_pool_t	dma_cntl_poolp;
21343859Sml29623 	p_nxge_dma_common_t	*dma_cntl_p;
21353859Sml29623 	uint32_t 		*num_chunks;
21363859Sml29623 
21373859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_mem_pool"));
21383859Sml29623 
21393859Sml29623 	dma_poolp = nxgep->rx_buf_pool_p;
21403859Sml29623 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
21413859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
21423859Sml29623 			"<== nxge_free_rx_mem_pool "
21433859Sml29623 			"(null rx buf pool or buf not allocated)"));
21443859Sml29623 		return;
21453859Sml29623 	}
21463859Sml29623 
21473859Sml29623 	dma_cntl_poolp = nxgep->rx_cntl_pool_p;
21483859Sml29623 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
21493859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
21503859Sml29623 			"<== nxge_free_rx_mem_pool "
21513859Sml29623 			"(null rx cntl buf pool or cntl buf not allocated)"));
21523859Sml29623 		return;
21533859Sml29623 	}
21543859Sml29623 
21553859Sml29623 	dma_buf_p = dma_poolp->dma_buf_pool_p;
21563859Sml29623 	num_chunks = dma_poolp->num_chunks;
21573859Sml29623 
21583859Sml29623 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
21593859Sml29623 	ndmas = dma_cntl_poolp->ndmas;
21603859Sml29623 
21613859Sml29623 	for (i = 0; i < ndmas; i++) {
21623859Sml29623 		nxge_free_rx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
21633859Sml29623 	}
21643859Sml29623 
21653859Sml29623 	for (i = 0; i < ndmas; i++) {
21663859Sml29623 		nxge_free_rx_cntl_dma(nxgep, dma_cntl_p[i]);
21673859Sml29623 	}
21683859Sml29623 
21693859Sml29623 	for (i = 0; i < ndmas; i++) {
21703859Sml29623 		KMEM_FREE(dma_buf_p[i],
21713859Sml29623 			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
21723859Sml29623 		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
21733859Sml29623 	}
21743859Sml29623 
21753859Sml29623 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
21763859Sml29623 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
21773859Sml29623 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
21783859Sml29623 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
21793859Sml29623 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
21803859Sml29623 
21813859Sml29623 	nxgep->rx_buf_pool_p = NULL;
21823859Sml29623 	nxgep->rx_cntl_pool_p = NULL;
21833859Sml29623 
21843859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_free_rx_mem_pool"));
21853859Sml29623 }
21863859Sml29623 
21873859Sml29623 
21883859Sml29623 static nxge_status_t
21893859Sml29623 nxge_alloc_rx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
21903859Sml29623 	p_nxge_dma_common_t *dmap,
21913859Sml29623 	size_t alloc_size, size_t block_size, uint32_t *num_chunks)
21923859Sml29623 {
21933859Sml29623 	p_nxge_dma_common_t 	rx_dmap;
21943859Sml29623 	nxge_status_t		status = NXGE_OK;
21953859Sml29623 	size_t			total_alloc_size;
21963859Sml29623 	size_t			allocated = 0;
21973859Sml29623 	int			i, size_index, array_size;
21983859Sml29623 
21993859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_buf_dma"));
22003859Sml29623 
22013859Sml29623 	rx_dmap = (p_nxge_dma_common_t)
22023859Sml29623 			KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
22033859Sml29623 			KM_SLEEP);
22043859Sml29623 
22053859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
22063859Sml29623 		" alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
22073859Sml29623 		dma_channel, alloc_size, block_size, dmap));
22083859Sml29623 
22093859Sml29623 	total_alloc_size = alloc_size;
22103859Sml29623 
22113859Sml29623 #if defined(RX_USE_RECLAIM_POST)
22123859Sml29623 	total_alloc_size = alloc_size + alloc_size/4;
22133859Sml29623 #endif
22143859Sml29623 
22153859Sml29623 	i = 0;
22163859Sml29623 	size_index = 0;
22173859Sml29623 	array_size =  sizeof (alloc_sizes)/sizeof (size_t);
22183859Sml29623 	while ((alloc_sizes[size_index] < alloc_size) &&
22193859Sml29623 			(size_index < array_size))
22203859Sml29623 			size_index++;
22213859Sml29623 	if (size_index >= array_size) {
22223859Sml29623 		size_index = array_size - 1;
22233859Sml29623 	}
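	/*
	 * Chunking strategy: start with the first entry of alloc_sizes[]
	 * that is at least alloc_size (or the last entry if none is), and
	 * allocate chunks of that size until the total is satisfied.  If an
	 * allocation fails, fall back to earlier (smaller, assuming the
	 * table is ascending) entries and keep going, up to NXGE_DMA_BLOCK
	 * chunks in all.
	 */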
22243859Sml29623 
22253859Sml29623 	while ((allocated < total_alloc_size) &&
22263859Sml29623 			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
22273859Sml29623 		rx_dmap[i].dma_chunk_index = i;
22283859Sml29623 		rx_dmap[i].block_size = block_size;
22293859Sml29623 		rx_dmap[i].alength = alloc_sizes[size_index];
22303859Sml29623 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
22313859Sml29623 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
22323859Sml29623 		rx_dmap[i].dma_channel = dma_channel;
22333859Sml29623 		rx_dmap[i].contig_alloc_type = B_FALSE;
22343859Sml29623 
22353859Sml29623 		/*
22363859Sml29623 		 * N2/NIU: data buffers must be contiguous as the driver
22373859Sml29623 		 *	   needs to call Hypervisor api to set up
22383859Sml29623 		 *	   logical pages.
22393859Sml29623 		 */
22403859Sml29623 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
22413859Sml29623 			rx_dmap[i].contig_alloc_type = B_TRUE;
22423859Sml29623 		}
22433859Sml29623 
22443859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
22453859Sml29623 			"alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
22463859Sml29623 			"i %d nblocks %d alength %d",
22473859Sml29623 			dma_channel, i, &rx_dmap[i], block_size,
22483859Sml29623 			i, rx_dmap[i].nblocks,
22493859Sml29623 			rx_dmap[i].alength));
22503859Sml29623 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
22513859Sml29623 			&nxge_rx_dma_attr,
22523859Sml29623 			rx_dmap[i].alength,
22533859Sml29623 			&nxge_dev_buf_dma_acc_attr,
22543859Sml29623 			DDI_DMA_READ | DDI_DMA_STREAMING,
22553859Sml29623 			(p_nxge_dma_common_t)(&rx_dmap[i]));
22563859Sml29623 		if (status != NXGE_OK) {
22573859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
22583859Sml29623 				" nxge_alloc_rx_buf_dma: Alloc Failed "));
22593859Sml29623 			size_index--;
22603859Sml29623 		} else {
22613859Sml29623 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
22623859Sml29623 				" alloc_rx_buf_dma allocated rdc %d "
22633859Sml29623 				"chunk %d size %x dvma %x bufp %llx ",
22643859Sml29623 				dma_channel, i, rx_dmap[i].alength,
22653859Sml29623 				rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
22663859Sml29623 			i++;
22673859Sml29623 			allocated += alloc_sizes[size_index];
22683859Sml29623 		}
22693859Sml29623 	}
22703859Sml29623 
22713859Sml29623 
22723859Sml29623 	if (allocated < total_alloc_size) {
2273*5770Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2274*5770Sml29623 		    "==> nxge_alloc_rx_buf_dma: not enough for channel %d "
2275*5770Sml29623 		    "allocated 0x%x requested 0x%x",
2276*5770Sml29623 		    dma_channel,
2277*5770Sml29623 		    allocated, total_alloc_size));
2278*5770Sml29623 		status = NXGE_ERROR;
22793859Sml29623 		goto nxge_alloc_rx_mem_fail1;
22803859Sml29623 	}
22813859Sml29623 
2282*5770Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2283*5770Sml29623 	    "==> nxge_alloc_rx_buf_dma: Allocated for channel %d "
2284*5770Sml29623 	    "allocated 0x%x requested 0x%x",
2285*5770Sml29623 	    dma_channel,
2286*5770Sml29623 	    allocated, total_alloc_size));
2287*5770Sml29623 
22883859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
22893859Sml29623 		" alloc_rx_buf_dma rdc %d allocated %d chunks",
22903859Sml29623 		dma_channel, i));
22913859Sml29623 	*num_chunks = i;
22923859Sml29623 	*dmap = rx_dmap;
22933859Sml29623 
22943859Sml29623 	goto nxge_alloc_rx_mem_exit;
22953859Sml29623 
22963859Sml29623 nxge_alloc_rx_mem_fail1:
22973859Sml29623 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
22983859Sml29623 
22993859Sml29623 nxge_alloc_rx_mem_exit:
23003859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
23013859Sml29623 		"<== nxge_alloc_rx_buf_dma status 0x%08x", status));
23023859Sml29623 
23033859Sml29623 	return (status);
23043859Sml29623 }
23053859Sml29623 
23063859Sml29623 /*ARGSUSED*/
23073859Sml29623 static void
23083859Sml29623 nxge_free_rx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
23093859Sml29623     uint32_t num_chunks)
23103859Sml29623 {
23113859Sml29623 	int		i;
23123859Sml29623 
23133859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
23143859Sml29623 		"==> nxge_free_rx_buf_dma: # of chunks %d", num_chunks));
23153859Sml29623 
23163859Sml29623 	for (i = 0; i < num_chunks; i++) {
23173859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
23183859Sml29623 			"==> nxge_free_rx_buf_dma: chunk %d dmap 0x%llx",
23193859Sml29623 				i, dmap));
23203859Sml29623 		nxge_dma_mem_free(dmap++);
23213859Sml29623 	}
23223859Sml29623 
23233859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_free_rx_buf_dma"));
23243859Sml29623 }
23253859Sml29623 
23263859Sml29623 /*ARGSUSED*/
23273859Sml29623 static nxge_status_t
23283859Sml29623 nxge_alloc_rx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
23293859Sml29623     p_nxge_dma_common_t *dmap, size_t size)
23303859Sml29623 {
23313859Sml29623 	p_nxge_dma_common_t 	rx_dmap;
23323859Sml29623 	nxge_status_t		status = NXGE_OK;
23333859Sml29623 
23343859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_rx_cntl_dma"));
23353859Sml29623 
23363859Sml29623 	rx_dmap = (p_nxge_dma_common_t)
23373859Sml29623 			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
23383859Sml29623 
23393859Sml29623 	rx_dmap->contig_alloc_type = B_FALSE;
23403859Sml29623 
23413859Sml29623 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
23423859Sml29623 			&nxge_desc_dma_attr,
23433859Sml29623 			size,
23443859Sml29623 			&nxge_dev_desc_dma_acc_attr,
23453859Sml29623 			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
23463859Sml29623 			rx_dmap);
23473859Sml29623 	if (status != NXGE_OK) {
23483859Sml29623 		goto nxge_alloc_rx_cntl_dma_fail1;
23493859Sml29623 	}
23503859Sml29623 
23513859Sml29623 	*dmap = rx_dmap;
23523859Sml29623 	goto nxge_alloc_rx_cntl_dma_exit;
23533859Sml29623 
23543859Sml29623 nxge_alloc_rx_cntl_dma_fail1:
23553859Sml29623 	KMEM_FREE(rx_dmap, sizeof (nxge_dma_common_t));
23563859Sml29623 
23573859Sml29623 nxge_alloc_rx_cntl_dma_exit:
23583859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
23593859Sml29623 		"<== nxge_alloc_rx_cntl_dma status 0x%08x", status));
23603859Sml29623 
23613859Sml29623 	return (status);
23623859Sml29623 }
23633859Sml29623 
23643859Sml29623 /*ARGSUSED*/
23653859Sml29623 static void
23663859Sml29623 nxge_free_rx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
23673859Sml29623 {
23683859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_rx_cntl_dma"));
23693859Sml29623 
23703859Sml29623 	nxge_dma_mem_free(dmap);
23713859Sml29623 
23723859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_rx_cntl_dma"));
23733859Sml29623 }
23743859Sml29623 
23753859Sml29623 static nxge_status_t
23763859Sml29623 nxge_alloc_tx_mem_pool(p_nxge_t nxgep)
23773859Sml29623 {
23783859Sml29623 	nxge_status_t		status = NXGE_OK;
23793859Sml29623 	int			i, j;
23803859Sml29623 	uint32_t		ndmas, st_tdc;
23813859Sml29623 	p_nxge_dma_pt_cfg_t	p_all_cfgp;
23823859Sml29623 	p_nxge_hw_pt_cfg_t	p_cfgp;
23833859Sml29623 	p_nxge_dma_pool_t	dma_poolp;
23843859Sml29623 	p_nxge_dma_common_t	*dma_buf_p;
23853859Sml29623 	p_nxge_dma_pool_t	dma_cntl_poolp;
23863859Sml29623 	p_nxge_dma_common_t	*dma_cntl_p;
23873859Sml29623 	size_t			tx_buf_alloc_size;
23883859Sml29623 	size_t			tx_cntl_alloc_size;
23893859Sml29623 	uint32_t		*num_chunks; /* per dma */
23903952Sml29623 	uint32_t		bcopy_thresh;
23913859Sml29623 
23923859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool"));
23933859Sml29623 
23943859Sml29623 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
23953859Sml29623 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
23963859Sml29623 	st_tdc = p_cfgp->start_tdc;
23973859Sml29623 	ndmas = p_cfgp->max_tdcs;
23983859Sml29623 
23993859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_alloc_tx_mem_pool: "
24003859Sml29623 		"p_cfgp 0x%016llx start_tdc %d ndmas %d nxgep->max_tdcs %d",
24013859Sml29623 		p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, nxgep->max_tdcs));
24023859Sml29623 	/*
24033859Sml29623 	 * Allocate memory for each transmit DMA channel.
24043859Sml29623 	 */
24053859Sml29623 	dma_poolp = (p_nxge_dma_pool_t)KMEM_ZALLOC(sizeof (nxge_dma_pool_t),
24063859Sml29623 			KM_SLEEP);
24073859Sml29623 	dma_buf_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
24083859Sml29623 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
24093859Sml29623 
24103859Sml29623 	dma_cntl_poolp = (p_nxge_dma_pool_t)
24113859Sml29623 			KMEM_ZALLOC(sizeof (nxge_dma_pool_t), KM_SLEEP);
24123859Sml29623 	dma_cntl_p = (p_nxge_dma_common_t *)KMEM_ZALLOC(
24133859Sml29623 			sizeof (p_nxge_dma_common_t) * ndmas, KM_SLEEP);
24143859Sml29623 
2415*5770Sml29623 	if (nxge_tx_ring_size > TDC_DEFAULT_MAX) {
2416*5770Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM_CTL,
2417*5770Sml29623 		    "nxge_alloc_tx_mem_pool: TDC too high %d, "
2418*5770Sml29623 		    "set to default %d",
2419*5770Sml29623 		    nxge_tx_ring_size, TDC_DEFAULT_MAX));
2420*5770Sml29623 		nxge_tx_ring_size = TDC_DEFAULT_MAX;
2421*5770Sml29623 	}
2422*5770Sml29623 
24233859Sml29623 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
24243859Sml29623 	/*
24253859Sml29623 	 * N2/NIU limits the descriptor setup: contiguous memory allocation
24263859Sml29623 	 * for data buffers is capped at 4M (contig_mem_alloc), and the
24273859Sml29623 	 * control buffers must be little endian and allocated with the
24283859Sml29623 	 * ddi/dki memory allocation functions.  The transmit ring is
24293859Sml29623 	 * limited to 8K (including the mailbox).
24303859Sml29623 	 */
24313859Sml29623 	if (nxgep->niu_type == N2_NIU) {
24323859Sml29623 		if ((nxge_tx_ring_size > NXGE_NIU_CONTIG_TX_MAX) ||
24333859Sml29623 			(!ISP2(nxge_tx_ring_size))) {
24343859Sml29623 			nxge_tx_ring_size = NXGE_NIU_CONTIG_TX_MAX;
24353859Sml29623 		}
24363859Sml29623 	}
24373859Sml29623 #endif
24383859Sml29623 
24393859Sml29623 	nxgep->nxge_port_tx_ring_size = nxge_tx_ring_size;
24403859Sml29623 
24413859Sml29623 	/*
24423859Sml29623 	 * Assume that each DMA channel will be configured with default
24433859Sml29623 	 * transmit buffer size for copying transmit data.
24443859Sml29623 	 * (For packet payload over this limit, packets will not be
24453859Sml29623 	 *  copied.)
24463859Sml29623 	 */
24473952Sml29623 	if (nxgep->niu_type == N2_NIU) {
24483952Sml29623 		bcopy_thresh = TX_BCOPY_SIZE;
24493952Sml29623 	} else {
24503952Sml29623 		bcopy_thresh = nxge_bcopy_thresh;
24513952Sml29623 	}
24523952Sml29623 	tx_buf_alloc_size = (bcopy_thresh * nxge_tx_ring_size);
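	/*
	 * I.e. the pool provides one bcopy staging buffer of bcopy_thresh
	 * bytes per transmit ring entry; payloads above the threshold are
	 * sent without copying (per the comment above).
	 */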
24533859Sml29623 
24543859Sml29623 	/*
24553859Sml29623 	 * Addresses of transmit descriptor ring and the
24563859Sml29623 	 * mailbox must be all cache-aligned (64 bytes).
24573859Sml29623 	 */
24583859Sml29623 	tx_cntl_alloc_size = nxge_tx_ring_size;
24593859Sml29623 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
24603859Sml29623 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
24613859Sml29623 
24623859Sml29623 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
24633859Sml29623 	if (nxgep->niu_type == N2_NIU) {
24643859Sml29623 		if (!ISP2(tx_buf_alloc_size)) {
24653859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
24663859Sml29623 				"==> nxge_alloc_tx_mem_pool: "
24673859Sml29623 				" must be a power of 2"));
24683859Sml29623 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
24693859Sml29623 			goto nxge_alloc_tx_mem_pool_exit;
24703859Sml29623 		}
24713859Sml29623 
24723859Sml29623 		if (tx_buf_alloc_size > (1 << 22)) {
24733859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
24743859Sml29623 				"==> nxge_alloc_tx_mem_pool: "
24753859Sml29623 				" limit size to 4M"));
24763859Sml29623 			status |= (NXGE_ERROR | NXGE_DDI_FAILED);
24773859Sml29623 			goto nxge_alloc_tx_mem_pool_exit;
24783859Sml29623 		}
24793859Sml29623 
24803859Sml29623 		if (tx_cntl_alloc_size < 0x2000) {
24813859Sml29623 			tx_cntl_alloc_size = 0x2000;
24823859Sml29623 		}
24833859Sml29623 	}
24843859Sml29623 #endif
24853859Sml29623 
24863859Sml29623 	num_chunks = (uint32_t *)KMEM_ZALLOC(
24873859Sml29623 			sizeof (uint32_t) * ndmas, KM_SLEEP);
24883859Sml29623 
24893859Sml29623 	/*
24903859Sml29623 	 * Allocate memory for transmit buffers and descriptor rings.
24913859Sml29623 	 * Replace allocation functions with interface functions provided
24923859Sml29623 	 * by the partition manager when it is available.
24933859Sml29623 	 *
24943859Sml29623 	 * Allocate memory for the transmit buffer pool.
24953859Sml29623 	 */
24963859Sml29623 	for (i = 0; i < ndmas; i++) {
24973859Sml29623 		num_chunks[i] = 0;
24983859Sml29623 		status = nxge_alloc_tx_buf_dma(nxgep, st_tdc, &dma_buf_p[i],
24993859Sml29623 					tx_buf_alloc_size,
25003952Sml29623 					bcopy_thresh, &num_chunks[i]);
25013859Sml29623 		if (status != NXGE_OK) {
25023859Sml29623 			break;
25033859Sml29623 		}
25043859Sml29623 		st_tdc++;
25053859Sml29623 	}
25063859Sml29623 	if (i < ndmas) {
25073859Sml29623 		goto nxge_alloc_tx_mem_pool_fail1;
25083859Sml29623 	}
25093859Sml29623 
25103859Sml29623 	st_tdc = p_cfgp->start_tdc;
25113859Sml29623 	/*
25123859Sml29623 	 * Allocate memory for descriptor rings and mailbox.
25133859Sml29623 	 */
25143859Sml29623 	for (j = 0; j < ndmas; j++) {
25153859Sml29623 		status = nxge_alloc_tx_cntl_dma(nxgep, st_tdc, &dma_cntl_p[j],
25163859Sml29623 					tx_cntl_alloc_size);
25173859Sml29623 		if (status != NXGE_OK) {
25183859Sml29623 			break;
25193859Sml29623 		}
25203859Sml29623 		st_tdc++;
25213859Sml29623 	}
25223859Sml29623 	if (j < ndmas) {
25233859Sml29623 		goto nxge_alloc_tx_mem_pool_fail2;
25243859Sml29623 	}
25253859Sml29623 
25263859Sml29623 	dma_poolp->ndmas = ndmas;
25273859Sml29623 	dma_poolp->num_chunks = num_chunks;
25283859Sml29623 	dma_poolp->buf_allocated = B_TRUE;
25293859Sml29623 	dma_poolp->dma_buf_pool_p = dma_buf_p;
25303859Sml29623 	nxgep->tx_buf_pool_p = dma_poolp;
25313859Sml29623 
25323859Sml29623 	dma_cntl_poolp->ndmas = ndmas;
25333859Sml29623 	dma_cntl_poolp->buf_allocated = B_TRUE;
25343859Sml29623 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
25353859Sml29623 	nxgep->tx_cntl_pool_p = dma_cntl_poolp;
25363859Sml29623 
25373859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
25383859Sml29623 		"==> nxge_alloc_tx_mem_pool: start_tdc %d "
25393859Sml29623 		"ndmas %d poolp->ndmas %d",
25403859Sml29623 		st_tdc, ndmas, dma_poolp->ndmas));
25413859Sml29623 
25423859Sml29623 	goto nxge_alloc_tx_mem_pool_exit;
25433859Sml29623 
25443859Sml29623 nxge_alloc_tx_mem_pool_fail2:
25453859Sml29623 	/* Free control buffers */
25463859Sml29623 	j--;
25473859Sml29623 	for (; j >= 0; j--) {
25483859Sml29623 		nxge_free_tx_cntl_dma(nxgep,
25494185Sspeer 			(p_nxge_dma_common_t)dma_cntl_p[j]);
25503859Sml29623 	}
25513859Sml29623 
25523859Sml29623 nxge_alloc_tx_mem_pool_fail1:
25533859Sml29623 	/* Free data buffers */
25543859Sml29623 	i--;
25553859Sml29623 	for (; i >= 0; i--) {
25563859Sml29623 		nxge_free_tx_buf_dma(nxgep, (p_nxge_dma_common_t)dma_buf_p[i],
25573859Sml29623 			num_chunks[i]);
25583859Sml29623 	}
25593859Sml29623 
25603859Sml29623 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
25613859Sml29623 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
25623859Sml29623 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
25633859Sml29623 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
25643859Sml29623 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
25653859Sml29623 
25663859Sml29623 nxge_alloc_tx_mem_pool_exit:
25673859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM_CTL,
25683859Sml29623 		"<== nxge_alloc_tx_mem_pool:status 0x%08x", status));
25693859Sml29623 
25703859Sml29623 	return (status);
25713859Sml29623 }
25723859Sml29623 
25733859Sml29623 static nxge_status_t
25743859Sml29623 nxge_alloc_tx_buf_dma(p_nxge_t nxgep, uint16_t dma_channel,
25753859Sml29623     p_nxge_dma_common_t *dmap, size_t alloc_size,
25763859Sml29623     size_t block_size, uint32_t *num_chunks)
25773859Sml29623 {
25783859Sml29623 	p_nxge_dma_common_t 	tx_dmap;
25793859Sml29623 	nxge_status_t		status = NXGE_OK;
25803859Sml29623 	size_t			total_alloc_size;
25813859Sml29623 	size_t			allocated = 0;
25823859Sml29623 	int			i, size_index, array_size;
25833859Sml29623 
25843859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_buf_dma"));
25853859Sml29623 
25863859Sml29623 	tx_dmap = (p_nxge_dma_common_t)
25873859Sml29623 		KMEM_ZALLOC(sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK,
25883859Sml29623 			KM_SLEEP);
25893859Sml29623 
25903859Sml29623 	total_alloc_size = alloc_size;
25913859Sml29623 	i = 0;
25923859Sml29623 	size_index = 0;
25933859Sml29623 	array_size =  sizeof (alloc_sizes) /  sizeof (size_t);
25943859Sml29623 	while ((alloc_sizes[size_index] < alloc_size) &&
25953859Sml29623 		(size_index < array_size))
25963859Sml29623 		size_index++;
25973859Sml29623 	if (size_index >= array_size) {
25983859Sml29623 		size_index = array_size - 1;
25993859Sml29623 	}
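	/*
	 * Same chunking strategy as in nxge_alloc_rx_buf_dma() above: walk
	 * alloc_sizes[] for a chunk size, fall back to smaller entries on
	 * allocation failure, up to NXGE_DMA_BLOCK chunks.
	 */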
26003859Sml29623 
26013859Sml29623 	while ((allocated < total_alloc_size) &&
26023859Sml29623 			(size_index >= 0) && (i < NXGE_DMA_BLOCK)) {
26033859Sml29623 
26043859Sml29623 		tx_dmap[i].dma_chunk_index = i;
26053859Sml29623 		tx_dmap[i].block_size = block_size;
26063859Sml29623 		tx_dmap[i].alength = alloc_sizes[size_index];
26073859Sml29623 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
26083859Sml29623 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
26093859Sml29623 		tx_dmap[i].dma_channel = dma_channel;
26103859Sml29623 		tx_dmap[i].contig_alloc_type = B_FALSE;
26113859Sml29623 
26123859Sml29623 		/*
26133859Sml29623 		 * N2/NIU: data buffers must be contiguous as the driver
26143859Sml29623 		 *	   needs to call Hypervisor api to set up
26153859Sml29623 		 *	   logical pages.
26163859Sml29623 		 */
26173859Sml29623 		if ((nxgep->niu_type == N2_NIU) && (NXGE_DMA_BLOCK == 1)) {
26183859Sml29623 			tx_dmap[i].contig_alloc_type = B_TRUE;
26193859Sml29623 		}
26203859Sml29623 
26213859Sml29623 		status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
26223859Sml29623 			&nxge_tx_dma_attr,
26233859Sml29623 			tx_dmap[i].alength,
26243859Sml29623 			&nxge_dev_buf_dma_acc_attr,
26253859Sml29623 			DDI_DMA_WRITE | DDI_DMA_STREAMING,
26263859Sml29623 			(p_nxge_dma_common_t)(&tx_dmap[i]));
26273859Sml29623 		if (status != NXGE_OK) {
26283859Sml29623 			size_index--;
26293859Sml29623 		} else {
26303859Sml29623 			i++;
26313859Sml29623 			allocated += alloc_sizes[size_index];
26323859Sml29623 		}
26333859Sml29623 	}
26343859Sml29623 
26353859Sml29623 	if (allocated < total_alloc_size) {
2636*5770Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2637*5770Sml29623 		    "==> nxge_alloc_tx_buf_dma: not enough memory for channel %d: "
2638*5770Sml29623 		    "allocated 0x%x requested 0x%x",
2639*5770Sml29623 		    dma_channel,
2640*5770Sml29623 		    allocated, total_alloc_size));
2641*5770Sml29623 		status = NXGE_ERROR;
26423859Sml29623 		goto nxge_alloc_tx_mem_fail1;
26433859Sml29623 	}
26443859Sml29623 
2645*5770Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2646*5770Sml29623 	    "==> nxge_alloc_tx_buf_dma: Allocated for channel %d: "
2647*5770Sml29623 	    "allocated 0x%x requested 0x%x",
2648*5770Sml29623 	    dma_channel,
2649*5770Sml29623 	    allocated, total_alloc_size));
2650*5770Sml29623 
26513859Sml29623 	*num_chunks = i;
26523859Sml29623 	*dmap = tx_dmap;
26533859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
26543859Sml29623 		"==> nxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
26553859Sml29623 		*dmap, i));
26563859Sml29623 	goto nxge_alloc_tx_mem_exit;
26573859Sml29623 
26583859Sml29623 nxge_alloc_tx_mem_fail1:
26593859Sml29623 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
26603859Sml29623 
26613859Sml29623 nxge_alloc_tx_mem_exit:
26623859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
26633859Sml29623 		"<== nxge_alloc_tx_buf_dma status 0x%08x", status));
26643859Sml29623 
26653859Sml29623 	return (status);
26663859Sml29623 }
26673859Sml29623 
26683859Sml29623 /*ARGSUSED*/
26693859Sml29623 static void
26703859Sml29623 nxge_free_tx_buf_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap,
26713859Sml29623     uint32_t num_chunks)
26723859Sml29623 {
26733859Sml29623 	int		i;
26743859Sml29623 
26753859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "==> nxge_free_tx_buf_dma"));
26763859Sml29623 
26773859Sml29623 	for (i = 0; i < num_chunks; i++) {
26783859Sml29623 		nxge_dma_mem_free(dmap++);
26793859Sml29623 	}
26803859Sml29623 
26813859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM_CTL, "<== nxge_free_tx_buf_dma"));
26823859Sml29623 }
26833859Sml29623 
26843859Sml29623 /*ARGSUSED*/
26853859Sml29623 static nxge_status_t
26863859Sml29623 nxge_alloc_tx_cntl_dma(p_nxge_t nxgep, uint16_t dma_channel,
26873859Sml29623     p_nxge_dma_common_t *dmap, size_t size)
26883859Sml29623 {
26893859Sml29623 	p_nxge_dma_common_t 	tx_dmap;
26903859Sml29623 	nxge_status_t		status = NXGE_OK;
26913859Sml29623 
26923859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_alloc_tx_cntl_dma"));
26933859Sml29623 	tx_dmap = (p_nxge_dma_common_t)
26943859Sml29623 			KMEM_ZALLOC(sizeof (nxge_dma_common_t), KM_SLEEP);
26953859Sml29623 
26963859Sml29623 	tx_dmap->contig_alloc_type = B_FALSE;
26973859Sml29623 
26983859Sml29623 	status = nxge_dma_mem_alloc(nxgep, nxge_force_dma,
26993859Sml29623 			&nxge_desc_dma_attr,
27003859Sml29623 			size,
27013859Sml29623 			&nxge_dev_desc_dma_acc_attr,
27023859Sml29623 			DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
27033859Sml29623 			tx_dmap);
27043859Sml29623 	if (status != NXGE_OK) {
27053859Sml29623 		goto nxge_alloc_tx_cntl_dma_fail1;
27063859Sml29623 	}
27073859Sml29623 
27083859Sml29623 	*dmap = tx_dmap;
27093859Sml29623 	goto nxge_alloc_tx_cntl_dma_exit;
27103859Sml29623 
27113859Sml29623 nxge_alloc_tx_cntl_dma_fail1:
27123859Sml29623 	KMEM_FREE(tx_dmap, sizeof (nxge_dma_common_t));
27133859Sml29623 
27143859Sml29623 nxge_alloc_tx_cntl_dma_exit:
27153859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
27163859Sml29623 		"<== nxge_alloc_tx_cntl_dma status 0x%08x", status));
27173859Sml29623 
27183859Sml29623 	return (status);
27193859Sml29623 }
27203859Sml29623 
27213859Sml29623 /*ARGSUSED*/
27223859Sml29623 static void
27233859Sml29623 nxge_free_tx_cntl_dma(p_nxge_t nxgep, p_nxge_dma_common_t dmap)
27243859Sml29623 {
27253859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_free_tx_cntl_dma"));
27263859Sml29623 
27273859Sml29623 	nxge_dma_mem_free(dmap);
27283859Sml29623 
27293859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_free_tx_cntl_dma"));
27303859Sml29623 }
27313859Sml29623 
27323859Sml29623 static void
27333859Sml29623 nxge_free_tx_mem_pool(p_nxge_t nxgep)
27343859Sml29623 {
27353859Sml29623 	uint32_t		i, ndmas;
27363859Sml29623 	p_nxge_dma_pool_t	dma_poolp;
27373859Sml29623 	p_nxge_dma_common_t	*dma_buf_p;
27383859Sml29623 	p_nxge_dma_pool_t	dma_cntl_poolp;
27393859Sml29623 	p_nxge_dma_common_t	*dma_cntl_p;
27403859Sml29623 	uint32_t 		*num_chunks;
27413859Sml29623 
27423859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_free_tx_mem_pool"));
27433859Sml29623 
27443859Sml29623 	dma_poolp = nxgep->tx_buf_pool_p;
27453859Sml29623 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
27463859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27473859Sml29623 			"<== nxge_free_tx_mem_pool "
25483859Sml29623 			"(null tx buf pool or buf not allocated)"));
27493859Sml29623 		return;
27503859Sml29623 	}
27513859Sml29623 
27523859Sml29623 	dma_cntl_poolp = nxgep->tx_cntl_pool_p;
27533859Sml29623 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
27543859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27553859Sml29623 			"<== nxge_free_tx_mem_pool "
27563859Sml29623 			"(null tx cntl buf pool or cntl buf not allocated)"));
27573859Sml29623 		return;
27583859Sml29623 	}
27593859Sml29623 
27603859Sml29623 	dma_buf_p = dma_poolp->dma_buf_pool_p;
27613859Sml29623 	num_chunks = dma_poolp->num_chunks;
27623859Sml29623 
27633859Sml29623 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
27643859Sml29623 	ndmas = dma_cntl_poolp->ndmas;
27653859Sml29623 
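	/*
	 * Free the per-channel data buffers first, then the control
	 * (descriptor) areas, and finally the chunk arrays and the pool
	 * bookkeeping structures themselves.
	 */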
27663859Sml29623 	for (i = 0; i < ndmas; i++) {
27673859Sml29623 		nxge_free_tx_buf_dma(nxgep, dma_buf_p[i], num_chunks[i]);
27683859Sml29623 	}
27693859Sml29623 
27703859Sml29623 	for (i = 0; i < ndmas; i++) {
27713859Sml29623 		nxge_free_tx_cntl_dma(nxgep, dma_cntl_p[i]);
27723859Sml29623 	}
27733859Sml29623 
27743859Sml29623 	for (i = 0; i < ndmas; i++) {
27753859Sml29623 		KMEM_FREE(dma_buf_p[i],
27763859Sml29623 			sizeof (nxge_dma_common_t) * NXGE_DMA_BLOCK);
27773859Sml29623 		KMEM_FREE(dma_cntl_p[i], sizeof (nxge_dma_common_t));
27783859Sml29623 	}
27793859Sml29623 
27803859Sml29623 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
27813859Sml29623 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_nxge_dma_common_t));
27823859Sml29623 	KMEM_FREE(dma_cntl_poolp, sizeof (nxge_dma_pool_t));
27833859Sml29623 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_nxge_dma_common_t));
27843859Sml29623 	KMEM_FREE(dma_poolp, sizeof (nxge_dma_pool_t));
27853859Sml29623 
27863859Sml29623 	nxgep->tx_buf_pool_p = NULL;
27873859Sml29623 	nxgep->tx_cntl_pool_p = NULL;
27883859Sml29623 
27893859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_free_tx_mem_pool"));
27903859Sml29623 }
27913859Sml29623 
27923859Sml29623 /*ARGSUSED*/
27933859Sml29623 static nxge_status_t
27943859Sml29623 nxge_dma_mem_alloc(p_nxge_t nxgep, dma_method_t method,
27953859Sml29623 	struct ddi_dma_attr *dma_attrp,
27963859Sml29623 	size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
27973859Sml29623 	p_nxge_dma_common_t dma_p)
27983859Sml29623 {
27993859Sml29623 	caddr_t 		kaddrp;
28003859Sml29623 	int			ddi_status = DDI_SUCCESS;
28013859Sml29623 	boolean_t		contig_alloc_type;
28023859Sml29623 
28033859Sml29623 	contig_alloc_type = dma_p->contig_alloc_type;
28043859Sml29623 
28053859Sml29623 	if (contig_alloc_type && (nxgep->niu_type != N2_NIU)) {
28063859Sml29623 		/*
28073859Sml29623 		 * contig_alloc_type for contiguous memory only allowed
28083859Sml29623 		 * for N2/NIU.
28093859Sml29623 		 */
28103859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
28113859Sml29623 			"nxge_dma_mem_alloc: alloc type not allowed (%d)",
28123859Sml29623 			dma_p->contig_alloc_type));
28133859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
28143859Sml29623 	}
28153859Sml29623 
28163859Sml29623 	dma_p->dma_handle = NULL;
28173859Sml29623 	dma_p->acc_handle = NULL;
28183859Sml29623 	dma_p->kaddrp = dma_p->last_kaddrp = NULL;
28193859Sml29623 	dma_p->first_ioaddr_pp = dma_p->last_ioaddr_pp = NULL;
28203859Sml29623 	ddi_status = ddi_dma_alloc_handle(nxgep->dip, dma_attrp,
28213859Sml29623 		DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
28223859Sml29623 	if (ddi_status != DDI_SUCCESS) {
28233859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
28243859Sml29623 			"nxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
28253859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
28263859Sml29623 	}
28273859Sml29623 
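	/*
	 * Two allocation paths: the normal case uses ddi_dma_mem_alloc(),
	 * while N2/NIU contiguous allocations (sun4v only) use
	 * contig_mem_alloc() so that logical pages can be set up through
	 * the Hypervisor api.
	 */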
28283859Sml29623 	switch (contig_alloc_type) {
28293859Sml29623 	case B_FALSE:
28303859Sml29623 		ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length,
28313859Sml29623 			acc_attr_p,
28323859Sml29623 			xfer_flags,
28333859Sml29623 			DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
28343859Sml29623 			&dma_p->acc_handle);
28353859Sml29623 		if (ddi_status != DDI_SUCCESS) {
28363859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
28373859Sml29623 				"nxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
28383859Sml29623 			ddi_dma_free_handle(&dma_p->dma_handle);
28393859Sml29623 			dma_p->dma_handle = NULL;
28403859Sml29623 			return (NXGE_ERROR | NXGE_DDI_FAILED);
28413859Sml29623 		}
28423859Sml29623 		if (dma_p->alength < length) {
28433859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
28443859Sml29623 				"nxge_dma_mem_alloc:ddi_dma_mem_alloc "
28453859Sml29623 				"< length."));
28463859Sml29623 			ddi_dma_mem_free(&dma_p->acc_handle);
28473859Sml29623 			ddi_dma_free_handle(&dma_p->dma_handle);
28483859Sml29623 			dma_p->acc_handle = NULL;
28493859Sml29623 			dma_p->dma_handle = NULL;
28503859Sml29623 			return (NXGE_ERROR);
28513859Sml29623 		}
28523859Sml29623 
28533859Sml29623 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
28543859Sml29623 			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
28553859Sml29623 			&dma_p->dma_cookie, &dma_p->ncookies);
28563859Sml29623 		if (ddi_status != DDI_DMA_MAPPED) {
28573859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
28583859Sml29623 				"nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
28593859Sml29623 				"(status 0x%x ncookies %d.)", ddi_status,
28603859Sml29623 				dma_p->ncookies));
28613859Sml29623 			if (dma_p->acc_handle) {
28623859Sml29623 				ddi_dma_mem_free(&dma_p->acc_handle);
28633859Sml29623 				dma_p->acc_handle = NULL;
28643859Sml29623 			}
28653859Sml29623 			ddi_dma_free_handle(&dma_p->dma_handle);
28663859Sml29623 			dma_p->dma_handle = NULL;
28673859Sml29623 			return (NXGE_ERROR | NXGE_DDI_FAILED);
28683859Sml29623 		}
28693859Sml29623 
28703859Sml29623 		if (dma_p->ncookies != 1) {
28713859Sml29623 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
28723859Sml29623 				"nxge_dma_mem_alloc:ddi_dma_addr_bind "
28733859Sml29623 				"> 1 cookie"
28743859Sml29623 				"(status 0x%x ncookies %d.)", ddi_status,
28753859Sml29623 				dma_p->ncookies));
28763859Sml29623 			if (dma_p->acc_handle) {
28773859Sml29623 				ddi_dma_mem_free(&dma_p->acc_handle);
28783859Sml29623 				dma_p->acc_handle = NULL;
28793859Sml29623 			}
28804185Sspeer 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
28813859Sml29623 			ddi_dma_free_handle(&dma_p->dma_handle);
28823859Sml29623 			dma_p->dma_handle = NULL;
28833859Sml29623 			return (NXGE_ERROR);
28843859Sml29623 		}
28853859Sml29623 		break;
28863859Sml29623 
28873859Sml29623 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
28883859Sml29623 	case B_TRUE:
28893859Sml29623 		kaddrp = (caddr_t)contig_mem_alloc(length);
28903859Sml29623 		if (kaddrp == NULL) {
28913859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
28923859Sml29623 				"nxge_dma_mem_alloc:contig_mem_alloc failed."));
28933859Sml29623 			ddi_dma_free_handle(&dma_p->dma_handle);
28943859Sml29623 			return (NXGE_ERROR | NXGE_DDI_FAILED);
28953859Sml29623 		}
28963859Sml29623 
28973859Sml29623 		dma_p->alength = length;
28983859Sml29623 		ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
28993859Sml29623 			kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
29003859Sml29623 			&dma_p->dma_cookie, &dma_p->ncookies);
29013859Sml29623 		if (ddi_status != DDI_DMA_MAPPED) {
29023859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29033859Sml29623 				"nxge_dma_mem_alloc:ddi_dma_addr_bind failed "
29043859Sml29623 				"(status 0x%x ncookies %d.)", ddi_status,
29053859Sml29623 				dma_p->ncookies));
29063859Sml29623 
29073859Sml29623 			NXGE_DEBUG_MSG((nxgep, DMA_CTL,
29083859Sml29623 				"==> nxge_dma_mem_alloc: (not mapped)"
29093859Sml29623 				"length %lu (0x%x) "
29103859Sml29623 				"free contig kaddrp $%p "
29113859Sml29623 				"va_to_pa $%p",
29123859Sml29623 				length, length,
29133859Sml29623 				kaddrp,
29143859Sml29623 				va_to_pa(kaddrp)));
29153859Sml29623 
29163859Sml29623 
29173859Sml29623 			contig_mem_free((void *)kaddrp, length);
29183859Sml29623 			ddi_dma_free_handle(&dma_p->dma_handle);
29193859Sml29623 
29203859Sml29623 			dma_p->dma_handle = NULL;
29213859Sml29623 			dma_p->acc_handle = NULL;
29223859Sml29623 			dma_p->alength = 0;
29233859Sml29623 			dma_p->kaddrp = NULL;
29243859Sml29623 
29253859Sml29623 			return (NXGE_ERROR | NXGE_DDI_FAILED);
29263859Sml29623 		}
29273859Sml29623 
29283859Sml29623 		if (dma_p->ncookies != 1 ||
29293859Sml29623 			(dma_p->dma_cookie.dmac_laddress == NULL)) {
29303859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29313859Sml29623 				"nxge_dma_mem_alloc:ddi_dma_addr_bind > 1 "
29323859Sml29623 				"cookie or "
29333859Sml29623 				"dmac_laddress is NULL $%p size %d "
29343859Sml29623 				" (status 0x%x ncookies %d.)",
29353859Sml29623 				ddi_status,
29363859Sml29623 				dma_p->dma_cookie.dmac_laddress,
29373859Sml29623 				dma_p->dma_cookie.dmac_size,
29383859Sml29623 				dma_p->ncookies));
29393859Sml29623 
29403859Sml29623 			contig_mem_free((void *)kaddrp, length);
29414185Sspeer 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
29423859Sml29623 			ddi_dma_free_handle(&dma_p->dma_handle);
29433859Sml29623 
29443859Sml29623 			dma_p->alength = 0;
29453859Sml29623 			dma_p->dma_handle = NULL;
29463859Sml29623 			dma_p->acc_handle = NULL;
29473859Sml29623 			dma_p->kaddrp = NULL;
29483859Sml29623 
29493859Sml29623 			return (NXGE_ERROR | NXGE_DDI_FAILED);
29503859Sml29623 		}
29513859Sml29623 		break;
29523859Sml29623 
29533859Sml29623 #else
29543859Sml29623 	case B_TRUE:
29553859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29563859Sml29623 			"nxge_dma_mem_alloc: invalid alloc type for !sun4v"));
29573859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
29583859Sml29623 #endif
29593859Sml29623 	}
29603859Sml29623 
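	/*
	 * Record the kernel virtual and I/O (DMA cookie) addresses of the
	 * buffer; last_kaddrp and last_ioaddr_pp point at the final
	 * RXBUF_64B_ALIGNED bytes of the allocated area.
	 */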
29613859Sml29623 	dma_p->kaddrp = kaddrp;
29623859Sml29623 	dma_p->last_kaddrp = (unsigned char *)kaddrp +
29633859Sml29623 			dma_p->alength - RXBUF_64B_ALIGNED;
29645125Sjoycey #if defined(__i386)
29655125Sjoycey 	dma_p->ioaddr_pp =
29665125Sjoycey 		(unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
29675125Sjoycey #else
29683859Sml29623 	dma_p->ioaddr_pp = (unsigned char *)dma_p->dma_cookie.dmac_laddress;
29695125Sjoycey #endif
29703859Sml29623 	dma_p->last_ioaddr_pp =
29715125Sjoycey #if defined(__i386)
29725125Sjoycey 		(unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress +
29735125Sjoycey #else
29743859Sml29623 		(unsigned char *)dma_p->dma_cookie.dmac_laddress +
29755125Sjoycey #endif
29763859Sml29623 				dma_p->alength - RXBUF_64B_ALIGNED;
29773859Sml29623 
29783859Sml29623 	NPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
29793859Sml29623 
29803859Sml29623 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
29813859Sml29623 	dma_p->orig_ioaddr_pp =
29823859Sml29623 		(unsigned char *)dma_p->dma_cookie.dmac_laddress;
29833859Sml29623 	dma_p->orig_alength = length;
29843859Sml29623 	dma_p->orig_kaddrp = kaddrp;
29853859Sml29623 	dma_p->orig_vatopa = (uint64_t)va_to_pa(kaddrp);
29863859Sml29623 #endif
29873859Sml29623 
29883859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dma_mem_alloc: "
29893859Sml29623 		"dma buffer allocated: dma_p $%p "
29903859Sml29623 		"return dmac_laddress from cookie $%p cookie dmac_size %d "
29913859Sml29623 		"dma_p->ioaddr_p $%p "
29923859Sml29623 		"dma_p->orig_ioaddr_p $%p "
29933859Sml29623 		"orig_vatopa $%p "
29943859Sml29623 		"alength %d (0x%x) "
29953859Sml29623 		"kaddrp $%p "
29963859Sml29623 		"length %d (0x%x)",
29973859Sml29623 		dma_p,
29983859Sml29623 		dma_p->dma_cookie.dmac_laddress, dma_p->dma_cookie.dmac_size,
29993859Sml29623 		dma_p->ioaddr_pp,
30003859Sml29623 		dma_p->orig_ioaddr_pp,
30013859Sml29623 		dma_p->orig_vatopa,
30023859Sml29623 		dma_p->alength, dma_p->alength,
30033859Sml29623 		kaddrp,
30043859Sml29623 		length, length));
30053859Sml29623 
30063859Sml29623 	return (NXGE_OK);
30073859Sml29623 }
30083859Sml29623 
30093859Sml29623 static void
30103859Sml29623 nxge_dma_mem_free(p_nxge_dma_common_t dma_p)
30113859Sml29623 {
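	/*
	 * Tear down in reverse order of allocation: unbind and free the
	 * DMA handle, free the access handle, and, for sun4v contiguous
	 * allocations, release the contiguous memory as well.
	 */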
30123859Sml29623 	if (dma_p->dma_handle != NULL) {
30133859Sml29623 		if (dma_p->ncookies) {
30143859Sml29623 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
30153859Sml29623 			dma_p->ncookies = 0;
30163859Sml29623 		}
30173859Sml29623 		ddi_dma_free_handle(&dma_p->dma_handle);
30183859Sml29623 		dma_p->dma_handle = NULL;
30193859Sml29623 	}
30203859Sml29623 
30213859Sml29623 	if (dma_p->acc_handle != NULL) {
30223859Sml29623 		ddi_dma_mem_free(&dma_p->acc_handle);
30233859Sml29623 		dma_p->acc_handle = NULL;
30243859Sml29623 		NPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
30253859Sml29623 	}
30263859Sml29623 
30273859Sml29623 #if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
30283859Sml29623 	if (dma_p->contig_alloc_type &&
30293859Sml29623 			dma_p->orig_kaddrp && dma_p->orig_alength) {
30303859Sml29623 		NXGE_DEBUG_MSG((NULL, DMA_CTL, "nxge_dma_mem_free: "
30313859Sml29623 			"kaddrp $%p (orig_kaddrp $%p)"
30323859Sml29623 			"mem type %d "
30333859Sml29623 			"orig_alength %d "
30343859Sml29623 			"alength 0x%x (%d)",
30353859Sml29623 			dma_p->kaddrp,
30363859Sml29623 			dma_p->orig_kaddrp,
30373859Sml29623 			dma_p->contig_alloc_type,
30383859Sml29623 			dma_p->orig_alength,
30393859Sml29623 			dma_p->alength, dma_p->alength));
30403859Sml29623 
30413859Sml29623 		contig_mem_free(dma_p->orig_kaddrp, dma_p->orig_alength);
30423859Sml29623 		dma_p->orig_alength = 0;
30433859Sml29623 		dma_p->orig_kaddrp = NULL;
30443859Sml29623 		dma_p->contig_alloc_type = B_FALSE;
30453859Sml29623 	}
30463859Sml29623 #endif
30473859Sml29623 	dma_p->kaddrp = NULL;
30483859Sml29623 	dma_p->alength = 0;
30493859Sml29623 }
30503859Sml29623 
30513859Sml29623 /*
30523859Sml29623  *	nxge_m_start() -- start transmitting and receiving.
30533859Sml29623  *
30543859Sml29623  *	This function is called by the MAC layer when the first
30553859Sml29623  *	stream is opened, to prepare the hardware for sending
30563859Sml29623  *	and receiving packets.
30573859Sml29623  */
30583859Sml29623 static int
30593859Sml29623 nxge_m_start(void *arg)
30603859Sml29623 {
30613859Sml29623 	p_nxge_t 	nxgep = (p_nxge_t)arg;
30623859Sml29623 
30633859Sml29623 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_start"));
30643859Sml29623 
30653859Sml29623 	MUTEX_ENTER(nxgep->genlock);
30663859Sml29623 	if (nxge_init(nxgep) != NXGE_OK) {
30673859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30683859Sml29623 			"<== nxge_m_start: initialization failed"));
30693859Sml29623 		MUTEX_EXIT(nxgep->genlock);
30703859Sml29623 		return (EIO);
30713859Sml29623 	}
30723859Sml29623 
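	/* Nothing more to do if the MAC has already been started. */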
30733859Sml29623 	if (nxgep->nxge_mac_state == NXGE_MAC_STARTED)
30743859Sml29623 		goto nxge_m_start_exit;
30753859Sml29623 	/*
30763859Sml29623 	 * Start timer to check the system error and tx hangs
30773859Sml29623 	 */
30783859Sml29623 	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
30793859Sml29623 		NXGE_CHECK_TIMER);
30803859Sml29623 
30813859Sml29623 	nxgep->link_notify = B_TRUE;
30823859Sml29623 
30833859Sml29623 	nxgep->nxge_mac_state = NXGE_MAC_STARTED;
30843859Sml29623 
30853859Sml29623 nxge_m_start_exit:
30863859Sml29623 	MUTEX_EXIT(nxgep->genlock);
30873859Sml29623 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_start"));
30883859Sml29623 	return (0);
30893859Sml29623 }
30903859Sml29623 
30913859Sml29623 /*
30923859Sml29623  *	nxge_m_stop(): stop transmitting and receiving.
30933859Sml29623  */
30943859Sml29623 static void
30953859Sml29623 nxge_m_stop(void *arg)
30963859Sml29623 {
30973859Sml29623 	p_nxge_t 	nxgep = (p_nxge_t)arg;
30983859Sml29623 
30993859Sml29623 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_stop"));
31003859Sml29623 
31013859Sml29623 	if (nxgep->nxge_timerid) {
31023859Sml29623 		nxge_stop_timer(nxgep, nxgep->nxge_timerid);
31033859Sml29623 		nxgep->nxge_timerid = 0;
31043859Sml29623 	}
31053859Sml29623 
31063859Sml29623 	MUTEX_ENTER(nxgep->genlock);
31073859Sml29623 	nxge_uninit(nxgep);
31083859Sml29623 
31093859Sml29623 	nxgep->nxge_mac_state = NXGE_MAC_STOPPED;
31103859Sml29623 
31113859Sml29623 	MUTEX_EXIT(nxgep->genlock);
31123859Sml29623 
31133859Sml29623 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_stop"));
31143859Sml29623 }
31153859Sml29623 
31163859Sml29623 static int
31173859Sml29623 nxge_m_unicst(void *arg, const uint8_t *macaddr)
31183859Sml29623 {
31193859Sml29623 	p_nxge_t 	nxgep = (p_nxge_t)arg;
31203859Sml29623 	struct 		ether_addr addrp;
31213859Sml29623 
31223859Sml29623 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "==> nxge_m_unicst"));
31233859Sml29623 
31243859Sml29623 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
31253859Sml29623 	if (nxge_set_mac_addr(nxgep, &addrp)) {
31263859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31273859Sml29623 			"<== nxge_m_unicst: set unicast failed"));
31283859Sml29623 		return (EINVAL);
31293859Sml29623 	}
31303859Sml29623 
31313859Sml29623 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_unicst"));
31323859Sml29623 
31333859Sml29623 	return (0);
31343859Sml29623 }
31353859Sml29623 
31363859Sml29623 static int
31373859Sml29623 nxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
31383859Sml29623 {
31393859Sml29623 	p_nxge_t 	nxgep = (p_nxge_t)arg;
31403859Sml29623 	struct 		ether_addr addrp;
31413859Sml29623 
31423859Sml29623 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
31433859Sml29623 		"==> nxge_m_multicst: add %d", add));
31443859Sml29623 
31453859Sml29623 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
31463859Sml29623 	if (add) {
31473859Sml29623 		if (nxge_add_mcast_addr(nxgep, &addrp)) {
31483859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31493859Sml29623 				"<== nxge_m_multicst: add multicast failed"));
31503859Sml29623 			return (EINVAL);
31513859Sml29623 		}
31523859Sml29623 	} else {
31533859Sml29623 		if (nxge_del_mcast_addr(nxgep, &addrp)) {
31543859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31553859Sml29623 				"<== nxge_m_multicst: del multicast failed"));
31563859Sml29623 			return (EINVAL);
31573859Sml29623 		}
31583859Sml29623 	}
31593859Sml29623 
31603859Sml29623 	NXGE_DEBUG_MSG((nxgep, MAC_CTL, "<== nxge_m_multicst"));
31613859Sml29623 
31623859Sml29623 	return (0);
31633859Sml29623 }
31643859Sml29623 
31653859Sml29623 static int
31663859Sml29623 nxge_m_promisc(void *arg, boolean_t on)
31673859Sml29623 {
31683859Sml29623 	p_nxge_t 	nxgep = (p_nxge_t)arg;
31693859Sml29623 
31703859Sml29623 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
31713859Sml29623 		"==> nxge_m_promisc: on %d", on));
31723859Sml29623 
31733859Sml29623 	if (nxge_set_promisc(nxgep, on)) {
31743859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31753859Sml29623 			"<== nxge_m_promisc: set promisc failed"));
31763859Sml29623 		return (EINVAL);
31773859Sml29623 	}
31783859Sml29623 
31793859Sml29623 	NXGE_DEBUG_MSG((nxgep, MAC_CTL,
31803859Sml29623 		"<== nxge_m_promisc: on %d", on));
31813859Sml29623 
31823859Sml29623 	return (0);
31833859Sml29623 }
31843859Sml29623 
31853859Sml29623 static void
31863859Sml29623 nxge_m_ioctl(void *arg,  queue_t *wq, mblk_t *mp)
31873859Sml29623 {
31883859Sml29623 	p_nxge_t 	nxgep = (p_nxge_t)arg;
31894185Sspeer 	struct 		iocblk *iocp;
31903859Sml29623 	boolean_t 	need_privilege;
31913859Sml29623 	int 		err;
31923859Sml29623 	int 		cmd;
31933859Sml29623 
31943859Sml29623 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl"));
31953859Sml29623 
31963859Sml29623 	iocp = (struct iocblk *)mp->b_rptr;
31973859Sml29623 	iocp->ioc_error = 0;
31983859Sml29623 	need_privilege = B_TRUE;
31993859Sml29623 	cmd = iocp->ioc_cmd;
32003859Sml29623 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "==> nxge_m_ioctl: cmd 0x%08x", cmd));
32013859Sml29623 	switch (cmd) {
32023859Sml29623 	default:
32033859Sml29623 		miocnak(wq, mp, 0, EINVAL);
32043859Sml29623 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl: invalid"));
32053859Sml29623 		return;
32063859Sml29623 
32073859Sml29623 	case LB_GET_INFO_SIZE:
32083859Sml29623 	case LB_GET_INFO:
32093859Sml29623 	case LB_GET_MODE:
32103859Sml29623 		need_privilege = B_FALSE;
32113859Sml29623 		break;
32123859Sml29623 	case LB_SET_MODE:
32133859Sml29623 		break;
32143859Sml29623 
32153859Sml29623 	case ND_GET:
32163859Sml29623 		need_privilege = B_FALSE;
32173859Sml29623 		break;
32183859Sml29623 	case ND_SET:
32193859Sml29623 		break;
32203859Sml29623 
32213859Sml29623 	case NXGE_GET_MII:
32223859Sml29623 	case NXGE_PUT_MII:
32233859Sml29623 	case NXGE_GET64:
32243859Sml29623 	case NXGE_PUT64:
32253859Sml29623 	case NXGE_GET_TX_RING_SZ:
32263859Sml29623 	case NXGE_GET_TX_DESC:
32273859Sml29623 	case NXGE_TX_SIDE_RESET:
32283859Sml29623 	case NXGE_RX_SIDE_RESET:
32293859Sml29623 	case NXGE_GLOBAL_RESET:
32303859Sml29623 	case NXGE_RESET_MAC:
32313859Sml29623 	case NXGE_TX_REGS_DUMP:
32323859Sml29623 	case NXGE_RX_REGS_DUMP:
32333859Sml29623 	case NXGE_INT_REGS_DUMP:
32343859Sml29623 	case NXGE_VIR_INT_REGS_DUMP:
32353859Sml29623 	case NXGE_PUT_TCAM:
32363859Sml29623 	case NXGE_GET_TCAM:
32373859Sml29623 	case NXGE_RTRACE:
32383859Sml29623 	case NXGE_RDUMP:
32393859Sml29623 
32403859Sml29623 		need_privilege = B_FALSE;
32413859Sml29623 		break;
32423859Sml29623 	case NXGE_INJECT_ERR:
32433859Sml29623 		cmn_err(CE_NOTE, "!nxge_m_ioctl: Inject error\n");
32443859Sml29623 		nxge_err_inject(nxgep, wq, mp);
32453859Sml29623 		break;
32463859Sml29623 	}
32473859Sml29623 
32483859Sml29623 	if (need_privilege) {
32494185Sspeer 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
32503859Sml29623 		if (err != 0) {
32513859Sml29623 			miocnak(wq, mp, 0, err);
32523859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32533859Sml29623 				"<== nxge_m_ioctl: no priv"));
32543859Sml29623 			return;
32553859Sml29623 		}
32563859Sml29623 	}
32573859Sml29623 
32583859Sml29623 	switch (cmd) {
32593859Sml29623 	case ND_GET:
32603859Sml29623 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_GET command"));
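		/* FALLTHROUGH */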
32613859Sml29623 	case ND_SET:
32623859Sml29623 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "ND_SET command"));
32633859Sml29623 		nxge_param_ioctl(nxgep, wq, mp, iocp);
32643859Sml29623 		break;
32653859Sml29623 
32663859Sml29623 	case LB_GET_MODE:
32673859Sml29623 	case LB_SET_MODE:
32683859Sml29623 	case LB_GET_INFO_SIZE:
32693859Sml29623 	case LB_GET_INFO:
32703859Sml29623 		nxge_loopback_ioctl(nxgep, wq, mp, iocp);
32713859Sml29623 		break;
32723859Sml29623 
32733859Sml29623 	case NXGE_GET_MII:
32743859Sml29623 	case NXGE_PUT_MII:
32753859Sml29623 	case NXGE_PUT_TCAM:
32763859Sml29623 	case NXGE_GET_TCAM:
32773859Sml29623 	case NXGE_GET64:
32783859Sml29623 	case NXGE_PUT64:
32793859Sml29623 	case NXGE_GET_TX_RING_SZ:
32803859Sml29623 	case NXGE_GET_TX_DESC:
32813859Sml29623 	case NXGE_TX_SIDE_RESET:
32823859Sml29623 	case NXGE_RX_SIDE_RESET:
32833859Sml29623 	case NXGE_GLOBAL_RESET:
32843859Sml29623 	case NXGE_RESET_MAC:
32853859Sml29623 	case NXGE_TX_REGS_DUMP:
32863859Sml29623 	case NXGE_RX_REGS_DUMP:
32873859Sml29623 	case NXGE_INT_REGS_DUMP:
32883859Sml29623 	case NXGE_VIR_INT_REGS_DUMP:
32893859Sml29623 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
32903859Sml29623 			"==> nxge_m_ioctl: cmd 0x%x", cmd));
32913859Sml29623 		nxge_hw_ioctl(nxgep, wq, mp, iocp);
32923859Sml29623 		break;
32933859Sml29623 	}
32943859Sml29623 
32953859Sml29623 	NXGE_DEBUG_MSG((nxgep, NXGE_CTL, "<== nxge_m_ioctl"));
32963859Sml29623 }
32973859Sml29623 
32983859Sml29623 extern void nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
32993859Sml29623 
33003859Sml29623 static void
33013859Sml29623 nxge_m_resources(void *arg)
33023859Sml29623 {
33033859Sml29623 	p_nxge_t		nxgep = arg;
33043859Sml29623 	mac_rx_fifo_t 		mrf;
33053859Sml29623 	p_rx_rcr_rings_t	rcr_rings;
33063859Sml29623 	p_rx_rcr_ring_t		*rcr_p;
33073859Sml29623 	uint32_t		i, ndmas;
33083859Sml29623 	nxge_status_t		status;
33093859Sml29623 
33103859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_m_resources"));
33113859Sml29623 
33123859Sml29623 	MUTEX_ENTER(nxgep->genlock);
33133859Sml29623 
33143859Sml29623 	/*
33153859Sml29623 	 * CR 6492541 Check to see if the drv_state has been initialized,
33163859Sml29623 	 * if not, call nxge_init().
33173859Sml29623 	 */
33183859Sml29623 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
33193859Sml29623 		status = nxge_init(nxgep);
33203859Sml29623 		if (status != NXGE_OK)
33213859Sml29623 			goto nxge_m_resources_exit;
33223859Sml29623 	}
33233859Sml29623 
33243859Sml29623 	mrf.mrf_type = MAC_RX_FIFO;
33253859Sml29623 	mrf.mrf_blank = nxge_rx_hw_blank;
33263859Sml29623 	mrf.mrf_arg = (void *)nxgep;
33273859Sml29623 
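	/*
	 * Default receive interrupt blanking (coalescing) parameters
	 * exported to the MAC layer for each RCR ring.
	 */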
33283859Sml29623 	mrf.mrf_normal_blank_time = 128;
33293859Sml29623 	mrf.mrf_normal_pkt_count = 8;
33303859Sml29623 	rcr_rings = nxgep->rx_rcr_rings;
33313859Sml29623 	rcr_p = rcr_rings->rcr_rings;
33323859Sml29623 	ndmas = rcr_rings->ndmas;
33333859Sml29623 
33343859Sml29623 	/*
33353859Sml29623 	 * Export our receive resources to the MAC layer.
33363859Sml29623 	 */
33373859Sml29623 	for (i = 0; i < ndmas; i++) {
33383859Sml29623 		((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle =
33393859Sml29623 				mac_resource_add(nxgep->mach,
33403859Sml29623 				    (mac_resource_t *)&mrf);
33413859Sml29623 
33423859Sml29623 		NXGE_DEBUG_MSG((nxgep, NXGE_CTL,
33433859Sml29623 			"==> nxge_m_resources: vdma %d dma %d "
33443859Sml29623 			"rcrptr 0x%016llx mac_handle 0x%016llx",
33453859Sml29623 			i, ((p_rx_rcr_ring_t)rcr_p[i])->rdc,
33463859Sml29623 			rcr_p[i],
33473859Sml29623 			((p_rx_rcr_ring_t)rcr_p[i])->rcr_mac_handle));
33483859Sml29623 	}
33493859Sml29623 
33503859Sml29623 nxge_m_resources_exit:
33513859Sml29623 	MUTEX_EXIT(nxgep->genlock);
33523859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_m_resources"));
33533859Sml29623 }
33543859Sml29623 
33553859Sml29623 static void
33563859Sml29623 nxge_mmac_kstat_update(p_nxge_t nxgep, mac_addr_slot_t slot, boolean_t factory)
33573859Sml29623 {
33583859Sml29623 	p_nxge_mmac_stats_t mmac_stats;
33593859Sml29623 	int i;
33603859Sml29623 	nxge_mmac_t *mmac_info;
33613859Sml29623 
33623859Sml29623 	mmac_info = &nxgep->nxge_mmac_info;
33633859Sml29623 
33643859Sml29623 	mmac_stats = &nxgep->statsp->mmac_stats;
33653859Sml29623 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
33663859Sml29623 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
33673859Sml29623 
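	/*
	 * Copy the address into the kstat entry, reversing the byte
	 * order used by the MAC address pools.
	 */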
33683859Sml29623 	for (i = 0; i < ETHERADDRL; i++) {
33693859Sml29623 		if (factory) {
33703859Sml29623 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
33713859Sml29623 			= mmac_info->factory_mac_pool[slot][(ETHERADDRL-1) - i];
33723859Sml29623 		} else {
33733859Sml29623 			mmac_stats->mmac_avail_pool[slot-1].ether_addr_octet[i]
33743859Sml29623 			= mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
33753859Sml29623 		}
33763859Sml29623 	}
33773859Sml29623 }
33783859Sml29623 
33793859Sml29623 /*
33803859Sml29623  * nxge_altmac_set() -- Set an alternate MAC address
33813859Sml29623  */
33823859Sml29623 static int
33833859Sml29623 nxge_altmac_set(p_nxge_t nxgep, uint8_t *maddr, mac_addr_slot_t slot)
33843859Sml29623 {
33853859Sml29623 	uint8_t addrn;
33863859Sml29623 	uint8_t portn;
33873859Sml29623 	npi_mac_addr_t altmac;
33884484Sspeer 	hostinfo_t mac_rdc;
33894484Sspeer 	p_nxge_class_pt_cfg_t clscfgp;
33903859Sml29623 
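	/*
	 * Pack the six address bytes into the three 16-bit words expected
	 * by the alternate address registers (w2 holds the most
	 * significant bytes).
	 */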
33913859Sml29623 	altmac.w2 = ((uint16_t)maddr[0] << 8) | ((uint16_t)maddr[1] & 0x0ff);
33923859Sml29623 	altmac.w1 = ((uint16_t)maddr[2] << 8) | ((uint16_t)maddr[3] & 0x0ff);
33933859Sml29623 	altmac.w0 = ((uint16_t)maddr[4] << 8) | ((uint16_t)maddr[5] & 0x0ff);
33943859Sml29623 
33953859Sml29623 	portn = nxgep->mac.portnum;
33963859Sml29623 	addrn = (uint8_t)slot - 1;
33973859Sml29623 
33983859Sml29623 	if (npi_mac_altaddr_entry(nxgep->npi_handle, OP_SET, portn,
33993859Sml29623 		addrn, &altmac) != NPI_SUCCESS)
34003859Sml29623 		return (EIO);
34014484Sspeer 
34024484Sspeer 	/*
34034484Sspeer 	 * Set the rdc table number for the host info entry
34044484Sspeer 	 * for this mac address slot.
34054484Sspeer 	 */
34064484Sspeer 	clscfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
34074484Sspeer 	mac_rdc.value = 0;
34084484Sspeer 	mac_rdc.bits.w0.rdc_tbl_num = clscfgp->mac_host_info[addrn].rdctbl;
34094484Sspeer 	mac_rdc.bits.w0.mac_pref = clscfgp->mac_host_info[addrn].mpr_npr;
34104484Sspeer 
34114484Sspeer 	if (npi_mac_hostinfo_entry(nxgep->npi_handle, OP_SET,
34124484Sspeer 	    nxgep->function_num, addrn, &mac_rdc) != NPI_SUCCESS) {
34134484Sspeer 		return (EIO);
34144484Sspeer 	}
34154484Sspeer 
34163859Sml29623 	/*
34173859Sml29623 	 * Enable comparison with the alternate MAC address.
34183859Sml29623 	 * While the first alternate addr is enabled by bit 1 of register
34193859Sml29623 	 * BMAC_ALTAD_CMPEN, it is enabled by bit 0 of register
34203859Sml29623 	 * XMAC_ADDR_CMPEN, so slot needs to be converted to addrn
34213859Sml29623 	 * accordingly before calling npi_mac_altaddr_entry.
34223859Sml29623 	 */
34233859Sml29623 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
34243859Sml29623 		addrn = (uint8_t)slot - 1;
34253859Sml29623 	else
34263859Sml29623 		addrn = (uint8_t)slot;
34273859Sml29623 
34283859Sml29623 	if (npi_mac_altaddr_enable(nxgep->npi_handle, portn, addrn)
34293859Sml29623 		!= NPI_SUCCESS)
34303859Sml29623 		return (EIO);
34313859Sml29623 
34323859Sml29623 	return (0);
34333859Sml29623 }
34343859Sml29623 
34353859Sml29623 /*
34363859Sml29623  * nxeg_m_mmac_add() - find an unused address slot, set the address
34373859Sml29623  * value to the one specified, enable the port to start filtering on
34383859Sml29623  * the new MAC address.  Returns 0 on success.
34393859Sml29623  */
34403859Sml29623 static int
34413859Sml29623 nxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
34423859Sml29623 {
34433859Sml29623 	p_nxge_t nxgep = arg;
34443859Sml29623 	mac_addr_slot_t slot;
34453859Sml29623 	nxge_mmac_t *mmac_info;
34463859Sml29623 	int err;
34473859Sml29623 	nxge_status_t status;
34483859Sml29623 
34493859Sml29623 	mutex_enter(nxgep->genlock);
34503859Sml29623 
34513859Sml29623 	/*
34523859Sml29623 	 * Make sure that nxge is initialized, if _start() has
34533859Sml29623 	 * not been called.
34543859Sml29623 	 */
34553859Sml29623 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
34563859Sml29623 		status = nxge_init(nxgep);
34573859Sml29623 		if (status != NXGE_OK) {
34583859Sml29623 			mutex_exit(nxgep->genlock);
34593859Sml29623 			return (ENXIO);
34603859Sml29623 		}
34613859Sml29623 	}
34623859Sml29623 
34633859Sml29623 	mmac_info = &nxgep->nxge_mmac_info;
34643859Sml29623 	if (mmac_info->naddrfree == 0) {
34653859Sml29623 		mutex_exit(nxgep->genlock);
34663859Sml29623 		return (ENOSPC);
34673859Sml29623 	}
34683859Sml29623 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
34693859Sml29623 		maddr->mma_addrlen)) {
34703859Sml29623 		mutex_exit(nxgep->genlock);
34713859Sml29623 		return (EINVAL);
34723859Sml29623 	}
34733859Sml29623 	/*
34743859Sml29623 	 * 	Search for the first available slot. Because naddrfree
34753859Sml29623 	 * is not zero, we are guaranteed to find one.
34763859Sml29623 	 * 	Slot 0 is for unique (primary) MAC. The first alternate
34773859Sml29623 	 * MAC slot is slot 1.
34783859Sml29623 	 *	Each of the first two ports of Neptune has 16 alternate
34794185Sspeer 	 * MAC slots but only the first 7 (or 15) slots have assigned factory
34803859Sml29623 	 * MAC addresses. We first search among the slots without bundled
34813859Sml29623 	 * factory MACs. If we fail to find one in that range, then we
34823859Sml29623 	 * search the slots with bundled factory MACs.  A factory MAC
34833859Sml29623 	 * will be wasted while the slot is used with a user MAC address.
34843859Sml29623 	 * But the slot could be used by factory MAC again after calling
34853859Sml29623 	 * nxge_m_mmac_remove and nxge_m_mmac_reserve.
34863859Sml29623 	 */
34873859Sml29623 	if (mmac_info->num_factory_mmac < mmac_info->num_mmac) {
34883859Sml29623 		for (slot = mmac_info->num_factory_mmac + 1;
34893859Sml29623 			slot <= mmac_info->num_mmac; slot++) {
34903859Sml29623 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
34913859Sml29623 				break;
34923859Sml29623 		}
34933859Sml29623 		if (slot > mmac_info->num_mmac) {
34943859Sml29623 			for (slot = 1; slot <= mmac_info->num_factory_mmac;
34953859Sml29623 				slot++) {
34963859Sml29623 				if (!(mmac_info->mac_pool[slot].flags
34973859Sml29623 					& MMAC_SLOT_USED))
34983859Sml29623 					break;
34993859Sml29623 			}
35003859Sml29623 		}
35013859Sml29623 	} else {
35023859Sml29623 		for (slot = 1; slot <= mmac_info->num_mmac; slot++) {
35033859Sml29623 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
35043859Sml29623 				break;
35053859Sml29623 		}
35063859Sml29623 	}
35073859Sml29623 	ASSERT(slot <= mmac_info->num_mmac);
35083859Sml29623 	if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot)) != 0) {
35093859Sml29623 		mutex_exit(nxgep->genlock);
35103859Sml29623 		return (err);
35113859Sml29623 	}
35123859Sml29623 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
35133859Sml29623 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
35143859Sml29623 	mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
35153859Sml29623 	mmac_info->naddrfree--;
35163859Sml29623 	nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
35173859Sml29623 
35183859Sml29623 	maddr->mma_slot = slot;
35193859Sml29623 
35203859Sml29623 	mutex_exit(nxgep->genlock);
35213859Sml29623 	return (0);
35223859Sml29623 }
35233859Sml29623 
35243859Sml29623 /*
35253859Sml29623  * This function reserves an unused slot and programs the slot and the HW
35263859Sml29623  * with a factory mac address.
35273859Sml29623  */
35283859Sml29623 static int
35293859Sml29623 nxge_m_mmac_reserve(void *arg, mac_multi_addr_t *maddr)
35303859Sml29623 {
35313859Sml29623 	p_nxge_t nxgep = arg;
35323859Sml29623 	mac_addr_slot_t slot;
35333859Sml29623 	nxge_mmac_t *mmac_info;
35343859Sml29623 	int err;
35353859Sml29623 	nxge_status_t status;
35363859Sml29623 
35373859Sml29623 	mutex_enter(nxgep->genlock);
35383859Sml29623 
35393859Sml29623 	/*
35403859Sml29623 	 * Make sure that nxge is initialized, if _start() has
35413859Sml29623 	 * not been called.
35423859Sml29623 	 */
35433859Sml29623 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
35443859Sml29623 		status = nxge_init(nxgep);
35453859Sml29623 		if (status != NXGE_OK) {
35463859Sml29623 			mutex_exit(nxgep->genlock);
35473859Sml29623 			return (ENXIO);
35483859Sml29623 		}
35493859Sml29623 	}
35503859Sml29623 
35513859Sml29623 	mmac_info = &nxgep->nxge_mmac_info;
35523859Sml29623 	if (mmac_info->naddrfree == 0) {
35533859Sml29623 		mutex_exit(nxgep->genlock);
35543859Sml29623 		return (ENOSPC);
35553859Sml29623 	}
35563859Sml29623 
35573859Sml29623 	slot = maddr->mma_slot;
35583859Sml29623 	if (slot == -1) {  /* -1: Take the first available slot */
35593859Sml29623 		for (slot = 1; slot <= mmac_info->num_factory_mmac; slot++) {
35603859Sml29623 			if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
35613859Sml29623 				break;
35623859Sml29623 		}
35633859Sml29623 		if (slot > mmac_info->num_factory_mmac) {
35643859Sml29623 			mutex_exit(nxgep->genlock);
35653859Sml29623 			return (ENOSPC);
35663859Sml29623 		}
35673859Sml29623 	}
35683859Sml29623 	if (slot < 1 || slot > mmac_info->num_factory_mmac) {
35693859Sml29623 		/*
35703859Sml29623 		 * Do not support factory MAC at a slot greater than
35713859Sml29623 		 * num_factory_mmac even when there are available factory
35723859Sml29623 		 * MAC addresses because the alternate MACs are bundled with
35733859Sml29623 		 * slot[1] through slot[num_factory_mmac]
35743859Sml29623 		 */
35753859Sml29623 		mutex_exit(nxgep->genlock);
35763859Sml29623 		return (EINVAL);
35773859Sml29623 	}
35783859Sml29623 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
35793859Sml29623 		mutex_exit(nxgep->genlock);
35803859Sml29623 		return (EBUSY);
35813859Sml29623 	}
35823859Sml29623 	/* Verify the address to be reserved */
35833859Sml29623 	if (!mac_unicst_verify(nxgep->mach,
35843859Sml29623 		mmac_info->factory_mac_pool[slot], ETHERADDRL)) {
35853859Sml29623 		mutex_exit(nxgep->genlock);
35863859Sml29623 		return (EINVAL);
35873859Sml29623 	}
35883859Sml29623 	if (err = nxge_altmac_set(nxgep,
35893859Sml29623 		mmac_info->factory_mac_pool[slot], slot)) {
35903859Sml29623 		mutex_exit(nxgep->genlock);
35913859Sml29623 		return (err);
35923859Sml29623 	}
35933859Sml29623 	bcopy(mmac_info->factory_mac_pool[slot], maddr->mma_addr, ETHERADDRL);
35943859Sml29623 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
35953859Sml29623 	mmac_info->naddrfree--;
35963859Sml29623 
35973859Sml29623 	nxge_mmac_kstat_update(nxgep, slot, B_TRUE);
35983859Sml29623 	mutex_exit(nxgep->genlock);
35993859Sml29623 
36003859Sml29623 	/* Pass info back to the caller */
36013859Sml29623 	maddr->mma_slot = slot;
36023859Sml29623 	maddr->mma_addrlen = ETHERADDRL;
36033859Sml29623 	maddr->mma_flags = MMAC_SLOT_USED | MMAC_VENDOR_ADDR;
36043859Sml29623 
36053859Sml29623 	return (0);
36063859Sml29623 }
36073859Sml29623 
36083859Sml29623 /*
36093859Sml29623  * Remove the specified mac address and update the HW not to filter
36103859Sml29623  * the mac address anymore.
36113859Sml29623  */
36123859Sml29623 static int
36133859Sml29623 nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
36143859Sml29623 {
36153859Sml29623 	p_nxge_t nxgep = arg;
36163859Sml29623 	nxge_mmac_t *mmac_info;
36173859Sml29623 	uint8_t addrn;
36183859Sml29623 	uint8_t portn;
36193859Sml29623 	int err = 0;
36203859Sml29623 	nxge_status_t status;
36213859Sml29623 
36223859Sml29623 	mutex_enter(nxgep->genlock);
36233859Sml29623 
36243859Sml29623 	/*
36253859Sml29623 	 * Make sure that nxge is initialized, if _start() has
36263859Sml29623 	 * not been called.
36273859Sml29623 	 */
36283859Sml29623 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
36293859Sml29623 		status = nxge_init(nxgep);
36303859Sml29623 		if (status != NXGE_OK) {
36313859Sml29623 			mutex_exit(nxgep->genlock);
36323859Sml29623 			return (ENXIO);
36333859Sml29623 		}
36343859Sml29623 	}
36353859Sml29623 
36363859Sml29623 	mmac_info = &nxgep->nxge_mmac_info;
36373859Sml29623 	if (slot < 1 || slot > mmac_info->num_mmac) {
36383859Sml29623 		mutex_exit(nxgep->genlock);
36393859Sml29623 		return (EINVAL);
36403859Sml29623 	}
36413859Sml29623 
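	/*
	 * XMAC ports use zero-based alternate address indexing while
	 * BMAC ports are one-based, so convert the slot number to the
	 * hardware index accordingly.
	 */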
36423859Sml29623 	portn = nxgep->mac.portnum;
36433859Sml29623 	if (portn == XMAC_PORT_0 || portn == XMAC_PORT_1)
36443859Sml29623 		addrn = (uint8_t)slot - 1;
36453859Sml29623 	else
36463859Sml29623 		addrn = (uint8_t)slot;
36473859Sml29623 
36483859Sml29623 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
36493859Sml29623 		if (npi_mac_altaddr_disable(nxgep->npi_handle, portn, addrn)
36503859Sml29623 				== NPI_SUCCESS) {
36513859Sml29623 			mmac_info->naddrfree++;
36523859Sml29623 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
36533859Sml29623 			/*
36543859Sml29623 			 * Regardless of whether the MAC we just stopped filtering
36553859Sml29623 			 * is a user addr or a factory addr, we must set
36563859Sml29623 			 * the MMAC_VENDOR_ADDR flag if this slot has an
36573859Sml29623 			 * associated factory MAC to indicate that a factory
36583859Sml29623 			 * MAC is available.
36593859Sml29623 			 */
36603859Sml29623 			if (slot <= mmac_info->num_factory_mmac) {
36613859Sml29623 				mmac_info->mac_pool[slot].flags
36623859Sml29623 					|= MMAC_VENDOR_ADDR;
36633859Sml29623 			}
36643859Sml29623 			/*
36653859Sml29623 			 * Clear mac_pool[slot].addr so that kstat shows 0
36663859Sml29623 			 * alternate MAC address if the slot is not used.
36673859Sml29623 			 * (But nxge_m_mmac_get returns the factory MAC even
36683859Sml29623 			 * when the slot is not used!)
36693859Sml29623 			 */
36703859Sml29623 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
36713859Sml29623 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
36723859Sml29623 		} else {
36733859Sml29623 			err = EIO;
36743859Sml29623 		}
36753859Sml29623 	} else {
36763859Sml29623 		err = EINVAL;
36773859Sml29623 	}
36783859Sml29623 
36793859Sml29623 	mutex_exit(nxgep->genlock);
36803859Sml29623 	return (err);
36813859Sml29623 }
36823859Sml29623 
36833859Sml29623 
36843859Sml29623 /*
36853859Sml29623  * Modify a mac address added by nxge_m_mmac_add or nxge_m_mmac_reserve().
36863859Sml29623  */
36873859Sml29623 static int
36883859Sml29623 nxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
36893859Sml29623 {
36903859Sml29623 	p_nxge_t nxgep = arg;
36913859Sml29623 	mac_addr_slot_t slot;
36923859Sml29623 	nxge_mmac_t *mmac_info;
36933859Sml29623 	int err = 0;
36943859Sml29623 	nxge_status_t status;
36953859Sml29623 
36963859Sml29623 	if (!mac_unicst_verify(nxgep->mach, maddr->mma_addr,
36973859Sml29623 			maddr->mma_addrlen))
36983859Sml29623 		return (EINVAL);
36993859Sml29623 
37003859Sml29623 	slot = maddr->mma_slot;
37013859Sml29623 
37023859Sml29623 	mutex_enter(nxgep->genlock);
37033859Sml29623 
37043859Sml29623 	/*
37053859Sml29623 	 * Make sure that nxge is initialized, if _start() has
37063859Sml29623 	 * not been called.
37073859Sml29623 	 */
37083859Sml29623 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
37093859Sml29623 		status = nxge_init(nxgep);
37103859Sml29623 		if (status != NXGE_OK) {
37113859Sml29623 			mutex_exit(nxgep->genlock);
37123859Sml29623 			return (ENXIO);
37133859Sml29623 		}
37143859Sml29623 	}
37153859Sml29623 
37163859Sml29623 	mmac_info = &nxgep->nxge_mmac_info;
37173859Sml29623 	if (slot < 1 || slot > mmac_info->num_mmac) {
37183859Sml29623 		mutex_exit(nxgep->genlock);
37193859Sml29623 		return (EINVAL);
37203859Sml29623 	}
37213859Sml29623 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
37223859Sml29623 		if ((err = nxge_altmac_set(nxgep, maddr->mma_addr, slot))
37233859Sml29623 			== 0) {
37243859Sml29623 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
37253859Sml29623 				ETHERADDRL);
37263859Sml29623 			/*
37273859Sml29623 			 * Assume that the MAC passed down from the caller
37283859Sml29623 			 * is not a factory MAC address (The user should
37293859Sml29623 			 * call mmac_remove followed by mmac_reserve if
37303859Sml29623 			 * he wants to use the factory MAC for this slot).
37313859Sml29623 			 */
37323859Sml29623 			mmac_info->mac_pool[slot].flags &= ~MMAC_VENDOR_ADDR;
37333859Sml29623 			nxge_mmac_kstat_update(nxgep, slot, B_FALSE);
37343859Sml29623 		}
37353859Sml29623 	} else {
37363859Sml29623 		err = EINVAL;
37373859Sml29623 	}
37383859Sml29623 	mutex_exit(nxgep->genlock);
37393859Sml29623 	return (err);
37403859Sml29623 }
37413859Sml29623 
37423859Sml29623 /*
37433859Sml29623  * nxge_m_mmac_get() - Get the MAC address and other information
37443859Sml29623  * related to the slot.  mma_flags should be set to 0 in the call.
37453859Sml29623  * Note: although kstat shows MAC address as zero when a slot is
37463859Sml29623  * not used, Crossbow expects nxge_m_mmac_get to copy factory MAC
37473859Sml29623  * to the caller as long as the slot is not using a user MAC address.
37483859Sml29623  * The following table shows the rules,
37493859Sml29623  *
37503859Sml29623  *				   USED    VENDOR    mma_addr
37513859Sml29623  * ------------------------------------------------------------
37523859Sml29623  * (1) Slot uses a user MAC:        yes      no     user MAC
37533859Sml29623  * (2) Slot uses a factory MAC:     yes      yes    factory MAC
37543859Sml29623  * (3) Slot is not used but is
37553859Sml29623  *     factory MAC capable:         no       yes    factory MAC
37563859Sml29623  * (4) Slot is not used and is
37573859Sml29623  *     not factory MAC capable:     no       no        0
37583859Sml29623  * ------------------------------------------------------------
37593859Sml29623  */
37603859Sml29623 static int
37613859Sml29623 nxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
37623859Sml29623 {
37633859Sml29623 	nxge_t *nxgep = arg;
37643859Sml29623 	mac_addr_slot_t slot;
37653859Sml29623 	nxge_mmac_t *mmac_info;
37663859Sml29623 	nxge_status_t status;
37673859Sml29623 
37683859Sml29623 	slot = maddr->mma_slot;
37693859Sml29623 
37703859Sml29623 	mutex_enter(nxgep->genlock);
37713859Sml29623 
37723859Sml29623 	/*
37733859Sml29623 	 * Make sure that nxge is initialized, if _start() has
37743859Sml29623 	 * not been called.
37753859Sml29623 	 */
37763859Sml29623 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
37773859Sml29623 		status = nxge_init(nxgep);
37783859Sml29623 		if (status != NXGE_OK) {
37793859Sml29623 			mutex_exit(nxgep->genlock);
37803859Sml29623 			return (ENXIO);
37813859Sml29623 		}
37823859Sml29623 	}
37833859Sml29623 
37843859Sml29623 	mmac_info = &nxgep->nxge_mmac_info;
37853859Sml29623 
37863859Sml29623 	if (slot < 1 || slot > mmac_info->num_mmac) {
37873859Sml29623 		mutex_exit(nxgep->genlock);
37883859Sml29623 		return (EINVAL);
37893859Sml29623 	}
37903859Sml29623 	maddr->mma_flags = 0;
37913859Sml29623 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED)
37923859Sml29623 		maddr->mma_flags |= MMAC_SLOT_USED;
37933859Sml29623 
37943859Sml29623 	if (mmac_info->mac_pool[slot].flags & MMAC_VENDOR_ADDR) {
37953859Sml29623 		maddr->mma_flags |= MMAC_VENDOR_ADDR;
37963859Sml29623 		bcopy(mmac_info->factory_mac_pool[slot],
37973859Sml29623 			maddr->mma_addr, ETHERADDRL);
37983859Sml29623 		maddr->mma_addrlen = ETHERADDRL;
37993859Sml29623 	} else {
38003859Sml29623 		if (maddr->mma_flags & MMAC_SLOT_USED) {
38013859Sml29623 			bcopy(mmac_info->mac_pool[slot].addr,
38023859Sml29623 				maddr->mma_addr, ETHERADDRL);
38033859Sml29623 			maddr->mma_addrlen = ETHERADDRL;
38043859Sml29623 		} else {
38053859Sml29623 			bzero(maddr->mma_addr, ETHERADDRL);
38063859Sml29623 			maddr->mma_addrlen = 0;
38073859Sml29623 		}
38083859Sml29623 	}
38093859Sml29623 	mutex_exit(nxgep->genlock);
38103859Sml29623 	return (0);
38113859Sml29623 }
38123859Sml29623 
38133859Sml29623 
38143859Sml29623 static boolean_t
38153859Sml29623 nxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
38163859Sml29623 {
38173859Sml29623 	nxge_t *nxgep = arg;
38183859Sml29623 	uint32_t *txflags = cap_data;
38193859Sml29623 	multiaddress_capab_t *mmacp = cap_data;
38203859Sml29623 
38213859Sml29623 	switch (cap) {
38223859Sml29623 	case MAC_CAPAB_HCKSUM:
38233859Sml29623 		*txflags = HCKSUM_INET_PARTIAL;
38243859Sml29623 		break;
38253859Sml29623 	case MAC_CAPAB_POLL:
38263859Sml29623 		/*
38273859Sml29623 		 * There's nothing for us to fill in, simply returning
38283859Sml29623 		 * B_TRUE stating that we support polling is sufficient.
38293859Sml29623 		 */
38303859Sml29623 		break;
38313859Sml29623 
38323859Sml29623 	case MAC_CAPAB_MULTIADDRESS:
38333859Sml29623 		mutex_enter(nxgep->genlock);
38343859Sml29623 
38353859Sml29623 		mmacp->maddr_naddr = nxgep->nxge_mmac_info.num_mmac;
38363859Sml29623 		mmacp->maddr_naddrfree = nxgep->nxge_mmac_info.naddrfree;
38373859Sml29623 		mmacp->maddr_flag = 0; /* 0 is required by PSARC2006/265 */
38383859Sml29623 		/*
38393859Sml29623 		 * maddr_handle is driver's private data, passed back to
38403859Sml29623 		 * entry point functions as arg.
38413859Sml29623 		 */
38423859Sml29623 		mmacp->maddr_handle	= nxgep;
38433859Sml29623 		mmacp->maddr_add	= nxge_m_mmac_add;
38443859Sml29623 		mmacp->maddr_remove	= nxge_m_mmac_remove;
38453859Sml29623 		mmacp->maddr_modify	= nxge_m_mmac_modify;
38463859Sml29623 		mmacp->maddr_get	= nxge_m_mmac_get;
38473859Sml29623 		mmacp->maddr_reserve	= nxge_m_mmac_reserve;
38483859Sml29623 
38493859Sml29623 		mutex_exit(nxgep->genlock);
38503859Sml29623 		break;
3851*5770Sml29623 	case MAC_CAPAB_LSO: {
3852*5770Sml29623 		mac_capab_lso_t *cap_lso = cap_data;
3853*5770Sml29623 
3854*5770Sml29623 		if (nxge_lso_enable) {
3855*5770Sml29623 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
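			/*
			 * Clamp the advertised maximum LSO length to the
			 * NXGE_LSO_MAXLEN limit supported by the driver.
			 */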
3856*5770Sml29623 			if (nxge_lso_max > NXGE_LSO_MAXLEN) {
3857*5770Sml29623 				nxge_lso_max = NXGE_LSO_MAXLEN;
3858*5770Sml29623 			}
3859*5770Sml29623 			cap_lso->lso_basic_tcp_ipv4.lso_max = nxge_lso_max;
3860*5770Sml29623 			break;
3861*5770Sml29623 		} else {
3862*5770Sml29623 			return (B_FALSE);
3863*5770Sml29623 		}
3864*5770Sml29623 	}
3865*5770Sml29623 
38663859Sml29623 	default:
38673859Sml29623 		return (B_FALSE);
38683859Sml29623 	}
38693859Sml29623 	return (B_TRUE);
38703859Sml29623 }
38713859Sml29623 
38723859Sml29623 /*
38733859Sml29623  * Module loading and removing entry points.
38743859Sml29623  */
38753859Sml29623 
38763859Sml29623 static	struct cb_ops 	nxge_cb_ops = {
38773859Sml29623 	nodev,			/* cb_open */
38783859Sml29623 	nodev,			/* cb_close */
38793859Sml29623 	nodev,			/* cb_strategy */
38803859Sml29623 	nodev,			/* cb_print */
38813859Sml29623 	nodev,			/* cb_dump */
38823859Sml29623 	nodev,			/* cb_read */
38833859Sml29623 	nodev,			/* cb_write */
38843859Sml29623 	nodev,			/* cb_ioctl */
38853859Sml29623 	nodev,			/* cb_devmap */
38863859Sml29623 	nodev,			/* cb_mmap */
38873859Sml29623 	nodev,			/* cb_segmap */
38883859Sml29623 	nochpoll,		/* cb_chpoll */
38893859Sml29623 	ddi_prop_op,		/* cb_prop_op */
38903859Sml29623 	NULL,
38913859Sml29623 	D_MP, 			/* cb_flag */
38923859Sml29623 	CB_REV,			/* rev */
38933859Sml29623 	nodev,			/* int (*cb_aread)() */
38943859Sml29623 	nodev			/* int (*cb_awrite)() */
38953859Sml29623 };
38963859Sml29623 
38973859Sml29623 static struct dev_ops nxge_dev_ops = {
38983859Sml29623 	DEVO_REV,		/* devo_rev */
38993859Sml29623 	0,			/* devo_refcnt */
39003859Sml29623 	nulldev,
39013859Sml29623 	nulldev,		/* devo_identify */
39023859Sml29623 	nulldev,		/* devo_probe */
39033859Sml29623 	nxge_attach,		/* devo_attach */
39043859Sml29623 	nxge_detach,		/* devo_detach */
39053859Sml29623 	nodev,			/* devo_reset */
39063859Sml29623 	&nxge_cb_ops,		/* devo_cb_ops */
39073859Sml29623 	(struct bus_ops *)NULL, /* devo_bus_ops	*/
39083859Sml29623 	ddi_power		/* devo_power */
39093859Sml29623 };
39103859Sml29623 
39113859Sml29623 extern	struct	mod_ops	mod_driverops;
39123859Sml29623 
39134977Sraghus #define	NXGE_DESC_VER		"Sun NIU 10Gb Ethernet"
39143859Sml29623 
39153859Sml29623 /*
39163859Sml29623  * Module linkage information for the kernel.
39173859Sml29623  */
39183859Sml29623 static struct modldrv 	nxge_modldrv = {
39193859Sml29623 	&mod_driverops,
39203859Sml29623 	NXGE_DESC_VER,
39213859Sml29623 	&nxge_dev_ops
39223859Sml29623 };
39233859Sml29623 
39243859Sml29623 static struct modlinkage modlinkage = {
39253859Sml29623 	MODREV_1, (void *) &nxge_modldrv, NULL
39263859Sml29623 };
39273859Sml29623 
39283859Sml29623 int
39293859Sml29623 _init(void)
39303859Sml29623 {
39313859Sml29623 	int		status;
39323859Sml29623 
39333859Sml29623 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
39343859Sml29623 	mac_init_ops(&nxge_dev_ops, "nxge");
39353859Sml29623 	status = ddi_soft_state_init(&nxge_list, sizeof (nxge_t), 0);
39363859Sml29623 	if (status != 0) {
39373859Sml29623 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
39383859Sml29623 			"failed to init device soft state"));
39393859Sml29623 		goto _init_exit;
39403859Sml29623 	}
39413859Sml29623 	status = mod_install(&modlinkage);
39423859Sml29623 	if (status != 0) {
39433859Sml29623 		ddi_soft_state_fini(&nxge_list);
39443859Sml29623 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "Mod install failed"));
39453859Sml29623 		goto _init_exit;
39463859Sml29623 	}
39473859Sml29623 
39483859Sml29623 	MUTEX_INIT(&nxge_common_lock, NULL, MUTEX_DRIVER, NULL);
39493859Sml29623 
39503859Sml29623 _init_exit:
39513859Sml29623 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
39523859Sml29623 
39533859Sml29623 	return (status);
39543859Sml29623 }
39553859Sml29623 
39563859Sml29623 int
39573859Sml29623 _fini(void)
39583859Sml29623 {
39593859Sml29623 	int		status;
39603859Sml29623 
39613859Sml29623 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
39623859Sml29623 
39633859Sml29623 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
39643859Sml29623 
39653859Sml29623 	if (nxge_mblks_pending)
39663859Sml29623 		return (EBUSY);
39673859Sml29623 
39683859Sml29623 	status = mod_remove(&modlinkage);
39693859Sml29623 	if (status != DDI_SUCCESS) {
39703859Sml29623 		NXGE_DEBUG_MSG((NULL, MOD_CTL,
39713859Sml29623 			    "Module removal failed 0x%08x",
39723859Sml29623 			    status));
39733859Sml29623 		goto _fini_exit;
39743859Sml29623 	}
39753859Sml29623 
39763859Sml29623 	mac_fini_ops(&nxge_dev_ops);
39773859Sml29623 
39783859Sml29623 	ddi_soft_state_fini(&nxge_list);
39793859Sml29623 
39803859Sml29623 	MUTEX_DESTROY(&nxge_common_lock);
39813859Sml29623 _fini_exit:
39823859Sml29623 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
39833859Sml29623 
39843859Sml29623 	return (status);
39853859Sml29623 }
39863859Sml29623 
39873859Sml29623 int
39883859Sml29623 _info(struct modinfo *modinfop)
39893859Sml29623 {
39903859Sml29623 	int		status;
39913859Sml29623 
39923859Sml29623 	NXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
39933859Sml29623 	status = mod_info(&modlinkage, modinfop);
39943859Sml29623 	NXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
39953859Sml29623 
39963859Sml29623 	return (status);
39973859Sml29623 }
39983859Sml29623 
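/*
 * nxge_add_intrs: discover the interrupt types supported by the device,
 * select MSI-X, MSI or fixed interrupts according to the nxge_msi_enable
 * tunable, and register the chosen type via nxge_add_intrs_adv().
 */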
39993859Sml29623 /*ARGSUSED*/
40003859Sml29623 static nxge_status_t
40013859Sml29623 nxge_add_intrs(p_nxge_t nxgep)
40023859Sml29623 {
40033859Sml29623 
40043859Sml29623 	int		intr_types;
40053859Sml29623 	int		type = 0;
40063859Sml29623 	int		ddi_status = DDI_SUCCESS;
40073859Sml29623 	nxge_status_t	status = NXGE_OK;
40083859Sml29623 
40093859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs"));
40103859Sml29623 
40113859Sml29623 	nxgep->nxge_intr_type.intr_registered = B_FALSE;
40123859Sml29623 	nxgep->nxge_intr_type.intr_enabled = B_FALSE;
40133859Sml29623 	nxgep->nxge_intr_type.msi_intx_cnt = 0;
40143859Sml29623 	nxgep->nxge_intr_type.intr_added = 0;
40153859Sml29623 	nxgep->nxge_intr_type.niu_msi_enable = B_FALSE;
40163859Sml29623 	nxgep->nxge_intr_type.intr_type = 0;
40173859Sml29623 
40183859Sml29623 	if (nxgep->niu_type == N2_NIU) {
40193859Sml29623 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
40203859Sml29623 	} else if (nxge_msi_enable) {
40213859Sml29623 		nxgep->nxge_intr_type.niu_msi_enable = B_TRUE;
40223859Sml29623 	}
40233859Sml29623 
40243859Sml29623 	/* Get the supported interrupt types */
40253859Sml29623 	if ((ddi_status = ddi_intr_get_supported_types(nxgep->dip, &intr_types))
40263859Sml29623 			!= DDI_SUCCESS) {
40273859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_intrs: "
40283859Sml29623 			"ddi_intr_get_supported_types failed: status 0x%08x",
40293859Sml29623 			ddi_status));
40303859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
40313859Sml29623 	}
40323859Sml29623 	nxgep->nxge_intr_type.intr_types = intr_types;
40333859Sml29623 
40343859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
40353859Sml29623 		"ddi_intr_get_supported_types: 0x%08x", intr_types));
40363859Sml29623 
40373859Sml29623 	/*
40383859Sml29623 	 * Solaris MSI-X is not supported yet; use MSI for now.
40393859Sml29623 	 * nxge_msi_enable settings:
40403859Sml29623 	 *	1 - MSI		2 - MSI-X	others - FIXED
40413859Sml29623 	 */
40423859Sml29623 	switch (nxge_msi_enable) {
40433859Sml29623 	default:
40443859Sml29623 		type = DDI_INTR_TYPE_FIXED;
40453859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
40463859Sml29623 			"use fixed (intx emulation) type 0x%08x",
40473859Sml29623 			type));
40483859Sml29623 		break;
40493859Sml29623 
40503859Sml29623 	case 2:
40513859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
40523859Sml29623 			"ddi_intr_get_supported_types: 0x%08x", intr_types));
40533859Sml29623 		if (intr_types & DDI_INTR_TYPE_MSIX) {
40543859Sml29623 			type = DDI_INTR_TYPE_MSIX;
40553859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
40563859Sml29623 				"ddi_intr_get_supported_types: MSIX 0x%08x",
40573859Sml29623 				type));
40583859Sml29623 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
40593859Sml29623 			type = DDI_INTR_TYPE_MSI;
40603859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
40613859Sml29623 				"ddi_intr_get_supported_types: MSI 0x%08x",
40623859Sml29623 				type));
40633859Sml29623 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
40643859Sml29623 			type = DDI_INTR_TYPE_FIXED;
40653859Sml29623 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
40663859Sml29623 				"ddi_intr_get_supported_types: FIXED 0x%08x",
40673859Sml29623 				type));
40683859Sml29623 		}
40693859Sml29623 		break;
40703859Sml29623 
40713859Sml29623 	case 1:
40723859Sml29623 		if (intr_types & DDI_INTR_TYPE_MSI) {
40733859Sml29623 			type = DDI_INTR_TYPE_MSI;
40743859Sml29623 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs: "
40753859Sml29623 				"ddi_intr_get_supported_types: MSI 0x%08x",
40763859Sml29623 				type));
40773859Sml29623 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
40783859Sml29623 			type = DDI_INTR_TYPE_MSIX;
40793859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
40803859Sml29623 				"ddi_intr_get_supported_types: MSIX 0x%08x",
40813859Sml29623 				type));
40823859Sml29623 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
40833859Sml29623 			type = DDI_INTR_TYPE_FIXED;
40843859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
40853859Sml29623 				"ddi_intr_get_supported_types: FIXED 0x%08x",
40863859Sml29623 				type));
40873859Sml29623 		}
40883859Sml29623 	}
40893859Sml29623 
40903859Sml29623 	nxgep->nxge_intr_type.intr_type = type;
40913859Sml29623 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
40923859Sml29623 		type == DDI_INTR_TYPE_FIXED) &&
40933859Sml29623 			nxgep->nxge_intr_type.niu_msi_enable) {
40943859Sml29623 		if ((status = nxge_add_intrs_adv(nxgep)) != DDI_SUCCESS) {
40953859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
40963859Sml29623 				    " nxge_add_intrs: "
40973859Sml29623 				    " nxge_add_intrs_adv failed: status 0x%08x",
40983859Sml29623 				    status));
40993859Sml29623 			return (status);
41003859Sml29623 		} else {
41013859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs: "
41023859Sml29623 			"interrupts registered : type %d", type));
41033859Sml29623 			nxgep->nxge_intr_type.intr_registered = B_TRUE;
41043859Sml29623 
41053859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
41063859Sml29623 				"\nAdded advanced interrupt handlers, "
41073859Sml29623 					"intr type 0x%x\n", type));
41083859Sml29623 
41093859Sml29623 			return (status);
41103859Sml29623 		}
41113859Sml29623 	}
41123859Sml29623 
41133859Sml29623 	if (!nxgep->nxge_intr_type.intr_registered) {
41143859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_add_intrs: "
41153859Sml29623 			"failed to register interrupts"));
41163859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
41173859Sml29623 	}
41183859Sml29623 
41193859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_intrs"));
41203859Sml29623 	return (status);
41213859Sml29623 }
41223859Sml29623 
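/*
 * nxge_add_soft_intrs: register the low-priority soft interrupt used to
 * reschedule transmit processing (nxge_reschedule).
 */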
41233859Sml29623 /*ARGSUSED*/
41243859Sml29623 static nxge_status_t
41253859Sml29623 nxge_add_soft_intrs(p_nxge_t nxgep)
41263859Sml29623 {
41273859Sml29623 
41283859Sml29623 	int		ddi_status = DDI_SUCCESS;
41293859Sml29623 	nxge_status_t	status = NXGE_OK;
41303859Sml29623 
41313859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_soft_intrs"));
41323859Sml29623 
41333859Sml29623 	nxgep->resched_id = NULL;
41343859Sml29623 	nxgep->resched_running = B_FALSE;
41353859Sml29623 	ddi_status = ddi_add_softintr(nxgep->dip, DDI_SOFTINT_LOW,
41363859Sml29623 			&nxgep->resched_id,
41373859Sml29623 		NULL, NULL, nxge_reschedule, (caddr_t)nxgep);
41383859Sml29623 	if (ddi_status != DDI_SUCCESS) {
41393859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_add_soft_intrs: "
41403859Sml29623 			"ddi_add_softintr failed: status 0x%08x",
41413859Sml29623 			ddi_status));
41423859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
41433859Sml29623 	}
41443859Sml29623 
41453859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_add_soft_intrs"));
41463859Sml29623 
41473859Sml29623 	return (status);
41483859Sml29623 }
41493859Sml29623 
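/*
 * nxge_add_intrs_adv: dispatch to the MSI/MSI-X or fixed-interrupt
 * registration routine based on the previously selected interrupt type.
 */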
41503859Sml29623 static nxge_status_t
41513859Sml29623 nxge_add_intrs_adv(p_nxge_t nxgep)
41523859Sml29623 {
41533859Sml29623 	int		intr_type;
41543859Sml29623 	p_nxge_intr_t	intrp;
41553859Sml29623 
41563859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv"));
41573859Sml29623 
41583859Sml29623 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
41593859Sml29623 	intr_type = intrp->intr_type;
41603859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_add_intrs_adv: type 0x%x",
41613859Sml29623 		intr_type));
41623859Sml29623 
41633859Sml29623 	switch (intr_type) {
41643859Sml29623 	case DDI_INTR_TYPE_MSI: /* 0x2 */
41653859Sml29623 	case DDI_INTR_TYPE_MSIX: /* 0x4 */
41663859Sml29623 		return (nxge_add_intrs_adv_type(nxgep, intr_type));
41673859Sml29623 
41683859Sml29623 	case DDI_INTR_TYPE_FIXED: /* 0x1 */
41693859Sml29623 		return (nxge_add_intrs_adv_type_fix(nxgep, intr_type));
41703859Sml29623 
41713859Sml29623 	default:
41723859Sml29623 		return (NXGE_ERROR);
41733859Sml29623 	}
41743859Sml29623 }
41753859Sml29623 
41763859Sml29623 
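/*
 * nxge_add_intrs_adv_type: allocate and register MSI/MSI-X interrupts.
 * The number of vectors requested is limited by the "#msix-request"
 * property (PSARC/2007/453) and, for MSI, rounded down to a power of 2.
 * Logical device groups are then initialized and a handler is added for
 * each required vector.
 */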
41773859Sml29623 /*ARGSUSED*/
41783859Sml29623 static nxge_status_t
41793859Sml29623 nxge_add_intrs_adv_type(p_nxge_t nxgep, uint32_t int_type)
41803859Sml29623 {
41813859Sml29623 	dev_info_t		*dip = nxgep->dip;
41823859Sml29623 	p_nxge_ldg_t		ldgp;
41833859Sml29623 	p_nxge_intr_t		intrp;
41843859Sml29623 	uint_t			*inthandler;
41853859Sml29623 	void			*arg1, *arg2;
41863859Sml29623 	int			behavior;
41875013Sml29623 	int			nintrs, navail, nrequest;
41883859Sml29623 	int			nactual, nrequired;
41893859Sml29623 	int			inum = 0;
41903859Sml29623 	int			x, y;
41913859Sml29623 	int			ddi_status = DDI_SUCCESS;
41923859Sml29623 	nxge_status_t		status = NXGE_OK;
41933859Sml29623 
41943859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type"));
41953859Sml29623 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
41963859Sml29623 	intrp->start_inum = 0;
41973859Sml29623 
41983859Sml29623 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
41993859Sml29623 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
42003859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
42013859Sml29623 			"ddi_intr_get_nintrs() failed, status: 0x%x, "
42023859Sml29623 			    "nintrs: %d", ddi_status, nintrs));
42033859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
42043859Sml29623 	}
42053859Sml29623 
42063859Sml29623 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
42073859Sml29623 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
42083859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
42093859Sml29623 			"ddi_intr_get_navail() failed, status: 0x%x, "
42103859Sml29623 			    "navail: %d", ddi_status, navail));
42113859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
42123859Sml29623 	}
42133859Sml29623 
42143859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
42153859Sml29623 		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
42163859Sml29623 		    nintrs, navail));
42173859Sml29623 
42185013Sml29623 	/* PSARC/2007/453 MSI-X interrupt limit override */
42195013Sml29623 	if (int_type == DDI_INTR_TYPE_MSIX) {
42205013Sml29623 		nrequest = nxge_create_msi_property(nxgep);
42215013Sml29623 		if (nrequest < navail) {
42225013Sml29623 			navail = nrequest;
42235013Sml29623 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
42245013Sml29623 			    "nxge_add_intrs_adv_type: nintrs %d "
42255013Sml29623 			    "navail %d (nrequest %d)",
42265013Sml29623 			    nintrs, navail, nrequest));
42275013Sml29623 		}
42285013Sml29623 	}
42295013Sml29623 
42303859Sml29623 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
42313859Sml29623 		/* MSI count must be a power of 2; round navail down (max 16) */
42323859Sml29623 		if ((navail & 16) == 16) {
42333859Sml29623 			navail = 16;
42343859Sml29623 		} else if ((navail & 8) == 8) {
42353859Sml29623 			navail = 8;
42363859Sml29623 		} else if ((navail & 4) == 4) {
42373859Sml29623 			navail = 4;
42383859Sml29623 		} else if ((navail & 2) == 2) {
42393859Sml29623 			navail = 2;
42403859Sml29623 		} else {
42413859Sml29623 			navail = 1;
42423859Sml29623 		}
42433859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
42443859Sml29623 			"ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
42453859Sml29623 			"navail %d", nintrs, navail));
42463859Sml29623 	}
42473859Sml29623 
42483859Sml29623 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
42493859Sml29623 			DDI_INTR_ALLOC_NORMAL);
42503859Sml29623 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
42513859Sml29623 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
42523859Sml29623 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
42533859Sml29623 		    navail, &nactual, behavior);
42543859Sml29623 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
42553859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
42563859Sml29623 				    " ddi_intr_alloc() failed: %d",
42573859Sml29623 				    ddi_status));
42583859Sml29623 		kmem_free(intrp->htable, intrp->intr_size);
42593859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
42603859Sml29623 	}
42613859Sml29623 
42623859Sml29623 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
42633859Sml29623 			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
42643859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
42653859Sml29623 				    " ddi_intr_get_pri() failed: %d",
42663859Sml29623 				    ddi_status));
42673859Sml29623 		/* Free already allocated interrupts */
42683859Sml29623 		for (y = 0; y < nactual; y++) {
42693859Sml29623 			(void) ddi_intr_free(intrp->htable[y]);
42703859Sml29623 		}
42713859Sml29623 
42723859Sml29623 		kmem_free(intrp->htable, intrp->intr_size);
42733859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
42743859Sml29623 	}
42753859Sml29623 
42763859Sml29623 	nrequired = 0;
42773859Sml29623 	switch (nxgep->niu_type) {
42783859Sml29623 	default:
42793859Sml29623 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
42803859Sml29623 		break;
42813859Sml29623 
42823859Sml29623 	case N2_NIU:
42833859Sml29623 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
42843859Sml29623 		break;
42853859Sml29623 	}
42863859Sml29623 
42873859Sml29623 	if (status != NXGE_OK) {
42883859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
42893859Sml29623 			"nxge_add_intrs_adv_type: nxge_ldgv_init "
42903859Sml29623 			"failed: 0x%x", status));
42913859Sml29623 		/* Free already allocated interrupts */
42923859Sml29623 		for (y = 0; y < nactual; y++) {
42933859Sml29623 			(void) ddi_intr_free(intrp->htable[y]);
42943859Sml29623 		}
42953859Sml29623 
42963859Sml29623 		kmem_free(intrp->htable, intrp->intr_size);
42973859Sml29623 		return (status);
42983859Sml29623 	}
42993859Sml29623 
43003859Sml29623 	ldgp = nxgep->ldgvp->ldgp;
43013859Sml29623 	for (x = 0; x < nrequired; x++, ldgp++) {
43023859Sml29623 		ldgp->vector = (uint8_t)x;
43033859Sml29623 		ldgp->intdata = SID_DATA(ldgp->func, x);
43043859Sml29623 		arg1 = ldgp->ldvp;
43053859Sml29623 		arg2 = nxgep;
43063859Sml29623 		if (ldgp->nldvs == 1) {
43073859Sml29623 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
43083859Sml29623 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
43093859Sml29623 				"nxge_add_intrs_adv_type: "
43103859Sml29623 				"arg1 0x%x arg2 0x%x: "
43113859Sml29623 				"1-1 int handler (entry %d intdata 0x%x)\n",
43123859Sml29623 				arg1, arg2,
43133859Sml29623 				x, ldgp->intdata));
43143859Sml29623 		} else if (ldgp->nldvs > 1) {
43153859Sml29623 			inthandler = (uint_t *)ldgp->sys_intr_handler;
43163859Sml29623 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
43173859Sml29623 				"nxge_add_intrs_adv_type: "
43183859Sml29623 				"arg1 0x%x arg2 0x%x: "
43193859Sml29623 				"nldvs %d int handler "
43203859Sml29623 				"(entry %d intdata 0x%x)\n",
43213859Sml29623 				arg1, arg2,
43223859Sml29623 				ldgp->nldvs, x, ldgp->intdata));
43233859Sml29623 		}
43243859Sml29623 
43253859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
43263859Sml29623 			"==> nxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
43273859Sml29623 			"htable 0x%llx", x, intrp->htable[x]));
43283859Sml29623 
43293859Sml29623 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
43303859Sml29623 			(ddi_intr_handler_t *)inthandler, arg1, arg2))
43313859Sml29623 				!= DDI_SUCCESS) {
43323859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
43333859Sml29623 				"==> nxge_add_intrs_adv_type: failed #%d "
43343859Sml29623 				"status 0x%x", x, ddi_status));
43353859Sml29623 			for (y = 0; y < intrp->intr_added; y++) {
43363859Sml29623 				(void) ddi_intr_remove_handler(
43373859Sml29623 						intrp->htable[y]);
43383859Sml29623 			}
43393859Sml29623 			/* Free already allocated intr */
43403859Sml29623 			for (y = 0; y < nactual; y++) {
43413859Sml29623 				(void) ddi_intr_free(intrp->htable[y]);
43423859Sml29623 			}
43433859Sml29623 			kmem_free(intrp->htable, intrp->intr_size);
43443859Sml29623 
43453859Sml29623 			(void) nxge_ldgv_uninit(nxgep);
43463859Sml29623 
43473859Sml29623 			return (NXGE_ERROR | NXGE_DDI_FAILED);
43483859Sml29623 		}
43493859Sml29623 		intrp->intr_added++;
43503859Sml29623 	}
43513859Sml29623 
43523859Sml29623 	intrp->msi_intx_cnt = nactual;
43533859Sml29623 
43543859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
43553859Sml29623 		"Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
43563859Sml29623 		navail, nactual,
43573859Sml29623 		intrp->msi_intx_cnt,
43583859Sml29623 		intrp->intr_added));
43593859Sml29623 
43603859Sml29623 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
43613859Sml29623 
43623859Sml29623 	(void) nxge_intr_ldgv_init(nxgep);
43633859Sml29623 
43643859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type"));
43653859Sml29623 
43663859Sml29623 	return (status);
43673859Sml29623 }
43683859Sml29623 
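/*
 * nxge_add_intrs_adv_type_fix: allocate and register fixed (INTx)
 * interrupts, initialize the logical device groups and add a handler
 * for each required vector.
 */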
43693859Sml29623 /*ARGSUSED*/
43703859Sml29623 static nxge_status_t
43713859Sml29623 nxge_add_intrs_adv_type_fix(p_nxge_t nxgep, uint32_t int_type)
43723859Sml29623 {
43733859Sml29623 	dev_info_t		*dip = nxgep->dip;
43743859Sml29623 	p_nxge_ldg_t		ldgp;
43753859Sml29623 	p_nxge_intr_t		intrp;
43763859Sml29623 	uint_t			*inthandler;
43773859Sml29623 	void			*arg1, *arg2;
43783859Sml29623 	int			behavior;
43793859Sml29623 	int			nintrs, navail;
43803859Sml29623 	int			nactual, nrequired;
43813859Sml29623 	int			inum = 0;
43823859Sml29623 	int			x, y;
43833859Sml29623 	int			ddi_status = DDI_SUCCESS;
43843859Sml29623 	nxge_status_t		status = NXGE_OK;
43853859Sml29623 
43863859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_add_intrs_adv_type_fix"));
43873859Sml29623 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
43883859Sml29623 	intrp->start_inum = 0;
43893859Sml29623 
43903859Sml29623 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
43913859Sml29623 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
43923859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
43933859Sml29623 			"ddi_intr_get_nintrs() failed, status: 0x%x, "
43943859Sml29623 			    "nintrs: %d", ddi_status, nintrs));
43953859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
43963859Sml29623 	}
43973859Sml29623 
43983859Sml29623 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
43993859Sml29623 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
44003859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
44013859Sml29623 			"ddi_intr_get_navail() failed, status: 0x%x, "
44023859Sml29623 			    "navail: %d", ddi_status, navail));
44033859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
44043859Sml29623 	}
44053859Sml29623 
44063859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
44073859Sml29623 		"ddi_intr_get_navail() returned: nintrs %d, navail %d",
44083859Sml29623 		    nintrs, navail));
44093859Sml29623 
44103859Sml29623 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
44113859Sml29623 			DDI_INTR_ALLOC_NORMAL);
44123859Sml29623 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
44133859Sml29623 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
44143859Sml29623 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
44153859Sml29623 		    navail, &nactual, behavior);
44163859Sml29623 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
44173859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
44183859Sml29623 			    " ddi_intr_alloc() failed: %d",
44193859Sml29623 			    ddi_status));
44203859Sml29623 		kmem_free(intrp->htable, intrp->intr_size);
44213859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
44223859Sml29623 	}
44233859Sml29623 
44243859Sml29623 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
44253859Sml29623 			(uint_t *)&intrp->pri)) != DDI_SUCCESS) {
44263859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
44273859Sml29623 				    " ddi_intr_get_pri() failed: %d",
44283859Sml29623 				    ddi_status));
44293859Sml29623 		/* Free already allocated interrupts */
44303859Sml29623 		for (y = 0; y < nactual; y++) {
44313859Sml29623 			(void) ddi_intr_free(intrp->htable[y]);
44323859Sml29623 		}
44333859Sml29623 
44343859Sml29623 		kmem_free(intrp->htable, intrp->intr_size);
44353859Sml29623 		return (NXGE_ERROR | NXGE_DDI_FAILED);
44363859Sml29623 	}
44373859Sml29623 
44383859Sml29623 	nrequired = 0;
44393859Sml29623 	switch (nxgep->niu_type) {
44403859Sml29623 	default:
44413859Sml29623 		status = nxge_ldgv_init(nxgep, &nactual, &nrequired);
44423859Sml29623 		break;
44433859Sml29623 
44443859Sml29623 	case N2_NIU:
44453859Sml29623 		status = nxge_ldgv_init_n2(nxgep, &nactual, &nrequired);
44463859Sml29623 		break;
44473859Sml29623 	}
44483859Sml29623 
44493859Sml29623 	if (status != NXGE_OK) {
44503859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
44513859Sml29623 			"nxge_add_intrs_adv_type_fix:nxge_ldgv_init "
44523859Sml29623 			"failed: 0x%x", status));
44533859Sml29623 		/* Free already allocated interrupts */
44543859Sml29623 		for (y = 0; y < nactual; y++) {
44553859Sml29623 			(void) ddi_intr_free(intrp->htable[y]);
44563859Sml29623 		}
44573859Sml29623 
44583859Sml29623 		kmem_free(intrp->htable, intrp->intr_size);
44593859Sml29623 		return (status);
44603859Sml29623 	}
44613859Sml29623 
44623859Sml29623 	ldgp = nxgep->ldgvp->ldgp;
44633859Sml29623 	for (x = 0; x < nrequired; x++, ldgp++) {
44643859Sml29623 		ldgp->vector = (uint8_t)x;
44653859Sml29623 		if (nxgep->niu_type != N2_NIU) {
44663859Sml29623 			ldgp->intdata = SID_DATA(ldgp->func, x);
44673859Sml29623 		}
44683859Sml29623 
44693859Sml29623 		arg1 = ldgp->ldvp;
44703859Sml29623 		arg2 = nxgep;
44713859Sml29623 		if (ldgp->nldvs == 1) {
44723859Sml29623 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
44733859Sml29623 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
44743859Sml29623 				"nxge_add_intrs_adv_type_fix: "
44753859Sml29623 				"1-1 int handler(%d) ldg %d ldv %d "
44763859Sml29623 				"arg1 $%p arg2 $%p\n",
44773859Sml29623 				x, ldgp->ldg, ldgp->ldvp->ldv,
44783859Sml29623 				arg1, arg2));
44793859Sml29623 		} else if (ldgp->nldvs > 1) {
44803859Sml29623 			inthandler = (uint_t *)ldgp->sys_intr_handler;
44813859Sml29623 			NXGE_DEBUG_MSG((nxgep, INT_CTL,
44823859Sml29623 				"nxge_add_intrs_adv_type_fix: "
44833859Sml29623 				"shared int handler(%d) nldvs %d ldg %d ldv %d "
44843859Sml29623 				"arg1 0x%016llx arg2 0x%016llx\n",
44853859Sml29623 				x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
44863859Sml29623 				arg1, arg2));
44873859Sml29623 		}
44883859Sml29623 
44893859Sml29623 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
44903859Sml29623 			(ddi_intr_handler_t *)inthandler, arg1, arg2))
44913859Sml29623 				!= DDI_SUCCESS) {
44923859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
44933859Sml29623 				"==> nxge_add_intrs_adv_type_fix: failed #%d "
44943859Sml29623 				"status 0x%x", x, ddi_status));
44953859Sml29623 			for (y = 0; y < intrp->intr_added; y++) {
44963859Sml29623 				(void) ddi_intr_remove_handler(
44973859Sml29623 						intrp->htable[y]);
44983859Sml29623 			}
44993859Sml29623 			for (y = 0; y < nactual; y++) {
45003859Sml29623 				(void) ddi_intr_free(intrp->htable[y]);
45013859Sml29623 			}
45023859Sml29623 			/* Free already allocated intr */
45033859Sml29623 			kmem_free(intrp->htable, intrp->intr_size);
45043859Sml29623 
45053859Sml29623 			(void) nxge_ldgv_uninit(nxgep);
45063859Sml29623 
45073859Sml29623 			return (NXGE_ERROR | NXGE_DDI_FAILED);
45083859Sml29623 		}
45093859Sml29623 		intrp->intr_added++;
45103859Sml29623 	}
45113859Sml29623 
45123859Sml29623 	intrp->msi_intx_cnt = nactual;
45133859Sml29623 
45143859Sml29623 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
45153859Sml29623 
45163859Sml29623 	status = nxge_intr_ldgv_init(nxgep);
45173859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_add_intrs_adv_type_fix"));
45183859Sml29623 
45193859Sml29623 	return (status);
45203859Sml29623 }
45213859Sml29623 
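/*
 * nxge_remove_intrs: disable, remove and free all registered interrupt
 * handlers, uninitialize the logical device groups and remove the
 * "#msix-request" property.
 */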
45223859Sml29623 static void
45233859Sml29623 nxge_remove_intrs(p_nxge_t nxgep)
45243859Sml29623 {
45253859Sml29623 	int		i, inum;
45263859Sml29623 	p_nxge_intr_t	intrp;
45273859Sml29623 
45283859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs"));
45293859Sml29623 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
45303859Sml29623 	if (!intrp->intr_registered) {
45313859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
45323859Sml29623 			"<== nxge_remove_intrs: interrupts not registered"));
45333859Sml29623 		return;
45343859Sml29623 	}
45353859Sml29623 
45363859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_intrs:advanced"));
45373859Sml29623 
45383859Sml29623 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
45393859Sml29623 		(void) ddi_intr_block_disable(intrp->htable,
45403859Sml29623 			intrp->intr_added);
45413859Sml29623 	} else {
45423859Sml29623 		for (i = 0; i < intrp->intr_added; i++) {
45433859Sml29623 			(void) ddi_intr_disable(intrp->htable[i]);
45443859Sml29623 		}
45453859Sml29623 	}
45463859Sml29623 
45473859Sml29623 	for (inum = 0; inum < intrp->intr_added; inum++) {
45483859Sml29623 		if (intrp->htable[inum]) {
45493859Sml29623 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
45503859Sml29623 		}
45513859Sml29623 	}
45523859Sml29623 
45533859Sml29623 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
45543859Sml29623 		if (intrp->htable[inum]) {
45553859Sml29623 			NXGE_DEBUG_MSG((nxgep, DDI_CTL,
45563859Sml29623 				"nxge_remove_intrs: ddi_intr_free inum %d "
45573859Sml29623 				"msi_intx_cnt %d intr_added %d",
45583859Sml29623 				inum,
45593859Sml29623 				intrp->msi_intx_cnt,
45603859Sml29623 				intrp->intr_added));
45613859Sml29623 
45623859Sml29623 			(void) ddi_intr_free(intrp->htable[inum]);
45633859Sml29623 		}
45643859Sml29623 	}
45653859Sml29623 
45663859Sml29623 	kmem_free(intrp->htable, intrp->intr_size);
45673859Sml29623 	intrp->intr_registered = B_FALSE;
45683859Sml29623 	intrp->intr_enabled = B_FALSE;
45693859Sml29623 	intrp->msi_intx_cnt = 0;
45703859Sml29623 	intrp->intr_added = 0;
45713859Sml29623 
45723859Sml29623 	(void) nxge_ldgv_uninit(nxgep);
45733859Sml29623 
45745013Sml29623 	(void) ddi_prop_remove(DDI_DEV_T_NONE, nxgep->dip,
45755013Sml29623 	    "#msix-request");
45765013Sml29623 
45773859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_intrs"));
45783859Sml29623 }
45793859Sml29623 
45803859Sml29623 /*ARGSUSED*/
45813859Sml29623 static void
45823859Sml29623 nxge_remove_soft_intrs(p_nxge_t nxgep)
45833859Sml29623 {
45843859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_remove_soft_intrs"));
45853859Sml29623 	if (nxgep->resched_id) {
45863859Sml29623 		ddi_remove_softintr(nxgep->resched_id);
45873859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
45883859Sml29623 			"==> nxge_remove_soft_intrs: removed"));
45893859Sml29623 		nxgep->resched_id = NULL;
45903859Sml29623 	}
45913859Sml29623 
45923859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_remove_soft_intrs"));
45933859Sml29623 }
45943859Sml29623 
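/*
 * nxge_intrs_enable: enable all registered interrupts, using block
 * enable when the DDI_INTR_FLAG_BLOCK capability is present.
 */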
45953859Sml29623 /*ARGSUSED*/
45963859Sml29623 static void
45973859Sml29623 nxge_intrs_enable(p_nxge_t nxgep)
45983859Sml29623 {
45993859Sml29623 	p_nxge_intr_t	intrp;
46003859Sml29623 	int		i;
46013859Sml29623 	int		status;
46023859Sml29623 
46033859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable"));
46043859Sml29623 
46053859Sml29623 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
46063859Sml29623 
46073859Sml29623 	if (!intrp->intr_registered) {
46083859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_intrs_enable: "
46093859Sml29623 			"interrupts are not registered"));
46103859Sml29623 		return;
46113859Sml29623 	}
46123859Sml29623 
46133859Sml29623 	if (intrp->intr_enabled) {
46143859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
46153859Sml29623 			"<== nxge_intrs_enable: already enabled"));
46163859Sml29623 		return;
46173859Sml29623 	}
46183859Sml29623 
46193859Sml29623 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
46203859Sml29623 		status = ddi_intr_block_enable(intrp->htable,
46213859Sml29623 			intrp->intr_added);
46223859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
46233859Sml29623 			"block enable - status 0x%x total inums #%d\n",
46243859Sml29623 			status, intrp->intr_added));
46253859Sml29623 	} else {
46263859Sml29623 		for (i = 0; i < intrp->intr_added; i++) {
46273859Sml29623 			status = ddi_intr_enable(intrp->htable[i]);
46283859Sml29623 			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_enable "
46293859Sml29623 				"ddi_intr_enable:enable - status 0x%x "
46303859Sml29623 				"total inums %d enable inum #%d\n",
46313859Sml29623 				status, intrp->intr_added, i));
46323859Sml29623 			if (status == DDI_SUCCESS) {
46333859Sml29623 				intrp->intr_enabled = B_TRUE;
46343859Sml29623 			}
46353859Sml29623 		}
46363859Sml29623 	}
46373859Sml29623 
46383859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_enable"));
46393859Sml29623 }
46403859Sml29623 
46413859Sml29623 /*ARGSUSED*/
46423859Sml29623 static void
46433859Sml29623 nxge_intrs_disable(p_nxge_t nxgep)
46443859Sml29623 {
46453859Sml29623 	p_nxge_intr_t	intrp;
46463859Sml29623 	int		i;
46473859Sml29623 
46483859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intrs_disable"));
46493859Sml29623 
46503859Sml29623 	intrp = (p_nxge_intr_t)&nxgep->nxge_intr_type;
46513859Sml29623 
46523859Sml29623 	if (!intrp->intr_registered) {
46533859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable: "
46543859Sml29623 			"interrupts are not registered"));
46553859Sml29623 		return;
46563859Sml29623 	}
46573859Sml29623 
46583859Sml29623 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
46593859Sml29623 		(void) ddi_intr_block_disable(intrp->htable,
46603859Sml29623 			intrp->intr_added);
46613859Sml29623 	} else {
46623859Sml29623 		for (i = 0; i < intrp->intr_added; i++) {
46633859Sml29623 			(void) ddi_intr_disable(intrp->htable[i]);
46643859Sml29623 		}
46653859Sml29623 	}
46663859Sml29623 
46673859Sml29623 	intrp->intr_enabled = B_FALSE;
46683859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intrs_disable"));
46693859Sml29623 }
46703859Sml29623 
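/*
 * nxge_mac_register: register this instance with the MAC layer,
 * advertising the driver callbacks and the maximum SDU derived from the
 * configured maximum frame size.
 */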
46713859Sml29623 static nxge_status_t
46723859Sml29623 nxge_mac_register(p_nxge_t nxgep)
46733859Sml29623 {
46743859Sml29623 	mac_register_t *macp;
46753859Sml29623 	int		status;
46763859Sml29623 
46773859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_mac_register"));
46783859Sml29623 
46793859Sml29623 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
46803859Sml29623 		return (NXGE_ERROR);
46813859Sml29623 
46823859Sml29623 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
46833859Sml29623 	macp->m_driver = nxgep;
46843859Sml29623 	macp->m_dip = nxgep->dip;
46853859Sml29623 	macp->m_src_addr = nxgep->ouraddr.ether_addr_octet;
46863859Sml29623 	macp->m_callbacks = &nxge_m_callbacks;
46873859Sml29623 	macp->m_min_sdu = 0;
46883859Sml29623 	macp->m_max_sdu = nxgep->mac.maxframesize -
46893859Sml29623 		sizeof (struct ether_header) - ETHERFCSL - 4;
46903859Sml29623 
46913859Sml29623 	status = mac_register(macp, &nxgep->mach);
46923859Sml29623 	mac_free(macp);
46933859Sml29623 
46943859Sml29623 	if (status != 0) {
46953859Sml29623 		cmn_err(CE_WARN,
46963859Sml29623 			"!nxge_mac_register failed (status %d instance %d)",
46973859Sml29623 			status, nxgep->instance);
46983859Sml29623 		return (NXGE_ERROR);
46993859Sml29623 	}
47003859Sml29623 
47013859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_mac_register success "
47023859Sml29623 		"(instance %d)", nxgep->instance));
47033859Sml29623 
47043859Sml29623 	return (NXGE_OK);
47053859Sml29623 }
47063859Sml29623 
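/*
 * nxge_err_inject: ioctl handler that injects an error into the
 * hardware block identified by the err_inject_t payload of the M_IOCTL
 * message (currently IPP, TXC, TXDMA, RXDMA and ZCP act on the request).
 */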
47073859Sml29623 void
47083859Sml29623 nxge_err_inject(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
47093859Sml29623 {
47103859Sml29623 	ssize_t		size;
47113859Sml29623 	mblk_t		*nmp;
47123859Sml29623 	uint8_t		blk_id;
47133859Sml29623 	uint8_t		chan;
47143859Sml29623 	uint32_t	err_id;
47153859Sml29623 	err_inject_t	*eip;
47163859Sml29623 
47173859Sml29623 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_err_inject"));
47183859Sml29623 
47193859Sml29623 	size = 1024;
47203859Sml29623 	nmp = mp->b_cont;
47213859Sml29623 	eip = (err_inject_t *)nmp->b_rptr;
47223859Sml29623 	blk_id = eip->blk_id;
47233859Sml29623 	err_id = eip->err_id;
47243859Sml29623 	chan = eip->chan;
47253859Sml29623 	cmn_err(CE_NOTE, "!blk_id = 0x%x\n", blk_id);
47263859Sml29623 	cmn_err(CE_NOTE, "!err_id = 0x%x\n", err_id);
47273859Sml29623 	cmn_err(CE_NOTE, "!chan = 0x%x\n", chan);
47283859Sml29623 	switch (blk_id) {
47293859Sml29623 	case MAC_BLK_ID:
47303859Sml29623 		break;
47313859Sml29623 	case TXMAC_BLK_ID:
47323859Sml29623 		break;
47333859Sml29623 	case RXMAC_BLK_ID:
47343859Sml29623 		break;
47353859Sml29623 	case MIF_BLK_ID:
47363859Sml29623 		break;
47373859Sml29623 	case IPP_BLK_ID:
47383859Sml29623 		nxge_ipp_inject_err(nxgep, err_id);
47393859Sml29623 		break;
47403859Sml29623 	case TXC_BLK_ID:
47413859Sml29623 		nxge_txc_inject_err(nxgep, err_id);
47423859Sml29623 		break;
47433859Sml29623 	case TXDMA_BLK_ID:
47443859Sml29623 		nxge_txdma_inject_err(nxgep, err_id, chan);
47453859Sml29623 		break;
47463859Sml29623 	case RXDMA_BLK_ID:
47473859Sml29623 		nxge_rxdma_inject_err(nxgep, err_id, chan);
47483859Sml29623 		break;
47493859Sml29623 	case ZCP_BLK_ID:
47503859Sml29623 		nxge_zcp_inject_err(nxgep, err_id);
47513859Sml29623 		break;
47523859Sml29623 	case ESPC_BLK_ID:
47533859Sml29623 		break;
47543859Sml29623 	case FFLP_BLK_ID:
47553859Sml29623 		break;
47563859Sml29623 	case PHY_BLK_ID:
47573859Sml29623 		break;
47583859Sml29623 	case ETHER_SERDES_BLK_ID:
47593859Sml29623 		break;
47603859Sml29623 	case PCIE_SERDES_BLK_ID:
47613859Sml29623 		break;
47623859Sml29623 	case VIR_BLK_ID:
47633859Sml29623 		break;
47643859Sml29623 	}
47653859Sml29623 
47663859Sml29623 	nmp->b_wptr = nmp->b_rptr + size;
47673859Sml29623 	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_err_inject"));
47683859Sml29623 
47693859Sml29623 	miocack(wq, mp, (int)size, 0);
47703859Sml29623 }
47713859Sml29623 
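/*
 * nxge_init_common_dev: attach this instance to the per-Neptune shared
 * hardware state.  If no entry exists for the parent devinfo node, a
 * new nxge_hw_list_t is allocated, its locks are initialized and the
 * ports/PHYs are scanned.
 */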
47723859Sml29623 static int
47733859Sml29623 nxge_init_common_dev(p_nxge_t nxgep)
47743859Sml29623 {
47753859Sml29623 	p_nxge_hw_list_t	hw_p;
47763859Sml29623 	dev_info_t 		*p_dip;
47773859Sml29623 
47783859Sml29623 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_init_common_device"));
47793859Sml29623 
47803859Sml29623 	p_dip = nxgep->p_dip;
47813859Sml29623 	MUTEX_ENTER(&nxge_common_lock);
47823859Sml29623 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
47833859Sml29623 		"==> nxge_init_common_dev:func # %d",
47843859Sml29623 			nxgep->function_num));
47853859Sml29623 	/*
47863859Sml29623 	 * Loop through the existing per-Neptune hardware list.
47873859Sml29623 	 */
47883859Sml29623 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
47893859Sml29623 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
47903859Sml29623 			"==> nxge_init_common_device:func # %d "
47913859Sml29623 			"hw_p $%p parent dip $%p",
47923859Sml29623 			nxgep->function_num,
47933859Sml29623 			hw_p,
47943859Sml29623 			p_dip));
47953859Sml29623 		if (hw_p->parent_devp == p_dip) {
47963859Sml29623 			nxgep->nxge_hw_p = hw_p;
47973859Sml29623 			hw_p->ndevs++;
47983859Sml29623 			hw_p->nxge_p[nxgep->function_num] = nxgep;
47993859Sml29623 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
48003859Sml29623 				"==> nxge_init_common_device:func # %d "
48013859Sml29623 				"hw_p $%p parent dip $%p "
48023859Sml29623 				"ndevs %d (found)",
48033859Sml29623 				nxgep->function_num,
48043859Sml29623 				hw_p,
48053859Sml29623 				p_dip,
48063859Sml29623 				hw_p->ndevs));
48073859Sml29623 			break;
48083859Sml29623 		}
48093859Sml29623 	}
48103859Sml29623 
48113859Sml29623 	if (hw_p == NULL) {
48123859Sml29623 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
48133859Sml29623 			"==> nxge_init_common_device:func # %d "
48143859Sml29623 			"parent dip $%p (new)",
48153859Sml29623 			nxgep->function_num,
48163859Sml29623 			p_dip));
48173859Sml29623 		hw_p = kmem_zalloc(sizeof (nxge_hw_list_t), KM_SLEEP);
48183859Sml29623 		hw_p->parent_devp = p_dip;
48193859Sml29623 		hw_p->magic = NXGE_NEPTUNE_MAGIC;
48203859Sml29623 		nxgep->nxge_hw_p = hw_p;
48213859Sml29623 		hw_p->ndevs++;
48223859Sml29623 		hw_p->nxge_p[nxgep->function_num] = nxgep;
48233859Sml29623 		hw_p->next = nxge_hw_list;
48244732Sdavemq 		if (nxgep->niu_type == N2_NIU) {
48254732Sdavemq 			hw_p->niu_type = N2_NIU;
48264732Sdavemq 			hw_p->platform_type = P_NEPTUNE_NIU;
48274732Sdavemq 		} else {
48284732Sdavemq 			hw_p->niu_type = NIU_TYPE_NONE;
48294977Sraghus 			hw_p->platform_type = P_NEPTUNE_NONE;
48304732Sdavemq 		}
48313859Sml29623 
48323859Sml29623 		MUTEX_INIT(&hw_p->nxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
48333859Sml29623 		MUTEX_INIT(&hw_p->nxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
48343859Sml29623 		MUTEX_INIT(&hw_p->nxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
48353859Sml29623 		MUTEX_INIT(&hw_p->nxge_mdio_lock, NULL, MUTEX_DRIVER, NULL);
48363859Sml29623 		MUTEX_INIT(&hw_p->nxge_mii_lock, NULL, MUTEX_DRIVER, NULL);
48373859Sml29623 
48383859Sml29623 		nxge_hw_list = hw_p;
48394732Sdavemq 
48404732Sdavemq 		(void) nxge_scan_ports_phy(nxgep, nxge_hw_list);
48413859Sml29623 	}
48423859Sml29623 
48433859Sml29623 	MUTEX_EXIT(&nxge_common_lock);
48444732Sdavemq 
48454977Sraghus 	nxgep->platform_type = hw_p->platform_type;
48464732Sdavemq 	if (nxgep->niu_type != N2_NIU) {
48474732Sdavemq 		nxgep->niu_type = hw_p->niu_type;
48484732Sdavemq 	}
48494732Sdavemq 
48503859Sml29623 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
48513859Sml29623 		"==> nxge_init_common_device (nxge_hw_list) $%p",
48523859Sml29623 		nxge_hw_list));
48533859Sml29623 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_init_common_device"));
48543859Sml29623 
48553859Sml29623 	return (NXGE_OK);
48563859Sml29623 }
48573859Sml29623 
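/*
 * nxge_uninit_common_dev: detach this instance from the shared hardware
 * state and free the nxge_hw_list_t entry when the last function of the
 * device detaches.
 */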
48583859Sml29623 static void
48593859Sml29623 nxge_uninit_common_dev(p_nxge_t nxgep)
48603859Sml29623 {
48613859Sml29623 	p_nxge_hw_list_t	hw_p, h_hw_p;
48623859Sml29623 	dev_info_t 		*p_dip;
48633859Sml29623 
48643859Sml29623 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==> nxge_uninit_common_device"));
48653859Sml29623 	if (nxgep->nxge_hw_p == NULL) {
48663859Sml29623 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
48673859Sml29623 			"<== nxge_uninit_common_device (no common)"));
48683859Sml29623 		return;
48693859Sml29623 	}
48703859Sml29623 
48713859Sml29623 	MUTEX_ENTER(&nxge_common_lock);
48723859Sml29623 	h_hw_p = nxge_hw_list;
48733859Sml29623 	for (hw_p = nxge_hw_list; hw_p; hw_p = hw_p->next) {
48743859Sml29623 		p_dip = hw_p->parent_devp;
48753859Sml29623 		if (nxgep->nxge_hw_p == hw_p &&
48763859Sml29623 			p_dip == nxgep->p_dip &&
48773859Sml29623 			nxgep->nxge_hw_p->magic == NXGE_NEPTUNE_MAGIC &&
48783859Sml29623 			hw_p->magic == NXGE_NEPTUNE_MAGIC) {
48793859Sml29623 
48803859Sml29623 			NXGE_DEBUG_MSG((nxgep, MOD_CTL,
48813859Sml29623 				"==> nxge_uninit_common_device:func # %d "
48823859Sml29623 				"hw_p $%p parent dip $%p "
48833859Sml29623 				"ndevs %d (found)",
48843859Sml29623 				nxgep->function_num,
48853859Sml29623 				hw_p,
48863859Sml29623 				p_dip,
48873859Sml29623 				hw_p->ndevs));
48883859Sml29623 
48893859Sml29623 			nxgep->nxge_hw_p = NULL;
48903859Sml29623 			if (hw_p->ndevs) {
48913859Sml29623 				hw_p->ndevs--;
48923859Sml29623 			}
48933859Sml29623 			hw_p->nxge_p[nxgep->function_num] = NULL;
48943859Sml29623 			if (!hw_p->ndevs) {
48953859Sml29623 				MUTEX_DESTROY(&hw_p->nxge_vlan_lock);
48963859Sml29623 				MUTEX_DESTROY(&hw_p->nxge_tcam_lock);
48973859Sml29623 				MUTEX_DESTROY(&hw_p->nxge_cfg_lock);
48983859Sml29623 				MUTEX_DESTROY(&hw_p->nxge_mdio_lock);
48993859Sml29623 				MUTEX_DESTROY(&hw_p->nxge_mii_lock);
49003859Sml29623 				NXGE_DEBUG_MSG((nxgep, MOD_CTL,
49013859Sml29623 					"==> nxge_uninit_common_device: "
49023859Sml29623 					"func # %d "
49033859Sml29623 					"hw_p $%p parent dip $%p "
49043859Sml29623 					"ndevs %d (last)",
49053859Sml29623 					nxgep->function_num,
49063859Sml29623 					hw_p,
49073859Sml29623 					p_dip,
49083859Sml29623 					hw_p->ndevs));
49093859Sml29623 
49103859Sml29623 				if (hw_p == nxge_hw_list) {
49113859Sml29623 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
49123859Sml29623 						"==> nxge_uninit_common_device:"
49133859Sml29623 						"remove head func # %d "
49143859Sml29623 						"hw_p $%p parent dip $%p "
49153859Sml29623 						"ndevs %d (head)",
49163859Sml29623 						nxgep->function_num,
49173859Sml29623 						hw_p,
49183859Sml29623 						p_dip,
49193859Sml29623 						hw_p->ndevs));
49203859Sml29623 					nxge_hw_list = hw_p->next;
49213859Sml29623 				} else {
49223859Sml29623 					NXGE_DEBUG_MSG((nxgep, MOD_CTL,
49233859Sml29623 						"==> nxge_uninit_common_device:"
49243859Sml29623 						"remove middle func # %d "
49253859Sml29623 						"hw_p $%p parent dip $%p "
49263859Sml29623 						"ndevs %d (middle)",
49273859Sml29623 						nxgep->function_num,
49283859Sml29623 						hw_p,
49293859Sml29623 						p_dip,
49303859Sml29623 						hw_p->ndevs));
49313859Sml29623 					h_hw_p->next = hw_p->next;
49323859Sml29623 				}
49333859Sml29623 
49343859Sml29623 				KMEM_FREE(hw_p, sizeof (nxge_hw_list_t));
49353859Sml29623 			}
49363859Sml29623 			break;
49373859Sml29623 		} else {
49383859Sml29623 			h_hw_p = hw_p;
49393859Sml29623 		}
49403859Sml29623 	}
49413859Sml29623 
49423859Sml29623 	MUTEX_EXIT(&nxge_common_lock);
49433859Sml29623 	NXGE_DEBUG_MSG((nxgep, MOD_CTL,
49443859Sml29623 		"==> nxge_uninit_common_device (nxge_hw_list) $%p",
49453859Sml29623 		nxge_hw_list));
49463859Sml29623 
49473859Sml29623 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<== nxge_uninit_common_device"));
49483859Sml29623 }
49494732Sdavemq 
49504732Sdavemq /*
49514977Sraghus  * Determines the number of ports from the niu_type or the platform type.
49524732Sdavemq  * Returns the number of ports, or zero on failure.
49534732Sdavemq  */
49544732Sdavemq 
49554732Sdavemq int
49564977Sraghus nxge_get_nports(p_nxge_t nxgep)
49574732Sdavemq {
49584732Sdavemq 	int	nports = 0;
49594732Sdavemq 
49604977Sraghus 	switch (nxgep->niu_type) {
49614732Sdavemq 	case N2_NIU:
49624732Sdavemq 	case NEPTUNE_2_10GF:
49634732Sdavemq 		nports = 2;
49644732Sdavemq 		break;
49654732Sdavemq 	case NEPTUNE_4_1GC:
49664732Sdavemq 	case NEPTUNE_2_10GF_2_1GC:
49674732Sdavemq 	case NEPTUNE_1_10GF_3_1GC:
49684732Sdavemq 	case NEPTUNE_1_1GC_1_10GF_2_1GC:
49694732Sdavemq 		nports = 4;
49704732Sdavemq 		break;
49714732Sdavemq 	default:
49724977Sraghus 		switch (nxgep->platform_type) {
49734977Sraghus 		case P_NEPTUNE_NIU:
49744977Sraghus 		case P_NEPTUNE_ATLAS_2PORT:
49754977Sraghus 			nports = 2;
49764977Sraghus 			break;
49774977Sraghus 		case P_NEPTUNE_ATLAS_4PORT:
49784977Sraghus 		case P_NEPTUNE_MARAMBA_P0:
49794977Sraghus 		case P_NEPTUNE_MARAMBA_P1:
49805196Ssbehera 		case P_NEPTUNE_ALONSO:
49814977Sraghus 			nports = 4;
49824977Sraghus 			break;
49834977Sraghus 		default:
49844977Sraghus 			break;
49854977Sraghus 		}
49864732Sdavemq 		break;
49874732Sdavemq 	}
49884732Sdavemq 
49894732Sdavemq 	return (nports);
49904732Sdavemq }
49915013Sml29623 
49925013Sml29623 /*
49935013Sml29623  * The following two functions are to support
49945013Sml29623  * PSARC/2007/453 MSI-X interrupt limit override.
49955013Sml29623  */
49965013Sml29623 static int
49975013Sml29623 nxge_create_msi_property(p_nxge_t nxgep)
49985013Sml29623 {
49995013Sml29623 	int	nmsi;
50005013Sml29623 	extern	int ncpus;
50015013Sml29623 
50025013Sml29623 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "==>nxge_create_msi_property"));
50035013Sml29623 
50045013Sml29623 	switch (nxgep->mac.portmode) {
50055013Sml29623 	case PORT_10G_COPPER:
50065013Sml29623 	case PORT_10G_FIBER:
50075013Sml29623 		(void) ddi_prop_create(DDI_DEV_T_NONE, nxgep->dip,
50085013Sml29623 		    DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
50095013Sml29623 		/*
50105013Sml29623 		 * The maximum number of MSI-X vectors requested is 8.
50115013Sml29623 		 * If the number of CPUs is less than 8, request one
50125013Sml29623 		 * MSI-X vector per CPU.
50135013Sml29623 		 */
50145013Sml29623 		if (ncpus >= NXGE_MSIX_REQUEST_10G) {
50155013Sml29623 			nmsi = NXGE_MSIX_REQUEST_10G;
50165013Sml29623 		} else {
50175013Sml29623 			nmsi = ncpus;
50185013Sml29623 		}
50195013Sml29623 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
50205013Sml29623 		    "==>nxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
50215013Sml29623 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
50225013Sml29623 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
50235013Sml29623 		break;
50245013Sml29623 
50255013Sml29623 	default:
50265013Sml29623 		nmsi = NXGE_MSIX_REQUEST_1G;
50275013Sml29623 		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
50285013Sml29623 		    "==>nxge_create_msi_property(1G): exists 0x%x (nmsi %d)",
50295013Sml29623 		    ddi_prop_exists(DDI_DEV_T_NONE, nxgep->dip,
50305013Sml29623 		    DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
50315013Sml29623 		break;
50325013Sml29623 	}
50335013Sml29623 
50345013Sml29623 	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "<==nxge_create_msi_property"));
50355013Sml29623 	return (nmsi);
50365013Sml29623 }
5037