xref: /onnv-gate/usr/src/uts/common/io/nxge/nxge_txdma.c (revision 7950:2bc2440fbed9)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>
#include <npi_tx_rd64.h>
#include <npi_tx_wr64.h>
#include <sys/llc1.h>

uint32_t 	nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t	nxge_tx_minfree = 32;
uint32_t	nxge_tx_intr_thres = 0;
uint32_t	nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t	nxge_tx_tiny_pack = 1;
uint32_t	nxge_tx_use_bcopy = 1;

extern uint32_t 	nxge_tx_ring_size;
extern uint32_t 	nxge_bcopy_thresh;
extern uint32_t 	nxge_dvma_thresh;
extern uint32_t 	nxge_dma_stream_thresh;
extern dma_method_t 	nxge_force_dma;
extern uint32_t		nxge_cksum_offload;

/* Device register access attributes for PIO.  */
extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
/* Device descriptor access attributes for DMA.  */
extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
/* Device buffer access attributes for DMA.  */
extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t nxge_desc_dma_attr;
extern ddi_dma_attr_t nxge_tx_dma_attr;

extern int nxge_serial_tx(mblk_t *mp, void *arg);

void nxge_txdma_freemsg_task(p_tx_ring_t tx_ring_p);

static nxge_status_t nxge_map_txdma(p_nxge_t, int);

static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);

static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *,
	uint32_t, p_nxge_dma_common_t *,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);

static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);

static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
	p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
    p_tx_ring_t, p_tx_mbox_t);
static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);

static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
	p_nxge_ldv_t, tx_cs_t);
static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
	uint16_t, p_tx_ring_t);

static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
    p_tx_ring_t ring_p, uint16_t channel);

nxge_status_t
nxge_init_txdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t	*set = &nxgep->tx_set;
	int		i, tdc, count;
	nxge_grp_t	*group;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));

	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];

			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
				if ((1 << tdc) & group->map) {
					if ((nxge_grp_dc_add(nxgep, group,
					    VP_BOUND_TX, tdc)))
						goto init_txdma_channels_exit;
				}
			}
		}

		if (++count == set->lg.count)
			break;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
	return (NXGE_OK);

init_txdma_channels_exit:
	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];

			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
				if ((1 << tdc) & group->map) {
					nxge_grp_dc_remove(nxgep,
					    VP_BOUND_TX, tdc);
				}
			}
		}

		if (++count == set->lg.count)
			break;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
	return (NXGE_ERROR);
}

nxge_status_t
nxge_init_txdma_channel(
	p_nxge_t nxge,
	int channel)
{
	nxge_status_t status;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));

	status = nxge_map_txdma(nxge, channel);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_init_txdma_channel: status 0x%x", status));
		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
		return (status);
	}

	status = nxge_txdma_hw_start(nxge, channel);
	if (status != NXGE_OK) {
		(void) nxge_unmap_txdma_channel(nxge, channel);
		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
		return (status);
	}

	if (!nxge->statsp->tdc_ksp[channel])
		nxge_setup_tdc_kstats(nxge, channel);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));

	return (status);
}

void
nxge_uninit_txdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));

	if (set->owned.map == 0) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "nxge_uninit_txdma_channels: no channels"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
		}
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
}

void
nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));

	if (nxgep->statsp->tdc_ksp[channel]) {
		kstat_delete(nxgep->statsp->tdc_ksp[channel]);
		nxgep->statsp->tdc_ksp[channel] = 0;
	}

	(void) nxge_txdma_stop_channel(nxgep, channel);
	nxge_unmap_txdma_channel(nxgep, channel);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_uninit_txdma_channel"));
}

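/*
 * nxge_setup_dma_common
 *
 *	Carve a region of <entries> blocks of <size> bytes out of the
 *	source DMA area: the destination gets a copy of the source
 *	descriptor with its length, block count and block size set to
 *	the carved region, while the source's kernel address, DMA cookie
 *	and remaining length are advanced past it.  A sketch of typical
 *	usage (the entry counts and element types below are illustrative
 *	only) is to carve a descriptor ring and then a mailbox from one
 *	large pre-allocated area:
 *
 *		nxge_setup_dma_common(&ring_area, &area, nentries,
 *		    sizeof (tx_desc_t));
 *		nxge_setup_dma_common(&mbox_area, &area, 1, mbox_size);
 */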
void
nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
	uint32_t entries, uint32_t size)
{
	size_t		tsize;
	*dest_p = *src_p;
	tsize = size * entries;
	dest_p->alength = tsize;
	dest_p->nblocks = entries;
	dest_p->block_size = size;
	dest_p->offset += tsize;

	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
	src_p->alength -= tsize;
	src_p->dma_cookie.dmac_laddress += tsize;
	src_p->dma_cookie.dmac_size -= tsize;
}

/*
 * nxge_reset_txdma_channel
 *
 *	Reset a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to reset.
 * 	reg_data	The current TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_reset()
 *	npi_txdma_channel_control()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TX_RING_KICK	DMC+0x40018 Transmit Ring Kick
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
{
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;
	npi_handle_t		handle;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
		rs = npi_txdma_channel_reset(handle, channel);
	} else {
		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
		    channel);
	}

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	/*
	 * Reset the tail (kick) register to 0.
	 * (Hardware will not reset it.  A Tx overflow fatal error
	 * results if the tail is not set to 0 after reset!)
	 */
	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
	return (status);
}

/*
 * nxge_init_txdma_channel_event_mask
 *
 *	Enable interrupts for a set of events.
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel whose event mask to set.
 * 	mask_p	The events to enable.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_event_mask()
 *
 * Registers accessed:
 *	TX_ENT_MSK	DMC+0x40020 Transmit Event Mask
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
		p_tx_dma_ent_msk_t mask_p)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
3196929Smisaki 	    "<== nxge_init_txdma_channel_event_mask"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

/*
 * nxge_init_txdma_channel_cntl_stat
 *
 *	Initialize a TDC's control and status (TX_CS) register.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel whose TX_CS register to set.
 * 	reg_data	The value written to TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
	uint64_t reg_data)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
3596929Smisaki 	    "<== nxge_init_txdma_channel_cntl_stat"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_control_status(handle, OP_SET, channel,
	    (p_tx_cs_t)&reg_data);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

/*
 * nxge_enable_txdma_channel
 *
 *	Enable a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to enable.
 * 	tx_desc_p	The channel's transmit descriptor ring.
 * 	mbox_p		The channel's mailbox.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_config()
 *	npi_txdma_mbox_config()
 *	npi_txdma_channel_init_enable()
 *
 * Registers accessed:
 *	TX_RNG_CFIG	DMC+0x40000 Transmit Ring Configuration
 *	TXDMA_MBH	DMC+0x40030 TXDMA Mailbox High
 *	TXDMA_MBL	DMC+0x40038 TXDMA Mailbox Low
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_enable_txdma_channel(p_nxge_t nxgep,
	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the transmit ring configurations.
	 */
	rs = npi_txdma_ring_config(handle, OP_SET, channel,
	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	if (isLDOMguest(nxgep)) {
		/* Add interrupt handler for this channel. */
		if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
			return (NXGE_ERROR);
	}

	/* Write to hardware the mailbox */
	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Start the DMA engine. */
	rs = npi_txdma_channel_init_enable(handle, channel);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));

	return (status);
}

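/*
 * nxge_fill_tx_hdr
 *
 *	Build the Neptune transmit packet header that is prepended to
 *	each outgoing frame.  The L2/L3/L4 headers are copied out of the
 *	(possibly fragmented) mblk chain into a local buffer, and the
 *	header word is programmed with the total transfer length, pad
 *	count, L3/L4 start offsets, and checksum flags.  For UDP with
 *	nxge_cksum_offload disabled, the checksum is computed in
 *	software here and stored directly into the packet instead.
 */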
void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
		boolean_t l4_cksum, int pkt_len, uint8_t npads,
		p_tx_pkt_hdr_all_t pkthdrp,
		t_uscalar_t start_offset,
		t_uscalar_t stuff_offset)
{
	p_tx_pkt_header_t	hdrp;
	p_mblk_t 		nmp;
	uint64_t		tmp;
	size_t 			mblk_len;
	size_t 			iph_len;
	size_t 			hdrs_size;
	uint8_t			hdrs_buf[sizeof (struct ether_header) +
	    64 + sizeof (uint32_t)];
	uint8_t			*cursor;
	uint8_t 		*ip_buf;
	uint16_t		eth_type;
	uint8_t			ipproto;
	boolean_t		is_vlan = B_FALSE;
	size_t			eth_hdr_size;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * Caller should zero out the headers first.
	 */
	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

	if (fill_len) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: pkt_len %d "
		    "npads %d", pkt_len, npads));
		tmp = (uint64_t)pkt_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
		goto fill_tx_header_done;
	}

	hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);

	/*
	 * mp is the original data packet (does not include the
	 * Neptune transmit header).
	 */
	nmp = mp;
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
	    "mp $%p b_rptr $%p len %d",
	    mp, nmp->b_rptr, MBLKL(nmp)));
	/* copy ether_header from mblk to hdrs_buf */
	cursor = &hdrs_buf[0];
	tmp = sizeof (struct ether_vlan_header);
	while ((nmp != NULL) && (tmp > 0)) {
		size_t buflen;
		mblk_len = MBLKL(nmp);
		buflen = min((size_t)tmp, mblk_len);
		bcopy(nmp->b_rptr, cursor, buflen);
		cursor += buflen;
		tmp -= buflen;
		nmp = nmp->b_cont;
	}

	nmp = mp;
	mblk_len = MBLKL(nmp);
	ip_buf = NULL;
	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
5123859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
5136929Smisaki 	    "ether type 0x%x", eth_type, hdrp->value));

	if (eth_type < ETHERMTU) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
		    "value 0x%llx", hdrp->value));
		if (*(hdrs_buf + sizeof (struct ether_header))
		    == LLC_SNAP_SAP) {
			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
			    sizeof (struct ether_header) + 6)));
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
			    eth_type));
		} else {
			goto fill_tx_header_done;
		}
	} else if (eth_type == VLAN_ETHERTYPE) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

		eth_type = ntohs(((struct ether_vlan_header *)
		    hdrs_buf)->ether_type);
		is_vlan = B_TRUE;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
		    "value 0x%llx", hdrp->value));
	}

	if (!is_vlan) {
		eth_hdr_size = sizeof (struct ether_header);
	} else {
		eth_hdr_size = sizeof (struct ether_vlan_header);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
			ip_buf = nmp->b_rptr + eth_hdr_size;
			mblk_len -= eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
			if (mblk_len > (iph_len + sizeof (uint32_t))) {
				ip_buf = nmp->b_rptr;
				ip_buf += eth_hdr_size;
			} else {
				ip_buf = NULL;
			}

		}
		if (ip_buf == NULL) {
			hdrs_size = 0;
			((p_ether_header_t)hdrs_buf)->ether_type = 0;
			while ((nmp) && (hdrs_size <
			    sizeof (hdrs_buf))) {
				mblk_len = (size_t)nmp->b_wptr -
				    (size_t)nmp->b_rptr;
				if (mblk_len >=
				    (sizeof (hdrs_buf) - hdrs_size))
					mblk_len = sizeof (hdrs_buf) -
					    hdrs_size;
				bcopy(nmp->b_rptr,
				    &hdrs_buf[hdrs_size], mblk_len);
				hdrs_size += mblk_len;
				nmp = nmp->b_cont;
			}
			ip_buf = hdrs_buf;
			ip_buf += eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
		}

		ipproto = ip_buf[9];

		tmp = (uint64_t)iph_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
		tmp = (uint64_t)(eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
		    "tmp 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto, tmp));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
		    "value 0x%llx", hdrp->value));

		break;

	case ETHERTYPE_IPV6:
		hdrs_size = 0;
		((p_ether_header_t)hdrs_buf)->ether_type = 0;
		while ((nmp) && (hdrs_size <
		    sizeof (hdrs_buf))) {
			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
			if (mblk_len >=
			    (sizeof (hdrs_buf) - hdrs_size))
				mblk_len = sizeof (hdrs_buf) -
				    hdrs_size;
			bcopy(nmp->b_rptr,
			    &hdrs_buf[hdrs_size], mblk_len);
			hdrs_size += mblk_len;
			nmp = nmp->b_cont;
		}
		ip_buf = hdrs_buf;
		ip_buf += eth_hdr_size;

		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

		tmp = (eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		/* byte 6 is the next header protocol */
		ipproto = ip_buf[6];

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
		    "value 0x%llx", hdrp->value));

		break;

	default:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
		goto fill_tx_header_done;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
		if (l4_cksum) {
			hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
			hdrp->value |=
			    (((uint64_t)(start_offset >> 1)) <<
			    TX_PKT_HEADER_L4START_SHIFT);
			hdrp->value |=
			    (((uint64_t)(stuff_offset >> 1)) <<
			    TX_PKT_HEADER_L4STUFF_SHIFT);

			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
			    "value 0x%llx", hdrp->value));
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
		    "value 0x%llx", hdrp->value));
		break;

	case IPPROTO_UDP:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
		if (l4_cksum) {
			if (!nxge_cksum_offload) {
				uint16_t	*up;
				uint16_t	cksum;
				t_uscalar_t	stuff_len;

				/*
				 * The checksum field has the
				 * partial checksum.
				 * IP_CSUM() macro calls ip_cksum() which
				 * can add in the partial checksum.
				 */
				cksum = IP_CSUM(mp, start_offset, 0);
				stuff_len = stuff_offset;
				nmp = mp;
				mblk_len = MBLKL(nmp);
				while ((nmp != NULL) &&
				    (mblk_len < stuff_len)) {
					stuff_len -= mblk_len;
					nmp = nmp->b_cont;
				}
				ASSERT(nmp);
				up = (uint16_t *)(nmp->b_rptr + stuff_len);

				*up = cksum;
				hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    "use sw cksum "
				    "write to $%p cksum 0x%x content up 0x%x",
				    stuff_len,
				    up,
				    cksum,
				    *up));
			} else {
				/* Hardware will compute the full checksum */
				hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
				hdrp->value |=
				    (((uint64_t)(start_offset >> 1)) <<
				    TX_PKT_HEADER_L4START_SHIFT);
				hdrp->value |=
				    (((uint64_t)(stuff_offset >> 1)) <<
				    TX_PKT_HEADER_L4STUFF_SHIFT);

				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    " use partial checksum "
7116611Sml29623 				    "cksum 0x%x ",
7126611Sml29623 				    "value 0x%llx",
				    stuff_offset,
				    IP_CSUM(mp, start_offset, 0),
				    hdrp->value));
			}
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_tx_pkt_hdr_init: UDP"
		    "value 0x%llx", hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_fill_tx_hdr: pkt_len %d  "
	    "npads %d value 0x%llx", pkt_len, npads, hdrp->value));

	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}

/*ARGSUSED*/
p_mblk_t
nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
	p_mblk_t 		newmp = NULL;

	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "<== nxge_tx_pkt_header_reserve: allocb failed"));
		return (NULL);
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_header_reserve: get new mp"));
	DB_TYPE(newmp) = M_DATA;
	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
	linkb(newmp, mp);
	newmp->b_rptr -= TX_PKT_HEADER_SIZE;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
	    "b_rptr $%p b_wptr $%p",
	    newmp->b_rptr, newmp->b_wptr));

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_header_reserve: use new mp"));

	return (newmp);
}

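/*
 * nxge_tx_pkt_nmblocks
 *
 *	Count the number of transmit descriptors a packet will need and
 *	return the total transfer length through tot_xfer_len_p.  While
 *	counting, the mblk chain may be massaged: blocks larger than the
 *	4K hardware transfer limit are split with dupb(), and when the
 *	chain would exceed the 15 gather pointers the hardware supports,
 *	the remainder is pulled up into one block with msgpullup().
 *	Returns 0 if an allocation fails.
 */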
int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
	uint_t 			nmblks;
	ssize_t			len;
	uint_t 			pkt_len;
	p_mblk_t 		nmp, bmp, tmp;
	uint8_t 		*b_wptr;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
	    "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

	nmp = mp;
	bmp = mp;
	nmblks = 0;
	pkt_len = 0;
	*tot_xfer_len_p = 0;

	while (nmp) {
		len = MBLKL(nmp);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len <= 0) {
			bmp = nmp;
			nmp = nmp->b_cont;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len (0) pkt_len %d nmblks %d",
			    pkt_len, nmblks));
			continue;
		}

		*tot_xfer_len_p += len;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len < nxge_bcopy_thresh) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (< thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			if (pkt_len == 0)
				nmblks++;
			pkt_len += len;
			if (pkt_len >= nxge_bcopy_thresh) {
				pkt_len = 0;
				len = 0;
				nmp = bmp;
			}
		} else {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (> thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			pkt_len = 0;
			nmblks++;
			/*
			 * Hardware limits the transfer length to 4K.
			 * If len is more than 4K, we need to break
			 * it up to at most 2 more blocks.
			 */
			if (len > TX_MAX_TRANSFER_LENGTH) {
				uint32_t	nsegs;

				nsegs = 1;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_nmblocks: "
				    "len %d pkt_len %d nmblks %d nsegs %d",
				    len, pkt_len, nmblks, nsegs));
				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
					++nsegs;
				}
				do {
					b_wptr = nmp->b_rptr +
					    TX_MAX_TRANSFER_LENGTH;
					nmp->b_wptr = b_wptr;
					if ((tmp = dupb(nmp)) == NULL) {
						return (0);
					}
					tmp->b_rptr = b_wptr;
					tmp->b_wptr = nmp->b_wptr;
					tmp->b_cont = nmp->b_cont;
					nmp->b_cont = tmp;
					nmblks++;
					if (--nsegs) {
						nmp = tmp;
					}
				} while (nsegs);
				nmp = tmp;
			}
		}

		/*
		 * Hardware limits the transmit gather pointers to 15.
		 */
		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
		    TX_MAX_GATHER_POINTERS) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: pull msg - "
			    "len %d pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			/* Pull all message blocks from b_cont */
			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
				return (0);
			}
			freemsg(nmp->b_cont);
			nmp->b_cont = tmp;
			pkt_len = 0;
		}
		bmp = nmp;
		nmp = nmp->b_cont;
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
	    "nmblks %d len %d tot_xfer_len %d",
	    mp->b_rptr, mp->b_wptr, nmblks,
	    MBLKL(mp), *tot_xfer_len_p));

	return (nmblks);
}

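/*
 * nxge_txdma_freemsg_list_add
 *
 *	Queue a transmit message on the ring's free list rather than
 *	calling freemsg() directly from the reclaim path.  The list is
 *	protected by freelock and is drained later, outside the ring
 *	lock, by nxge_txdma_freemsg_task().
 */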
static void
nxge_txdma_freemsg_list_add(p_tx_ring_t tx_ring_p, p_tx_msg_t msgp)
{
	MUTEX_ENTER(&tx_ring_p->freelock);
	if (tx_ring_p->tx_free_list_p != NULL)
		msgp->nextp = tx_ring_p->tx_free_list_p;
	tx_ring_p->tx_free_list_p = msgp;
	MUTEX_EXIT(&tx_ring_p->freelock);
}

/*
 * void
 * nxge_txdma_freemsg_task() -- walk the list of messages to be
 *	freed and free the messages.
 */
void
nxge_txdma_freemsg_task(p_tx_ring_t tx_ring_p)
{
	p_tx_msg_t	msgp, nextp;

	if (tx_ring_p->tx_free_list_p != NULL) {
		MUTEX_ENTER(&tx_ring_p->freelock);
		msgp = tx_ring_p->tx_free_list_p;
		tx_ring_p->tx_free_list_p = (p_tx_msg_t)NULL;
		MUTEX_EXIT(&tx_ring_p->freelock);

		while (msgp != NULL) {
			nextp = msgp->nextp;
			if (msgp->tx_message != NULL) {
				freemsg(msgp->tx_message);
				msgp->tx_message = NULL;
			}
			msgp->nextp = NULL;
			msgp = nextp;
		}
	}
}

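/*
 * nxge_txdma_reclaim
 *
 *	Reclaim transmit descriptors that the hardware has finished with.
 *	The hardware head pointer (TX_RING_HDL) and its wrap bit are read
 *	and compared with the software read index; for each completed
 *	descriptor the DMA or DVMA mapping is released, statistics are
 *	updated, and the mblk (if any) is queued for deferred freeing.
 *	Returns B_TRUE if the ring has room for nmblks more descriptors
 *	(plus the TX_FULL_MARK reserve), B_FALSE otherwise.
 */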
boolean_t
nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
	boolean_t 		status = B_TRUE;
	p_nxge_dma_common_t	tx_desc_dma_p;
	nxge_dma_common_t	desc_area;
	p_tx_desc_t 		tx_desc_ring_vp;
	p_tx_desc_t 		tx_desc_p;
	p_tx_desc_t 		tx_desc_pp;
	tx_desc_t 		r_tx_desc;
	p_tx_msg_t 		tx_msg_ring;
	p_tx_msg_t 		tx_msg_p;
	npi_handle_t		handle;
	tx_ring_hdl_t		tx_head;
	uint32_t 		pkt_len;
	uint_t			tx_rd_index;
	uint16_t		head_index, tail_index;
	uint8_t			tdc;
	boolean_t		head_wrap, tail_wrap;
	p_nxge_tx_ring_stats_t	tdc_stats;
	int			rc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));

	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
	    (nmblks != 0));
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
	    tx_ring_p->descs_pending, nxge_reclaim_pending,
	    nmblks));
	if (!status) {
		tx_desc_dma_p = &tx_ring_p->tdc_desc;
		desc_area = tx_ring_p->tdc_desc;
		handle = NXGE_DEV_NPI_HANDLE(nxgep);
		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
		tx_desc_ring_vp =
		    (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
		tx_rd_index = tx_ring_p->rd_index;
		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
		tx_msg_ring = tx_ring_p->tx_msg_ring;
		tx_msg_p = &tx_msg_ring[tx_rd_index];
		tdc = tx_ring_p->tdc;
		tdc_stats = tx_ring_p->tdc_stats;
		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
		}

		tail_index = tx_ring_p->wr_index;
		tail_wrap = tx_ring_p->wr_index_wrap;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
		    "tail_index %d tail_wrap %d "
		    "tx_desc_p $%p ($%p) ",
		    tdc, tx_rd_index, tail_index, tail_wrap,
		    tx_desc_p, (*(uint64_t *)tx_desc_p)));
		/*
		 * Read the hardware maintained transmit head
		 * and wrap around bit.
		 */
		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
		head_index =  tx_head.bits.ldw.head;
		head_wrap = tx_head.bits.ldw.wrap;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: "
		    "tx_rd_index %d tail %d tail_wrap %d "
		    "head %d wrap %d",
		    tx_rd_index, tail_index, tail_wrap,
		    head_index, head_wrap));

		if (head_index == tail_index) {
			if (TXDMA_RING_EMPTY(head_index, head_wrap,
			    tail_index, tail_wrap) &&
			    (head_index == tx_rd_index)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: EMPTY"));
				return (B_TRUE);
			}

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking "
			    "if ring full"));
			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
			    tail_wrap)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: full"));
				return (B_FALSE);
			}
		}

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tx_rd_index and head_index"));

		tx_desc_pp = &r_tx_desc;
		while ((tx_rd_index != head_index) &&
		    (tx_ring_p->descs_pending != 0)) {

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking if pending"));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "descs_pending %d ",
			    tx_ring_p->descs_pending));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "(tx_desc_p $%p)",
			    tx_rd_index, head_index,
			    tx_desc_p));

			tx_desc_pp->value = tx_desc_p->value;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "tx_desc_p $%p (desc value 0x%llx) ",
			    tx_rd_index, head_index,
			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: dump desc:"));

			pkt_len = tx_desc_pp->bits.hdw.tr_len;
			tdc_stats->obytes += pkt_len;
			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: pkt_len %d "
			    "tdc channel %d opackets %d",
			    pkt_len,
			    tdc,
			    tdc_stats->opackets));

			if (tx_msg_p->flags.dma_type == USE_DVMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "tx_desc_p = $%p "
				    "tx_desc_pp = $%p "
				    "index = %d",
				    tx_desc_p,
				    tx_desc_pp,
				    tx_ring_p->rd_index));
				(void) dvma_unload(tx_msg_p->dvma_handle,
				    0, -1);
				tx_msg_p->dvma_handle = NULL;
				if (tx_ring_p->dvma_wr_index ==
				    tx_ring_p->dvma_wrap_mask) {
					tx_ring_p->dvma_wr_index = 0;
				} else {
					tx_ring_p->dvma_wr_index++;
				}
				tx_ring_p->dvma_pending--;
			} else if (tx_msg_p->flags.dma_type ==
			    USE_DMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: "
				    "USE DMA"));
				if (rc = ddi_dma_unbind_handle
				    (tx_msg_p->dma_handle)) {
					cmn_err(CE_WARN, "!nxge_reclaim: "
					    "ddi_dma_unbind_handle "
					    "failed. status %d", rc);
				}
			}
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: count packets"));

			/*
			 * count a chained packet only once.
			 */
			if (tx_msg_p->tx_message != NULL) {
				nxge_txdma_freemsg_list_add(tx_ring_p,
				    tx_msg_p);
			}

			tx_msg_p->flags.dma_type = USE_NONE;
			tx_rd_index = tx_ring_p->rd_index;
			tx_rd_index = (tx_rd_index + 1) &
			    tx_ring_p->tx_wrap_mask;
			tx_ring_p->rd_index = tx_rd_index;
			tx_ring_p->descs_pending--;
			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
			tx_msg_p = &tx_msg_ring[tx_rd_index];
		}

		status = (nmblks <= (tx_ring_p->tx_ring_size -
		    tx_ring_p->descs_pending -
		    TX_FULL_MARK));
		if (status) {
			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
		}
	} else {
		status = (nmblks <=
		    (tx_ring_p->tx_ring_size -
		    tx_ring_p->descs_pending -
		    TX_FULL_MARK));
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_reclaim status = 0x%08x", status));

	return (status);
}

/*
 * nxge_tx_intr
 *
 *	Process a TDC interrupt
 *
 * Arguments:
 * 	arg1	A Logical Device state Vector (LSV) data structure.
 * 	arg2	nxge_t *
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *	npi_intr_ldg_mgmt_set()
 *
 *	nxge_tx_err_evnts()
 *	nxge_txdma_reclaim()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	PIO_LDSV
 *
 * Context:
 *	Any domain
 */
uint_t
nxge_tx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t		nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t		ldgp;
	uint8_t			channel;
	uint32_t		vindex;
	npi_handle_t		handle;
	tx_cs_t			cs;
	p_tx_ring_t 		*tx_rings;
	p_tx_ring_t 		tx_ring_p;
	npi_status_t		rs = NPI_SUCCESS;
	uint_t 			serviced = DDI_INTR_UNCLAIMED;
	nxge_status_t 		status = NXGE_OK;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_tx_intr: nxgep $%p ldvp $%p",
		    nxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
	    nxgep, ldvp));

	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
11926713Sspeer 		    "<== nxge_tx_intr: interface not started or intialized"));
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * This interrupt handler is for a specific
	 * transmit dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* Get the control and status for this channel. */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
	    "channel %d",
	    nxgep, ldvp, channel));

	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
	vindex = ldvp->vdma_index;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
	    channel, vindex, rs));
	if (!rs && cs.bits.ldw.mk) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set)",
		    channel, vindex, rs));
		tx_rings = nxgep->tx_rings->rings;
		tx_ring_p = tx_rings[vindex];
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set, calling reclaim)",
		    channel, vindex, rs));

		MUTEX_ENTER(&tx_ring_p->lock);
		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
		MUTEX_EXIT(&tx_ring_p->lock);

		nxge_txdma_freemsg_task(tx_ring_p);

		mac_tx_update(nxgep->mach);
	}

	/*
	 * Process other transmit control and status.
	 * Check the ldv state.
	 */
	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
	/*
	 * Rearm this logical group if this is a single device
	 * group.
	 */
	if (ldgp->nldvs == 1) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr: rearm"));
		if (status == NXGE_OK) {
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
				    B_TRUE, ldgp->ldg_timer);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
	serviced = DDI_INTR_CLAIMED;
	return (serviced);
}

void
nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
}

void
nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));

	(void) nxge_txdma_stop(nxgep);

	(void) nxge_fixup_txdma_rings(nxgep);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_tx_mac_enable(nxgep);
	(void) nxge_txdma_hw_kick(nxgep);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
}

npi_status_t
nxge_txdma_channel_disable(
	nxge_t *nxge,
	int channel)
{
	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxge);
	npi_status_t	rs;
	tdmc_intr_dbg_t	intr_dbg;

	/*
	 * Stop the dma channel and wait for the stop-done.
	 * If the stop-done bit is not present, then force
	 * an error so TXC will stop.
	 * All channels bound to this port need to be stopped
	 * and reset after injecting an interrupt error.
	 */
	rs = npi_txdma_channel_disable(handle, channel);
	NXGE_DEBUG_MSG((nxge, MEM3_CTL,
	    "==> nxge_txdma_channel_disable(%d) "
	    "rs 0x%x", channel, rs));
	if (rs != NPI_SUCCESS) {
		/* Inject any error */
		intr_dbg.value = 0;
		intr_dbg.bits.ldw.nack_pref = 1;
		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
		    "==> nxge_txdma_channel_disable: "
		    "channel %d (stop failed 0x%x) "
		    "(inject err)", channel, rs));
		(void) npi_txdma_inj_int_error_set(
		    handle, channel, &intr_dbg);
		rs = npi_txdma_channel_disable(handle, channel);
		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
		    "==> nxge_txdma_channel_disable: "
		    "channel %d (stop again 0x%x) "
		    "(after inject err)",
		    channel, rs));
13236495Sspeer 	}
13246495Sspeer 
13256495Sspeer 	return (rs);
13266495Sspeer }
13276495Sspeer 
13286495Sspeer /*
13296495Sspeer  * nxge_txdma_hw_mode
13306495Sspeer  *
13316495Sspeer  *	Toggle all TDCs on (enable) or off (disable).
13326495Sspeer  *
13336495Sspeer  * Arguments:
13346495Sspeer  * 	nxgep
13356495Sspeer  * 	enable	Enable or disable a TDC.
13366495Sspeer  *
13376495Sspeer  * Notes:
13386495Sspeer  *
13396495Sspeer  * NPI/NXGE function calls:
13406495Sspeer  *	npi_txdma_channel_enable(TX_CS)
13416495Sspeer  *	npi_txdma_channel_disable(TX_CS)
13426495Sspeer  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
13436495Sspeer  *
13446495Sspeer  * Registers accessed:
13456495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
13466495Sspeer  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
13476495Sspeer  *
13486495Sspeer  * Context:
13496495Sspeer  *	Any domain
13506495Sspeer  */
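/*
 * Typical usage, as in nxge_txdma_stop_start() above:
 *
 *	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
 *
 * Only the TDCs present in nxgep->tx_set.owned.map are touched;
 * channel <tdc> is considered owned when ((1 << tdc) & set->owned.map)
 * is non-zero.
 */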
13513859Sml29623 nxge_status_t
13523859Sml29623 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
13533859Sml29623 {
13546495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
13556495Sspeer 
13566495Sspeer 	npi_handle_t	handle;
13576495Sspeer 	nxge_status_t	status;
13586495Sspeer 	npi_status_t	rs = NPI_SUCCESS;
13596495Sspeer 	int		tdc;
13603859Sml29623 
13613859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13626929Smisaki 	    "==> nxge_txdma_hw_mode: enable mode %d", enable));
13633859Sml29623 
13643859Sml29623 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
13653859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
13666929Smisaki 		    "<== nxge_txdma_hw_mode: not initialized"));
13673859Sml29623 		return (NXGE_ERROR);
13683859Sml29623 	}
13693859Sml29623 
13706495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
13713859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
13726495Sspeer 		    "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
13733859Sml29623 		return (NXGE_ERROR);
13743859Sml29623 	}
13753859Sml29623 
13766495Sspeer 	/* Enable or disable all of the TDCs owned by us. */
13773859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
13786495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
13796495Sspeer 		if ((1 << tdc) & set->owned.map) {
13806495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
13816495Sspeer 			if (ring) {
13826495Sspeer 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13836495Sspeer 				    "==> nxge_txdma_hw_mode: channel %d", tdc));
13846495Sspeer 				if (enable) {
13856495Sspeer 					rs = npi_txdma_channel_enable
13866495Sspeer 					    (handle, tdc);
13873859Sml29623 					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13886495Sspeer 					    "==> nxge_txdma_hw_mode: "
13896495Sspeer 					    "channel %d (enable) rs 0x%x",
13906495Sspeer 					    tdc, rs));
13916495Sspeer 				} else {
13926495Sspeer 					rs = nxge_txdma_channel_disable
13936495Sspeer 					    (nxgep, tdc);
13943859Sml29623 				}
13953859Sml29623 			}
13963859Sml29623 		}
13973859Sml29623 	}
13983859Sml29623 
13993859Sml29623 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
14003859Sml29623 
14013859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
14026929Smisaki 	    "<== nxge_txdma_hw_mode: status 0x%x", status));
14033859Sml29623 
14043859Sml29623 	return (status);
14053859Sml29623 }
14063859Sml29623 
14073859Sml29623 void
14083859Sml29623 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
14093859Sml29623 {
14103859Sml29623 	npi_handle_t		handle;
14113859Sml29623 
14123859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
14136929Smisaki 	    "==> nxge_txdma_enable_channel: channel %d", channel));
14143859Sml29623 
14153859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
14163859Sml29623 	/* enable the transmit dma channels */
14173859Sml29623 	(void) npi_txdma_channel_enable(handle, channel);
14183859Sml29623 
14193859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
14203859Sml29623 }
14213859Sml29623 
14223859Sml29623 void
14233859Sml29623 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
14243859Sml29623 {
14253859Sml29623 	npi_handle_t		handle;
14263859Sml29623 
14273859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
14286929Smisaki 	    "==> nxge_txdma_disable_channel: channel %d", channel));
14293859Sml29623 
14303859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
14313859Sml29623 	/* stop the transmit dma channels */
14323859Sml29623 	(void) npi_txdma_channel_disable(handle, channel);
14333859Sml29623 
14343859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
14353859Sml29623 }
14363859Sml29623 
14376495Sspeer /*
14386495Sspeer  * nxge_txdma_stop_inj_err
14396495Sspeer  *
14406495Sspeer  *	Stop a TDC.  If at first we don't succeed, inject an error.
14416495Sspeer  *
14426495Sspeer  * Arguments:
14436495Sspeer  * 	nxgep
14446495Sspeer  * 	channel		The channel to stop.
14456495Sspeer  *
14466495Sspeer  * Notes:
14476495Sspeer  *
14486495Sspeer  * NPI/NXGE function calls:
14496495Sspeer  *	npi_txdma_channel_disable()
14506495Sspeer  *	npi_txdma_inj_int_error_set()
14516495Sspeer  * #if defined(NXGE_DEBUG)
14526495Sspeer  *	nxge_txdma_regs_dump_channels(nxgep);
14536495Sspeer  * #endif
14546495Sspeer  *
14556495Sspeer  * Registers accessed:
14566495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
14576495Sspeer  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
14586495Sspeer  *
14596495Sspeer  * Context:
14606495Sspeer  *	Any domain
14616495Sspeer  */
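/*
 * The disable sequence below is, roughly:
 *
 *	rs = npi_txdma_channel_disable(handle, channel);
 *	if (the stop-done bit was not observed) {
 *		set TDMC_INTR_DBG.nack_pref and retry the disable;
 *	}
 *
 * The injected error forces the channel to set its stop-done bit so
 * that TXC can be stopped cleanly.
 */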
14623859Sml29623 int
14633859Sml29623 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
14643859Sml29623 {
14653859Sml29623 	npi_handle_t		handle;
14663859Sml29623 	tdmc_intr_dbg_t		intr_dbg;
14673859Sml29623 	int			status;
14683859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
14693859Sml29623 
14703859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
14713859Sml29623 	/*
14723859Sml29623 	 * Stop the DMA channel and wait for the stop-done.
14733859Sml29623 	 * If the stop-done bit is not set, then force
14743859Sml29623 	 * an error.
14753859Sml29623 	 */
14763859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
14773859Sml29623 	rs = npi_txdma_channel_disable(handle, channel);
14783859Sml29623 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
14793859Sml29623 	if (status == NXGE_OK) {
14803859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
14816929Smisaki 		    "<== nxge_txdma_stop_inj_err (channel %d): "
14826929Smisaki 		    "stopped OK", channel));
14833859Sml29623 		return (status);
14843859Sml29623 	}
14853859Sml29623 
14863859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
14876929Smisaki 	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
14886929Smisaki 	    "injecting error", channel, rs));
14893859Sml29623 	/* Inject any error */
14903859Sml29623 	intr_dbg.value = 0;
14913859Sml29623 	intr_dbg.bits.ldw.nack_pref = 1;
14923859Sml29623 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
14933859Sml29623 
14943859Sml29623 	/* Stop done bit will be set as a result of error injection */
14953859Sml29623 	rs = npi_txdma_channel_disable(handle, channel);
14963859Sml29623 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
14973859Sml29623 	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
14983859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
14996929Smisaki 		    "<== nxge_txdma_stop_inj_err (channel %d): "
15006929Smisaki 		    "stopped OK ", channel));
15013859Sml29623 		return (status);
15023859Sml29623 	}
15033859Sml29623 
15043859Sml29623 #if	defined(NXGE_DEBUG)
15053859Sml29623 	nxge_txdma_regs_dump_channels(nxgep);
15063859Sml29623 #endif
15073859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
15086929Smisaki 	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
15096929Smisaki 	    " (injected error but still not stopped)", channel, rs));
15103859Sml29623 
15113859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
15123859Sml29623 	return (status);
15133859Sml29623 }
15143859Sml29623 
15153859Sml29623 /*ARGSUSED*/
15163859Sml29623 void
15173859Sml29623 nxge_fixup_txdma_rings(p_nxge_t nxgep)
15183859Sml29623 {
15196495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
15206495Sspeer 	int tdc;
15213859Sml29623 
15223859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
15233859Sml29623 
15246495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
15256495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15266495Sspeer 		    "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
15273859Sml29623 		return;
15283859Sml29623 	}
15293859Sml29623 
15306495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
15316495Sspeer 		if ((1 << tdc) & set->owned.map) {
15326495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
15336495Sspeer 			if (ring) {
15346495Sspeer 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
15356495Sspeer 				    "==> nxge_fixup_txdma_rings: channel %d",
15366495Sspeer 				    tdc));
15376495Sspeer 				nxge_txdma_fixup_channel(nxgep, ring, tdc);
15386495Sspeer 			}
15396495Sspeer 		}
15403859Sml29623 	}
15413859Sml29623 
15423859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
15433859Sml29623 }
15443859Sml29623 
15453859Sml29623 /*ARGSUSED*/
15463859Sml29623 void
15473859Sml29623 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
15483859Sml29623 {
15493859Sml29623 	p_tx_ring_t	ring_p;
15503859Sml29623 
15513859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
15523859Sml29623 	ring_p = nxge_txdma_get_ring(nxgep, channel);
15533859Sml29623 	if (ring_p == NULL) {
15543859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
15553859Sml29623 		return;
15563859Sml29623 	}
15573859Sml29623 
15583859Sml29623 	if (ring_p->tdc != channel) {
15593859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15606929Smisaki 		    "<== nxge_txdma_fix_channel: channel not matched "
15616929Smisaki 		    "ring tdc %d passed channel %d",
15626929Smisaki 		    ring_p->tdc, channel));
15633859Sml29623 		return;
15643859Sml29623 	}
15653859Sml29623 
15663859Sml29623 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
15673859Sml29623 
15683859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
15693859Sml29623 }
15703859Sml29623 
15713859Sml29623 /*ARGSUSED*/
15723859Sml29623 void
15733859Sml29623 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
15743859Sml29623 {
15753859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
15763859Sml29623 
15773859Sml29623 	if (ring_p == NULL) {
15783859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15796929Smisaki 		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
15803859Sml29623 		return;
15813859Sml29623 	}
15823859Sml29623 
15833859Sml29623 	if (ring_p->tdc != channel) {
15843859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15856929Smisaki 		    "<== nxge_txdma_fixup_channel: channel not matched "
15866929Smisaki 		    "ring tdc %d passed channel %d",
15876929Smisaki 		    ring_p->tdc, channel));
15883859Sml29623 		return;
15893859Sml29623 	}
15903859Sml29623 
15913859Sml29623 	MUTEX_ENTER(&ring_p->lock);
15923859Sml29623 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
15933859Sml29623 	ring_p->rd_index = 0;
15943859Sml29623 	ring_p->wr_index = 0;
15953859Sml29623 	ring_p->ring_head.value = 0;
15963859Sml29623 	ring_p->ring_kick_tail.value = 0;
15973859Sml29623 	ring_p->descs_pending = 0;
15983859Sml29623 	MUTEX_EXIT(&ring_p->lock);
15997906SMichael.Speer@Sun.COM 	nxge_txdma_freemsg_task(ring_p);
16003859Sml29623 
16013859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
16023859Sml29623 }
16033859Sml29623 
16043859Sml29623 /*ARGSUSED*/
16053859Sml29623 void
16063859Sml29623 nxge_txdma_hw_kick(p_nxge_t nxgep)
16073859Sml29623 {
16086495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
16096495Sspeer 	int tdc;
16103859Sml29623 
16113859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
16123859Sml29623 
16136495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
16143859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
16156495Sspeer 		    "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
16163859Sml29623 		return;
16173859Sml29623 	}
16183859Sml29623 
16196495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
16206495Sspeer 		if ((1 << tdc) & set->owned.map) {
16216495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
16226495Sspeer 			if (ring) {
16236495Sspeer 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
16246495Sspeer 				    "==> nxge_txdma_hw_kick: channel %d", tdc));
16256495Sspeer 				nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
16266495Sspeer 			}
16276495Sspeer 		}
16283859Sml29623 	}
16293859Sml29623 
16303859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
16313859Sml29623 }
16323859Sml29623 
16333859Sml29623 /*ARGSUSED*/
16343859Sml29623 void
16353859Sml29623 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
16363859Sml29623 {
16373859Sml29623 	p_tx_ring_t	ring_p;
16383859Sml29623 
16393859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
16403859Sml29623 
16413859Sml29623 	ring_p = nxge_txdma_get_ring(nxgep, channel);
16423859Sml29623 	if (ring_p == NULL) {
16433859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
16446929Smisaki 		    "<== nxge_txdma_kick_channel: NULL ring pointer"));
16453859Sml29623 		return;
16463859Sml29623 	}
16473859Sml29623 
16483859Sml29623 	if (ring_p->tdc != channel) {
16493859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
16506929Smisaki 		    "<== nxge_txdma_kick_channel: channel not matched "
16516929Smisaki 		    "ring tdc %d passed channel %d",
16526929Smisaki 		    ring_p->tdc, channel));
16533859Sml29623 		return;
16543859Sml29623 	}
16553859Sml29623 
16563859Sml29623 	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
16573859Sml29623 
16583859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
16593859Sml29623 }
16603859Sml29623 
16613859Sml29623 /*ARGSUSED*/
16623859Sml29623 void
16633859Sml29623 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
16643859Sml29623 {
16653859Sml29623 
16663859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
16673859Sml29623 
16683859Sml29623 	if (ring_p == NULL) {
16693859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
16706929Smisaki 		    "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
16713859Sml29623 		return;
16723859Sml29623 	}
16733859Sml29623 
16743859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
16753859Sml29623 }
16763859Sml29623 
16776495Sspeer /*
16786495Sspeer  * nxge_check_tx_hang
16796495Sspeer  *
16806495Sspeer  *	Check the state of all TDCs belonging to nxgep.
16816495Sspeer  *
16826495Sspeer  * Arguments:
16836495Sspeer  * 	nxgep
16846495Sspeer  *
16856495Sspeer  * Notes:
16866495Sspeer  *	Called by nxge_hw.c:nxge_check_hw_state().
16876495Sspeer  *
16886495Sspeer  * NPI/NXGE function calls:
16896495Sspeer  *
16906495Sspeer  * Registers accessed:
16916495Sspeer  *
16926495Sspeer  * Context:
16936495Sspeer  *	Any domain
16946495Sspeer  */
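/*
 * This check only runs once the hardware is initialized and the MAC
 * has been started; otherwise it returns immediately.  When
 * nxge_txdma_hung() reports a hung TDC, nxge_fixup_hung_txdma_rings()
 * is called to reclaim and disable the affected channels.
 */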
16953859Sml29623 /*ARGSUSED*/
16963859Sml29623 void
16973859Sml29623 nxge_check_tx_hang(p_nxge_t nxgep)
16983859Sml29623 {
16993859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
17003859Sml29623 
17016713Sspeer 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
17026713Sspeer 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
17036713Sspeer 		goto nxge_check_tx_hang_exit;
17046713Sspeer 	}
17056713Sspeer 
17063859Sml29623 	/*
17073859Sml29623 	 * Needs inputs from hardware registers:
17083859Sml29623 	 *	head index has not moved since the last timeout;
17093859Sml29623 	 *	packets not transmitted, or stuffed registers.
17103859Sml29623 	 */
17113859Sml29623 	if (nxge_txdma_hung(nxgep)) {
17123859Sml29623 		nxge_fixup_hung_txdma_rings(nxgep);
17133859Sml29623 	}
17146713Sspeer 
17156713Sspeer nxge_check_tx_hang_exit:
17163859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
17173859Sml29623 }
17183859Sml29623 
17196495Sspeer /*
17206495Sspeer  * nxge_txdma_hung
17216495Sspeer  *
17226495Sspeer  *	Determine whether any TDC owned by this port appears hung.
17236495Sspeer  *
17246495Sspeer  * Arguments:
17256495Sspeer  * 	nxgep
17286495Sspeer  *
17296495Sspeer  * Notes:
17306495Sspeer  *	Called by nxge_check_tx_hang()
17316495Sspeer  *
17326495Sspeer  * NPI/NXGE function calls:
17336495Sspeer  *	nxge_txdma_channel_hung()
17346495Sspeer  *
17356495Sspeer  * Registers accessed:
17366495Sspeer  *
17376495Sspeer  * Context:
17386495Sspeer  *	Any domain
17396495Sspeer  */
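/*
 * The scan below walks every possible TDC (0 .. NXGE_MAX_TDCS - 1),
 * considers only the channels in tx_set.owned.map, and, in an LDOMs
 * service domain, skips any TDC currently shared with a guest.  The
 * first channel that nxge_txdma_channel_hung() flags causes an
 * immediate B_TRUE return.
 */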
17403859Sml29623 int
17413859Sml29623 nxge_txdma_hung(p_nxge_t nxgep)
17423859Sml29623 {
17437812SMichael.Speer@Sun.COM 	nxge_grp_set_t	*set = &nxgep->tx_set;
17447812SMichael.Speer@Sun.COM 	int		tdc;
17457812SMichael.Speer@Sun.COM 	boolean_t	shared;
17463859Sml29623 
17473859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
17486495Sspeer 
17496495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
17503859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
17516495Sspeer 		    "<== nxge_txdma_hung: NULL ring pointer(s)"));
17523859Sml29623 		return (B_FALSE);
17533859Sml29623 	}
17543859Sml29623 
17556495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
17567812SMichael.Speer@Sun.COM 		/*
17577812SMichael.Speer@Sun.COM 		 * Grab the shared state of the TDC.
17587812SMichael.Speer@Sun.COM 		 */
17597812SMichael.Speer@Sun.COM 		if (isLDOMservice(nxgep)) {
17607812SMichael.Speer@Sun.COM 			nxge_hio_data_t *nhd =
17617812SMichael.Speer@Sun.COM 			    (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
17627812SMichael.Speer@Sun.COM 
17637812SMichael.Speer@Sun.COM 			MUTEX_ENTER(&nhd->lock);
17647812SMichael.Speer@Sun.COM 			shared = nxgep->tdc_is_shared[tdc];
17657812SMichael.Speer@Sun.COM 			MUTEX_EXIT(&nhd->lock);
17667812SMichael.Speer@Sun.COM 		} else {
17677812SMichael.Speer@Sun.COM 			shared = B_FALSE;
17687812SMichael.Speer@Sun.COM 		}
17697812SMichael.Speer@Sun.COM 
17707812SMichael.Speer@Sun.COM 		/*
17717812SMichael.Speer@Sun.COM 		 * Now check this TDC if we own it and it is not shared.
17727812SMichael.Speer@Sun.COM 		 */
17737812SMichael.Speer@Sun.COM 		if (((1 << tdc) & set->owned.map) && !shared) {
17746495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
17756495Sspeer 			if (ring) {
17766495Sspeer 				if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
17776495Sspeer 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
17786495Sspeer 					    "==> nxge_txdma_hung: TDC %d hung",
17796495Sspeer 					    tdc));
17806495Sspeer 					return (B_TRUE);
17816495Sspeer 				}
17826495Sspeer 			}
17833859Sml29623 		}
17843859Sml29623 	}
17853859Sml29623 
17863859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
17873859Sml29623 
17883859Sml29623 	return (B_FALSE);
17893859Sml29623 }
17903859Sml29623 
17916495Sspeer /*
17926495Sspeer  * nxge_txdma_channel_hung
17936495Sspeer  *
17946495Sspeer  *	Determine whether a single TDC appears hung.
17956495Sspeer  *
17966495Sspeer  * Arguments:
17976495Sspeer  * 	nxgep
17986495Sspeer  * 	ring		<channel>'s ring.
17996495Sspeer  * 	channel		The channel to check.
18006495Sspeer  *
18016495Sspeer  * Notes:
18026495Sspeer  *	Called by nxge_txdma.c:nxge_txdma_hung()
18036495Sspeer  *
18046495Sspeer  * NPI/NXGE function calls:
18056495Sspeer  *	npi_txdma_ring_head_get()
18066495Sspeer  *
18076495Sspeer  * Registers accessed:
18086495Sspeer  *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
18096495Sspeer  *
18106495Sspeer  * Context:
18116495Sspeer  *	Any domain
18126495Sspeer  */
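/*
 * A channel is considered hung when its ring is full: the hardware
 * head has caught up with the software tail while the two wrap bits
 * differ.  For illustration, assuming the usual wrap-bit convention:
 *
 *	head == tail && head_wrap == tail_wrap	ring is empty
 *	head == tail && head_wrap != tail_wrap	ring is full (hung here)
 *
 * An empty ring whose head also matches the software read index is
 * healthy and returns B_FALSE immediately.
 */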
18133859Sml29623 int
18143859Sml29623 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
18153859Sml29623 {
18163859Sml29623 	uint16_t		head_index, tail_index;
18173859Sml29623 	boolean_t		head_wrap, tail_wrap;
18183859Sml29623 	npi_handle_t		handle;
18193859Sml29623 	tx_ring_hdl_t		tx_head;
18203859Sml29623 	uint_t			tx_rd_index;
18213859Sml29623 
18223859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
18233859Sml29623 
18243859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
18253859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
18266929Smisaki 	    "==> nxge_txdma_channel_hung: channel %d", channel));
18273859Sml29623 	MUTEX_ENTER(&tx_ring_p->lock);
18283859Sml29623 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
18293859Sml29623 
18303859Sml29623 	tail_index = tx_ring_p->wr_index;
18313859Sml29623 	tail_wrap = tx_ring_p->wr_index_wrap;
18323859Sml29623 	tx_rd_index = tx_ring_p->rd_index;
18333859Sml29623 	MUTEX_EXIT(&tx_ring_p->lock);
18347906SMichael.Speer@Sun.COM 	nxge_txdma_freemsg_task(tx_ring_p);
18353859Sml29623 
18363859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
18376929Smisaki 	    "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
18386929Smisaki 	    "tail_index %d tail_wrap %d ",
18396929Smisaki 	    channel, tx_rd_index, tail_index, tail_wrap));
18403859Sml29623 	/*
18413859Sml29623 	 * Read the hardware maintained transmit head
18423859Sml29623 	 * and wrap around bit.
18433859Sml29623 	 */
18443859Sml29623 	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
18453859Sml29623 	head_index =  tx_head.bits.ldw.head;
18463859Sml29623 	head_wrap = tx_head.bits.ldw.wrap;
18473859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
18486929Smisaki 	    "==> nxge_txdma_channel_hung: "
18496929Smisaki 	    "tx_rd_index %d tail %d tail_wrap %d "
18506929Smisaki 	    "head %d wrap %d",
18516929Smisaki 	    tx_rd_index, tail_index, tail_wrap,
18526929Smisaki 	    head_index, head_wrap));
18533859Sml29623 
18543859Sml29623 	if (TXDMA_RING_EMPTY(head_index, head_wrap,
18556929Smisaki 	    tail_index, tail_wrap) &&
18566929Smisaki 	    (head_index == tx_rd_index)) {
18573859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
18586929Smisaki 		    "==> nxge_txdma_channel_hung: EMPTY"));
18593859Sml29623 		return (B_FALSE);
18603859Sml29623 	}
18613859Sml29623 
18623859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
18636929Smisaki 	    "==> nxge_txdma_channel_hung: Checking if ring full"));
18643859Sml29623 	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
18656929Smisaki 	    tail_wrap)) {
18663859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
18676929Smisaki 		    "==> nxge_txdma_channel_hung: full"));
18683859Sml29623 		return (B_TRUE);
18693859Sml29623 	}
18703859Sml29623 
18713859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
18723859Sml29623 
18733859Sml29623 	return (B_FALSE);
18743859Sml29623 }
18753859Sml29623 
18766495Sspeer /*
18776495Sspeer  * nxge_fixup_hung_txdma_rings
18786495Sspeer  *
18796495Sspeer  *	Disable a TDC.
18806495Sspeer  *	'Fix' every hung TDC owned by this port.
18816495Sspeer  *
18826495Sspeer  * Arguments:
18836495Sspeer  * 	nxgep
18866495Sspeer  * Notes:
18876495Sspeer  *	Called by nxge_check_tx_hang()
18886495Sspeer  *
18896495Sspeer  * NPI/NXGE function calls:
18906495Sspeer  *	nxge_txdma_fixup_hung_channel()
18916495Sspeer  *
18926495Sspeer  * Registers accessed:
18936495Sspeer  *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
18946495Sspeer  *
18956495Sspeer  * Context:
18966495Sspeer  *	Any domain
18976495Sspeer  */
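/*
 * Each owned ring is handed to nxge_txdma_fixup_hung_channel(), which
 * does the per-channel reclaim and disable work.
 */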
18983859Sml29623 /*ARGSUSED*/
18993859Sml29623 void
19003859Sml29623 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
19013859Sml29623 {
19026495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
19036495Sspeer 	int tdc;
19043859Sml29623 
19053859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
19066495Sspeer 
19076495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
19083859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
19096495Sspeer 		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
19103859Sml29623 		return;
19113859Sml29623 	}
19123859Sml29623 
19136495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
19146495Sspeer 		if ((1 << tdc) & set->owned.map) {
19156495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
19166495Sspeer 			if (ring) {
19176495Sspeer 				nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
19186495Sspeer 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
19196495Sspeer 				    "==> nxge_fixup_hung_txdma_rings: TDC %d",
19206495Sspeer 				    tdc));
19216495Sspeer 			}
19226495Sspeer 		}
19233859Sml29623 	}
19243859Sml29623 
19253859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
19263859Sml29623 }
19273859Sml29623 
19286495Sspeer /*
19296495Sspeer  * nxge_txdma_fixup_hung_channel
19306495Sspeer  *
19316495Sspeer  *	'Fix' a hung TDC.
19326495Sspeer  *
19336495Sspeer  * Arguments:
19346495Sspeer  * 	nxgep
19356495Sspeer  * 	channel		The channel to fix.
19366495Sspeer  *
19376495Sspeer  * Notes:
19386495Sspeer  *	Called by nxge_fixup_hung_txdma_rings()
19396495Sspeer  *
19406495Sspeer  *	1. Reclaim the TDC.
19416495Sspeer  *	2. Disable the TDC.
19426495Sspeer  *
19436495Sspeer  * NPI/NXGE function calls:
19446495Sspeer  *	nxge_txdma_reclaim()
19456495Sspeer  *	npi_txdma_channel_disable(TX_CS)
19466495Sspeer  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
19476495Sspeer  *
19486495Sspeer  * Registers accessed:
19496495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
19506495Sspeer  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
19516495Sspeer  *
19526495Sspeer  * Context:
19536495Sspeer  *	Any domain
19546495Sspeer  */
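/*
 * In nxge_txdma_fixup_hung_channel(), if the first
 * npi_txdma_channel_disable() call does not report
 * NPI_TXDMA_STOP_FAILED, the channel stopped cleanly and nothing more
 * is done.  Otherwise a nack_pref error is injected through
 * TDMC_INTR_DBG and the disable is retried, mirroring
 * nxge_txdma_stop_inj_err() above.
 */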
19553859Sml29623 /*ARGSUSED*/
19563859Sml29623 void
19573859Sml29623 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
19583859Sml29623 {
19593859Sml29623 	p_tx_ring_t	ring_p;
19603859Sml29623 
19613859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
19623859Sml29623 	ring_p = nxge_txdma_get_ring(nxgep, channel);
19633859Sml29623 	if (ring_p == NULL) {
19643859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
19656929Smisaki 		    "<== nxge_txdma_fix_hung_channel"));
19663859Sml29623 		return;
19673859Sml29623 	}
19683859Sml29623 
19693859Sml29623 	if (ring_p->tdc != channel) {
19703859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
19716929Smisaki 		    "<== nxge_txdma_fix_hung_channel: channel not matched "
19726929Smisaki 		    "ring tdc %d passed channel %d",
19736929Smisaki 		    ring_p->tdc, channel));
19743859Sml29623 		return;
19753859Sml29623 	}
19763859Sml29623 
19773859Sml29623 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
19783859Sml29623 
19793859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
19803859Sml29623 }
19813859Sml29623 
19823859Sml29623 /*ARGSUSED*/
19833859Sml29623 void
19843859Sml29623 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
19853859Sml29623 	uint16_t channel)
19863859Sml29623 {
19873859Sml29623 	npi_handle_t		handle;
19883859Sml29623 	tdmc_intr_dbg_t		intr_dbg;
19893859Sml29623 	int			status = NXGE_OK;
19903859Sml29623 
19913859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
19923859Sml29623 
19933859Sml29623 	if (ring_p == NULL) {
19943859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
19956929Smisaki 		    "<== nxge_txdma_fixup_hung_channel: NULL ring pointer"));
19963859Sml29623 		return;
19973859Sml29623 	}
19983859Sml29623 
19993859Sml29623 	if (ring_p->tdc != channel) {
20003859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
20016929Smisaki 		    "<== nxge_txdma_fixup_hung_channel: channel "
20026929Smisaki 		    "not matched "
20036929Smisaki 		    "ring tdc %d passed channel %d",
20046929Smisaki 		    ring_p->tdc, channel));
20053859Sml29623 		return;
20063859Sml29623 	}
20073859Sml29623 
20083859Sml29623 	/* Reclaim descriptors */
20093859Sml29623 	MUTEX_ENTER(&ring_p->lock);
20103859Sml29623 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
20113859Sml29623 	MUTEX_EXIT(&ring_p->lock);
20123859Sml29623 
20137906SMichael.Speer@Sun.COM 	nxge_txdma_freemsg_task(ring_p);
20147906SMichael.Speer@Sun.COM 
20153859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
20163859Sml29623 	/*
20173859Sml29623 	 * Stop the DMA channel and wait for the stop-done.
20183859Sml29623 	 * If the stop-done bit is not set, then force
20193859Sml29623 	 * an error.
20203859Sml29623 	 */
20213859Sml29623 	status = npi_txdma_channel_disable(handle, channel);
20223859Sml29623 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
20233859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
20246929Smisaki 		    "<== nxge_txdma_fixup_hung_channel: stopped OK "
20256929Smisaki 		    "ring tdc %d passed channel %d",
20266929Smisaki 		    ring_p->tdc, channel));
20273859Sml29623 		return;
20283859Sml29623 	}
20293859Sml29623 
20303859Sml29623 	/* Inject any error */
20313859Sml29623 	intr_dbg.value = 0;
20323859Sml29623 	intr_dbg.bits.ldw.nack_pref = 1;
20333859Sml29623 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
20343859Sml29623 
20353859Sml29623 	/* Stop done bit will be set as a result of error injection */
20363859Sml29623 	status = npi_txdma_channel_disable(handle, channel);
20373859Sml29623 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
20383859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
20396929Smisaki 		    "<== nxge_txdma_fixup_hung_channel: stopped again "
20406929Smisaki 		    "ring tdc %d passed channel %d",
20416929Smisaki 		    ring_p->tdc, channel));
20423859Sml29623 		return;
20433859Sml29623 	}
20443859Sml29623 
20453859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
20466929Smisaki 	    "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
20476929Smisaki 	    "ring tdc %d passed channel %d",
20486929Smisaki 	    ring_p->tdc, channel));
20493859Sml29623 
20503859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
20513859Sml29623 }
20523859Sml29623 
20533859Sml29623 /*ARGSUSED*/
20543859Sml29623 void
20553859Sml29623 nxge_reclaim_rings(p_nxge_t nxgep)
20563859Sml29623 {
20576495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
20586495Sspeer 	int tdc;
20596495Sspeer 
20606495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
20616495Sspeer 
20626495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
20633859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
20646495Sspeer 		    "<== nxge_reclaim_rings: NULL ring pointer(s)"));
20653859Sml29623 		return;
20663859Sml29623 	}
20673859Sml29623 
20686495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
20696495Sspeer 		if ((1 << tdc) & set->owned.map) {
20706495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
20716495Sspeer 			if (ring) {
20726495Sspeer 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
20736495Sspeer 				    "==> nxge_reclaim_rings: TDC %d", tdc));
20746495Sspeer 				MUTEX_ENTER(&ring->lock);
20756495Sspeer 				(void) nxge_txdma_reclaim(nxgep, ring, tdc);
20766495Sspeer 				MUTEX_EXIT(&ring->lock);
20777906SMichael.Speer@Sun.COM 
20787906SMichael.Speer@Sun.COM 				nxge_txdma_freemsg_task(ring);
20796495Sspeer 			}
20806495Sspeer 		}
20813859Sml29623 	}
20823859Sml29623 
20833859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
20843859Sml29623 }
20853859Sml29623 
20863859Sml29623 void
20873859Sml29623 nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
20883859Sml29623 {
20896495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
20906495Sspeer 	npi_handle_t handle;
20916495Sspeer 	int tdc;
20926495Sspeer 
20936495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
20943859Sml29623 
20953859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
20966495Sspeer 
20976495Sspeer 	if (!isLDOMguest(nxgep)) {
20986495Sspeer 		(void) npi_txdma_dump_fzc_regs(handle);
20996495Sspeer 
21006495Sspeer 		/* Dump TXC registers. */
21016495Sspeer 		(void) npi_txc_dump_fzc_regs(handle);
21026495Sspeer 		(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
21033859Sml29623 	}
21043859Sml29623 
21056495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
21063859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
21076495Sspeer 		    "<== nxge_txdma_regs_dump_channels: NULL ring pointer(s)"));
21083859Sml29623 		return;
21093859Sml29623 	}
21103859Sml29623 
21116495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
21126495Sspeer 		if ((1 << tdc) & set->owned.map) {
21136495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
21146495Sspeer 			if (ring) {
21156495Sspeer 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
21166495Sspeer 				    "==> nxge_txdma_regs_dump_channels: "
21176495Sspeer 				    "TDC %d", tdc));
21186495Sspeer 				(void) npi_txdma_dump_tdc_regs(handle, tdc);
21196495Sspeer 
21206495Sspeer 				/* Dump TXC registers, if able to. */
21216495Sspeer 				if (!isLDOMguest(nxgep)) {
21226495Sspeer 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
21236495Sspeer 					    "==> nxge_txdma_regs_dump_channels:"
21246495Sspeer 					    " FZC TDC %d", tdc));
21256495Sspeer 					(void) npi_txc_dump_tdc_fzc_regs
21266495Sspeer 					    (handle, tdc);
21276495Sspeer 				}
21286495Sspeer 				nxge_txdma_regs_dump(nxgep, tdc);
21296495Sspeer 			}
21306495Sspeer 		}
21313859Sml29623 	}
21323859Sml29623 
21333859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
21343859Sml29623 }
21353859Sml29623 
21363859Sml29623 void
21373859Sml29623 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
21383859Sml29623 {
21393859Sml29623 	npi_handle_t		handle;
21403859Sml29623 	tx_ring_hdl_t 		hdl;
21413859Sml29623 	tx_ring_kick_t 		kick;
21423859Sml29623 	tx_cs_t 		cs;
21433859Sml29623 	txc_control_t		control;
21443859Sml29623 	uint32_t		bitmap = 0;
21453859Sml29623 	uint32_t		burst = 0;
21463859Sml29623 	uint32_t		bytes = 0;
21473859Sml29623 	dma_log_page_t		cfg;
21483859Sml29623 
21493859Sml29623 	printf("\n\tfunc # %d tdc %d ",
21506929Smisaki 	    nxgep->function_num, channel);
21513859Sml29623 	cfg.page_num = 0;
21523859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
21533859Sml29623 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
21543859Sml29623 	printf("\n\tlog page func %d valid page 0 %d",
21556929Smisaki 	    cfg.func_num, cfg.valid);
21563859Sml29623 	cfg.page_num = 1;
21573859Sml29623 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
21583859Sml29623 	printf("\n\tlog page func %d valid page 1 %d",
21596929Smisaki 	    cfg.func_num, cfg.valid);
21603859Sml29623 
21613859Sml29623 	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
21623859Sml29623 	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
21633859Sml29623 	printf("\n\thead value is 0x%0llx",
21646929Smisaki 	    (long long)hdl.value);
21653859Sml29623 	printf("\n\thead index %d", hdl.bits.ldw.head);
21663859Sml29623 	printf("\n\tkick value is 0x%0llx",
21676929Smisaki 	    (long long)kick.value);
21683859Sml29623 	printf("\n\ttail index %d\n", kick.bits.ldw.tail);
21693859Sml29623 
21703859Sml29623 	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
21713859Sml29623 	printf("\n\tControl status is 0x%0llx", (long long)cs.value);
21723859Sml29623 	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
21733859Sml29623 
21743859Sml29623 	(void) npi_txc_control(handle, OP_GET, &control);
21753859Sml29623 	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
21763859Sml29623 	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
21773859Sml29623 	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
21783859Sml29623 
21793859Sml29623 	printf("\n\tTXC port control 0x%0llx",
21806929Smisaki 	    (long long)control.value);
21813859Sml29623 	printf("\n\tTXC port bitmap 0x%x", bitmap);
21823859Sml29623 	printf("\n\tTXC max burst %d", burst);
21833859Sml29623 	printf("\n\tTXC bytes xmt %d\n", bytes);
21843859Sml29623 
21853859Sml29623 	{
21863859Sml29623 		ipp_status_t status;
21873859Sml29623 
21883859Sml29623 		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
21895125Sjoycey #if defined(__i386)
21905125Sjoycey 		printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value);
21915125Sjoycey #else
21923859Sml29623 		printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value);
21935125Sjoycey #endif
21943859Sml29623 	}
21953859Sml29623 }
21963859Sml29623 
21973859Sml29623 /*
21986495Sspeer  * nxge_tdc_hvio_setup
21996495Sspeer  *
22016495Sspeer  *	Record the HV base I/O addresses and sizes of this TDC's transmit
22016495Sspeer  *	data and control buffers (sun4v NIU logical-page workaround).
22016495Sspeer  *
22026495Sspeer  * Arguments:
22036495Sspeer  * 	nxgep
22046495Sspeer  * 	channel	The channel to map.
22056495Sspeer  *
22066495Sspeer  * Notes:
22076495Sspeer  *
22086495Sspeer  * NPI/NXGE function calls:
22096495Sspeer  *	na
22106495Sspeer  *
22116495Sspeer  * Context:
22126495Sspeer  *	Service domain?
22133859Sml29623  */
22146495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
22156495Sspeer static void
22166495Sspeer nxge_tdc_hvio_setup(
22176495Sspeer 	nxge_t *nxgep, int channel)
22183859Sml29623 {
22196495Sspeer 	nxge_dma_common_t	*data;
22206495Sspeer 	nxge_dma_common_t	*control;
22216495Sspeer 	tx_ring_t 		*ring;
22226495Sspeer 
22236495Sspeer 	ring = nxgep->tx_rings->rings[channel];
22246495Sspeer 	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
22256495Sspeer 
22266495Sspeer 	ring->hv_set = B_FALSE;
22276495Sspeer 
22286495Sspeer 	ring->hv_tx_buf_base_ioaddr_pp =
22296495Sspeer 	    (uint64_t)data->orig_ioaddr_pp;
22306495Sspeer 	ring->hv_tx_buf_ioaddr_size =
22316495Sspeer 	    (uint64_t)data->orig_alength;
22326495Sspeer 
22336495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
22346929Smisaki 	    "hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
22356929Smisaki 	    "orig vatopa base io $%p orig_len 0x%llx (%d)",
22366929Smisaki 	    ring->hv_tx_buf_base_ioaddr_pp,
22376929Smisaki 	    ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
22386929Smisaki 	    data->ioaddr_pp, data->orig_vatopa,
22396929Smisaki 	    data->orig_alength, data->orig_alength));
22406495Sspeer 
22416495Sspeer 	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
22426495Sspeer 
22436495Sspeer 	ring->hv_tx_cntl_base_ioaddr_pp =
22446495Sspeer 	    (uint64_t)control->orig_ioaddr_pp;
22456495Sspeer 	ring->hv_tx_cntl_ioaddr_size =
22466495Sspeer 	    (uint64_t)control->orig_alength;
22476495Sspeer 
22486495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
22496929Smisaki 	    "hv cntl base io $%p orig ioaddr_pp ($%p) "
22506929Smisaki 	    "orig vatopa ($%p) size 0x%llx (%d 0x%x)",
22516929Smisaki 	    ring->hv_tx_cntl_base_ioaddr_pp,
22526929Smisaki 	    control->orig_ioaddr_pp, control->orig_vatopa,
22536929Smisaki 	    ring->hv_tx_cntl_ioaddr_size,
22546929Smisaki 	    control->orig_alength, control->orig_alength));
22556495Sspeer }
22563859Sml29623 #endif
22573859Sml29623 
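/*
 * nxge_map_txdma
 *
 *	Allocate (if needed) and map the buffers, descriptor ring and
 *	mailbox of a single TDC, then record its statistics pointer.
 *	On sun4v, the hypervisor setup above is also performed.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map.
 *
 * Context:
 *	Any domain
 */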
22586495Sspeer static nxge_status_t
22596495Sspeer nxge_map_txdma(p_nxge_t nxgep, int channel)
22606495Sspeer {
22616495Sspeer 	nxge_dma_common_t	**pData;
22626495Sspeer 	nxge_dma_common_t	**pControl;
22636495Sspeer 	tx_ring_t 		**pRing, *ring;
22646495Sspeer 	tx_mbox_t		**mailbox;
22656495Sspeer 	uint32_t		num_chunks;
22666495Sspeer 
22676495Sspeer 	nxge_status_t		status = NXGE_OK;
22686495Sspeer 
22696495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
22706495Sspeer 
22716495Sspeer 	if (!nxgep->tx_cntl_pool_p->buf_allocated) {
22726495Sspeer 		if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
22736495Sspeer 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
22746495Sspeer 			    "<== nxge_map_txdma: buf not allocated"));
22756495Sspeer 			return (NXGE_ERROR);
22766495Sspeer 		}
22773859Sml29623 	}
22783859Sml29623 
22796495Sspeer 	if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
22806495Sspeer 		return (NXGE_ERROR);
22816495Sspeer 
22826495Sspeer 	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
22836495Sspeer 	pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
22846495Sspeer 	pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
22856495Sspeer 	pRing = &nxgep->tx_rings->rings[channel];
22866495Sspeer 	mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
22876495Sspeer 
22886495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
22896929Smisaki 	    "tx_rings $%p tx_desc_rings $%p",
22906929Smisaki 	    nxgep->tx_rings, nxgep->tx_rings->rings));
22913859Sml29623 
22923859Sml29623 	/*
22936495Sspeer 	 * Map descriptors from the buffer pools for <channel>.
22946495Sspeer 	 */
22956495Sspeer 
22966495Sspeer 	/*
22976495Sspeer 	 * Set up and prepare buffer blocks, descriptors
22986495Sspeer 	 * and mailbox.
22993859Sml29623 	 */
23006495Sspeer 	status = nxge_map_txdma_channel(nxgep, channel,
23016495Sspeer 	    pData, pRing, num_chunks, pControl, mailbox);
23026495Sspeer 	if (status != NXGE_OK) {
23036495Sspeer 		NXGE_ERROR_MSG((nxgep, MEM3_CTL,
23046929Smisaki 		    "==> nxge_map_txdma(%d): nxge_map_txdma_channel() "
23056929Smisaki 		    "returned 0x%x",
23066929Smisaki 		    channel, status));
23076495Sspeer 		return (status);
23086495Sspeer 	}
23096495Sspeer 
23106495Sspeer 	ring = *pRing;
23116495Sspeer 
23126495Sspeer 	ring->index = (uint16_t)channel;
23136495Sspeer 	ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
23146495Sspeer 
23156495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
23166495Sspeer 	if (isLDOMguest(nxgep)) {
23176495Sspeer 		(void) nxge_tdc_lp_conf(nxgep, channel);
23186495Sspeer 	} else {
23196495Sspeer 		nxge_tdc_hvio_setup(nxgep, channel);
23206495Sspeer 	}
23213859Sml29623 #endif
23226495Sspeer 
23236495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
23246495Sspeer 	    "(status 0x%x channel %d)", status, channel));
23253859Sml29623 
23263859Sml29623 	return (status);
23273859Sml29623 }
23283859Sml29623 
23293859Sml29623 static nxge_status_t
23303859Sml29623 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
23313859Sml29623 	p_nxge_dma_common_t *dma_buf_p,
23323859Sml29623 	p_tx_ring_t *tx_desc_p,
23333859Sml29623 	uint32_t num_chunks,
23343859Sml29623 	p_nxge_dma_common_t *dma_cntl_p,
23353859Sml29623 	p_tx_mbox_t *tx_mbox_p)
23363859Sml29623 {
23373859Sml29623 	int	status = NXGE_OK;
23383859Sml29623 
23393859Sml29623 	/*
23403859Sml29623 	 * Set up and prepare buffer blocks, descriptors
23413859Sml29623 	 * and mailbox.
23423859Sml29623 	 */
23436495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
23446929Smisaki 	    "==> nxge_map_txdma_channel (channel %d)", channel));
23453859Sml29623 	/*
23463859Sml29623 	 * Transmit buffer blocks
23473859Sml29623 	 */
23483859Sml29623 	status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
23496929Smisaki 	    dma_buf_p, tx_desc_p, num_chunks);
23503859Sml29623 	if (status != NXGE_OK) {
23513859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
23526929Smisaki 		    "==> nxge_map_txdma_channel (channel %d): "
23536929Smisaki 		    "map buffer failed 0x%x", channel, status));
23543859Sml29623 		goto nxge_map_txdma_channel_exit;
23553859Sml29623 	}
23563859Sml29623 
23573859Sml29623 	/*
23583859Sml29623 	 * Transmit block ring, and mailbox.
23593859Sml29623 	 */
23603859Sml29623 	nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
23616929Smisaki 	    tx_mbox_p);
23623859Sml29623 
23633859Sml29623 	goto nxge_map_txdma_channel_exit;
23643859Sml29623 
23653859Sml29623 nxge_map_txdma_channel_fail1:
23666495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
23676929Smisaki 	    "==> nxge_map_txdma_channel: unmap buf"
23686929Smisaki 	    "(status 0x%x channel %d)",
23696929Smisaki 	    status, channel));
23703859Sml29623 	nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
23713859Sml29623 
23723859Sml29623 nxge_map_txdma_channel_exit:
23736495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
23746929Smisaki 	    "<== nxge_map_txdma_channel: "
23756929Smisaki 	    "(status 0x%x channel %d)",
23766929Smisaki 	    status, channel));
23773859Sml29623 
23783859Sml29623 	return (status);
23793859Sml29623 }
23803859Sml29623 
23813859Sml29623 /*ARGSUSED*/
23823859Sml29623 static void
23836495Sspeer nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
23843859Sml29623 {
23856495Sspeer 	tx_ring_t *ring;
23866495Sspeer 	tx_mbox_t *mailbox;
23876495Sspeer 
23883859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23896929Smisaki 	    "==> nxge_unmap_txdma_channel (channel %d)", channel));
23903859Sml29623 	/*
23913859Sml29623 	 * unmap tx block ring, and mailbox.
23923859Sml29623 	 */
23936495Sspeer 	ring = nxgep->tx_rings->rings[channel];
23946495Sspeer 	mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
23956495Sspeer 
23966495Sspeer 	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);
23973859Sml29623 
23983859Sml29623 	/* unmap buffer blocks */
23996495Sspeer 	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);
24006495Sspeer 
24016495Sspeer 	nxge_free_txb(nxgep, channel);
24023859Sml29623 
24033859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
24043859Sml29623 }
24053859Sml29623 
24066495Sspeer /*
24076495Sspeer  * nxge_map_txdma_channel_cfg_ring
24086495Sspeer  *
24096495Sspeer  *	Map a TDC into our kernel space.
24106495Sspeer  *	This function allocates all of the per-channel data structures.
24116495Sspeer  *
24126495Sspeer  * Arguments:
24136495Sspeer  * 	nxgep
24146495Sspeer  * 	dma_channel	The channel to map.
24156495Sspeer  *	dma_cntl_p
24166495Sspeer  *	tx_ring_p	dma_channel's transmit ring
24176495Sspeer  *	tx_mbox_p	dma_channel's mailbox
24186495Sspeer  *
24196495Sspeer  * Notes:
24206495Sspeer  *
24216495Sspeer  * NPI/NXGE function calls:
24226495Sspeer  *	nxge_setup_dma_common()
24236495Sspeer  *
24246495Sspeer  * Registers accessed:
24256495Sspeer  *	none.
24266495Sspeer  *
24276495Sspeer  * Context:
24286495Sspeer  *	Any domain
24296495Sspeer  */
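/*
 * The descriptor ring is described to the hardware through TX_RNG_CFIG:
 * the ring's DMA cookie address is masked with TX_RNG_CFIG_ADDR_MASK and
 * the length field holds the ring size divided by 8.  For illustration,
 * a 1024-descriptor ring yields a length field of 1024 >> 3 = 128.
 * The mailbox DMA address is likewise split between the TXDMA_MBH
 * (high) and TXDMA_MBL (low) fields.
 */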
24303859Sml29623 /*ARGSUSED*/
24313859Sml29623 static void
24323859Sml29623 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
24333859Sml29623 	p_nxge_dma_common_t *dma_cntl_p,
24343859Sml29623 	p_tx_ring_t tx_ring_p,
24353859Sml29623 	p_tx_mbox_t *tx_mbox_p)
24363859Sml29623 {
24373859Sml29623 	p_tx_mbox_t 		mboxp;
24383859Sml29623 	p_nxge_dma_common_t 	cntl_dmap;
24393859Sml29623 	p_nxge_dma_common_t 	dmap;
24403859Sml29623 	p_tx_rng_cfig_t		tx_ring_cfig_p;
24413859Sml29623 	p_tx_ring_kick_t	tx_ring_kick_p;
24423859Sml29623 	p_tx_cs_t		tx_cs_p;
24433859Sml29623 	p_tx_dma_ent_msk_t	tx_evmask_p;
24443859Sml29623 	p_txdma_mbh_t		mboxh_p;
24453859Sml29623 	p_txdma_mbl_t		mboxl_p;
24463859Sml29623 	uint64_t		tx_desc_len;
24473859Sml29623 
24483859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24496929Smisaki 	    "==> nxge_map_txdma_channel_cfg_ring"));
24503859Sml29623 
24513859Sml29623 	cntl_dmap = *dma_cntl_p;
24523859Sml29623 
24533859Sml29623 	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
24543859Sml29623 	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
24556929Smisaki 	    sizeof (tx_desc_t));
24563859Sml29623 	/*
24573859Sml29623 	 * Zero out transmit ring descriptors.
24583859Sml29623 	 */
24593859Sml29623 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
24603859Sml29623 	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
24613859Sml29623 	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
24623859Sml29623 	tx_cs_p = &(tx_ring_p->tx_cs);
24633859Sml29623 	tx_evmask_p = &(tx_ring_p->tx_evmask);
24643859Sml29623 	tx_ring_cfig_p->value = 0;
24653859Sml29623 	tx_ring_kick_p->value = 0;
24663859Sml29623 	tx_cs_p->value = 0;
24673859Sml29623 	tx_evmask_p->value = 0;
24683859Sml29623 
24693859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24706929Smisaki 	    "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
24716929Smisaki 	    dma_channel,
24726929Smisaki 	    dmap->dma_cookie.dmac_laddress));
24733859Sml29623 
24743859Sml29623 	tx_ring_cfig_p->value = 0;
24753859Sml29623 	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
24763859Sml29623 	tx_ring_cfig_p->value =
24776929Smisaki 	    (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
24786929Smisaki 	    (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
24793859Sml29623 
24803859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24816929Smisaki 	    "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
24826929Smisaki 	    dma_channel,
24836929Smisaki 	    tx_ring_cfig_p->value));
24843859Sml29623 
24853859Sml29623 	tx_cs_p->bits.ldw.rst = 1;
24863859Sml29623 
24873859Sml29623 	/* Map in mailbox */
24883859Sml29623 	mboxp = (p_tx_mbox_t)
24896929Smisaki 	    KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
24903859Sml29623 	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
24913859Sml29623 	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
24923859Sml29623 	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
24933859Sml29623 	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
24943859Sml29623 	mboxh_p->value = mboxl_p->value = 0;
24953859Sml29623 
24963859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24976929Smisaki 	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
24986929Smisaki 	    dmap->dma_cookie.dmac_laddress));
24993859Sml29623 
25003859Sml29623 	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
25016929Smisaki 	    TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
25023859Sml29623 
25033859Sml29623 	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
25046929Smisaki 	    TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
25053859Sml29623 
25063859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25076929Smisaki 	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
25086929Smisaki 	    dmap->dma_cookie.dmac_laddress));
25093859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25106929Smisaki 	    "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
25116929Smisaki 	    "mbox $%p",
25126929Smisaki 	    mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
25133859Sml29623 	tx_ring_p->page_valid.value = 0;
25143859Sml29623 	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
25153859Sml29623 	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
25163859Sml29623 	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
25173859Sml29623 	tx_ring_p->page_hdl.value = 0;
25183859Sml29623 
25193859Sml29623 	tx_ring_p->page_valid.bits.ldw.page0 = 1;
25203859Sml29623 	tx_ring_p->page_valid.bits.ldw.page1 = 1;
25213859Sml29623 
25223859Sml29623 	tx_ring_p->max_burst.value = 0;
25233859Sml29623 	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
25243859Sml29623 
25253859Sml29623 	*tx_mbox_p = mboxp;
25263859Sml29623 
25273859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25286929Smisaki 	    "<== nxge_map_txdma_channel_cfg_ring"));
25293859Sml29623 }
25303859Sml29623 
25313859Sml29623 /*ARGSUSED*/
25323859Sml29623 static void
25333859Sml29623 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
25343859Sml29623 	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
25353859Sml29623 {
25363859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25376929Smisaki 	    "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
25386929Smisaki 	    tx_ring_p->tdc));
25393859Sml29623 
25403859Sml29623 	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
25413859Sml29623 
25423859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25436929Smisaki 	    "<== nxge_unmap_txdma_channel_cfg_ring"));
25443859Sml29623 }
25453859Sml29623 
25466495Sspeer /*
25476495Sspeer  * nxge_map_txdma_channel_buf_ring
25486495Sspeer  *
25496495Sspeer  *
25506495Sspeer  * Arguments:
25516495Sspeer  * 	nxgep
25526495Sspeer  * 	channel		The channel to map.
25536495Sspeer  *	dma_buf_p
25546495Sspeer  *	tx_desc_p	channel's descriptor ring
25556495Sspeer  *	num_chunks
25566495Sspeer  *
25576495Sspeer  * Notes:
25586495Sspeer  *
25596495Sspeer  * NPI/NXGE function calls:
25606495Sspeer  *	nxge_setup_dma_common()
25616495Sspeer  *
25626495Sspeer  * Registers accessed:
25636495Sspeer  *	none.
25646495Sspeer  *
25656495Sspeer  * Context:
25666495Sspeer  *	Any domain
25676495Sspeer  */
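/*
 * The total number of transmit messages (nmsgs) is the sum of nblocks
 * over all <num_chunks> buffer chunks; it becomes the ring size.  One
 * DMA handle is pre-allocated per message for packets that are bound
 * rather than bcopy'd, and the failure path below releases any handles
 * already allocated before tearing the ring down.
 */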
25683859Sml29623 static nxge_status_t
25693859Sml29623 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
25703859Sml29623 	p_nxge_dma_common_t *dma_buf_p,
25713859Sml29623 	p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
25723859Sml29623 {
25733859Sml29623 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
25743859Sml29623 	p_nxge_dma_common_t 	dmap;
25753859Sml29623 	nxge_os_dma_handle_t	tx_buf_dma_handle;
25763859Sml29623 	p_tx_ring_t 		tx_ring_p;
25773859Sml29623 	p_tx_msg_t 		tx_msg_ring;
25783859Sml29623 	nxge_status_t		status = NXGE_OK;
25793859Sml29623 	int			ddi_status = DDI_SUCCESS;
25803859Sml29623 	int			i, j, index;
25813859Sml29623 	uint32_t		size, bsize;
25823859Sml29623 	uint32_t 		nblocks, nmsgs;
25833859Sml29623 
25843859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25856929Smisaki 	    "==> nxge_map_txdma_channel_buf_ring"));
25863859Sml29623 
25873859Sml29623 	dma_bufp = tmp_bufp = *dma_buf_p;
25883859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25893859Sml29623 	    " nxge_map_txdma_channel_buf_ring: channel %d to map %d "
25903859Sml29623 	    "chunks bufp $%p",
25913859Sml29623 	    channel, num_chunks, dma_bufp));
25923859Sml29623 
25933859Sml29623 	nmsgs = 0;
25943859Sml29623 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
25953859Sml29623 		nmsgs += tmp_bufp->nblocks;
25963859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25976929Smisaki 		    "==> nxge_map_txdma_channel_buf_ring: channel %d "
25986929Smisaki 		    "bufp $%p nblocks %d nmsgs %d",
25996929Smisaki 		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
26003859Sml29623 	}
26013859Sml29623 	if (!nmsgs) {
26023859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26036929Smisaki 		    "<== nxge_map_txdma_channel_buf_ring: channel %d "
26046929Smisaki 		    "no msg blocks",
26056929Smisaki 		    channel));
26063859Sml29623 		status = NXGE_ERROR;
26073859Sml29623 		goto nxge_map_txdma_channel_buf_ring_exit;
26083859Sml29623 	}
26093859Sml29623 
26103859Sml29623 	tx_ring_p = (p_tx_ring_t)
26116929Smisaki 	    KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
26123859Sml29623 	MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
26136929Smisaki 	    (void *)nxgep->interrupt_cookie);
26147906SMichael.Speer@Sun.COM 	MUTEX_INIT(&tx_ring_p->freelock, NULL, MUTEX_DRIVER,
26157906SMichael.Speer@Sun.COM 	    (void *)nxgep->interrupt_cookie);
26163952Sml29623 
26176713Sspeer 	(void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE);
26186886Sspeer 	tx_ring_p->tx_ring_busy = B_FALSE;
26193952Sml29623 	tx_ring_p->nxgep = nxgep;
26203952Sml29623 	tx_ring_p->serial = nxge_serialize_create(nmsgs,
26216929Smisaki 	    nxge_serial_tx, tx_ring_p);
26223859Sml29623 	/*
26233859Sml29623 	 * Allocate transmit message rings and handles for packets
26243859Sml29623 	 * not to be copied to premapped buffers.
26253859Sml29623 	 */
26263859Sml29623 	size = nmsgs * sizeof (tx_msg_t);
26273859Sml29623 	tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
26283859Sml29623 	for (i = 0; i < nmsgs; i++) {
26293859Sml29623 		ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
26306929Smisaki 		    DDI_DMA_DONTWAIT, 0,
26316929Smisaki 		    &tx_msg_ring[i].dma_handle);
26323859Sml29623 		if (ddi_status != DDI_SUCCESS) {
26333859Sml29623 			status |= NXGE_DDI_FAILED;
26343859Sml29623 			break;
26353859Sml29623 		}
26363859Sml29623 	}
26373859Sml29623 	if (i < nmsgs) {
26384185Sspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
26394185Sspeer 		    "Failed to allocate DMA handles."));
26403859Sml29623 		goto nxge_map_txdma_channel_buf_ring_fail1;
26413859Sml29623 	}
26423859Sml29623 
26433859Sml29623 	tx_ring_p->tdc = channel;
26443859Sml29623 	tx_ring_p->tx_msg_ring = tx_msg_ring;
26453859Sml29623 	tx_ring_p->tx_ring_size = nmsgs;
26463859Sml29623 	tx_ring_p->num_chunks = num_chunks;
26473859Sml29623 	if (!nxge_tx_intr_thres) {
26483859Sml29623 		nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
26493859Sml29623 	}
26503859Sml29623 	tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
26513859Sml29623 	tx_ring_p->rd_index = 0;
26523859Sml29623 	tx_ring_p->wr_index = 0;
26533859Sml29623 	tx_ring_p->ring_head.value = 0;
26543859Sml29623 	tx_ring_p->ring_kick_tail.value = 0;
26553859Sml29623 	tx_ring_p->descs_pending = 0;
26563859Sml29623 
26573859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26586929Smisaki 	    "==> nxge_map_txdma_channel_buf_ring: channel %d "
26596929Smisaki 	    "actual tx desc max %d nmsgs %d "
26606929Smisaki 	    "(config nxge_tx_ring_size %d)",
26616929Smisaki 	    channel, tx_ring_p->tx_ring_size, nmsgs,
26626929Smisaki 	    nxge_tx_ring_size));
26633859Sml29623 
26643859Sml29623 	/*
26653859Sml29623 	 * Map in buffers from the buffer pool.
26663859Sml29623 	 */
26673859Sml29623 	index = 0;
26683859Sml29623 	bsize = dma_bufp->block_size;
26693859Sml29623 
26703859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
26716929Smisaki 	    "dma_bufp $%p tx_rng_p $%p "
26726929Smisaki 	    "tx_msg_rng_p $%p bsize %d",
26736929Smisaki 	    dma_bufp, tx_ring_p, tx_msg_ring, bsize));
26743859Sml29623 
26753859Sml29623 	tx_buf_dma_handle = dma_bufp->dma_handle;
26763859Sml29623 	for (i = 0; i < num_chunks; i++, dma_bufp++) {
26773859Sml29623 		bsize = dma_bufp->block_size;
26783859Sml29623 		nblocks = dma_bufp->nblocks;
26793859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26806929Smisaki 		    "==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
26816929Smisaki 		    "size %d dma_bufp $%p",
26826929Smisaki 		    i, sizeof (nxge_dma_common_t), dma_bufp));
26833859Sml29623 
26843859Sml29623 		for (j = 0; j < nblocks; j++) {
26853859Sml29623 			tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
26867906SMichael.Speer@Sun.COM 			tx_msg_ring[index].nextp = NULL;
26873859Sml29623 			dmap = &tx_msg_ring[index++].buf_dma;
26883859Sml29623 #ifdef TX_MEM_DEBUG
26893859Sml29623 			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26906929Smisaki 			    "==> nxge_map_txdma_channel_buf_ring: j %d "
26916929Smisaki 			    "dmap $%p", j, dmap));
26923859Sml29623 #endif
26933859Sml29623 			nxge_setup_dma_common(dmap, dma_bufp, 1,
26946929Smisaki 			    bsize);
26953859Sml29623 		}
26963859Sml29623 	}
26973859Sml29623 
26983859Sml29623 	if (i < num_chunks) {
26994185Sspeer 		status = NXGE_ERROR;
27003859Sml29623 		goto nxge_map_txdma_channel_buf_ring_fail1;
27013859Sml29623 	}
27023859Sml29623 
27033859Sml29623 	*tx_desc_p = tx_ring_p;
27043859Sml29623 
27053859Sml29623 	goto nxge_map_txdma_channel_buf_ring_exit;
27063859Sml29623 
27073859Sml29623 nxge_map_txdma_channel_buf_ring_fail1:
27083952Sml29623 	if (tx_ring_p->serial) {
27093952Sml29623 		nxge_serialize_destroy(tx_ring_p->serial);
27103952Sml29623 		tx_ring_p->serial = NULL;
27113952Sml29623 	}
27123952Sml29623 
27143859Sml29623 	for (i = 0; i < nmsgs; i++) {
27154185Sspeer 		if (tx_msg_ring[i].dma_handle != NULL) {
27164185Sspeer 			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
27173859Sml29623 		}
27183859Sml29623 	}
27197906SMichael.Speer@Sun.COM 
27207906SMichael.Speer@Sun.COM 	MUTEX_DESTROY(&tx_ring_p->freelock);
27213859Sml29623 	MUTEX_DESTROY(&tx_ring_p->lock);
27224185Sspeer 	KMEM_FREE(tx_msg_ring, size);
27233859Sml29623 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
27243859Sml29623 
27254185Sspeer 	status = NXGE_ERROR;
27264185Sspeer 
27273859Sml29623 nxge_map_txdma_channel_buf_ring_exit:
27283859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27296929Smisaki 	    "<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
27303859Sml29623 
27313859Sml29623 	return (status);
27323859Sml29623 }
27333859Sml29623 
27343859Sml29623 /*ARGSUSED*/
27353859Sml29623 static void
27363859Sml29623 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
27373859Sml29623 {
27383859Sml29623 	p_tx_msg_t 		tx_msg_ring;
27393859Sml29623 	p_tx_msg_t 		tx_msg_p;
27403859Sml29623 	int			i;
27413859Sml29623 
27423859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27436929Smisaki 	    "==> nxge_unmap_txdma_channel_buf_ring"));
27443859Sml29623 	if (tx_ring_p == NULL) {
27453859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
27466929Smisaki 		    "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
27473859Sml29623 		return;
27483859Sml29623 	}
27493859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27506929Smisaki 	    "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
27516929Smisaki 	    tx_ring_p->tdc));
27523859Sml29623 
27533859Sml29623 	tx_msg_ring = tx_ring_p->tx_msg_ring;
27546495Sspeer 
27556495Sspeer 	/*
27566495Sspeer 	 * Since the serialization thread, timer thread and
27576495Sspeer 	 * interrupt thread can all call the transmit reclaim,
27586495Sspeer 	 * the unmapping function needs to acquire the lock
27596495Sspeer 	 * to free those buffers which were transmitted
27606495Sspeer 	 * by the hardware already.
27616495Sspeer 	 */
27626495Sspeer 	MUTEX_ENTER(&tx_ring_p->lock);
27636495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
27646495Sspeer 	    "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
27656495Sspeer 	    "channel %d",
27666495Sspeer 	    tx_ring_p->tdc));
27676495Sspeer 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
27686495Sspeer 
27693859Sml29623 	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
27703859Sml29623 		tx_msg_p = &tx_msg_ring[i];
27713859Sml29623 		if (tx_msg_p->tx_message != NULL) {
27723859Sml29623 			freemsg(tx_msg_p->tx_message);
27733859Sml29623 			tx_msg_p->tx_message = NULL;
27743859Sml29623 		}
27753859Sml29623 	}
27763859Sml29623 
27773859Sml29623 	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
27783859Sml29623 		if (tx_msg_ring[i].dma_handle != NULL) {
27793859Sml29623 			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
27803859Sml29623 		}
27816495Sspeer 		tx_msg_ring[i].dma_handle = NULL;
27823859Sml29623 	}
27833859Sml29623 
27846495Sspeer 	MUTEX_EXIT(&tx_ring_p->lock);
27856495Sspeer 
27863952Sml29623 	if (tx_ring_p->serial) {
27873952Sml29623 		nxge_serialize_destroy(tx_ring_p->serial);
27883952Sml29623 		tx_ring_p->serial = NULL;
27893952Sml29623 	}
27903952Sml29623 
27917906SMichael.Speer@Sun.COM 	MUTEX_DESTROY(&tx_ring_p->freelock);
27923859Sml29623 	MUTEX_DESTROY(&tx_ring_p->lock);
27933859Sml29623 	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
27943859Sml29623 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
27953859Sml29623 
27963859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27976929Smisaki 	    "<== nxge_unmap_txdma_channel_buf_ring"));
27983859Sml29623 }
27993859Sml29623 
28003859Sml29623 static nxge_status_t
28016495Sspeer nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
28023859Sml29623 {
28033859Sml29623 	p_tx_rings_t 		tx_rings;
28043859Sml29623 	p_tx_ring_t 		*tx_desc_rings;
28053859Sml29623 	p_tx_mbox_areas_t 	tx_mbox_areas_p;
28063859Sml29623 	p_tx_mbox_t		*tx_mbox_p;
28073859Sml29623 	nxge_status_t		status = NXGE_OK;
28083859Sml29623 
28093859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
28103859Sml29623 
28113859Sml29623 	tx_rings = nxgep->tx_rings;
28123859Sml29623 	if (tx_rings == NULL) {
28133859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
28146929Smisaki 		    "<== nxge_txdma_hw_start: NULL ring pointer"));
28153859Sml29623 		return (NXGE_ERROR);
28163859Sml29623 	}
28173859Sml29623 	tx_desc_rings = tx_rings->rings;
28183859Sml29623 	if (tx_desc_rings == NULL) {
28193859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
28206929Smisaki 		    "<== nxge_txdma_hw_start: NULL ring pointers"));
28213859Sml29623 		return (NXGE_ERROR);
28223859Sml29623 	}
28233859Sml29623 
28246495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
28256495Sspeer 	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
28263859Sml29623 
28273859Sml29623 	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
28283859Sml29623 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
28293859Sml29623 
28306495Sspeer 	status = nxge_txdma_start_channel(nxgep, channel,
28316495Sspeer 	    (p_tx_ring_t)tx_desc_rings[channel],
28326495Sspeer 	    (p_tx_mbox_t)tx_mbox_p[channel]);
28336495Sspeer 	if (status != NXGE_OK) {
28346495Sspeer 		goto nxge_txdma_hw_start_fail1;
28353859Sml29623 	}
28363859Sml29623 
28373859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
28386929Smisaki 	    "tx_rings $%p rings $%p",
28396929Smisaki 	    nxgep->tx_rings, nxgep->tx_rings->rings));
28403859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
28416929Smisaki 	    "tx_rings $%p tx_desc_rings $%p",
28426929Smisaki 	    nxgep->tx_rings, tx_desc_rings));
28433859Sml29623 
28443859Sml29623 	goto nxge_txdma_hw_start_exit;
28453859Sml29623 
28463859Sml29623 nxge_txdma_hw_start_fail1:
28473859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
28486929Smisaki 	    "==> nxge_txdma_hw_start: disable "
28496929Smisaki 	    "(status 0x%x channel %d)", status, channel));
28503859Sml29623 
28513859Sml29623 nxge_txdma_hw_start_exit:
28523859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
28536929Smisaki 	    "==> nxge_txdma_hw_start: (status 0x%x)", status));
28543859Sml29623 
28553859Sml29623 	return (status);
28563859Sml29623 }
28573859Sml29623 
28586495Sspeer /*
28596495Sspeer  * nxge_txdma_start_channel
28606495Sspeer  *
28616495Sspeer  *	Start a TDC.
28626495Sspeer  *
28636495Sspeer  * Arguments:
28646495Sspeer  * 	nxgep
28656495Sspeer  * 	channel		The channel to start.
28666495Sspeer  * 	tx_ring_p	channel's transmit descriptor ring.
28676495Sspeer  * 	tx_mbox_p	channel's mailbox.
28686495Sspeer  *
28696495Sspeer  * Notes:
28706495Sspeer  *
28716495Sspeer  * NPI/NXGE function calls:
28726495Sspeer  *	nxge_reset_txdma_channel()
28736495Sspeer  *	nxge_init_txdma_channel_event_mask()
28746495Sspeer  *	nxge_enable_txdma_channel()
28756495Sspeer  *
28766495Sspeer  * Registers accessed:
28776495Sspeer  *	none directly (see functions above).
28786495Sspeer  *
28796495Sspeer  * Context:
28806495Sspeer  *	Any domain
28816495Sspeer  */
28823859Sml29623 static nxge_status_t
28833859Sml29623 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
28843859Sml29623     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
28853859Sml29623 
28863859Sml29623 {
28873859Sml29623 	nxge_status_t		status = NXGE_OK;
28883859Sml29623 
28893859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
28903859Sml29623 		"==> nxge_txdma_start_channel (channel %d)", channel));
28913859Sml29623 	/*
28923859Sml29623 	 * TXDMA/TXC must be in stopped state.
28933859Sml29623 	 */
28943859Sml29623 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
28953859Sml29623 
28963859Sml29623 	/*
28973859Sml29623 	 * Reset TXDMA channel
28983859Sml29623 	 */
28993859Sml29623 	tx_ring_p->tx_cs.value = 0;
29003859Sml29623 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
29013859Sml29623 	status = nxge_reset_txdma_channel(nxgep, channel,
29023859Sml29623 			tx_ring_p->tx_cs.value);
29033859Sml29623 	if (status != NXGE_OK) {
29043859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29053859Sml29623 			"==> nxge_txdma_start_channel (channel %d)"
29063859Sml29623 			" reset channel failed 0x%x", channel, status));
29073859Sml29623 		goto nxge_txdma_start_channel_exit;
29083859Sml29623 	}
29093859Sml29623 
29103859Sml29623 	/*
29113859Sml29623 	 * Initialize the TXDMA channel specific FZC control
29123859Sml29623 	 * configurations. These FZC registers pertain to
29133859Sml29623 	 * each TX channel (i.e. its logical pages).
29143859Sml29623 	 */
29156495Sspeer 	if (!isLDOMguest(nxgep)) {
29166495Sspeer 		status = nxge_init_fzc_txdma_channel(nxgep, channel,
29176495Sspeer 		    tx_ring_p, tx_mbox_p);
29186495Sspeer 		if (status != NXGE_OK) {
29196495Sspeer 			goto nxge_txdma_start_channel_exit;
29206495Sspeer 		}
29213859Sml29623 	}
29223859Sml29623 
29233859Sml29623 	/*
29243859Sml29623 	 * Initialize the event masks.
29253859Sml29623 	 */
29263859Sml29623 	tx_ring_p->tx_evmask.value = 0;
29273859Sml29623 	status = nxge_init_txdma_channel_event_mask(nxgep,
29286495Sspeer 	    channel, &tx_ring_p->tx_evmask);
29293859Sml29623 	if (status != NXGE_OK) {
29303859Sml29623 		goto nxge_txdma_start_channel_exit;
29313859Sml29623 	}
29323859Sml29623 
29333859Sml29623 	/*
29343859Sml29623 	 * Load the TXDMA descriptors, buffers and mailbox,
29353859Sml29623 	 * then initialize and enable this DMA channel.
29373859Sml29623 	 */
29383859Sml29623 	status = nxge_enable_txdma_channel(nxgep, channel,
29393859Sml29623 			tx_ring_p, tx_mbox_p);
29403859Sml29623 	if (status != NXGE_OK) {
29413859Sml29623 		goto nxge_txdma_start_channel_exit;
29423859Sml29623 	}
29433859Sml29623 
29443859Sml29623 nxge_txdma_start_channel_exit:
29453859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
29463859Sml29623 
29473859Sml29623 	return (status);
29483859Sml29623 }
29493859Sml29623 
29506495Sspeer /*
29516495Sspeer  * nxge_txdma_stop_channel
29526495Sspeer  *
29536495Sspeer  *	Stop a TDC.
29546495Sspeer  *
29556495Sspeer  * Arguments:
29566495Sspeer  * 	nxgep
29576495Sspeer  * 	channel		The channel to stop.
29586495Sspeer  * 	tx_ring_p	channel's transmit descriptor ring.
29596495Sspeer  * 	tx_mbox_p	channel's mailbox.
29606495Sspeer  *
29616495Sspeer  * Notes:
29626495Sspeer  *
29636495Sspeer  * NPI/NXGE function calls:
29646495Sspeer  *	nxge_txdma_stop_inj_err()
29656495Sspeer  *	nxge_reset_txdma_channel()
29666495Sspeer  *	nxge_init_txdma_channel_event_mask()
29676495Sspeer  *	nxge_init_txdma_channel_cntl_stat()
29686495Sspeer  *	nxge_disable_txdma_channel()
29696495Sspeer  *
29706495Sspeer  * Registers accessed:
29716495Sspeer  *	none directly (see functions above).
29726495Sspeer  *
29736495Sspeer  * Context:
29746495Sspeer  *	Any domain
29756495Sspeer  */
29763859Sml29623 /*ARGSUSED*/
29773859Sml29623 static nxge_status_t
29786495Sspeer nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
29793859Sml29623 {
29806495Sspeer 	p_tx_ring_t tx_ring_p;
29816495Sspeer 	int status = NXGE_OK;
29823859Sml29623 
29833859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
29846929Smisaki 	    "==> nxge_txdma_stop_channel: channel %d", channel));
29853859Sml29623 
29863859Sml29623 	/*
29873859Sml29623 	 * Stop (disable) the TXDMA and TXC. (If the STOP bit is set
29883859Sml29623 	 * and the STOP_N_GO bit is not set, the TXDMA reset state
29893859Sml29623 	 * will not be set when the channel is later reset.)
29903859Sml29623 	 */
29913859Sml29623 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
29923859Sml29623 
29936495Sspeer 	tx_ring_p = nxgep->tx_rings->rings[channel];
29946495Sspeer 
29953859Sml29623 	/*
29963859Sml29623 	 * Reset TXDMA channel
29973859Sml29623 	 */
29983859Sml29623 	tx_ring_p->tx_cs.value = 0;
29993859Sml29623 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
30003859Sml29623 	status = nxge_reset_txdma_channel(nxgep, channel,
30016929Smisaki 	    tx_ring_p->tx_cs.value);
30023859Sml29623 	if (status != NXGE_OK) {
30033859Sml29623 		goto nxge_txdma_stop_channel_exit;
30043859Sml29623 	}
30053859Sml29623 
30063859Sml29623 #ifdef HARDWARE_REQUIRED
30073859Sml29623 	/* Set up the interrupt event masks. */
30083859Sml29623 	tx_ring_p->tx_evmask.value = 0;
30093859Sml29623 	status = nxge_init_txdma_channel_event_mask(nxgep,
30106929Smisaki 	    channel, &tx_ring_p->tx_evmask);
30113859Sml29623 	if (status != NXGE_OK) {
30123859Sml29623 		goto nxge_txdma_stop_channel_exit;
30133859Sml29623 	}
30143859Sml29623 
30153859Sml29623 	/* Initialize the DMA control and status register */
30163859Sml29623 	tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
30173859Sml29623 	status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
30186929Smisaki 	    tx_ring_p->tx_cs.value);
30193859Sml29623 	if (status != NXGE_OK) {
30203859Sml29623 		goto nxge_txdma_stop_channel_exit;
30213859Sml29623 	}
30223859Sml29623 
30236495Sspeer 	tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
30246495Sspeer 
30253859Sml29623 	/* Disable channel */
30263859Sml29623 	status = nxge_disable_txdma_channel(nxgep, channel,
30276495Sspeer 	    tx_ring_p, tx_mbox_p);
30283859Sml29623 	if (status != NXGE_OK) {
30293859Sml29623 		goto nxge_txdma_stop_channel_exit;
30303859Sml29623 	}
30313859Sml29623 
30323859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
30336929Smisaki 	    "==> nxge_txdma_stop_channel: event done"));
30343859Sml29623 
30353859Sml29623 #endif
30363859Sml29623 
30373859Sml29623 nxge_txdma_stop_channel_exit:
30383859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
30393859Sml29623 	return (status);
30403859Sml29623 }
30413859Sml29623 
30426495Sspeer /*
30436495Sspeer  * nxge_txdma_get_ring
30446495Sspeer  *
30456495Sspeer  *	Get the ring for a TDC.
30466495Sspeer  *
30476495Sspeer  * Arguments:
30486495Sspeer  * 	nxgep
30496495Sspeer  * 	channel
30506495Sspeer  *
30516495Sspeer  * Notes:
30526495Sspeer  *
30536495Sspeer  * NPI/NXGE function calls:
30546495Sspeer  *
30556495Sspeer  * Registers accessed:
30566495Sspeer  *
30576495Sspeer  * Context:
30586495Sspeer  *	Any domain
30596495Sspeer  */
30603859Sml29623 static p_tx_ring_t
30613859Sml29623 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
30623859Sml29623 {
30636495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
30646495Sspeer 	int tdc;
30653859Sml29623 
30663859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
30673859Sml29623 
30686495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
30693859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
30706495Sspeer 		    "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
30716495Sspeer 		goto return_null;
30723859Sml29623 	}
30733859Sml29623 
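	/*
	 * Only the TDCs owned by this instance are scanned: set->owned.map
	 * is a bitmap of owned channels (other TDCs may belong to other
	 * domains under Hybrid I/O), and each owned ring is matched against
	 * the requested channel number.
	 */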
30746495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
30756495Sspeer 		if ((1 << tdc) & set->owned.map) {
30766495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
30776495Sspeer 			if (ring) {
30786495Sspeer 				if (channel == ring->tdc) {
30796495Sspeer 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
30806495Sspeer 					    "<== nxge_txdma_get_ring: "
30816495Sspeer 					    "tdc %d ring $%p", tdc, ring));
30826495Sspeer 					return (ring);
30836495Sspeer 				}
30846495Sspeer 			}
30853859Sml29623 		}
30863859Sml29623 	}
30873859Sml29623 
30886495Sspeer return_null:
30896495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
30906929Smisaki 	    "ring not found"));
30916495Sspeer 
30923859Sml29623 	return (NULL);
30933859Sml29623 }
30943859Sml29623 
30956495Sspeer /*
30966495Sspeer  * nxge_txdma_get_mbox
30976495Sspeer  *
30986495Sspeer  *	Get the mailbox for a TDC.
30996495Sspeer  *
31006495Sspeer  * Arguments:
31016495Sspeer  * 	nxgep
31026495Sspeer  * 	channel
31036495Sspeer  *
31046495Sspeer  * Notes:
31056495Sspeer  *
31066495Sspeer  * NPI/NXGE function calls:
31076495Sspeer  *
31086495Sspeer  * Registers accessed:
31096495Sspeer  *
31106495Sspeer  * Context:
31116495Sspeer  *	Any domain
31126495Sspeer  */
31133859Sml29623 static p_tx_mbox_t
31143859Sml29623 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
31153859Sml29623 {
31166495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
31176495Sspeer 	int tdc;
31183859Sml29623 
31193859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
31203859Sml29623 
31216495Sspeer 	if (nxgep->tx_mbox_areas_p == 0 ||
31226495Sspeer 	    nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
31236495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
31246495Sspeer 		    "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
31256495Sspeer 		goto return_null;
31263859Sml29623 	}
31273859Sml29623 
31286495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
31296495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
31306495Sspeer 		    "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
31316495Sspeer 		goto return_null;
31323859Sml29623 	}
31333859Sml29623 
31346495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
31356495Sspeer 		if ((1 << tdc) & set->owned.map) {
31366495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
31376495Sspeer 			if (ring) {
31386495Sspeer 				if (channel == ring->tdc) {
31396495Sspeer 					tx_mbox_t *mailbox = nxgep->
31406495Sspeer 					    tx_mbox_areas_p->
31416495Sspeer 					    txmbox_areas_p[tdc];
31426495Sspeer 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
31436495Sspeer 					    "<== nxge_txdma_get_mbox: tdc %d "
31446495Sspeer 					    "mailbox $%p", tdc, mailbox));
31456495Sspeer 					return (mailbox);
31466495Sspeer 				}
31476495Sspeer 			}
31483859Sml29623 		}
31493859Sml29623 	}
31503859Sml29623 
31516495Sspeer return_null:
31526495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
31536929Smisaki 	    "mailbox not found"));
31546495Sspeer 
31553859Sml29623 	return (NULL);
31563859Sml29623 }
31573859Sml29623 
31586495Sspeer /*
31596495Sspeer  * nxge_tx_err_evnts
31606495Sspeer  *
31616495Sspeer  *	Process the error events of a TDC; recover it if the error is fatal.
31626495Sspeer  *
31636495Sspeer  * Arguments:
31646495Sspeer  * 	nxgep
31656495Sspeer  * 	index	The index to the TDC ring.
31666495Sspeer  * 	ldvp	Used to get the channel number ONLY.
31676495Sspeer  * 	cs	A copy of the bits from TX_CS.
31686495Sspeer  *
31696495Sspeer  * Notes:
31706495Sspeer  *	Calling tree:
31716495Sspeer  *	 nxge_tx_intr()
31726495Sspeer  *
31736495Sspeer  * NPI/NXGE function calls:
31746495Sspeer  *	npi_txdma_ring_error_get()
31756495Sspeer  *	npi_txdma_inj_par_error_get()
31766495Sspeer  *	nxge_txdma_fatal_err_recover()
31776495Sspeer  *
31786495Sspeer  * Registers accessed:
31796495Sspeer  *	TX_RNG_ERR_LOGH	DMC+0x40048 Transmit Ring Error Log High
31806495Sspeer  *	TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
31816495Sspeer  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
31826495Sspeer  *
31836495Sspeer  * Context:
31846495Sspeer  *	Any domain	XXX Remove code which accesses TDMC_INJ_PAR_ERR.
31856495Sspeer  */
31863859Sml29623 /*ARGSUSED*/
31873859Sml29623 static nxge_status_t
31883859Sml29623 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
31893859Sml29623 {
31903859Sml29623 	npi_handle_t		handle;
31913859Sml29623 	npi_status_t		rs;
31923859Sml29623 	uint8_t			channel;
31933859Sml29623 	p_tx_ring_t 		*tx_rings;
31943859Sml29623 	p_tx_ring_t 		tx_ring_p;
31953859Sml29623 	p_nxge_tx_ring_stats_t	tdc_stats;
31963859Sml29623 	boolean_t		txchan_fatal = B_FALSE;
31973859Sml29623 	nxge_status_t		status = NXGE_OK;
31983859Sml29623 	tdmc_inj_par_err_t	par_err;
31993859Sml29623 	uint32_t		value;
32003859Sml29623 
32016495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
32023859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
32033859Sml29623 	channel = ldvp->channel;
32043859Sml29623 
32053859Sml29623 	tx_rings = nxgep->tx_rings->rings;
32063859Sml29623 	tx_ring_p = tx_rings[index];
32073859Sml29623 	tdc_stats = tx_ring_p->tdc_stats;
32083859Sml29623 	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
32096929Smisaki 	    (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
32106929Smisaki 	    (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
32113859Sml29623 		if ((rs = npi_txdma_ring_error_get(handle, channel,
32126929Smisaki 		    &tdc_stats->errlog)) != NPI_SUCCESS)
32133859Sml29623 			return (NXGE_ERROR | rs);
32143859Sml29623 	}
32153859Sml29623 
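	/*
	 * Decode each error bit reported in TX_CS below: bump the matching
	 * statistics counter, post an FMA ereport, and mark the channel
	 * fatal where recovery requires a channel reset.
	 */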
32163859Sml29623 	if (cs.bits.ldw.mbox_err) {
32173859Sml29623 		tdc_stats->mbox_err++;
32183859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32196929Smisaki 		    NXGE_FM_EREPORT_TDMC_MBOX_ERR);
32203859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32216929Smisaki 		    "==> nxge_tx_err_evnts(channel %d): "
32226929Smisaki 		    "fatal error: mailbox", channel));
32233859Sml29623 		txchan_fatal = B_TRUE;
32243859Sml29623 	}
32253859Sml29623 	if (cs.bits.ldw.pkt_size_err) {
32263859Sml29623 		tdc_stats->pkt_size_err++;
32273859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32286929Smisaki 		    NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
32293859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32306929Smisaki 		    "==> nxge_tx_err_evnts(channel %d): "
32316929Smisaki 		    "fatal error: pkt_size_err", channel));
32323859Sml29623 		txchan_fatal = B_TRUE;
32333859Sml29623 	}
32343859Sml29623 	if (cs.bits.ldw.tx_ring_oflow) {
32353859Sml29623 		tdc_stats->tx_ring_oflow++;
32363859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32376929Smisaki 		    NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
32383859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32396929Smisaki 		    "==> nxge_tx_err_evnts(channel %d): "
32406929Smisaki 		    "fatal error: tx_ring_oflow", channel));
32413859Sml29623 		txchan_fatal = B_TRUE;
32423859Sml29623 	}
32433859Sml29623 	if (cs.bits.ldw.pref_buf_par_err) {
32443859Sml29623 		tdc_stats->pre_buf_par_err++;
32453859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32466929Smisaki 		    NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
32473859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32486929Smisaki 		    "==> nxge_tx_err_evnts(channel %d): "
32496929Smisaki 		    "fatal error: pre_buf_par_err", channel));
32503859Sml29623 		/* Clear error injection source for parity error */
32513859Sml29623 		(void) npi_txdma_inj_par_error_get(handle, &value);
32523859Sml29623 		par_err.value = value;
32533859Sml29623 		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
32543859Sml29623 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
32553859Sml29623 		txchan_fatal = B_TRUE;
32563859Sml29623 	}
32573859Sml29623 	if (cs.bits.ldw.nack_pref) {
32583859Sml29623 		tdc_stats->nack_pref++;
32593859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32606929Smisaki 		    NXGE_FM_EREPORT_TDMC_NACK_PREF);
32613859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32626929Smisaki 		    "==> nxge_tx_err_evnts(channel %d): "
32636929Smisaki 		    "fatal error: nack_pref", channel));
32643859Sml29623 		txchan_fatal = B_TRUE;
32653859Sml29623 	}
32663859Sml29623 	if (cs.bits.ldw.nack_pkt_rd) {
32673859Sml29623 		tdc_stats->nack_pkt_rd++;
32683859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32696929Smisaki 		    NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
32703859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32716929Smisaki 		    "==> nxge_tx_err_evnts(channel %d): "
32726929Smisaki 		    "fatal error: nack_pkt_rd", channel));
32733859Sml29623 		txchan_fatal = B_TRUE;
32743859Sml29623 	}
32753859Sml29623 	if (cs.bits.ldw.conf_part_err) {
32763859Sml29623 		tdc_stats->conf_part_err++;
32773859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32786929Smisaki 		    NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
32793859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32806929Smisaki 		    "==> nxge_tx_err_evnts(channel %d): "
32816929Smisaki 		    "fatal error: config_partition_err", channel));
32823859Sml29623 		txchan_fatal = B_TRUE;
32833859Sml29623 	}
32843859Sml29623 	if (cs.bits.ldw.pkt_prt_err) {
32853859Sml29623 		tdc_stats->pkt_part_err++;
32863859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32876929Smisaki 		    NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
32883859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32896929Smisaki 		    "==> nxge_tx_err_evnts(channel %d): "
32906929Smisaki 		    "fatal error: pkt_prt_err", channel));
32913859Sml29623 		txchan_fatal = B_TRUE;
32923859Sml29623 	}
32933859Sml29623 
32943859Sml29623 	/* Clear error injection source in case this is an injected error */
32953859Sml29623 	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
32963859Sml29623 
32973859Sml29623 	if (txchan_fatal) {
32983859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32996929Smisaki 		    " nxge_tx_err_evnts: "
33006929Smisaki 		    " fatal error on channel %d cs 0x%llx\n",
33016929Smisaki 		    channel, cs.value));
33023859Sml29623 		status = nxge_txdma_fatal_err_recover(nxgep, channel,
33036929Smisaki 		    tx_ring_p);
33043859Sml29623 		if (status == NXGE_OK) {
33053859Sml29623 			FM_SERVICE_RESTORED(nxgep);
33063859Sml29623 		}
33073859Sml29623 	}
33083859Sml29623 
33096495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));
33103859Sml29623 
33113859Sml29623 	return (status);
33123859Sml29623 }
33133859Sml29623 
33143859Sml29623 static nxge_status_t
33156495Sspeer nxge_txdma_fatal_err_recover(
33166495Sspeer 	p_nxge_t nxgep,
33176495Sspeer 	uint16_t channel,
33186495Sspeer 	p_tx_ring_t tx_ring_p)
33193859Sml29623 {
33203859Sml29623 	npi_handle_t	handle;
33213859Sml29623 	npi_status_t	rs = NPI_SUCCESS;
33223859Sml29623 	p_tx_mbox_t	tx_mbox_p;
33233859Sml29623 	nxge_status_t	status = NXGE_OK;
33243859Sml29623 
33253859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
33263859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33276929Smisaki 	    "Recovering from TxDMAChannel#%d error...", channel));
33283859Sml29623 
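	/*
	 * Recovery sequence for a single TDC, as implemented below: stop
	 * the channel, reclaim completed descriptors, reset the channel and
	 * clear its kick register, reprogram the FZC registers (service
	 * domain only), re-arm the event mask, and re-enable the channel.
	 */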
33293859Sml29623 	/*
33303859Sml29623 	 * Stop the DMA channel and wait for the stop-done
33313859Sml29623 	 * indication. If the stop-done bit does not get set,
33323859Sml29623 	 * report an error.
33333859Sml29623 	 */
33343859Sml29623 
33353859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
33363859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
33373859Sml29623 	MUTEX_ENTER(&tx_ring_p->lock);
33383859Sml29623 	rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
33393859Sml29623 	if (rs != NPI_SUCCESS) {
33403859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33416929Smisaki 		    "==> nxge_txdma_fatal_err_recover (channel %d): "
33426929Smisaki 		    "stop failed ", channel));
33433859Sml29623 		goto fail;
33443859Sml29623 	}
33453859Sml29623 
33463859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
33473859Sml29623 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
33483859Sml29623 
33493859Sml29623 	/*
33503859Sml29623 	 * Reset TXDMA channel
33513859Sml29623 	 */
33523859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
33533859Sml29623 	if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
33546929Smisaki 	    NPI_SUCCESS) {
33553859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33566929Smisaki 		    "==> nxge_txdma_fatal_err_recover (channel %d)"
33576929Smisaki 		    " reset channel failed 0x%x", channel, rs));
33583859Sml29623 		goto fail;
33593859Sml29623 	}
33603859Sml29623 
33613859Sml29623 	/*
33623859Sml29623 	 * Reset the tail (kick) register to 0.
33633859Sml29623 	 * (The hardware will not reset it, and a Tx overflow fatal
33643859Sml29623 	 * error results if the tail is not 0 after the reset.)
33653859Sml29623 	 */
33663859Sml29623 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
33673859Sml29623 
33683859Sml29623 	/* Restart TXDMA channel */
33693859Sml29623 
33706495Sspeer 	if (!isLDOMguest(nxgep)) {
33716495Sspeer 		tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
33726495Sspeer 
33736495Sspeer 		/* XXX This is a problem in HIO! */
33746495Sspeer 		/*
33756495Sspeer 		 * Initialize the TXDMA channel specific FZC control
33766495Sspeer 		 * configurations. These FZC registers pertain to
33776495Sspeer 		 * each TX channel (i.e. its logical pages).
33786495Sspeer 		 */
33796495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
33806495Sspeer 		status = nxge_init_fzc_txdma_channel(nxgep, channel,
33816495Sspeer 		    tx_ring_p, tx_mbox_p);
33826495Sspeer 		if (status != NXGE_OK)
33836495Sspeer 			goto fail;
33846495Sspeer 	}
33853859Sml29623 
33863859Sml29623 	/*
33873859Sml29623 	 * Initialize the event masks.
33883859Sml29623 	 */
33893859Sml29623 	tx_ring_p->tx_evmask.value = 0;
33903859Sml29623 	status = nxge_init_txdma_channel_event_mask(nxgep, channel,
33916929Smisaki 	    &tx_ring_p->tx_evmask);
33923859Sml29623 	if (status != NXGE_OK)
33933859Sml29623 		goto fail;
33943859Sml29623 
33953859Sml29623 	tx_ring_p->wr_index_wrap = B_FALSE;
33963859Sml29623 	tx_ring_p->wr_index = 0;
33973859Sml29623 	tx_ring_p->rd_index = 0;
33983859Sml29623 
33993859Sml29623 	/*
34003859Sml29623 	 * Load the TXDMA descriptors, buffers and mailbox,
34013859Sml29623 	 * then initialize and enable this DMA channel.
34033859Sml29623 	 */
34043859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
34053859Sml29623 	status = nxge_enable_txdma_channel(nxgep, channel,
34066929Smisaki 	    tx_ring_p, tx_mbox_p);
34073859Sml29623 	MUTEX_EXIT(&tx_ring_p->lock);
34083859Sml29623 	if (status != NXGE_OK)
34093859Sml29623 		goto fail;
34103859Sml29623 
34117906SMichael.Speer@Sun.COM 	nxge_txdma_freemsg_task(tx_ring_p);
34127906SMichael.Speer@Sun.COM 
34133859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
34146929Smisaki 	    "Recovery Successful, TxDMAChannel#%d Restored",
34156929Smisaki 	    channel));
34163859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
34173859Sml29623 
34183859Sml29623 	return (NXGE_OK);
34193859Sml29623 
34203859Sml29623 fail:
34213859Sml29623 	MUTEX_EXIT(&tx_ring_p->lock);
34227906SMichael.Speer@Sun.COM 
34237906SMichael.Speer@Sun.COM 	nxge_txdma_freemsg_task(tx_ring_p);
34247906SMichael.Speer@Sun.COM 
34253859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
34266929Smisaki 	    "nxge_txdma_fatal_err_recover (channel %d): "
34276929Smisaki 	    "failed to recover this txdma channel", channel));
34283859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
34293859Sml29623 
34303859Sml29623 	return (status);
34313859Sml29623 }
34323859Sml29623 
34336495Sspeer /*
34346495Sspeer  * nxge_tx_port_fatal_err_recover
34356495Sspeer  *
34366495Sspeer  *	Attempt to recover from a fatal port error.
34376495Sspeer  *
34386495Sspeer  * Arguments:
34396495Sspeer  * 	nxgep
34406495Sspeer  *
34416495Sspeer  * Notes:
34426495Sspeer  *	How would a guest do this?
34436495Sspeer  *
34446495Sspeer  * NPI/NXGE function calls:
34456495Sspeer  *
34466495Sspeer  * Registers accessed:
34476495Sspeer  *
34486495Sspeer  * Context:
34496495Sspeer  *	Service domain
34506495Sspeer  */
34513859Sml29623 nxge_status_t
34523859Sml29623 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
34533859Sml29623 {
34546495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
34556495Sspeer 	nxge_channel_t tdc;
34566495Sspeer 
34576495Sspeer 	tx_ring_t	*ring;
34586495Sspeer 	tx_mbox_t	*mailbox;
34596495Sspeer 
34603859Sml29623 	npi_handle_t	handle;
34616495Sspeer 	nxge_status_t	status = NXGE_ERROR;
34626495Sspeer 	npi_status_t	rs;
34633859Sml29623 
34643859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
34653859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
34666495Sspeer 	    "Recovering from TxPort error..."));
34676495Sspeer 
34686495Sspeer 	if (isLDOMguest(nxgep)) {
34696495Sspeer 		return (NXGE_OK);
34706495Sspeer 	}
34716495Sspeer 
34726495Sspeer 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
34736495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
34746495Sspeer 		    "<== nxge_tx_port_fatal_err_recover: not initialized"));
34756495Sspeer 		return (NXGE_ERROR);
34766495Sspeer 	}
34776495Sspeer 
34786495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
34796495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
34806495Sspeer 		    "<== nxge_tx_port_fatal_err_recover: "
34816495Sspeer 		    "NULL ring pointer(s)"));
34826495Sspeer 		return (NXGE_ERROR);
34836495Sspeer 	}
34846495Sspeer 
34856495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
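	/*
	 * Port-level recovery, as implemented by the loops below: take
	 * every owned ring's lock, stop all owned TDCs, reclaim and reset
	 * each one (clearing its kick register), re-initialize and
	 * re-enable them, and finally drop the locks.
	 */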
34866495Sspeer 		if ((1 << tdc) & set->owned.map) {
34876495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
34886495Sspeer 			if (ring)
34896495Sspeer 				MUTEX_ENTER(&ring->lock);
34906495Sspeer 		}
34916495Sspeer 	}
34923859Sml29623 
34933859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
34946495Sspeer 
34956495Sspeer 	/*
34966495Sspeer 	 * Stop all the TDCs owned by us.
34976495Sspeer 	 * (The shared TDCs will have been stopped by their owners.)
34986495Sspeer 	 */
34996495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
35006495Sspeer 		if ((1 << tdc) & set->owned.map) {
35016495Sspeer 			ring = nxgep->tx_rings->rings[tdc];
35026495Sspeer 			if (ring) {
35036495Sspeer 				rs = npi_txdma_channel_control
35046495Sspeer 				    (handle, TXDMA_STOP, tdc);
35056495Sspeer 				if (rs != NPI_SUCCESS) {
35066495Sspeer 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
35076495Sspeer 					    "nxge_tx_port_fatal_err_recover "
35086495Sspeer 					    "(channel %d): stop failed ", tdc));
35096495Sspeer 					goto fail;
35106495Sspeer 				}
35116495Sspeer 			}
35123859Sml29623 		}
35133859Sml29623 	}
35143859Sml29623 
35156495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
35166495Sspeer 
35176495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
35186495Sspeer 		if ((1 << tdc) & set->owned.map) {
35196495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
35207906SMichael.Speer@Sun.COM 			if (ring) {
35216495Sspeer 				(void) nxge_txdma_reclaim(nxgep, ring, 0);
35227906SMichael.Speer@Sun.COM 				nxge_txdma_freemsg_task(ring);
35237906SMichael.Speer@Sun.COM 			}
35243859Sml29623 		}
35253859Sml29623 	}
35263859Sml29623 
35273859Sml29623 	/*
35286495Sspeer 	 * Reset all the TDCs.
35293859Sml29623 	 */
35306495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
35316495Sspeer 
35326495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
35336495Sspeer 		if ((1 << tdc) & set->owned.map) {
35346495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
35356495Sspeer 			if (ring) {
35366495Sspeer 				if ((rs = npi_txdma_channel_control
35376929Smisaki 				    (handle, TXDMA_RESET, tdc))
35386495Sspeer 				    != NPI_SUCCESS) {
35396495Sspeer 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
35406495Sspeer 					    "nxge_tx_port_fatal_err_recover "
35416495Sspeer 					    "(channel %d) reset channel "
35426495Sspeer 					    "failed 0x%x", tdc, rs));
35436495Sspeer 					goto fail;
35446495Sspeer 				}
35456495Sspeer 			}
35466495Sspeer 			/*
35476495Sspeer 			 * Reset the tail (kick) register to 0.
35486495Sspeer 			 * (The hardware will not reset it, and a Tx overflow
35496495Sspeer 			 * fatal error results if the tail is not 0 after reset.)
35506495Sspeer 			 */
35516495Sspeer 			TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
35523859Sml29623 		}
35536495Sspeer 	}
35546495Sspeer 
35556495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
35566495Sspeer 
35576495Sspeer 	/* Restart all the TDCs */
35586495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
35596495Sspeer 		if ((1 << tdc) & set->owned.map) {
35606495Sspeer 			ring = nxgep->tx_rings->rings[tdc];
35616495Sspeer 			if (ring) {
35626495Sspeer 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
35636495Sspeer 				status = nxge_init_fzc_txdma_channel(nxgep, tdc,
35646495Sspeer 				    ring, mailbox);
35656495Sspeer 				if (status != NXGE_OK)
35666495Sspeer 					goto fail;
35676495Sspeer 
35686495Sspeer 				/*
35696495Sspeer 				 * Initialize the event masks.
35706495Sspeer 				 */
35716495Sspeer 				ring->tx_evmask.value = 0;
35726495Sspeer 				status = nxge_init_txdma_channel_event_mask
35736495Sspeer 				    (nxgep, tdc, &ring->tx_evmask);
35746495Sspeer 				if (status != NXGE_OK)
35756495Sspeer 					goto fail;
35766495Sspeer 
35776495Sspeer 				ring->wr_index_wrap = B_FALSE;
35786495Sspeer 				ring->wr_index = 0;
35796495Sspeer 				ring->rd_index = 0;
35806495Sspeer 			}
35813859Sml29623 		}
35826495Sspeer 	}
35836495Sspeer 
35846495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
35856495Sspeer 
35866495Sspeer 	/* Re-enable all the TDCs */
35876495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
35886495Sspeer 		if ((1 << tdc) & set->owned.map) {
35896495Sspeer 			ring = nxgep->tx_rings->rings[tdc];
35906495Sspeer 			if (ring) {
35916495Sspeer 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
35926495Sspeer 				status = nxge_enable_txdma_channel(nxgep, tdc,
35936495Sspeer 				    ring, mailbox);
35946495Sspeer 				if (status != NXGE_OK)
35956495Sspeer 					goto fail;
35966495Sspeer 			}
35976495Sspeer 		}
35983859Sml29623 	}
35993859Sml29623 
36003859Sml29623 	/*
36016495Sspeer 	 * Unlock all the TDCs.
36023859Sml29623 	 */
36036495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
36046495Sspeer 		if ((1 << tdc) & set->owned.map) {
36056495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
36066495Sspeer 			if (ring)
36076495Sspeer 				MUTEX_EXIT(&ring->lock);
36083859Sml29623 		}
36093859Sml29623 	}
36103859Sml29623 
36116495Sspeer 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
36123859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
36133859Sml29623 
36143859Sml29623 	return (NXGE_OK);
36153859Sml29623 
36163859Sml29623 fail:
36176495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
36186495Sspeer 		if ((1 << tdc) & set->owned.map) {
36196495Sspeer 			ring = nxgep->tx_rings->rings[tdc];
36206495Sspeer 			if (ring)
36216495Sspeer 				MUTEX_EXIT(&ring->lock);
36223859Sml29623 		}
36233859Sml29623 	}
36243859Sml29623 
36256495Sspeer 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
36266495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
36273859Sml29623 
36283859Sml29623 	return (status);
36293859Sml29623 }
36303859Sml29623 
36316495Sspeer /*
36326495Sspeer  * nxge_txdma_inject_err
36336495Sspeer  *
36346495Sspeer  *	Inject an error into a TDC.
36356495Sspeer  *
36366495Sspeer  * Arguments:
36376495Sspeer  * 	nxgep
36386495Sspeer  * 	err_id	The error to inject.
36396495Sspeer  * 	chan	The channel to inject into.
36406495Sspeer  *
36416495Sspeer  * Notes:
36426495Sspeer  *	This is called from nxge_main.c:nxge_err_inject()
36436495Sspeer  *	Has this ioctl ever been used?
36446495Sspeer  *
36456495Sspeer  * NPI/NXGE function calls:
36466495Sspeer  *	npi_txdma_inj_par_error_get()
36476495Sspeer  *	npi_txdma_inj_par_error_set()
36486495Sspeer  *
36496495Sspeer  * Registers accessed:
36506495Sspeer  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
36516495Sspeer  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
36526495Sspeer  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
36536495Sspeer  *
36546495Sspeer  * Context:
36556495Sspeer  *	Service domain
36566495Sspeer  */
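/*
 * Hypothetical usage sketch (not part of the driver): from the
 * nxge_err_inject() ioctl path, a fatal mailbox error could be injected
 * into TDC 0 with
 *
 *	nxge_txdma_inject_err(nxgep, NXGE_FM_EREPORT_TDMC_MBOX_ERR, 0);
 *
 * after which nxge_tx_err_evnts() should see the error and call
 * nxge_txdma_fatal_err_recover() for that channel.
 */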
36573859Sml29623 void
36583859Sml29623 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
36593859Sml29623 {
36603859Sml29623 	tdmc_intr_dbg_t		tdi;
36613859Sml29623 	tdmc_inj_par_err_t	par_err;
36623859Sml29623 	uint32_t		value;
36633859Sml29623 	npi_handle_t		handle;
36643859Sml29623 
36653859Sml29623 	switch (err_id) {
36663859Sml29623 
36673859Sml29623 	case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
36683859Sml29623 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
36693859Sml29623 		/* Clear error injection source for parity error */
36703859Sml29623 		(void) npi_txdma_inj_par_error_get(handle, &value);
36713859Sml29623 		par_err.value = value;
36723859Sml29623 		par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
36733859Sml29623 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
36743859Sml29623 
36753859Sml29623 		par_err.bits.ldw.inject_parity_error = (1 << chan);
36763859Sml29623 		(void) npi_txdma_inj_par_error_get(handle, &value);
36773859Sml29623 		par_err.value = value;
36783859Sml29623 		par_err.bits.ldw.inject_parity_error |= (1 << chan);
36793859Sml29623 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
36806929Smisaki 		    (unsigned long long)par_err.value);
36813859Sml29623 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
36823859Sml29623 		break;
36833859Sml29623 
36843859Sml29623 	case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
36853859Sml29623 	case NXGE_FM_EREPORT_TDMC_NACK_PREF:
36863859Sml29623 	case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
36873859Sml29623 	case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
36883859Sml29623 	case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
36893859Sml29623 	case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
36903859Sml29623 	case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
36913859Sml29623 		TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
36926929Smisaki 		    chan, &tdi.value);
36933859Sml29623 		if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
36943859Sml29623 			tdi.bits.ldw.pref_buf_par_err = 1;
36953859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
36963859Sml29623 			tdi.bits.ldw.mbox_err = 1;
36973859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
36983859Sml29623 			tdi.bits.ldw.nack_pref = 1;
36993859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
37003859Sml29623 			tdi.bits.ldw.nack_pkt_rd = 1;
37013859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
37023859Sml29623 			tdi.bits.ldw.pkt_size_err = 1;
37033859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
37043859Sml29623 			tdi.bits.ldw.tx_ring_oflow = 1;
37053859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
37063859Sml29623 			tdi.bits.ldw.conf_part_err = 1;
37073859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
37083859Sml29623 			tdi.bits.ldw.pkt_part_err = 1;
37095125Sjoycey #if defined(__i386)
37105125Sjoycey 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
37116929Smisaki 		    tdi.value);
37125125Sjoycey #else
37133859Sml29623 		cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
37146929Smisaki 		    tdi.value);
37155125Sjoycey #endif
37163859Sml29623 		TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
37176929Smisaki 		    chan, tdi.value);
37183859Sml29623 
37193859Sml29623 		break;
37203859Sml29623 	}
37213859Sml29623 }
3722