xref: /onnv-gate/usr/src/uts/common/io/nxge/nxge_txdma.c (revision 6886:da8ca932add7)
13859Sml29623 /*
23859Sml29623  * CDDL HEADER START
33859Sml29623  *
43859Sml29623  * The contents of this file are subject to the terms of the
53859Sml29623  * Common Development and Distribution License (the "License").
63859Sml29623  * You may not use this file except in compliance with the License.
73859Sml29623  *
83859Sml29623  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
93859Sml29623  * or http://www.opensolaris.org/os/licensing.
103859Sml29623  * See the License for the specific language governing permissions
113859Sml29623  * and limitations under the License.
123859Sml29623  *
133859Sml29623  * When distributing Covered Code, include this CDDL HEADER in each
143859Sml29623  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
153859Sml29623  * If applicable, add the following below this CDDL HEADER, with the
163859Sml29623  * fields enclosed by brackets "[]" replaced with your own identifying
173859Sml29623  * information: Portions Copyright [yyyy] [name of copyright owner]
183859Sml29623  *
193859Sml29623  * CDDL HEADER END
203859Sml29623  */
213859Sml29623 /*
226495Sspeer  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
233859Sml29623  * Use is subject to license terms.
243859Sml29623  */
253859Sml29623 
263859Sml29623 #pragma ident	"%Z%%M%	%I%	%E% SMI"
273859Sml29623 
283859Sml29623 #include <sys/nxge/nxge_impl.h>
293859Sml29623 #include <sys/nxge/nxge_txdma.h>
306495Sspeer #include <sys/nxge/nxge_hio.h>
316495Sspeer #include <npi_tx_rd64.h>
326495Sspeer #include <npi_tx_wr64.h>
333859Sml29623 #include <sys/llc1.h>
343859Sml29623 
353859Sml29623 uint32_t 	nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
363859Sml29623 uint32_t	nxge_tx_minfree = 32;
373859Sml29623 uint32_t	nxge_tx_intr_thres = 0;
383859Sml29623 uint32_t	nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
393859Sml29623 uint32_t	nxge_tx_tiny_pack = 1;
403859Sml29623 uint32_t	nxge_tx_use_bcopy = 1;
413859Sml29623 
423859Sml29623 extern uint32_t 	nxge_tx_ring_size;
433859Sml29623 extern uint32_t 	nxge_bcopy_thresh;
443859Sml29623 extern uint32_t 	nxge_dvma_thresh;
453859Sml29623 extern uint32_t 	nxge_dma_stream_thresh;
463859Sml29623 extern dma_method_t 	nxge_force_dma;
476611Sml29623 extern uint32_t		nxge_cksum_offload;
483859Sml29623 
493859Sml29623 /* Device register access attributes for PIO.  */
503859Sml29623 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
513859Sml29623 /* Device descriptor access attributes for DMA.  */
523859Sml29623 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
533859Sml29623 /* Device buffer access attributes for DMA.  */
543859Sml29623 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
553859Sml29623 extern ddi_dma_attr_t nxge_desc_dma_attr;
563859Sml29623 extern ddi_dma_attr_t nxge_tx_dma_attr;
573859Sml29623 
583952Sml29623 extern int nxge_serial_tx(mblk_t *mp, void *arg);
593952Sml29623 
606495Sspeer static nxge_status_t nxge_map_txdma(p_nxge_t, int);
616495Sspeer 
626495Sspeer static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);
633859Sml29623 
643859Sml29623 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
653859Sml29623 	p_nxge_dma_common_t *, p_tx_ring_t *,
663859Sml29623 	uint32_t, p_nxge_dma_common_t *,
673859Sml29623 	p_tx_mbox_t *);
686495Sspeer static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);
693859Sml29623 
703859Sml29623 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
713859Sml29623 	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
723859Sml29623 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
733859Sml29623 
743859Sml29623 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
753859Sml29623 	p_nxge_dma_common_t *, p_tx_ring_t,
763859Sml29623 	p_tx_mbox_t *);
773859Sml29623 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
783859Sml29623 	p_tx_ring_t, p_tx_mbox_t);
793859Sml29623 
803859Sml29623 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
813859Sml29623     p_tx_ring_t, p_tx_mbox_t);
826495Sspeer static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);
833859Sml29623 
843859Sml29623 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
853859Sml29623 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
863859Sml29623 	p_nxge_ldv_t, tx_cs_t);
873859Sml29623 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
883859Sml29623 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
893859Sml29623 	uint16_t, p_tx_ring_t);
903859Sml29623 
916495Sspeer static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
926495Sspeer     p_tx_ring_t ring_p, uint16_t channel);
936495Sspeer 
943859Sml29623 nxge_status_t
953859Sml29623 nxge_init_txdma_channels(p_nxge_t nxgep)
963859Sml29623 {
976495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
986495Sspeer 	int i, count;
996495Sspeer 
1006495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));
1016495Sspeer 
1026495Sspeer 	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
1036495Sspeer 		if ((1 << i) & set->lg.map) {
1046495Sspeer 			int tdc;
1056495Sspeer 			nxge_grp_t *group = set->group[i];
1066495Sspeer 			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1076495Sspeer 				if ((1 << tdc) & group->map) {
1086495Sspeer 					if ((nxge_grp_dc_add(nxgep,
1096495Sspeer 						(vr_handle_t)group,
1106495Sspeer 						VP_BOUND_TX, tdc)))
1116495Sspeer 						return (NXGE_ERROR);
1126495Sspeer 				}
1136495Sspeer 			}
1146495Sspeer 		}
1156495Sspeer 		if (++count == set->lg.count)
1166495Sspeer 			break;
1176495Sspeer 	}
1186495Sspeer 
1196495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
1206495Sspeer 
1216495Sspeer 	return (NXGE_OK);
1226495Sspeer }
1236495Sspeer 
1246495Sspeer nxge_status_t
1256495Sspeer nxge_init_txdma_channel(
1266495Sspeer 	p_nxge_t nxge,
1276495Sspeer 	int channel)
1286495Sspeer {
1296495Sspeer 	nxge_status_t status;
1306495Sspeer 
1316495Sspeer 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));
1326495Sspeer 
1336495Sspeer 	status = nxge_map_txdma(nxge, channel);
1343859Sml29623 	if (status != NXGE_OK) {
1356495Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1366495Sspeer 		    "<== nxge_init_txdma_channel: status 0x%x", status));
1376495Sspeer 		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
1383859Sml29623 		return (status);
1393859Sml29623 	}
1403859Sml29623 
1416495Sspeer 	status = nxge_txdma_hw_start(nxge, channel);
1423859Sml29623 	if (status != NXGE_OK) {
1436495Sspeer 		(void) nxge_unmap_txdma_channel(nxge, channel);
1446495Sspeer 		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
1453859Sml29623 		return (status);
1463859Sml29623 	}
1473859Sml29623 
1486495Sspeer 	if (!nxge->statsp->tdc_ksp[channel])
1496495Sspeer 		nxge_setup_tdc_kstats(nxge, channel);
1506495Sspeer 
1516495Sspeer 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));
1526495Sspeer 
1536495Sspeer 	return (status);
1543859Sml29623 }
1553859Sml29623 
1563859Sml29623 void
1573859Sml29623 nxge_uninit_txdma_channels(p_nxge_t nxgep)
1583859Sml29623 {
1596495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
1606495Sspeer 	int tdc;
1616495Sspeer 
1626495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));
1636495Sspeer 
1646495Sspeer 	if (set->owned.map == 0) {
1656495Sspeer 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1666495Sspeer 		    "nxge_uninit_txdma_channels: no channels"));
1676495Sspeer 		return;
1686495Sspeer 	}
1696495Sspeer 
1706495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1716495Sspeer 		if ((1 << tdc) & set->owned.map) {
1726495Sspeer 			nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
1736495Sspeer 		}
1746495Sspeer 	}
1756495Sspeer 
1766495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
1776495Sspeer }
1786495Sspeer 
1796495Sspeer void
1806495Sspeer nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
1816495Sspeer {
1826495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));
1836495Sspeer 
1846495Sspeer 	if (nxgep->statsp->tdc_ksp[channel]) {
1856495Sspeer 		kstat_delete(nxgep->statsp->tdc_ksp[channel]);
1866495Sspeer 		nxgep->statsp->tdc_ksp[channel] = 0;
1876495Sspeer 	}
1886495Sspeer 
1896495Sspeer 	(void) nxge_txdma_stop_channel(nxgep, channel);
1906495Sspeer 	nxge_unmap_txdma_channel(nxgep, channel);
1913859Sml29623 
1923859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1936495Sspeer 		"<== nxge_uninit_txdma_channel"));
1943859Sml29623 }
1953859Sml29623 
/*
 * nxge_setup_dma_common
 *
 *	Carve <entries> blocks of <size> bytes each off the front of the
 *	DMA area described by <src_p>, describing the carved region in
 *	<dest_p>.  <src_p> is advanced past the region, so successive
 *	calls consume the source area front to back.
 *
 *	Note the statement order matters: *dest_p is first copied from
 *	*src_p, and src_p's new kaddrp is then derived from dest_p's.
 */
void
nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
	uint32_t entries, uint32_t size)
{
	size_t		tsize;
	/* Start from a copy of the source descriptor, then specialize it. */
	*dest_p = *src_p;
	tsize = size * entries;
	dest_p->alength = tsize;
	dest_p->nblocks = entries;
	dest_p->block_size = size;
	dest_p->offset += tsize;

	/*
	 * Advance the source past the carved region: bump the kernel
	 * virtual address and the DMA (cookie) address by tsize, and
	 * shrink the remaining lengths to match.
	 */
	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
	src_p->alength -= tsize;
	src_p->dma_cookie.dmac_laddress += tsize;
	src_p->dma_cookie.dmac_size -= tsize;
}
2133859Sml29623 
2146495Sspeer /*
2156495Sspeer  * nxge_reset_txdma_channel
2166495Sspeer  *
2176495Sspeer  *	Reset a TDC.
2186495Sspeer  *
2196495Sspeer  * Arguments:
2206495Sspeer  * 	nxgep
2216495Sspeer  * 	channel		The channel to reset.
2226495Sspeer  * 	reg_data	The current TX_CS.
2236495Sspeer  *
2246495Sspeer  * Notes:
2256495Sspeer  *
2266495Sspeer  * NPI/NXGE function calls:
2276495Sspeer  *	npi_txdma_channel_reset()
2286495Sspeer  *	npi_txdma_channel_control()
2296495Sspeer  *
2306495Sspeer  * Registers accessed:
2316495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
2326495Sspeer  *	TX_RING_KICK	DMC+0x40018 Transmit Ring Kick
2336495Sspeer  *
2346495Sspeer  * Context:
2356495Sspeer  *	Any domain
2366495Sspeer  */
2373859Sml29623 nxge_status_t
2383859Sml29623 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
2393859Sml29623 {
2403859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
2413859Sml29623 	nxge_status_t		status = NXGE_OK;
2423859Sml29623 	npi_handle_t		handle;
2433859Sml29623 
2443859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
2453859Sml29623 
2463859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2473859Sml29623 	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
2483859Sml29623 		rs = npi_txdma_channel_reset(handle, channel);
2493859Sml29623 	} else {
2503859Sml29623 		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
2513859Sml29623 				channel);
2523859Sml29623 	}
2533859Sml29623 
2543859Sml29623 	if (rs != NPI_SUCCESS) {
2553859Sml29623 		status = NXGE_ERROR | rs;
2563859Sml29623 	}
2573859Sml29623 
2583859Sml29623 	/*
2593859Sml29623 	 * Reset the tail (kick) register to 0.
2603859Sml29623 	 * (Hardware will not reset it. Tx overflow fatal
2613859Sml29623 	 * error if tail is not set to 0 after reset!
2623859Sml29623 	 */
2633859Sml29623 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
2643859Sml29623 
2653859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
2663859Sml29623 	return (status);
2673859Sml29623 }
2683859Sml29623 
2696495Sspeer /*
2706495Sspeer  * nxge_init_txdma_channel_event_mask
2716495Sspeer  *
2726495Sspeer  *	Enable interrupts for a set of events.
2736495Sspeer  *
2746495Sspeer  * Arguments:
2756495Sspeer  * 	nxgep
2766495Sspeer  * 	channel	The channel to map.
2776495Sspeer  * 	mask_p	The events to enable.
2786495Sspeer  *
2796495Sspeer  * Notes:
2806495Sspeer  *
2816495Sspeer  * NPI/NXGE function calls:
2826495Sspeer  *	npi_txdma_event_mask()
2836495Sspeer  *
2846495Sspeer  * Registers accessed:
2856495Sspeer  *	TX_ENT_MSK	DMC+0x40020 Transmit Event Mask
2866495Sspeer  *
2876495Sspeer  * Context:
2886495Sspeer  *	Any domain
2896495Sspeer  */
2903859Sml29623 nxge_status_t
2913859Sml29623 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
2923859Sml29623 		p_tx_dma_ent_msk_t mask_p)
2933859Sml29623 {
2943859Sml29623 	npi_handle_t		handle;
2953859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
2963859Sml29623 	nxge_status_t		status = NXGE_OK;
2973859Sml29623 
2983859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2993859Sml29623 		"<== nxge_init_txdma_channel_event_mask"));
3003859Sml29623 
3013859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3023859Sml29623 	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
3033859Sml29623 	if (rs != NPI_SUCCESS) {
3043859Sml29623 		status = NXGE_ERROR | rs;
3053859Sml29623 	}
3063859Sml29623 
3073859Sml29623 	return (status);
3083859Sml29623 }
3093859Sml29623 
/*
 * nxge_init_txdma_channel_cntl_stat
 *
 *	Write a value to a TDC's control and status (TX_CS) register.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel whose TX_CS register is written.
 * 	reg_data	The value to write.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
3303859Sml29623 nxge_status_t
3313859Sml29623 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
3323859Sml29623 	uint64_t reg_data)
3333859Sml29623 {
3343859Sml29623 	npi_handle_t		handle;
3353859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
3363859Sml29623 	nxge_status_t		status = NXGE_OK;
3373859Sml29623 
3383859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
3393859Sml29623 		"<== nxge_init_txdma_channel_cntl_stat"));
3403859Sml29623 
3413859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3423859Sml29623 	rs = npi_txdma_control_status(handle, OP_SET, channel,
3433859Sml29623 			(p_tx_cs_t)&reg_data);
3443859Sml29623 
3453859Sml29623 	if (rs != NPI_SUCCESS) {
3463859Sml29623 		status = NXGE_ERROR | rs;
3473859Sml29623 	}
3483859Sml29623 
3493859Sml29623 	return (status);
3503859Sml29623 }
3513859Sml29623 
3526495Sspeer /*
3536495Sspeer  * nxge_enable_txdma_channel
3546495Sspeer  *
3556495Sspeer  *	Enable a TDC.
3566495Sspeer  *
3576495Sspeer  * Arguments:
3586495Sspeer  * 	nxgep
3596495Sspeer  * 	channel		The channel to enable.
3606495Sspeer  * 	tx_desc_p	channel's transmit descriptor ring.
3616495Sspeer  * 	mbox_p		channel's mailbox,
3626495Sspeer  *
3636495Sspeer  * Notes:
3646495Sspeer  *
3656495Sspeer  * NPI/NXGE function calls:
3666495Sspeer  *	npi_txdma_ring_config()
3676495Sspeer  *	npi_txdma_mbox_config()
3686495Sspeer  *	npi_txdma_channel_init_enable()
3696495Sspeer  *
3706495Sspeer  * Registers accessed:
3716495Sspeer  *	TX_RNG_CFIG	DMC+0x40000 Transmit Ring Configuration
3726495Sspeer  *	TXDMA_MBH	DMC+0x40030 TXDMA Mailbox High
3736495Sspeer  *	TXDMA_MBL	DMC+0x40038 TXDMA Mailbox Low
3746495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
3756495Sspeer  *
3766495Sspeer  * Context:
3776495Sspeer  *	Any domain
3786495Sspeer  */
3793859Sml29623 nxge_status_t
3803859Sml29623 nxge_enable_txdma_channel(p_nxge_t nxgep,
3813859Sml29623 	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
3823859Sml29623 {
3833859Sml29623 	npi_handle_t		handle;
3843859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
3853859Sml29623 	nxge_status_t		status = NXGE_OK;
3863859Sml29623 
3873859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
3883859Sml29623 
3893859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3903859Sml29623 	/*
3913859Sml29623 	 * Use configuration data composed at init time.
3923859Sml29623 	 * Write to hardware the transmit ring configurations.
3933859Sml29623 	 */
3943859Sml29623 	rs = npi_txdma_ring_config(handle, OP_SET, channel,
3956495Sspeer 	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
3963859Sml29623 
3973859Sml29623 	if (rs != NPI_SUCCESS) {
3983859Sml29623 		return (NXGE_ERROR | rs);
3993859Sml29623 	}
4003859Sml29623 
4016495Sspeer 	if (isLDOMguest(nxgep)) {
4026495Sspeer 		/* Add interrupt handler for this channel. */
4036495Sspeer 		if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
4046495Sspeer 			return (NXGE_ERROR);
4056495Sspeer 	}
4066495Sspeer 
4073859Sml29623 	/* Write to hardware the mailbox */
4083859Sml29623 	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
4093859Sml29623 		(uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
4103859Sml29623 
4113859Sml29623 	if (rs != NPI_SUCCESS) {
4123859Sml29623 		return (NXGE_ERROR | rs);
4133859Sml29623 	}
4143859Sml29623 
4153859Sml29623 	/* Start the DMA engine. */
4163859Sml29623 	rs = npi_txdma_channel_init_enable(handle, channel);
4173859Sml29623 
4183859Sml29623 	if (rs != NPI_SUCCESS) {
4193859Sml29623 		return (NXGE_ERROR | rs);
4203859Sml29623 	}
4213859Sml29623 
4223859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
4233859Sml29623 
4243859Sml29623 	return (status);
4253859Sml29623 }
4263859Sml29623 
/*
 * nxge_fill_tx_hdr
 *
 *	Compose the Neptune transmit packet header (hdrp->value) for the
 *	packet in <mp>.
 *
 * Arguments:
 *	mp		The packet; does NOT include the transmit header.
 *	fill_len	If true, only record the total transfer length
 *			and return.
 *	l4_cksum	If true, arrange L4 (TCP/UDP) checksum handling.
 *	pkt_len		Total transfer length (used when fill_len is set).
 *	npads		Pad byte count, recorded in the header.
 *	pkthdrp		Header being composed; caller must zero it first.
 *	start_offset	L4 checksum start offset from the stack; halved
 *			before insertion into the header.
 *	stuff_offset	L4 checksum stuff offset; halved likewise.
 */
void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
		boolean_t l4_cksum, int pkt_len, uint8_t npads,
		p_tx_pkt_hdr_all_t pkthdrp,
		t_uscalar_t start_offset,
		t_uscalar_t stuff_offset)
{
	p_tx_pkt_header_t	hdrp;
	p_mblk_t 		nmp;
	uint64_t		tmp;
	size_t 			mblk_len;
	size_t 			iph_len;
	size_t 			hdrs_size;
	/* Scratch copy of the L2 (+ up to 64 bytes of L3) headers. */
	uint8_t			hdrs_buf[sizeof (struct ether_header) +
					64 + sizeof (uint32_t)];
	uint8_t			*cursor;
	uint8_t 		*ip_buf;
	uint16_t		eth_type;
	uint8_t			ipproto;
	boolean_t		is_vlan = B_FALSE;
	size_t			eth_hdr_size;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * Caller should zero out the headers first.
	 */
	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

	/* Length-only mode: record the total transfer length and return. */
	if (fill_len) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
			"==> nxge_fill_tx_hdr: pkt_len %d "
			"npads %d", pkt_len, npads));
		tmp = (uint64_t)pkt_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
		goto fill_tx_header_done;
	}

	hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);

	/*
	 * mp is the original data packet (does not include the
	 * Neptune transmit header).
	 */
	nmp = mp;
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
		"mp $%p b_rptr $%p len %d",
		mp, nmp->b_rptr, MBLKL(nmp)));
	/* copy ether_header from mblk to hdrs_buf */
	cursor = &hdrs_buf[0];
	tmp = sizeof (struct ether_vlan_header);
	/* Gather up to an ether_vlan_header's worth of bytes across mblks. */
	while ((nmp != NULL) && (tmp > 0)) {
		size_t buflen;
		mblk_len = MBLKL(nmp);
		buflen = min((size_t)tmp, mblk_len);
		bcopy(nmp->b_rptr, cursor, buflen);
		cursor += buflen;
		tmp -= buflen;
		nmp = nmp->b_cont;
	}

	/* Restart at the head of the chain for L3 parsing below. */
	nmp = mp;
	mblk_len = MBLKL(nmp);
	ip_buf = NULL;
	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
		"ether type 0x%x", eth_type, hdrp->value));

	/*
	 * Values below ETHERMTU in the type field mean 802.3 LLC framing;
	 * only LLC/SNAP frames carry a real ethertype (6 bytes in).
	 */
	if (eth_type < ETHERMTU) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
			"value 0x%llx", hdrp->value));
		if (*(hdrs_buf + sizeof (struct ether_header))
				== LLC_SNAP_SAP) {
			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
					sizeof (struct ether_header) + 6)));
			NXGE_DEBUG_MSG((NULL, TX_CTL,
				"==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
				eth_type));
		} else {
			/* Non-SNAP LLC: nothing more to parse. */
			goto fill_tx_header_done;
		}
	} else if (eth_type == VLAN_ETHERTYPE) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

		/* The real ethertype follows the VLAN tag. */
		eth_type = ntohs(((struct ether_vlan_header *)
			hdrs_buf)->ether_type);
		is_vlan = B_TRUE;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
			"value 0x%llx", hdrp->value));
	}

	if (!is_vlan) {
		eth_hdr_size = sizeof (struct ether_header);
	} else {
		eth_hdr_size = sizeof (struct ether_vlan_header);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		/*
		 * Fast path: if the first mblk holds the whole IP header,
		 * parse it in place; otherwise fall back to the gathered
		 * copy in hdrs_buf below.
		 */
		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
			ip_buf = nmp->b_rptr + eth_hdr_size;
			mblk_len -= eth_hdr_size;
			/* Low nibble of the first IP byte is the IHL. */
			iph_len = ((*ip_buf) & 0x0f);
			if (mblk_len > (iph_len + sizeof (uint32_t))) {
				ip_buf = nmp->b_rptr;
				ip_buf += eth_hdr_size;
			} else {
				ip_buf = NULL;
			}

		}
		if (ip_buf == NULL) {
			/* Gather headers from the chain into hdrs_buf. */
			hdrs_size = 0;
			((p_ether_header_t)hdrs_buf)->ether_type = 0;
			while ((nmp) && (hdrs_size <
					sizeof (hdrs_buf))) {
				mblk_len = (size_t)nmp->b_wptr -
					(size_t)nmp->b_rptr;
				if (mblk_len >=
					(sizeof (hdrs_buf) - hdrs_size))
					mblk_len = sizeof (hdrs_buf) -
						hdrs_size;
				bcopy(nmp->b_rptr,
					&hdrs_buf[hdrs_size], mblk_len);
				hdrs_size += mblk_len;
				nmp = nmp->b_cont;
			}
			ip_buf = hdrs_buf;
			ip_buf += eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
		}

		/* Byte 9 of the IPv4 header is the protocol field. */
		ipproto = ip_buf[9];

		tmp = (uint64_t)iph_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
		/* L3START is expressed in 2-byte units. */
		tmp = (uint64_t)(eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
			" iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
			"tmp 0x%x",
			iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
			ipproto, tmp));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
			"value 0x%llx", hdrp->value));

		break;

	case ETHERTYPE_IPV6:
		/* IPv6 always gathers the headers into hdrs_buf. */
		hdrs_size = 0;
		((p_ether_header_t)hdrs_buf)->ether_type = 0;
		while ((nmp) && (hdrs_size <
				sizeof (hdrs_buf))) {
			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
			if (mblk_len >=
				(sizeof (hdrs_buf) - hdrs_size))
				mblk_len = sizeof (hdrs_buf) -
					hdrs_size;
			bcopy(nmp->b_rptr,
				&hdrs_buf[hdrs_size], mblk_len);
			hdrs_size += mblk_len;
			nmp = nmp->b_cont;
		}
		ip_buf = hdrs_buf;
		ip_buf += eth_hdr_size;

		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

		tmp = (eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		/* byte 6 is the next header protocol */
		ipproto = ip_buf[6];

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
			" iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
			iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
			ipproto));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
			"value 0x%llx", hdrp->value));

		break;

	default:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
		goto fill_tx_header_done;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
		if (l4_cksum) {
			/* Let hardware compute the TCP checksum. */
			hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
			hdrp->value |=
			    (((uint64_t)(start_offset >> 1)) <<
			    TX_PKT_HEADER_L4START_SHIFT);
			hdrp->value |=
			    (((uint64_t)(stuff_offset >> 1)) <<
			    TX_PKT_HEADER_L4STUFF_SHIFT);

			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
			    "value 0x%llx", hdrp->value));
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
		    "value 0x%llx", hdrp->value));
		break;

	case IPPROTO_UDP:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
		if (l4_cksum) {
			if (!nxge_cksum_offload) {
				uint16_t	*up;
				uint16_t	cksum;
				t_uscalar_t	stuff_len;

				/*
				 * The checksum field has the
				 * partial checksum.
				 * IP_CSUM() macro calls ip_cksum() which
				 * can add in the partial checksum.
				 */
				cksum = IP_CSUM(mp, start_offset, 0);
				stuff_len = stuff_offset;
				nmp = mp;
				mblk_len = MBLKL(nmp);
				/*
				 * Walk to the mblk containing the stuff
				 * offset.
				 * NOTE(review): mblk_len is not refreshed
				 * for each successive mblk here — later
				 * iterations compare against the first
				 * mblk's length; verify against upstream.
				 */
				while ((nmp != NULL) &&
				    (mblk_len < stuff_len)) {
					stuff_len -= mblk_len;
					nmp = nmp->b_cont;
				}
				ASSERT(nmp);
				up = (uint16_t *)(nmp->b_rptr + stuff_len);

				/* Store the software-computed checksum. */
				*up = cksum;
				hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    "use sw cksum "
				    "write to $%p cksum 0x%x content up 0x%x",
				    stuff_len,
				    up,
				    cksum,
				    *up));
			} else {
				/* Hardware will compute the full checksum */
				hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
				hdrp->value |=
				    (((uint64_t)(start_offset >> 1)) <<
				    TX_PKT_HEADER_L4START_SHIFT);
				hdrp->value |=
				    (((uint64_t)(stuff_offset >> 1)) <<
				    TX_PKT_HEADER_L4STUFF_SHIFT);

				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    " use partial checksum "
				    "cksum 0x%x ",
				    "value 0x%llx",
				    stuff_offset,
				    IP_CSUM(mp, start_offset, 0),
				    hdrp->value));
			}
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL,
			"==> nxge_tx_pkt_hdr_init: UDP"
			"value 0x%llx", hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	NXGE_DEBUG_MSG((NULL, TX_CTL,
		"==> nxge_fill_tx_hdr: pkt_len %d  "
		"npads %d value 0x%llx", pkt_len, npads, hdrp->value));

	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}
7153859Sml29623 
7163859Sml29623 /*ARGSUSED*/
7173859Sml29623 p_mblk_t
7183859Sml29623 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
7193859Sml29623 {
7203859Sml29623 	p_mblk_t 		newmp = NULL;
7213859Sml29623 
7223859Sml29623 	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
7233859Sml29623 		NXGE_DEBUG_MSG((NULL, TX_CTL,
7243859Sml29623 			"<== nxge_tx_pkt_header_reserve: allocb failed"));
7253859Sml29623 		return (NULL);
7263859Sml29623 	}
7273859Sml29623 
7283859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL,
7293859Sml29623 		"==> nxge_tx_pkt_header_reserve: get new mp"));
7303859Sml29623 	DB_TYPE(newmp) = M_DATA;
7313859Sml29623 	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
7323859Sml29623 	linkb(newmp, mp);
7333859Sml29623 	newmp->b_rptr -= TX_PKT_HEADER_SIZE;
7343859Sml29623 
7353859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
7363859Sml29623 		"b_rptr $%p b_wptr $%p",
7373859Sml29623 		newmp->b_rptr, newmp->b_wptr));
7383859Sml29623 
7393859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL,
7403859Sml29623 		"<== nxge_tx_pkt_header_reserve: use new mp"));
7413859Sml29623 
7423859Sml29623 	return (newmp);
7433859Sml29623 }
7443859Sml29623 
/*
 * nxge_tx_pkt_nmblocks
 *
 * Count how many TDC gather descriptors ("blocks") are needed to send
 * the mblk chain <mp>, restructuring the chain as a side effect so that
 * it fits the hardware limits used below:
 *  - a single descriptor transfers at most TX_MAX_TRANSFER_LENGTH (4K),
 *    so longer blocks are split in place with dupb();
 *  - at most TX_MAX_GATHER_POINTERS (15) descriptors per packet, so an
 *    over-long chain tail is coalesced with msgpullup().
 *
 * Returns the descriptor count, or 0 if dupb()/msgpullup() fails.
 * On return *tot_xfer_len_p holds the total chain length walked.
 */
7453859Sml29623 int
7463859Sml29623 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
7473859Sml29623 {
7483859Sml29623 	uint_t 			nmblks;
7493859Sml29623 	ssize_t			len;
7503859Sml29623 	uint_t 			pkt_len;
7513859Sml29623 	p_mblk_t 		nmp, bmp, tmp;
7523859Sml29623 	uint8_t 		*b_wptr;
7533859Sml29623 
7543859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL,
7553859Sml29623 		"==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
7563859Sml29623 		"len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
7573859Sml29623 
7583859Sml29623 	nmp = mp;
7593859Sml29623 	bmp = mp;
7603859Sml29623 	nmblks = 0;
7613859Sml29623 	pkt_len = 0;
7623859Sml29623 	*tot_xfer_len_p = 0;
7633859Sml29623 
7643859Sml29623 	while (nmp) {
7653859Sml29623 		len = MBLKL(nmp);
7663859Sml29623 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
7673859Sml29623 			"len %d pkt_len %d nmblks %d tot_xfer_len %d",
7683859Sml29623 			len, pkt_len, nmblks,
7693859Sml29623 			*tot_xfer_len_p));
7703859Sml29623 
		/* Zero-length blocks contribute nothing; skip them. */
7713859Sml29623 		if (len <= 0) {
7723859Sml29623 			bmp = nmp;
7733859Sml29623 			nmp = nmp->b_cont;
7743859Sml29623 			NXGE_DEBUG_MSG((NULL, TX_CTL,
7753859Sml29623 				"==> nxge_tx_pkt_nmblocks: "
7763859Sml29623 				"len (0) pkt_len %d nmblks %d",
7773859Sml29623 				pkt_len, nmblks));
7783859Sml29623 			continue;
7793859Sml29623 		}
7803859Sml29623 
7813859Sml29623 		*tot_xfer_len_p += len;
7823859Sml29623 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
7833859Sml29623 			"len %d pkt_len %d nmblks %d tot_xfer_len %d",
7843859Sml29623 			len, pkt_len, nmblks,
7853859Sml29623 			*tot_xfer_len_p));
7863859Sml29623 
		/*
		 * Small fragments (below nxge_bcopy_thresh) are meant to be
		 * copied together into one descriptor: only the first of a
		 * run bumps nmblks.  Once the accumulated run reaches the
		 * threshold, the scan backs up to the previous block (bmp).
		 * NOTE(review): backing up (nmp = bmp) makes the loop visit
		 * the current block again, re-adding its length to
		 * *tot_xfer_len_p — confirm this double-count is intended.
		 */
7873859Sml29623 		if (len < nxge_bcopy_thresh) {
7883859Sml29623 			NXGE_DEBUG_MSG((NULL, TX_CTL,
7893859Sml29623 				"==> nxge_tx_pkt_nmblocks: "
7903859Sml29623 				"len %d (< thresh) pkt_len %d nmblks %d",
7913859Sml29623 				len, pkt_len, nmblks));
7923859Sml29623 			if (pkt_len == 0)
7933859Sml29623 				nmblks++;
7943859Sml29623 			pkt_len += len;
7953859Sml29623 			if (pkt_len >= nxge_bcopy_thresh) {
7963859Sml29623 				pkt_len = 0;
7973859Sml29623 				len = 0;
7983859Sml29623 				nmp = bmp;
7993859Sml29623 			}
8003859Sml29623 		} else {
8013859Sml29623 			NXGE_DEBUG_MSG((NULL, TX_CTL,
8023859Sml29623 				"==> nxge_tx_pkt_nmblocks: "
8033859Sml29623 				"len %d (> thresh) pkt_len %d nmblks %d",
8043859Sml29623 				len, pkt_len, nmblks));
8053859Sml29623 			pkt_len = 0;
8063859Sml29623 			nmblks++;
8073859Sml29623 			/*
8083859Sml29623 			 * Hardware limits the transfer length to 4K.
8093859Sml29623 			 * If len is more than 4K, we need to break
8103859Sml29623 			 * it up to at most 2 more blocks.
8113859Sml29623 			 */
8123859Sml29623 			if (len > TX_MAX_TRANSFER_LENGTH) {
8133859Sml29623 				uint32_t	nsegs;
8143859Sml29623 
8156495Sspeer 				nsegs = 1;
8163859Sml29623 				NXGE_DEBUG_MSG((NULL, TX_CTL,
8173859Sml29623 					"==> nxge_tx_pkt_nmblocks: "
8183859Sml29623 					"len %d pkt_len %d nmblks %d nsegs %d",
8193859Sml29623 					len, pkt_len, nmblks, nsegs));
				/*
				 * NOTE(review): this modulo test against
				 * (TX_MAX_TRANSFER_LENGTH * 2) decides whether
				 * a 2nd extra segment is needed; verify for
				 * len > 8K, which would need more splits than
				 * the comment's "at most 2 more blocks".
				 */
8203859Sml29623 				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
8213859Sml29623 					++nsegs;
8223859Sml29623 				}
				/*
				 * Split the block in place: truncate nmp at
				 * 4K and splice a dupb() clone covering the
				 * remainder into the chain after it.
				 */
8233859Sml29623 				do {
8243859Sml29623 					b_wptr = nmp->b_rptr +
8253859Sml29623 						TX_MAX_TRANSFER_LENGTH;
8263859Sml29623 					nmp->b_wptr = b_wptr;
8273859Sml29623 					if ((tmp = dupb(nmp)) == NULL) {
8283859Sml29623 						return (0);
8293859Sml29623 					}
8303859Sml29623 					tmp->b_rptr = b_wptr;
8313859Sml29623 					tmp->b_wptr = nmp->b_wptr;
8323859Sml29623 					tmp->b_cont = nmp->b_cont;
8333859Sml29623 					nmp->b_cont = tmp;
8343859Sml29623 					nmblks++;
8353859Sml29623 					if (--nsegs) {
8363859Sml29623 						nmp = tmp;
8373859Sml29623 					}
8383859Sml29623 				} while (nsegs);
8393859Sml29623 				nmp = tmp;
8403859Sml29623 			}
8413859Sml29623 		}
8423859Sml29623 
8433859Sml29623 		/*
8443859Sml29623 		 * Hardware limits the transmit gather pointers to 15.
8453859Sml29623 		 */
8463859Sml29623 		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
8473859Sml29623 				TX_MAX_GATHER_POINTERS) {
8483859Sml29623 			NXGE_DEBUG_MSG((NULL, TX_CTL,
8493859Sml29623 				"==> nxge_tx_pkt_nmblocks: pull msg - "
8503859Sml29623 				"len %d pkt_len %d nmblks %d",
8513859Sml29623 				len, pkt_len, nmblks));
8523859Sml29623 			/* Pull all message blocks from b_cont */
8533859Sml29623 			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
8543859Sml29623 				return (0);
8553859Sml29623 			}
8563859Sml29623 			freemsg(nmp->b_cont);
8573859Sml29623 			nmp->b_cont = tmp;
8583859Sml29623 			pkt_len = 0;
8593859Sml29623 		}
8603859Sml29623 		bmp = nmp;
8613859Sml29623 		nmp = nmp->b_cont;
8623859Sml29623 	}
8633859Sml29623 
8643859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL,
8653859Sml29623 		"<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
8663859Sml29623 		"nmblks %d len %d tot_xfer_len %d",
8673859Sml29623 		mp->b_rptr, mp->b_wptr, nmblks,
8683859Sml29623 		MBLKL(mp), *tot_xfer_len_p));
8693859Sml29623 
8703859Sml29623 	return (nmblks);
8713859Sml29623 }
8723859Sml29623 
/*
 * nxge_txdma_reclaim
 *
 * Reclaim transmit descriptors the hardware has completed: read the
 * hardware-maintained head pointer (TX_RING_HDL), walk rd_index up to
 * it, release the DMA resources (dvma_unload or ddi_dma_unbind_handle)
 * and free the mblk attached to each finished descriptor, and update
 * the per-TDC obytes/opackets statistics.
 *
 * Arguments:
 *	nxgep
 *	tx_ring_p	Ring to reclaim from.  Callers in this file hold
 *			tx_ring_p->lock around the call (see nxge_tx_intr,
 *			nxge_txdma_fixup_channel).
 *	nmblks		Number of descriptors the caller wants to queue
 *			next; 0 means a pure reclaim pass.
 *
 * Returns B_TRUE when, after reclaiming, the ring can accept <nmblks>
 * more descriptors while staying below the TX_FULL_MARK watermark;
 * B_FALSE when the ring is full.
 */
8733859Sml29623 boolean_t
8743859Sml29623 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
8753859Sml29623 {
8763859Sml29623 	boolean_t 		status = B_TRUE;
8773859Sml29623 	p_nxge_dma_common_t	tx_desc_dma_p;
8783859Sml29623 	nxge_dma_common_t	desc_area;
8793859Sml29623 	p_tx_desc_t 		tx_desc_ring_vp;
8803859Sml29623 	p_tx_desc_t 		tx_desc_p;
8813859Sml29623 	p_tx_desc_t 		tx_desc_pp;
8823859Sml29623 	tx_desc_t 		r_tx_desc;
8833859Sml29623 	p_tx_msg_t 		tx_msg_ring;
8843859Sml29623 	p_tx_msg_t 		tx_msg_p;
8853859Sml29623 	npi_handle_t		handle;
8863859Sml29623 	tx_ring_hdl_t		tx_head;
8873859Sml29623 	uint32_t 		pkt_len;
8883859Sml29623 	uint_t			tx_rd_index;
8893859Sml29623 	uint16_t		head_index, tail_index;
8903859Sml29623 	uint8_t			tdc;
8913859Sml29623 	boolean_t		head_wrap, tail_wrap;
8923859Sml29623 	p_nxge_tx_ring_stats_t tdc_stats;
8933859Sml29623 	int			rc;
8943859Sml29623 
8953859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
8963859Sml29623 
	/*
	 * Skip the reclaim work entirely while few descriptors are
	 * pending (below nxge_reclaim_pending) and the caller actually
	 * wants to queue something; otherwise fall into the !status arm.
	 */
8973859Sml29623 	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
8983859Sml29623 			(nmblks != 0));
8993859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
9003859Sml29623 		"==> nxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
9013859Sml29623 			tx_ring_p->descs_pending, nxge_reclaim_pending,
9023859Sml29623 			nmblks));
9033859Sml29623 	if (!status) {
9043859Sml29623 		tx_desc_dma_p = &tx_ring_p->tdc_desc;
9053859Sml29623 		desc_area = tx_ring_p->tdc_desc;
9063859Sml29623 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
		/*
		 * NOTE(review): the next assignment is a dead store — it
		 * is immediately overwritten by DMA_COMMON_VPTR() below.
		 */
9073859Sml29623 		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
9083859Sml29623 		tx_desc_ring_vp =
9093859Sml29623 			(p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
9103859Sml29623 		tx_rd_index = tx_ring_p->rd_index;
9113859Sml29623 		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
9123859Sml29623 		tx_msg_ring = tx_ring_p->tx_msg_ring;
9133859Sml29623 		tx_msg_p = &tx_msg_ring[tx_rd_index];
9143859Sml29623 		tdc = tx_ring_p->tdc;
9153859Sml29623 		tdc_stats = tx_ring_p->tdc_stats;
		/* Track the high-water mark of pending descriptors. */
9163859Sml29623 		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
9173859Sml29623 			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
9183859Sml29623 		}
9193859Sml29623 
9203859Sml29623 		tail_index = tx_ring_p->wr_index;
9213859Sml29623 		tail_wrap = tx_ring_p->wr_index_wrap;
9223859Sml29623 
9233859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
9243859Sml29623 			"==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
9253859Sml29623 			"tail_index %d tail_wrap %d "
9263859Sml29623 			"tx_desc_p $%p ($%p) ",
9273859Sml29623 			tdc, tx_rd_index, tail_index, tail_wrap,
9283859Sml29623 			tx_desc_p, (*(uint64_t *)tx_desc_p)));
9293859Sml29623 		/*
9303859Sml29623 		 * Read the hardware maintained transmit head
9313859Sml29623 		 * and wrap around bit.
9323859Sml29623 		 */
9333859Sml29623 		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
9343859Sml29623 		head_index =  tx_head.bits.ldw.head;
9353859Sml29623 		head_wrap = tx_head.bits.ldw.wrap;
9363859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
9373859Sml29623 			"==> nxge_txdma_reclaim: "
9383859Sml29623 			"tx_rd_index %d tail %d tail_wrap %d "
9393859Sml29623 			"head %d wrap %d",
9403859Sml29623 			tx_rd_index, tail_index, tail_wrap,
9413859Sml29623 			head_index, head_wrap));
9423859Sml29623 
		/*
		 * head == tail is ambiguous: the wrap bits disambiguate
		 * a completely empty ring from a completely full one.
		 */
9433859Sml29623 		if (head_index == tail_index) {
9443859Sml29623 			if (TXDMA_RING_EMPTY(head_index, head_wrap,
9453859Sml29623 					tail_index, tail_wrap) &&
9463859Sml29623 					(head_index == tx_rd_index)) {
9473859Sml29623 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
9483859Sml29623 					"==> nxge_txdma_reclaim: EMPTY"));
9493859Sml29623 				return (B_TRUE);
9503859Sml29623 			}
9513859Sml29623 
9523859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9533859Sml29623 				"==> nxge_txdma_reclaim: Checking "
9543859Sml29623 					"if ring full"));
9553859Sml29623 			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
9563859Sml29623 					tail_wrap)) {
9573859Sml29623 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
9583859Sml29623 					"==> nxge_txdma_reclaim: full"));
9593859Sml29623 				return (B_FALSE);
9603859Sml29623 			}
9613859Sml29623 		}
9623859Sml29623 
9633859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
9643859Sml29623 			"==> nxge_txdma_reclaim: tx_rd_index and head_index"));
9653859Sml29623 
		/* Walk completed descriptors: rd_index up to the HW head. */
9663859Sml29623 		tx_desc_pp = &r_tx_desc;
9673859Sml29623 		while ((tx_rd_index != head_index) &&
9683859Sml29623 			(tx_ring_p->descs_pending != 0)) {
9693859Sml29623 
9703859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9713859Sml29623 				"==> nxge_txdma_reclaim: Checking if pending"));
9723859Sml29623 
9733859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9743859Sml29623 				"==> nxge_txdma_reclaim: "
9753859Sml29623 				"descs_pending %d ",
9763859Sml29623 				tx_ring_p->descs_pending));
9773859Sml29623 
9783859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9793859Sml29623 				"==> nxge_txdma_reclaim: "
9803859Sml29623 				"(tx_rd_index %d head_index %d "
9813859Sml29623 				"(tx_desc_p $%p)",
9823859Sml29623 				tx_rd_index, head_index,
9833859Sml29623 				tx_desc_p));
9843859Sml29623 
			/* Copy the descriptor into a local before decoding. */
9853859Sml29623 			tx_desc_pp->value = tx_desc_p->value;
9863859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9873859Sml29623 				"==> nxge_txdma_reclaim: "
9883859Sml29623 				"(tx_rd_index %d head_index %d "
9893859Sml29623 				"tx_desc_p $%p (desc value 0x%llx) ",
9903859Sml29623 				tx_rd_index, head_index,
9913859Sml29623 				tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
9923859Sml29623 
9933859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
9943859Sml29623 				"==> nxge_txdma_reclaim: dump desc:"));
9953859Sml29623 
			/*
			 * Every descriptor's tr_len feeds obytes; only SOP
			 * descriptors count as whole packets.
			 */
9963859Sml29623 			pkt_len = tx_desc_pp->bits.hdw.tr_len;
9973859Sml29623 			tdc_stats->obytes += pkt_len;
9983859Sml29623 			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
9993859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
10003859Sml29623 				"==> nxge_txdma_reclaim: pkt_len %d "
10013859Sml29623 				"tdc channel %d opackets %d",
10023859Sml29623 				pkt_len,
10033859Sml29623 				tdc,
10043859Sml29623 				tdc_stats->opackets));
10053859Sml29623 
			/* Release per-descriptor DMA resources. */
10063859Sml29623 			if (tx_msg_p->flags.dma_type == USE_DVMA) {
10073859Sml29623 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
10083859Sml29623 					"tx_desc_p = $%p "
10093859Sml29623 					"tx_desc_pp = $%p "
10103859Sml29623 					"index = %d",
10113859Sml29623 					tx_desc_p,
10123859Sml29623 					tx_desc_pp,
10133859Sml29623 					tx_ring_p->rd_index));
10143859Sml29623 				(void) dvma_unload(tx_msg_p->dvma_handle,
10153859Sml29623 					0, -1);
10163859Sml29623 				tx_msg_p->dvma_handle = NULL;
10173859Sml29623 				if (tx_ring_p->dvma_wr_index ==
10183859Sml29623 					tx_ring_p->dvma_wrap_mask) {
10193859Sml29623 					tx_ring_p->dvma_wr_index = 0;
10203859Sml29623 				} else {
10213859Sml29623 					tx_ring_p->dvma_wr_index++;
10223859Sml29623 				}
10233859Sml29623 				tx_ring_p->dvma_pending--;
10243859Sml29623 			} else if (tx_msg_p->flags.dma_type ==
10253859Sml29623 					USE_DMA) {
10263859Sml29623 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
10273859Sml29623 					"==> nxge_txdma_reclaim: "
10283859Sml29623 					"USE DMA"));
10293859Sml29623 				if (rc = ddi_dma_unbind_handle
10303859Sml29623 					(tx_msg_p->dma_handle)) {
10313859Sml29623 					cmn_err(CE_WARN, "!nxge_reclaim: "
10323859Sml29623 						"ddi_dma_unbind_handle "
10333859Sml29623 						"failed. status %d", rc);
10343859Sml29623 				}
10353859Sml29623 			}
10363859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
10373859Sml29623 				"==> nxge_txdma_reclaim: count packets"));
10383859Sml29623 			/*
10393859Sml29623 			 * count a chained packet only once.
10403859Sml29623 			 */
10413859Sml29623 			if (tx_msg_p->tx_message != NULL) {
10423859Sml29623 				freemsg(tx_msg_p->tx_message);
10433859Sml29623 				tx_msg_p->tx_message = NULL;
10443859Sml29623 			}
10453859Sml29623 
10463859Sml29623 			tx_msg_p->flags.dma_type = USE_NONE;
10473859Sml29623 			tx_rd_index = tx_ring_p->rd_index;
10483859Sml29623 			tx_rd_index = (tx_rd_index + 1) &
10493859Sml29623 					tx_ring_p->tx_wrap_mask;
10503859Sml29623 			tx_ring_p->rd_index = tx_rd_index;
10513859Sml29623 			tx_ring_p->descs_pending--;
10523859Sml29623 			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
10533859Sml29623 			tx_msg_p = &tx_msg_ring[tx_rd_index];
10543859Sml29623 		}
10553859Sml29623 
10563859Sml29623 		status = (nmblks <= (tx_ring_p->tx_ring_size -
10573859Sml29623 				tx_ring_p->descs_pending -
10583859Sml29623 				TX_FULL_MARK));
		/* Room again: atomically drop the queueing flag (1 -> 0). */
10593859Sml29623 		if (status) {
10603859Sml29623 			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
10613859Sml29623 		}
10623859Sml29623 	} else {
10633859Sml29623 		status = (nmblks <=
10643859Sml29623 			(tx_ring_p->tx_ring_size -
10653859Sml29623 				tx_ring_p->descs_pending -
10663859Sml29623 				TX_FULL_MARK));
10673859Sml29623 	}
10683859Sml29623 
10693859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
10703859Sml29623 		"<== nxge_txdma_reclaim status = 0x%08x", status));
10713859Sml29623 
10723859Sml29623 	return (status);
10733859Sml29623 }
10743859Sml29623 
10756495Sspeer /*
10766495Sspeer  * nxge_tx_intr
10776495Sspeer  *
10786495Sspeer  *	Process a TDC interrupt
10796495Sspeer  *
10806495Sspeer  * Arguments:
10816495Sspeer  * 	arg1	A Logical Device state Vector (LSV) data structure.
10826495Sspeer  * 	arg2	nxge_t *
10836495Sspeer  *
10846495Sspeer  * Notes:
10856495Sspeer  *
10866495Sspeer  * NPI/NXGE function calls:
10876495Sspeer  *	npi_txdma_control_status()
10886495Sspeer  *	npi_intr_ldg_mgmt_set()
10896495Sspeer  *
10906495Sspeer  *	nxge_tx_err_evnts()
10916495Sspeer  *	nxge_txdma_reclaim()
10926495Sspeer  *
10936495Sspeer  * Registers accessed:
10946495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
10956495Sspeer  *	PIO_LDSV
10966495Sspeer  *
10976495Sspeer  * Context:
10986495Sspeer  *	Any domain
10996495Sspeer  */
11003859Sml29623 uint_t
11013859Sml29623 nxge_tx_intr(void *arg1, void *arg2)
11023859Sml29623 {
11033859Sml29623 	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
11043859Sml29623 	p_nxge_t		nxgep = (p_nxge_t)arg2;
11053859Sml29623 	p_nxge_ldg_t		ldgp;
11063859Sml29623 	uint8_t			channel;
11073859Sml29623 	uint32_t		vindex;
11083859Sml29623 	npi_handle_t		handle;
11093859Sml29623 	tx_cs_t			cs;
11103859Sml29623 	p_tx_ring_t 		*tx_rings;
11113859Sml29623 	p_tx_ring_t 		tx_ring_p;
11123859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
11133859Sml29623 	uint_t 			serviced = DDI_INTR_UNCLAIMED;
11143859Sml29623 	nxge_status_t 		status = NXGE_OK;
11153859Sml29623 
11163859Sml29623 	if (ldvp == NULL) {
11173859Sml29623 		NXGE_DEBUG_MSG((NULL, INT_CTL,
11183859Sml29623 			"<== nxge_tx_intr: nxgep $%p ldvp $%p",
11193859Sml29623 			nxgep, ldvp));
11203859Sml29623 		return (DDI_INTR_UNCLAIMED);
11213859Sml29623 	}
11223859Sml29623 
	/* Trust the LDV's back-pointer when arg2 is missing or disagrees. */
11233859Sml29623 	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
11243859Sml29623 		nxgep = ldvp->nxgep;
11253859Sml29623 	}
11263859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
11273859Sml29623 		"==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
11283859Sml29623 		nxgep, ldvp));
11296713Sspeer 
	/* Claim but otherwise ignore interrupts that arrive before start-up. */
11306713Sspeer 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
11316713Sspeer 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
11326713Sspeer 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
11336713Sspeer 		    "<== nxge_tx_intr: interface not started or intialized"));
11346713Sspeer 		return (DDI_INTR_CLAIMED);
11356713Sspeer 	}
11366713Sspeer 
11373859Sml29623 	/*
11383859Sml29623 	 * This interrupt handler is for a specific
11393859Sml29623 	 * transmit dma channel.
11403859Sml29623 	 */
11413859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
11423859Sml29623 	/* Get the control and status for this channel. */
11433859Sml29623 	channel = ldvp->channel;
11443859Sml29623 	ldgp = ldvp->ldgp;
11453859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
11463859Sml29623 		"==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
11473859Sml29623 		"channel %d",
11483859Sml29623 		nxgep, ldvp, channel));
11493859Sml29623 
	/*
	 * NOTE(review): if this TX_CS read fails (rs != 0), <cs> is left
	 * uninitialized yet is still passed to nxge_tx_err_evnts() below —
	 * confirm that is safe.
	 */
11503859Sml29623 	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
11513859Sml29623 	vindex = ldvp->vdma_index;
11523859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
11533859Sml29623 		"==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
11543859Sml29623 		channel, vindex, rs));
	/* MK ("mark") bit set: descriptors completed — reclaim the ring. */
11553859Sml29623 	if (!rs && cs.bits.ldw.mk) {
11563859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
11573859Sml29623 			"==> nxge_tx_intr:channel %d ring index %d "
11583859Sml29623 			"status 0x%08x (mk bit set)",
11593859Sml29623 			channel, vindex, rs));
11603859Sml29623 		tx_rings = nxgep->tx_rings->rings;
11613859Sml29623 		tx_ring_p = tx_rings[vindex];
11623859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
11633859Sml29623 			"==> nxge_tx_intr:channel %d ring index %d "
11643859Sml29623 			"status 0x%08x (mk bit set, calling reclaim)",
11653859Sml29623 			channel, vindex, rs));
11663859Sml29623 
		/* Reclaim under the ring lock, then let MAC resume TX. */
11673859Sml29623 		MUTEX_ENTER(&tx_ring_p->lock);
11683859Sml29623 		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
11693859Sml29623 		MUTEX_EXIT(&tx_ring_p->lock);
11703859Sml29623 		mac_tx_update(nxgep->mach);
11713859Sml29623 	}
11723859Sml29623 
11733859Sml29623 	/*
11743859Sml29623 	 * Process other transmit control and status.
11753859Sml29623 	 * Check the ldv state.
11763859Sml29623 	 */
11773859Sml29623 	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
11783859Sml29623 	/*
11793859Sml29623 	 * Rearm this logical group if this is a single device
11803859Sml29623 	 * group.
11813859Sml29623 	 */
11823859Sml29623 	if (ldgp->nldvs == 1) {
11833859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
11843859Sml29623 			"==> nxge_tx_intr: rearm"));
11853859Sml29623 		if (status == NXGE_OK) {
			/* Guest domains rearm through the HIO proxy. */
11866495Sspeer 			if (isLDOMguest(nxgep)) {
11876495Sspeer 				nxge_hio_ldgimgn(nxgep, ldgp);
11886495Sspeer 			} else {
11896495Sspeer 				(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
11906495Sspeer 				    B_TRUE, ldgp->ldg_timer);
11916495Sspeer 			}
11923859Sml29623 		}
11933859Sml29623 	}
11943859Sml29623 
11953859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
11963859Sml29623 	serviced = DDI_INTR_CLAIMED;
11973859Sml29623 	return (serviced);
11983859Sml29623 }
11993859Sml29623 
/*
 * nxge_txdma_stop
 *
 * Marked "Dead" at the definition: apparently no longer called.  As
 * written it only stops the link monitor — the TDCs themselves are not
 * touched here.
 */
12003859Sml29623 void
12016495Sspeer nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
12023859Sml29623 {
12033859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
12043859Sml29623 
	/* Quiesce link state polling. */
12053859Sml29623 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
12063859Sml29623 
12073859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
12083859Sml29623 }
12093859Sml29623 
/*
 * nxge_txdma_stop_start
 *
 * Marked "Dead": restart sequence for the transmit side.  The call
 * order below is significant: stop, reset (fix up) the rings, re-enable
 * the DMA channels, re-enable the TX MAC, then kick the rings.
 */
12103859Sml29623 void
12116495Sspeer nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */
12123859Sml29623 {
12133859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
12143859Sml29623 
12153859Sml29623 	(void) nxge_txdma_stop(nxgep);
12163859Sml29623 
12173859Sml29623 	(void) nxge_fixup_txdma_rings(nxgep);
12183859Sml29623 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
12193859Sml29623 	(void) nxge_tx_mac_enable(nxgep);
12203859Sml29623 	(void) nxge_txdma_hw_kick(nxgep);
12213859Sml29623 
12223859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
12233859Sml29623 }
12243859Sml29623 
12256495Sspeer npi_status_t
12266495Sspeer nxge_txdma_channel_disable(
12276495Sspeer 	nxge_t *nxge,
12286495Sspeer 	int channel)
12296495Sspeer {
12306495Sspeer 	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxge);
12316495Sspeer 	npi_status_t	rs;
12326495Sspeer 	tdmc_intr_dbg_t	intr_dbg;
12336495Sspeer 
12346495Sspeer 	/*
12356495Sspeer 	 * Stop the dma channel and wait for the stop-done.
12366495Sspeer 	 * If the stop-done bit is not present, then force
12376495Sspeer 	 * an error so TXC will stop.
12386495Sspeer 	 * All channels bound to this port need to be stopped
12396495Sspeer 	 * and reset after injecting an interrupt error.
12406495Sspeer 	 */
12416495Sspeer 	rs = npi_txdma_channel_disable(handle, channel);
12426495Sspeer 	NXGE_DEBUG_MSG((nxge, MEM3_CTL,
12436495Sspeer 		"==> nxge_txdma_channel_disable(%d) "
12446495Sspeer 		"rs 0x%x", channel, rs));
12456495Sspeer 	if (rs != NPI_SUCCESS) {
12466495Sspeer 		/* Inject any error */
12476495Sspeer 		intr_dbg.value = 0;
12486495Sspeer 		intr_dbg.bits.ldw.nack_pref = 1;
12496495Sspeer 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
12506495Sspeer 			"==> nxge_txdma_hw_mode: "
12516495Sspeer 			"channel %d (stop failed 0x%x) "
12526495Sspeer 			"(inject err)", rs, channel));
12536495Sspeer 		(void) npi_txdma_inj_int_error_set(
12546495Sspeer 			handle, channel, &intr_dbg);
12556495Sspeer 		rs = npi_txdma_channel_disable(handle, channel);
12566495Sspeer 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
12576495Sspeer 			"==> nxge_txdma_hw_mode: "
12586495Sspeer 			"channel %d (stop again 0x%x) "
12596495Sspeer 			"(after inject err)",
12606495Sspeer 			rs, channel));
12616495Sspeer 	}
12626495Sspeer 
12636495Sspeer 	return (rs);
12646495Sspeer }
12656495Sspeer 
12666495Sspeer /*
12676495Sspeer  * nxge_txdma_hw_mode
12686495Sspeer  *
12696495Sspeer  *	Toggle all TDCs on (enable) or off (disable).
12706495Sspeer  *
12716495Sspeer  * Arguments:
12726495Sspeer  * 	nxgep
12736495Sspeer  * 	enable	Enable or disable a TDC.
12746495Sspeer  *
12756495Sspeer  * Notes:
12766495Sspeer  *
12776495Sspeer  * NPI/NXGE function calls:
12786495Sspeer  *	npi_txdma_channel_enable(TX_CS)
12796495Sspeer  *	npi_txdma_channel_disable(TX_CS)
12806495Sspeer  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
12816495Sspeer  *
12826495Sspeer  * Registers accessed:
12836495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
12846495Sspeer  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
12856495Sspeer  *
12866495Sspeer  * Context:
12876495Sspeer  *	Any domain
12886495Sspeer  */
12893859Sml29623 nxge_status_t
12903859Sml29623 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
12913859Sml29623 {
12926495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
12936495Sspeer 
12946495Sspeer 	npi_handle_t	handle;
12956495Sspeer 	nxge_status_t	status;
12966495Sspeer 	npi_status_t	rs;
12976495Sspeer 	int		tdc;
12983859Sml29623 
12993859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13003859Sml29623 		"==> nxge_txdma_hw_mode: enable mode %d", enable));
13013859Sml29623 
13023859Sml29623 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
13033859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
13043859Sml29623 			"<== nxge_txdma_mode: not initialized"));
13053859Sml29623 		return (NXGE_ERROR);
13063859Sml29623 	}
13073859Sml29623 
13086495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
13093859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
13106495Sspeer 		    "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
13113859Sml29623 		return (NXGE_ERROR);
13123859Sml29623 	}
13133859Sml29623 
13146495Sspeer 	/* Enable or disable all of the TDCs owned by us. */
13153859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
13166495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
13176495Sspeer 		if ((1 << tdc) & set->owned.map) {
13186495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
13196495Sspeer 			if (ring) {
13206495Sspeer 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13216495Sspeer 				    "==> nxge_txdma_hw_mode: channel %d", tdc));
13226495Sspeer 				if (enable) {
13236495Sspeer 					rs = npi_txdma_channel_enable
13246495Sspeer 					    (handle, tdc);
13253859Sml29623 					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13266495Sspeer 					    "==> nxge_txdma_hw_mode: "
13276495Sspeer 					    "channel %d (enable) rs 0x%x",
13286495Sspeer 					    tdc, rs));
13296495Sspeer 				} else {
13306495Sspeer 					rs = nxge_txdma_channel_disable
13316495Sspeer 					    (nxgep, tdc);
13323859Sml29623 				}
13333859Sml29623 			}
13343859Sml29623 		}
13353859Sml29623 	}
13363859Sml29623 
13373859Sml29623 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
13383859Sml29623 
13393859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13403859Sml29623 		"<== nxge_txdma_hw_mode: status 0x%x", status));
13413859Sml29623 
13423859Sml29623 	return (status);
13433859Sml29623 }
13443859Sml29623 
13453859Sml29623 void
13463859Sml29623 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
13473859Sml29623 {
13483859Sml29623 	npi_handle_t		handle;
13493859Sml29623 
13503859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
13513859Sml29623 		"==> nxge_txdma_enable_channel: channel %d", channel));
13523859Sml29623 
13533859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
13543859Sml29623 	/* enable the transmit dma channels */
13553859Sml29623 	(void) npi_txdma_channel_enable(handle, channel);
13563859Sml29623 
13573859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
13583859Sml29623 }
13593859Sml29623 
13603859Sml29623 void
13613859Sml29623 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
13623859Sml29623 {
13633859Sml29623 	npi_handle_t		handle;
13643859Sml29623 
13653859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
13663859Sml29623 		"==> nxge_txdma_disable_channel: channel %d", channel));
13673859Sml29623 
13683859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
13693859Sml29623 	/* stop the transmit dma channels */
13703859Sml29623 	(void) npi_txdma_channel_disable(handle, channel);
13713859Sml29623 
13723859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
13733859Sml29623 }
13743859Sml29623 
13756495Sspeer /*
13766495Sspeer  * nxge_txdma_stop_inj_err
13776495Sspeer  *
13786495Sspeer  *	Stop a TDC.  If at first we don't succeed, inject an error.
13796495Sspeer  *
13806495Sspeer  * Arguments:
13816495Sspeer  * 	nxgep
13826495Sspeer  * 	channel		The channel to stop.
13836495Sspeer  *
13846495Sspeer  * Notes:
13856495Sspeer  *
13866495Sspeer  * NPI/NXGE function calls:
13876495Sspeer  *	npi_txdma_channel_disable()
13886495Sspeer  *	npi_txdma_inj_int_error_set()
13896495Sspeer  * #if defined(NXGE_DEBUG)
13906495Sspeer  *	nxge_txdma_regs_dump_channels(nxgep);
13916495Sspeer  * #endif
13926495Sspeer  *
13936495Sspeer  * Registers accessed:
13946495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
13956495Sspeer  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
13966495Sspeer  *
13976495Sspeer  * Context:
13986495Sspeer  *	Any domain
13996495Sspeer  */
14003859Sml29623 int
14013859Sml29623 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
14023859Sml29623 {
14033859Sml29623 	npi_handle_t		handle;
14043859Sml29623 	tdmc_intr_dbg_t		intr_dbg;
14053859Sml29623 	int			status;
14063859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
14073859Sml29623 
14083859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
14093859Sml29623 	/*
14103859Sml29623 	 * Stop the dma channel waits for the stop done.
14113859Sml29623 	 * If the stop done bit is not set, then create
14123859Sml29623 	 * an error.
14133859Sml29623 	 */
14143859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
14153859Sml29623 	rs = npi_txdma_channel_disable(handle, channel);
14163859Sml29623 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
14173859Sml29623 	if (status == NXGE_OK) {
14183859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
14193859Sml29623 			"<== nxge_txdma_stop_inj_err (channel %d): "
14203859Sml29623 			"stopped OK", channel));
14213859Sml29623 		return (status);
14223859Sml29623 	}
14233859Sml29623 
14243859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
14253859Sml29623 		"==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
14263859Sml29623 		"injecting error", channel, rs));
14273859Sml29623 	/* Inject any error */
14283859Sml29623 	intr_dbg.value = 0;
14293859Sml29623 	intr_dbg.bits.ldw.nack_pref = 1;
14303859Sml29623 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
14313859Sml29623 
14323859Sml29623 	/* Stop done bit will be set as a result of error injection */
14333859Sml29623 	rs = npi_txdma_channel_disable(handle, channel);
14343859Sml29623 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
14353859Sml29623 	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
14363859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
14373859Sml29623 			"<== nxge_txdma_stop_inj_err (channel %d): "
14383859Sml29623 			"stopped OK ", channel));
14393859Sml29623 		return (status);
14403859Sml29623 	}
14413859Sml29623 
14423859Sml29623 #if	defined(NXGE_DEBUG)
14433859Sml29623 	nxge_txdma_regs_dump_channels(nxgep);
14443859Sml29623 #endif
14453859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
14463859Sml29623 		"==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
14473859Sml29623 		" (injected error but still not stopped)", channel, rs));
14483859Sml29623 
14493859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
14503859Sml29623 	return (status);
14513859Sml29623 }
14523859Sml29623 
14533859Sml29623 /*ARGSUSED*/
14543859Sml29623 void
14553859Sml29623 nxge_fixup_txdma_rings(p_nxge_t nxgep)
14563859Sml29623 {
14576495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
14586495Sspeer 	int tdc;
14593859Sml29623 
14603859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
14613859Sml29623 
14626495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
14636495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
14646495Sspeer 		    "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
14653859Sml29623 		return;
14663859Sml29623 	}
14673859Sml29623 
14686495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
14696495Sspeer 		if ((1 << tdc) & set->owned.map) {
14706495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
14716495Sspeer 			if (ring) {
14726495Sspeer 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
14736495Sspeer 				    "==> nxge_fixup_txdma_rings: channel %d",
14746495Sspeer 				    tdc));
14756495Sspeer 				nxge_txdma_fixup_channel(nxgep, ring, tdc);
14766495Sspeer 			}
14776495Sspeer 		}
14783859Sml29623 	}
14793859Sml29623 
14803859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
14813859Sml29623 }
14823859Sml29623 
14833859Sml29623 /*ARGSUSED*/
14843859Sml29623 void
14853859Sml29623 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
14863859Sml29623 {
14873859Sml29623 	p_tx_ring_t	ring_p;
14883859Sml29623 
14893859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
14903859Sml29623 	ring_p = nxge_txdma_get_ring(nxgep, channel);
14913859Sml29623 	if (ring_p == NULL) {
14923859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
14933859Sml29623 		return;
14943859Sml29623 	}
14953859Sml29623 
14963859Sml29623 	if (ring_p->tdc != channel) {
14973859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
14983859Sml29623 			"<== nxge_txdma_fix_channel: channel not matched "
14993859Sml29623 			"ring tdc %d passed channel",
15003859Sml29623 			ring_p->tdc, channel));
15013859Sml29623 		return;
15023859Sml29623 	}
15033859Sml29623 
15043859Sml29623 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
15053859Sml29623 
15063859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
15073859Sml29623 }
15083859Sml29623 
15093859Sml29623 /*ARGSUSED*/
15103859Sml29623 void
15113859Sml29623 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
15123859Sml29623 {
15133859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
15143859Sml29623 
15153859Sml29623 	if (ring_p == NULL) {
15163859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15173859Sml29623 			"<== nxge_txdma_fixup_channel: NULL ring pointer"));
15183859Sml29623 		return;
15193859Sml29623 	}
15203859Sml29623 
15213859Sml29623 	if (ring_p->tdc != channel) {
15223859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15233859Sml29623 			"<== nxge_txdma_fixup_channel: channel not matched "
15243859Sml29623 			"ring tdc %d passed channel",
15253859Sml29623 			ring_p->tdc, channel));
15263859Sml29623 		return;
15273859Sml29623 	}
15283859Sml29623 
15293859Sml29623 	MUTEX_ENTER(&ring_p->lock);
15303859Sml29623 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
15313859Sml29623 	ring_p->rd_index = 0;
15323859Sml29623 	ring_p->wr_index = 0;
15333859Sml29623 	ring_p->ring_head.value = 0;
15343859Sml29623 	ring_p->ring_kick_tail.value = 0;
15353859Sml29623 	ring_p->descs_pending = 0;
15363859Sml29623 	MUTEX_EXIT(&ring_p->lock);
15373859Sml29623 
15383859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
15393859Sml29623 }
15403859Sml29623 
15413859Sml29623 /*ARGSUSED*/
15423859Sml29623 void
15433859Sml29623 nxge_txdma_hw_kick(p_nxge_t nxgep)
15443859Sml29623 {
15456495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
15466495Sspeer 	int tdc;
15473859Sml29623 
15483859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
15493859Sml29623 
15506495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
15513859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15526495Sspeer 		    "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
15533859Sml29623 		return;
15543859Sml29623 	}
15553859Sml29623 
15566495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
15576495Sspeer 		if ((1 << tdc) & set->owned.map) {
15586495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
15596495Sspeer 			if (ring) {
15606495Sspeer 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
15616495Sspeer 				    "==> nxge_txdma_hw_kick: channel %d", tdc));
15626495Sspeer 				nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
15636495Sspeer 			}
15646495Sspeer 		}
15653859Sml29623 	}
15663859Sml29623 
15673859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
15683859Sml29623 }
15693859Sml29623 
15703859Sml29623 /*ARGSUSED*/
15713859Sml29623 void
15723859Sml29623 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
15733859Sml29623 {
15743859Sml29623 	p_tx_ring_t	ring_p;
15753859Sml29623 
15763859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
15773859Sml29623 
15783859Sml29623 	ring_p = nxge_txdma_get_ring(nxgep, channel);
15793859Sml29623 	if (ring_p == NULL) {
15803859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15813859Sml29623 			    " nxge_txdma_kick_channel"));
15823859Sml29623 		return;
15833859Sml29623 	}
15843859Sml29623 
15853859Sml29623 	if (ring_p->tdc != channel) {
15863859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15873859Sml29623 			"<== nxge_txdma_kick_channel: channel not matched "
15883859Sml29623 			"ring tdc %d passed channel",
15893859Sml29623 			ring_p->tdc, channel));
15903859Sml29623 		return;
15913859Sml29623 	}
15923859Sml29623 
15933859Sml29623 	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
15943859Sml29623 
15953859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
15963859Sml29623 }
15973859Sml29623 
15983859Sml29623 /*ARGSUSED*/
15993859Sml29623 void
16003859Sml29623 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
16013859Sml29623 {
16023859Sml29623 
16033859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
16043859Sml29623 
16053859Sml29623 	if (ring_p == NULL) {
16063859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
16073859Sml29623 			"<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
16083859Sml29623 		return;
16093859Sml29623 	}
16103859Sml29623 
16113859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
16123859Sml29623 }
16133859Sml29623 
16146495Sspeer /*
16156495Sspeer  * nxge_check_tx_hang
16166495Sspeer  *
16176495Sspeer  *	Check the state of all TDCs belonging to nxgep.
16186495Sspeer  *
16196495Sspeer  * Arguments:
16206495Sspeer  * 	nxgep
16216495Sspeer  *
16226495Sspeer  * Notes:
16236495Sspeer  *	Called by nxge_hw.c:nxge_check_hw_state().
16246495Sspeer  *
16256495Sspeer  * NPI/NXGE function calls:
16266495Sspeer  *
16276495Sspeer  * Registers accessed:
16286495Sspeer  *
16296495Sspeer  * Context:
16306495Sspeer  *	Any domain
16316495Sspeer  */
16323859Sml29623 /*ARGSUSED*/
16333859Sml29623 void
16343859Sml29623 nxge_check_tx_hang(p_nxge_t nxgep)
16353859Sml29623 {
16363859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
16373859Sml29623 
16386713Sspeer 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
16396713Sspeer 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
16406713Sspeer 		goto nxge_check_tx_hang_exit;
16416713Sspeer 	}
16426713Sspeer 
16433859Sml29623 	/*
16443859Sml29623 	 * Needs inputs from hardware for regs:
16453859Sml29623 	 *	head index had not moved since last timeout.
16463859Sml29623 	 *	packets not transmitted or stuffed registers.
16473859Sml29623 	 */
16483859Sml29623 	if (nxge_txdma_hung(nxgep)) {
16493859Sml29623 		nxge_fixup_hung_txdma_rings(nxgep);
16503859Sml29623 	}
16516713Sspeer 
16526713Sspeer nxge_check_tx_hang_exit:
16533859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
16543859Sml29623 }
16553859Sml29623 
/*
 * nxge_txdma_hung
 *
 *	Determine whether any TDC owned by this instance appears hung.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_check_tx_hang().
 *	Returns B_TRUE as soon as one owned channel reports hung,
 *	B_FALSE otherwise (including when the ring pointers are NULL).
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_channel_hung()
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_hung(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hung: NULL ring pointer(s)"));
		return (B_FALSE);
	}

	/* Check only the channels in this instance's ownership map. */
	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_txdma_hung: TDC %d hung",
					    tdc));
					return (B_TRUE);
				}
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));

	return (B_FALSE);
}
17093859Sml29623 
/*
 * nxge_txdma_channel_hung
 *
 *	Determine whether <channel> appears to be hung: the ring is
 *	reported full by the head/tail/wrap comparison even after a
 *	reclaim pass.
 *
 * Arguments:
 * 	nxgep
 * 	tx_ring_p	<channel>'s ring.
 * 	channel		The channel to check.
 *
 * Notes:
 *	Called by nxge_txdma.c:nxge_txdma_hung()
 *	Returns B_TRUE when the ring tests full, B_FALSE when it tests
 *	empty (with head caught up to the software read index) or in
 *	any other state.
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_head_get()
 *
 * Registers accessed:
 *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
{
	uint16_t		head_index, tail_index;
	boolean_t		head_wrap, tail_wrap;
	npi_handle_t		handle;
	tx_ring_hdl_t		tx_head;
	uint_t			tx_rd_index;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_txdma_channel_hung: channel %d", channel));
	/* Reclaim first, then snapshot the software state under the lock. */
	MUTEX_ENTER(&tx_ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	tail_index = tx_ring_p->wr_index;
	tail_wrap = tx_ring_p->wr_index_wrap;
	tx_rd_index = tx_ring_p->rd_index;
	MUTEX_EXIT(&tx_ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
		"tail_index %d tail_wrap %d ",
		channel, tx_rd_index, tail_index, tail_wrap));
	/*
	 * Read the hardware maintained transmit head
	 * and wrap around bit.
	 */
	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
	head_index =  tx_head.bits.ldw.head;
	head_wrap = tx_head.bits.ldw.wrap;
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_txdma_channel_hung: "
		"tx_rd_index %d tail %d tail_wrap %d "
		"head %d wrap %d",
		tx_rd_index, tail_index, tail_wrap,
		head_index, head_wrap));

	/* Empty and fully caught up: definitely not hung. */
	if (TXDMA_RING_EMPTY(head_index, head_wrap,
			tail_index, tail_wrap) &&
			(head_index == tx_rd_index)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
			"==> nxge_txdma_channel_hung: EMPTY"));
		return (B_FALSE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_txdma_channel_hung: Checking if ring full"));
	/* Still full after the reclaim above: treat as hung. */
	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
			tail_wrap)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
			"==> nxge_txdma_channel_hung: full"));
		return (B_TRUE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));

	return (B_FALSE);
}
17933859Sml29623 
/*
 * nxge_fixup_hung_txdma_rings
 *
 *	Run the hung-channel fixup on every TDC owned by this instance.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_check_tx_hang()
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_fixup_hung_channel()
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
void
nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
		return;
	}

	/* Fix up each owned, mapped channel in turn. */
	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_fixup_hung_txdma_rings: TDC %d",
				    tdc));
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
}
18453859Sml29623 
18466495Sspeer /*
18476495Sspeer  * nxge_txdma_fixup_hung_channel
18486495Sspeer  *
18496495Sspeer  *	'Fix' a hung TDC.
18506495Sspeer  *
18516495Sspeer  * Arguments:
18526495Sspeer  * 	nxgep
18536495Sspeer  * 	channel		The channel to fix.
18546495Sspeer  *
18556495Sspeer  * Notes:
18566495Sspeer  *	Called by nxge_fixup_hung_txdma_rings()
18576495Sspeer  *
18586495Sspeer  *	1. Reclaim the TDC.
18596495Sspeer  *	2. Disable the TDC.
18606495Sspeer  *
18616495Sspeer  * NPI/NXGE function calls:
18626495Sspeer  *	nxge_txdma_reclaim()
18636495Sspeer  *	npi_txdma_channel_disable(TX_CS)
18646495Sspeer  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
18656495Sspeer  *
18666495Sspeer  * Registers accessed:
18676495Sspeer  *	TX_CS		DMC+0x40028 Transmit Control And Status
18686495Sspeer  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
18696495Sspeer  *
18706495Sspeer  * Context:
18716495Sspeer  *	Any domain
18726495Sspeer  */
18733859Sml29623 /*ARGSUSED*/
18743859Sml29623 void
18753859Sml29623 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
18763859Sml29623 {
18773859Sml29623 	p_tx_ring_t	ring_p;
18783859Sml29623 
18793859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
18803859Sml29623 	ring_p = nxge_txdma_get_ring(nxgep, channel);
18813859Sml29623 	if (ring_p == NULL) {
18823859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
18833859Sml29623 			"<== nxge_txdma_fix_hung_channel"));
18843859Sml29623 		return;
18853859Sml29623 	}
18863859Sml29623 
18873859Sml29623 	if (ring_p->tdc != channel) {
18883859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
18893859Sml29623 			"<== nxge_txdma_fix_hung_channel: channel not matched "
18903859Sml29623 			"ring tdc %d passed channel",
18913859Sml29623 			ring_p->tdc, channel));
18923859Sml29623 		return;
18933859Sml29623 	}
18943859Sml29623 
18953859Sml29623 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
18963859Sml29623 
18973859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
18983859Sml29623 }
18993859Sml29623 
19003859Sml29623 /*ARGSUSED*/
19013859Sml29623 void
19023859Sml29623 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
19033859Sml29623 	uint16_t channel)
19043859Sml29623 {
19053859Sml29623 	npi_handle_t		handle;
19063859Sml29623 	tdmc_intr_dbg_t		intr_dbg;
19073859Sml29623 	int			status = NXGE_OK;
19083859Sml29623 
19093859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
19103859Sml29623 
19113859Sml29623 	if (ring_p == NULL) {
19123859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
19133859Sml29623 			"<== nxge_txdma_fixup_channel: NULL ring pointer"));
19143859Sml29623 		return;
19153859Sml29623 	}
19163859Sml29623 
19173859Sml29623 	if (ring_p->tdc != channel) {
19183859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
19193859Sml29623 			"<== nxge_txdma_fixup_hung_channel: channel "
19203859Sml29623 			"not matched "
19213859Sml29623 			"ring tdc %d passed channel",
19223859Sml29623 			ring_p->tdc, channel));
19233859Sml29623 		return;
19243859Sml29623 	}
19253859Sml29623 
19263859Sml29623 	/* Reclaim descriptors */
19273859Sml29623 	MUTEX_ENTER(&ring_p->lock);
19283859Sml29623 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
19293859Sml29623 	MUTEX_EXIT(&ring_p->lock);
19303859Sml29623 
19313859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
19323859Sml29623 	/*
19333859Sml29623 	 * Stop the dma channel waits for the stop done.
19343859Sml29623 	 * If the stop done bit is not set, then force
19353859Sml29623 	 * an error.
19363859Sml29623 	 */
19373859Sml29623 	status = npi_txdma_channel_disable(handle, channel);
19383859Sml29623 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
19393859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
19403859Sml29623 			"<== nxge_txdma_fixup_hung_channel: stopped OK "
19413859Sml29623 			"ring tdc %d passed channel %d",
19423859Sml29623 			ring_p->tdc, channel));
19433859Sml29623 		return;
19443859Sml29623 	}
19453859Sml29623 
19463859Sml29623 	/* Inject any error */
19473859Sml29623 	intr_dbg.value = 0;
19483859Sml29623 	intr_dbg.bits.ldw.nack_pref = 1;
19493859Sml29623 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
19503859Sml29623 
19513859Sml29623 	/* Stop done bit will be set as a result of error injection */
19523859Sml29623 	status = npi_txdma_channel_disable(handle, channel);
19533859Sml29623 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
19543859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
19553859Sml29623 			"<== nxge_txdma_fixup_hung_channel: stopped again"
19563859Sml29623 			"ring tdc %d passed channel",
19573859Sml29623 			ring_p->tdc, channel));
19583859Sml29623 		return;
19593859Sml29623 	}
19603859Sml29623 
19613859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
19623859Sml29623 		"<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
19633859Sml29623 		"ring tdc %d passed channel",
19643859Sml29623 		ring_p->tdc, channel));
19653859Sml29623 
19663859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
19673859Sml29623 }
19683859Sml29623 
19693859Sml29623 /*ARGSUSED*/
19703859Sml29623 void
19713859Sml29623 nxge_reclaim_rings(p_nxge_t nxgep)
19723859Sml29623 {
19736495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
19746495Sspeer 	int tdc;
19756495Sspeer 
19766495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
19776495Sspeer 
19786495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
19793859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
19806495Sspeer 		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
19813859Sml29623 		return;
19823859Sml29623 	}
19833859Sml29623 
19846495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
19856495Sspeer 		if ((1 << tdc) & set->owned.map) {
19866495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
19876495Sspeer 			if (ring) {
19886495Sspeer 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
19896495Sspeer 				    "==> nxge_reclaim_rings: TDC %d", tdc));
19906495Sspeer 				MUTEX_ENTER(&ring->lock);
19916495Sspeer 				(void) nxge_txdma_reclaim(nxgep, ring, tdc);
19926495Sspeer 				MUTEX_EXIT(&ring->lock);
19936495Sspeer 			}
19946495Sspeer 		}
19953859Sml29623 	}
19963859Sml29623 
19973859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
19983859Sml29623 }
19993859Sml29623 
20003859Sml29623 void
20013859Sml29623 nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
20023859Sml29623 {
20036495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
20046495Sspeer 	npi_handle_t handle;
20056495Sspeer 	int tdc;
20066495Sspeer 
20076495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
20083859Sml29623 
20093859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
20106495Sspeer 
20116495Sspeer 	if (!isLDOMguest(nxgep)) {
20126495Sspeer 		(void) npi_txdma_dump_fzc_regs(handle);
20136495Sspeer 
20146495Sspeer 		/* Dump TXC registers. */
20156495Sspeer 		(void) npi_txc_dump_fzc_regs(handle);
20166495Sspeer 		(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
20173859Sml29623 	}
20183859Sml29623 
20196495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
20203859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
20216495Sspeer 		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
20223859Sml29623 		return;
20233859Sml29623 	}
20243859Sml29623 
20256495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
20266495Sspeer 		if ((1 << tdc) & set->owned.map) {
20276495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
20286495Sspeer 			if (ring) {
20296495Sspeer 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
20306495Sspeer 				    "==> nxge_txdma_regs_dump_channels: "
20316495Sspeer 				    "TDC %d", tdc));
20326495Sspeer 				(void) npi_txdma_dump_tdc_regs(handle, tdc);
20336495Sspeer 
20346495Sspeer 				/* Dump TXC registers, if able to. */
20356495Sspeer 				if (!isLDOMguest(nxgep)) {
20366495Sspeer 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
20376495Sspeer 					    "==> nxge_txdma_regs_dump_channels:"
20386495Sspeer 					    " FZC TDC %d", tdc));
20396495Sspeer 					(void) npi_txc_dump_tdc_fzc_regs
20406495Sspeer 					    (handle, tdc);
20416495Sspeer 				}
20426495Sspeer 				nxge_txdma_regs_dump(nxgep, tdc);
20436495Sspeer 			}
20446495Sspeer 		}
20453859Sml29623 	}
20463859Sml29623 
20473859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump"));
20483859Sml29623 }
20493859Sml29623 
20503859Sml29623 void
20513859Sml29623 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
20523859Sml29623 {
20533859Sml29623 	npi_handle_t		handle;
20543859Sml29623 	tx_ring_hdl_t 		hdl;
20553859Sml29623 	tx_ring_kick_t 		kick;
20563859Sml29623 	tx_cs_t 		cs;
20573859Sml29623 	txc_control_t		control;
20583859Sml29623 	uint32_t		bitmap = 0;
20593859Sml29623 	uint32_t		burst = 0;
20603859Sml29623 	uint32_t		bytes = 0;
20613859Sml29623 	dma_log_page_t		cfg;
20623859Sml29623 
20633859Sml29623 	printf("\n\tfunc # %d tdc %d ",
20643859Sml29623 		nxgep->function_num, channel);
20653859Sml29623 	cfg.page_num = 0;
20663859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
20673859Sml29623 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
20683859Sml29623 	printf("\n\tlog page func %d valid page 0 %d",
20693859Sml29623 		cfg.func_num, cfg.valid);
20703859Sml29623 	cfg.page_num = 1;
20713859Sml29623 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
20723859Sml29623 	printf("\n\tlog page func %d valid page 1 %d",
20733859Sml29623 		cfg.func_num, cfg.valid);
20743859Sml29623 
20753859Sml29623 	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
20763859Sml29623 	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
20773859Sml29623 	printf("\n\thead value is 0x%0llx",
20783859Sml29623 		(long long)hdl.value);
20793859Sml29623 	printf("\n\thead index %d", hdl.bits.ldw.head);
20803859Sml29623 	printf("\n\tkick value is 0x%0llx",
20813859Sml29623 		(long long)kick.value);
20823859Sml29623 	printf("\n\ttail index %d\n", kick.bits.ldw.tail);
20833859Sml29623 
20843859Sml29623 	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
20853859Sml29623 	printf("\n\tControl statue is 0x%0llx", (long long)cs.value);
20863859Sml29623 	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
20873859Sml29623 
20883859Sml29623 	(void) npi_txc_control(handle, OP_GET, &control);
20893859Sml29623 	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
20903859Sml29623 	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
20913859Sml29623 	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
20923859Sml29623 
20933859Sml29623 	printf("\n\tTXC port control 0x%0llx",
20943859Sml29623 		(long long)control.value);
20953859Sml29623 	printf("\n\tTXC port bitmap 0x%x", bitmap);
20963859Sml29623 	printf("\n\tTXC max burst %d", burst);
20973859Sml29623 	printf("\n\tTXC bytes xmt %d\n", bytes);
20983859Sml29623 
20993859Sml29623 	{
21003859Sml29623 		ipp_status_t status;
21013859Sml29623 
21023859Sml29623 		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
21035125Sjoycey #if defined(__i386)
21045125Sjoycey 		printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value);
21055125Sjoycey #else
21063859Sml29623 		printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value);
21075125Sjoycey #endif
21083859Sml29623 	}
21093859Sml29623 }
21103859Sml29623 
/*
 * nxge_tdc_hvio_setup
 *
 *	Record the hypervisor (HV) view of <channel>'s transmit DMA
 *	areas: copy the original IO address and length of both the
 *	data-buffer area and the control area into the ring's
 *	hv_tx_buf_* / hv_tx_cntl_* fields, and clear hv_set.
 *	NOTE(review): presumably hv_set is raised later, once the HV
 *	mapping is actually established — confirm against callers.
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel to map.
 *
 * Notes:
 *	Compiled only on sun4v with NIU_LP_WORKAROUND defined.
 *
 * NPI/NXGE function calls:
 *	na
 *
 * Context:
 *	Service domain?
 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
static void
nxge_tdc_hvio_setup(
	nxge_t *nxgep, int channel)
{
	nxge_dma_common_t	*data;
	nxge_dma_common_t	*control;
	tx_ring_t 		*ring;

	ring = nxgep->tx_rings->rings[channel];
	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];

	ring->hv_set = B_FALSE;

	/* HV view of the transmit data-buffer area. */
	ring->hv_tx_buf_base_ioaddr_pp =
	    (uint64_t)data->orig_ioaddr_pp;
	ring->hv_tx_buf_ioaddr_size =
	    (uint64_t)data->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
		"hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
		"orig vatopa base io $%p orig_len 0x%llx (%d)",
		ring->hv_tx_buf_base_ioaddr_pp,
		ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
		data->ioaddr_pp, data->orig_vatopa,
		data->orig_alength, data->orig_alength));

	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];

	/* HV view of the control (descriptor/mailbox) area. */
	ring->hv_tx_cntl_base_ioaddr_pp =
	    (uint64_t)control->orig_ioaddr_pp;
	ring->hv_tx_cntl_ioaddr_size =
	    (uint64_t)control->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
		"hv cntl base io $%p orig ioaddr_pp ($%p) "
		"orig vatopa ($%p) size 0x%llx (%d 0x%x)",
		ring->hv_tx_cntl_base_ioaddr_pp,
		control->orig_ioaddr_pp, control->orig_vatopa,
		ring->hv_tx_cntl_ioaddr_size,
		control->orig_alength, control->orig_alength));
}
#endif
21713859Sml29623 
21726495Sspeer static nxge_status_t
21736495Sspeer nxge_map_txdma(p_nxge_t nxgep, int channel)
21746495Sspeer {
21756495Sspeer 	nxge_dma_common_t	**pData;
21766495Sspeer 	nxge_dma_common_t	**pControl;
21776495Sspeer 	tx_ring_t 		**pRing, *ring;
21786495Sspeer 	tx_mbox_t		**mailbox;
21796495Sspeer 	uint32_t		num_chunks;
21806495Sspeer 
21816495Sspeer 	nxge_status_t		status = NXGE_OK;
21826495Sspeer 
21836495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
21846495Sspeer 
21856495Sspeer 	if (!nxgep->tx_cntl_pool_p->buf_allocated) {
21866495Sspeer 		if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
21876495Sspeer 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
21886495Sspeer 			    "<== nxge_map_txdma: buf not allocated"));
21896495Sspeer 			return (NXGE_ERROR);
21906495Sspeer 		}
21913859Sml29623 	}
21923859Sml29623 
21936495Sspeer 	if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
21946495Sspeer 		return (NXGE_ERROR);
21956495Sspeer 
21966495Sspeer 	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
21976495Sspeer 	pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
21986495Sspeer 	pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
21996495Sspeer 	pRing = &nxgep->tx_rings->rings[channel];
22006495Sspeer 	mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
22016495Sspeer 
22026495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
22033859Sml29623 		"tx_rings $%p tx_desc_rings $%p",
22046495Sspeer 		nxgep->tx_rings, nxgep->tx_rings->rings));
22053859Sml29623 
22063859Sml29623 	/*
22076495Sspeer 	 * Map descriptors from the buffer pools for <channel>.
22086495Sspeer 	 */
22096495Sspeer 
22106495Sspeer 	/*
22116495Sspeer 	 * Set up and prepare buffer blocks, descriptors
22126495Sspeer 	 * and mailbox.
22133859Sml29623 	 */
22146495Sspeer 	status = nxge_map_txdma_channel(nxgep, channel,
22156495Sspeer 	    pData, pRing, num_chunks, pControl, mailbox);
22166495Sspeer 	if (status != NXGE_OK) {
22176495Sspeer 		NXGE_ERROR_MSG((nxgep, MEM3_CTL,
22186495Sspeer 			"==> nxge_map_txdma(%d): nxge_map_txdma_channel() "
22196495Sspeer 			"returned 0x%x",
22206495Sspeer 			nxgep, channel, status));
22216495Sspeer 		return (status);
22226495Sspeer 	}
22236495Sspeer 
22246495Sspeer 	ring = *pRing;
22256495Sspeer 
22266495Sspeer 	ring->index = (uint16_t)channel;
22276495Sspeer 	ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
22286495Sspeer 
22296495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
22306495Sspeer 	if (isLDOMguest(nxgep)) {
22316495Sspeer 		(void) nxge_tdc_lp_conf(nxgep, channel);
22326495Sspeer 	} else {
22336495Sspeer 		nxge_tdc_hvio_setup(nxgep, channel);
22346495Sspeer 	}
22353859Sml29623 #endif
22366495Sspeer 
22376495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
22386495Sspeer 	    "(status 0x%x channel %d)", status, channel));
22393859Sml29623 
22403859Sml29623 	return (status);
22413859Sml29623 }
22423859Sml29623 
22433859Sml29623 static nxge_status_t
22443859Sml29623 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
22453859Sml29623 	p_nxge_dma_common_t *dma_buf_p,
22463859Sml29623 	p_tx_ring_t *tx_desc_p,
22473859Sml29623 	uint32_t num_chunks,
22483859Sml29623 	p_nxge_dma_common_t *dma_cntl_p,
22493859Sml29623 	p_tx_mbox_t *tx_mbox_p)
22503859Sml29623 {
22513859Sml29623 	int	status = NXGE_OK;
22523859Sml29623 
22533859Sml29623 	/*
22543859Sml29623 	 * Set up and prepare buffer blocks, descriptors
22553859Sml29623 	 * and mailbox.
22563859Sml29623 	 */
22576495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
22583859Sml29623 		"==> nxge_map_txdma_channel (channel %d)", channel));
22593859Sml29623 	/*
22603859Sml29623 	 * Transmit buffer blocks
22613859Sml29623 	 */
22623859Sml29623 	status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
22633859Sml29623 			dma_buf_p, tx_desc_p, num_chunks);
22643859Sml29623 	if (status != NXGE_OK) {
22653859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
22663859Sml29623 			"==> nxge_map_txdma_channel (channel %d): "
22673859Sml29623 			"map buffer failed 0x%x", channel, status));
22683859Sml29623 		goto nxge_map_txdma_channel_exit;
22693859Sml29623 	}
22703859Sml29623 
22713859Sml29623 	/*
22723859Sml29623 	 * Transmit block ring, and mailbox.
22733859Sml29623 	 */
22743859Sml29623 	nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
22753859Sml29623 					tx_mbox_p);
22763859Sml29623 
22773859Sml29623 	goto nxge_map_txdma_channel_exit;
22783859Sml29623 
22793859Sml29623 nxge_map_txdma_channel_fail1:
22806495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
22813859Sml29623 		"==> nxge_map_txdma_channel: unmap buf"
22823859Sml29623 		"(status 0x%x channel %d)",
22833859Sml29623 		status, channel));
22843859Sml29623 	nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
22853859Sml29623 
22863859Sml29623 nxge_map_txdma_channel_exit:
22876495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
22883859Sml29623 		"<== nxge_map_txdma_channel: "
22893859Sml29623 		"(status 0x%x channel %d)",
22903859Sml29623 		status, channel));
22913859Sml29623 
22923859Sml29623 	return (status);
22933859Sml29623 }
22943859Sml29623 
22953859Sml29623 /*ARGSUSED*/
22963859Sml29623 static void
22976495Sspeer nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
22983859Sml29623 {
22996495Sspeer 	tx_ring_t *ring;
23006495Sspeer 	tx_mbox_t *mailbox;
23016495Sspeer 
23023859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23033859Sml29623 		"==> nxge_unmap_txdma_channel (channel %d)", channel));
23043859Sml29623 	/*
23053859Sml29623 	 * unmap tx block ring, and mailbox.
23063859Sml29623 	 */
23076495Sspeer 	ring = nxgep->tx_rings->rings[channel];
23086495Sspeer 	mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
23096495Sspeer 
23106495Sspeer 	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);
23113859Sml29623 
23123859Sml29623 	/* unmap buffer blocks */
23136495Sspeer 	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);
23146495Sspeer 
23156495Sspeer 	nxge_free_txb(nxgep, channel);
23163859Sml29623 
23173859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
23183859Sml29623 }
23193859Sml29623 
23206495Sspeer /*
23216495Sspeer  * nxge_map_txdma_channel_cfg_ring
23226495Sspeer  *
23236495Sspeer  *	Map a TDC into our kernel space.
23246495Sspeer  *	This function allocates all of the per-channel data structures.
23256495Sspeer  *
23266495Sspeer  * Arguments:
23276495Sspeer  * 	nxgep
23286495Sspeer  * 	dma_channel	The channel to map.
23296495Sspeer  *	dma_cntl_p
23306495Sspeer  *	tx_ring_p	dma_channel's transmit ring
23316495Sspeer  *	tx_mbox_p	dma_channel's mailbox
23326495Sspeer  *
23336495Sspeer  * Notes:
23346495Sspeer  *
23356495Sspeer  * NPI/NXGE function calls:
23366495Sspeer  *	nxge_setup_dma_common()
23376495Sspeer  *
23386495Sspeer  * Registers accessed:
23396495Sspeer  *	none.
23406495Sspeer  *
23416495Sspeer  * Context:
23426495Sspeer  *	Any domain
23436495Sspeer  */
/*ARGSUSED*/
static void
nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
	p_nxge_dma_common_t *dma_cntl_p,
	p_tx_ring_t tx_ring_p,
	p_tx_mbox_t *tx_mbox_p)
{
	p_tx_mbox_t 		mboxp;		/* mailbox allocated below */
	p_nxge_dma_common_t 	cntl_dmap;	/* channel control DMA space */
	p_nxge_dma_common_t 	dmap;
	p_tx_rng_cfig_t		tx_ring_cfig_p;
	p_tx_ring_kick_t	tx_ring_kick_p;
	p_tx_cs_t		tx_cs_p;
	p_tx_dma_ent_msk_t	tx_evmask_p;
	p_txdma_mbh_t		mboxh_p;
	p_txdma_mbl_t		mboxl_p;
	uint64_t		tx_desc_len;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	/*
	 * Set up the descriptor ring's DMA common area from the channel's
	 * control space: tx_ring_size entries of sizeof (tx_desc_t) each.
	 */
	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
			sizeof (tx_desc_t));
	/*
	 * Zero out transmit ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);
	/* Reset the software (shadow) copies of the channel registers. */
	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
	tx_cs_p = &(tx_ring_p->tx_cs);
	tx_evmask_p = &(tx_ring_p->tx_evmask);
	tx_ring_cfig_p->value = 0;
	tx_ring_kick_p->value = 0;
	tx_cs_p->value = 0;
	tx_evmask_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
		dma_channel,
		dmap->dma_cookie.dmac_laddress));

	/*
	 * TX_RNG_CFIG combines the ring's DMA base address with its
	 * length.  NOTE(review): the length field appears to be
	 * programmed in units of 8 descriptors (hence the >> 3) --
	 * confirm against the TX_RNG_CFIG register definition.
	 */
	tx_ring_cfig_p->value = 0;
	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
	tx_ring_cfig_p->value =
		(dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
		(tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
		dma_channel,
		tx_ring_cfig_p->value));

	/* Request a channel reset when this control/status value is used. */
	tx_cs_p->bits.ldw.rst = 1;

	/* Map in mailbox */
	mboxp = (p_tx_mbox_t)
		KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
	mboxh_p->value = mboxl_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
		dmap->dma_cookie.dmac_laddress));

	/*
	 * Split the mailbox DMA address across the high/low halves of
	 * the shadow mailbox registers, using the shift/mask constants
	 * from the TXDMA register layout.
	 */
	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
				TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);

	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
				TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
		dmap->dma_cookie.dmac_laddress));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
		"mbox $%p",
		mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
	/* Clear all logical-page shadow registers, then enable pages 0/1. */
	tx_ring_p->page_valid.value = 0;
	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
	tx_ring_p->page_hdl.value = 0;

	tx_ring_p->page_valid.bits.ldw.page0 = 1;
	tx_ring_p->page_valid.bits.ldw.page1 = 1;

	/* Default maximum DMA burst size for this channel. */
	tx_ring_p->max_burst.value = 0;
	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;

	/* Hand the newly allocated mailbox back to the caller. */
	*tx_mbox_p = mboxp;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				"<== nxge_map_txdma_channel_cfg_ring"));
}
24443859Sml29623 
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
		tx_ring_p->tdc));

	/*
	 * Free the mailbox structure allocated in
	 * nxge_map_txdma_channel_cfg_ring().  The mailbox's DMA area
	 * was set up from the channel's control space, so apparently no
	 * separate DMA free is required here -- only the kmem object.
	 * (tx_ring_p is used only for the debug message above.)
	 */
	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"<== nxge_unmap_txdma_channel_cfg_ring"));
}
24593859Sml29623 
24606495Sspeer /*
24616495Sspeer  * nxge_map_txdma_channel_buf_ring
24626495Sspeer  *
24636495Sspeer  *
24646495Sspeer  * Arguments:
24656495Sspeer  * 	nxgep
24666495Sspeer  * 	channel		The channel to map.
24676495Sspeer  *	dma_buf_p
24686495Sspeer  *	tx_desc_p	channel's descriptor ring
24696495Sspeer  *	num_chunks
24706495Sspeer  *
24716495Sspeer  * Notes:
24726495Sspeer  *
24736495Sspeer  * NPI/NXGE function calls:
24746495Sspeer  *	nxge_setup_dma_common()
24756495Sspeer  *
24766495Sspeer  * Registers accessed:
24776495Sspeer  *	none.
24786495Sspeer  *
24796495Sspeer  * Context:
24806495Sspeer  *	Any domain
24816495Sspeer  */
24823859Sml29623 static nxge_status_t
24833859Sml29623 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
24843859Sml29623 	p_nxge_dma_common_t *dma_buf_p,
24853859Sml29623 	p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
24863859Sml29623 {
24873859Sml29623 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
24883859Sml29623 	p_nxge_dma_common_t 	dmap;
24893859Sml29623 	nxge_os_dma_handle_t	tx_buf_dma_handle;
24903859Sml29623 	p_tx_ring_t 		tx_ring_p;
24913859Sml29623 	p_tx_msg_t 		tx_msg_ring;
24923859Sml29623 	nxge_status_t		status = NXGE_OK;
24933859Sml29623 	int			ddi_status = DDI_SUCCESS;
24943859Sml29623 	int			i, j, index;
24953859Sml29623 	uint32_t		size, bsize;
24963859Sml29623 	uint32_t 		nblocks, nmsgs;
24973859Sml29623 
24983859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24993859Sml29623 		"==> nxge_map_txdma_channel_buf_ring"));
25003859Sml29623 
25013859Sml29623 	dma_bufp = tmp_bufp = *dma_buf_p;
25023859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25033859Sml29623 		" nxge_map_txdma_channel_buf_ring: channel %d to map %d "
25043859Sml29623 		"chunks bufp $%p",
25053859Sml29623 		channel, num_chunks, dma_bufp));
25063859Sml29623 
25073859Sml29623 	nmsgs = 0;
25083859Sml29623 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
25093859Sml29623 		nmsgs += tmp_bufp->nblocks;
25103859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25113859Sml29623 			"==> nxge_map_txdma_channel_buf_ring: channel %d "
25123859Sml29623 			"bufp $%p nblocks %d nmsgs %d",
25133859Sml29623 			channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
25143859Sml29623 	}
25153859Sml29623 	if (!nmsgs) {
25163859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25173859Sml29623 			"<== nxge_map_txdma_channel_buf_ring: channel %d "
25183859Sml29623 			"no msg blocks",
25193859Sml29623 			channel));
25203859Sml29623 		status = NXGE_ERROR;
25213859Sml29623 		goto nxge_map_txdma_channel_buf_ring_exit;
25223859Sml29623 	}
25233859Sml29623 
25243859Sml29623 	tx_ring_p = (p_tx_ring_t)
25253859Sml29623 		KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
25263859Sml29623 	MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
25273859Sml29623 		(void *)nxgep->interrupt_cookie);
25283952Sml29623 
25296713Sspeer 	(void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE);
2530*6886Sspeer 	tx_ring_p->tx_ring_busy = B_FALSE;
25313952Sml29623 	tx_ring_p->nxgep = nxgep;
25323952Sml29623 	tx_ring_p->serial = nxge_serialize_create(nmsgs,
25333952Sml29623 				nxge_serial_tx, tx_ring_p);
25343859Sml29623 	/*
25353859Sml29623 	 * Allocate transmit message rings and handles for packets
25363859Sml29623 	 * not to be copied to premapped buffers.
25373859Sml29623 	 */
25383859Sml29623 	size = nmsgs * sizeof (tx_msg_t);
25393859Sml29623 	tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
25403859Sml29623 	for (i = 0; i < nmsgs; i++) {
25413859Sml29623 		ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
25423859Sml29623 				DDI_DMA_DONTWAIT, 0,
25433859Sml29623 				&tx_msg_ring[i].dma_handle);
25443859Sml29623 		if (ddi_status != DDI_SUCCESS) {
25453859Sml29623 			status |= NXGE_DDI_FAILED;
25463859Sml29623 			break;
25473859Sml29623 		}
25483859Sml29623 	}
25493859Sml29623 	if (i < nmsgs) {
25504185Sspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
25514185Sspeer 		    "Allocate handles failed."));
25523859Sml29623 		goto nxge_map_txdma_channel_buf_ring_fail1;
25533859Sml29623 	}
25543859Sml29623 
25553859Sml29623 	tx_ring_p->tdc = channel;
25563859Sml29623 	tx_ring_p->tx_msg_ring = tx_msg_ring;
25573859Sml29623 	tx_ring_p->tx_ring_size = nmsgs;
25583859Sml29623 	tx_ring_p->num_chunks = num_chunks;
25593859Sml29623 	if (!nxge_tx_intr_thres) {
25603859Sml29623 		nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
25613859Sml29623 	}
25623859Sml29623 	tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
25633859Sml29623 	tx_ring_p->rd_index = 0;
25643859Sml29623 	tx_ring_p->wr_index = 0;
25653859Sml29623 	tx_ring_p->ring_head.value = 0;
25663859Sml29623 	tx_ring_p->ring_kick_tail.value = 0;
25673859Sml29623 	tx_ring_p->descs_pending = 0;
25683859Sml29623 
25693859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25703859Sml29623 		"==> nxge_map_txdma_channel_buf_ring: channel %d "
25713859Sml29623 		"actual tx desc max %d nmsgs %d "
25723859Sml29623 		"(config nxge_tx_ring_size %d)",
25733859Sml29623 		channel, tx_ring_p->tx_ring_size, nmsgs,
25743859Sml29623 		nxge_tx_ring_size));
25753859Sml29623 
25763859Sml29623 	/*
25773859Sml29623 	 * Map in buffers from the buffer pool.
25783859Sml29623 	 */
25793859Sml29623 	index = 0;
25803859Sml29623 	bsize = dma_bufp->block_size;
25813859Sml29623 
25823859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
25833859Sml29623 		"dma_bufp $%p tx_rng_p $%p "
25843859Sml29623 		"tx_msg_rng_p $%p bsize %d",
25853859Sml29623 		dma_bufp, tx_ring_p, tx_msg_ring, bsize));
25863859Sml29623 
25873859Sml29623 	tx_buf_dma_handle = dma_bufp->dma_handle;
25883859Sml29623 	for (i = 0; i < num_chunks; i++, dma_bufp++) {
25893859Sml29623 		bsize = dma_bufp->block_size;
25903859Sml29623 		nblocks = dma_bufp->nblocks;
25913859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25923859Sml29623 			"==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
25933859Sml29623 			"size %d dma_bufp $%p",
25943859Sml29623 			i, sizeof (nxge_dma_common_t), dma_bufp));
25953859Sml29623 
25963859Sml29623 		for (j = 0; j < nblocks; j++) {
25973859Sml29623 			tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
25983859Sml29623 			dmap = &tx_msg_ring[index++].buf_dma;
25993859Sml29623 #ifdef TX_MEM_DEBUG
26003859Sml29623 			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26013859Sml29623 				"==> nxge_map_txdma_channel_buf_ring: j %d"
26023859Sml29623 				"dmap $%p", i, dmap));
26033859Sml29623 #endif
26043859Sml29623 			nxge_setup_dma_common(dmap, dma_bufp, 1,
26053859Sml29623 				bsize);
26063859Sml29623 		}
26073859Sml29623 	}
26083859Sml29623 
26093859Sml29623 	if (i < num_chunks) {
26104185Sspeer 		status = NXGE_ERROR;
26113859Sml29623 		goto nxge_map_txdma_channel_buf_ring_fail1;
26123859Sml29623 	}
26133859Sml29623 
26143859Sml29623 	*tx_desc_p = tx_ring_p;
26153859Sml29623 
26163859Sml29623 	goto nxge_map_txdma_channel_buf_ring_exit;
26173859Sml29623 
26183859Sml29623 nxge_map_txdma_channel_buf_ring_fail1:
26193952Sml29623 	if (tx_ring_p->serial) {
26203952Sml29623 		nxge_serialize_destroy(tx_ring_p->serial);
26213952Sml29623 		tx_ring_p->serial = NULL;
26223952Sml29623 	}
26233952Sml29623 
26243859Sml29623 	index--;
26253859Sml29623 	for (; index >= 0; index--) {
26264185Sspeer 		if (tx_msg_ring[index].dma_handle != NULL) {
26274185Sspeer 			ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
26283859Sml29623 		}
26293859Sml29623 	}
26303859Sml29623 	MUTEX_DESTROY(&tx_ring_p->lock);
26314185Sspeer 	KMEM_FREE(tx_msg_ring, size);
26323859Sml29623 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
26333859Sml29623 
26344185Sspeer 	status = NXGE_ERROR;
26354185Sspeer 
26363859Sml29623 nxge_map_txdma_channel_buf_ring_exit:
26373859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26383859Sml29623 		"<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
26393859Sml29623 
26403859Sml29623 	return (status);
26413859Sml29623 }
26423859Sml29623 
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
{
	p_tx_msg_t 		tx_msg_ring;
	p_tx_msg_t 		tx_msg_p;
	int			i;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_unmap_txdma_channel_buf_ring"));
	/* Nothing to do if the ring was never mapped. */
	if (tx_ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
			"<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
		return;
	}
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_unmap_txdma_channel_buf_ring: channel %d",
		tx_ring_p->tdc));

	tx_msg_ring = tx_ring_p->tx_msg_ring;

	/*
	 * Since the serialization thread, timer thread and
	 * interrupt thread can all call the transmit reclaim,
	 * the unmapping function needs to acquire the lock
	 * to free those buffers which were transmitted
	 * by the hardware already.
	 */
	MUTEX_ENTER(&tx_ring_p->lock);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
	    "channel %d",
	    tx_ring_p->tdc));
	/* Reclaim (free) all descriptors the hardware has completed. */
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	/* Free any mblks still attached to ring entries after reclaim. */
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		tx_msg_p = &tx_msg_ring[i];
		if (tx_msg_p->tx_message != NULL) {
			freemsg(tx_msg_p->tx_message);
			tx_msg_p->tx_message = NULL;
		}
	}

	/* Release the per-message DMA handles. */
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		if (tx_msg_ring[i].dma_handle != NULL) {
			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
		}
		tx_msg_ring[i].dma_handle = NULL;
	}

	MUTEX_EXIT(&tx_ring_p->lock);

	/* Tear down the serializer before destroying the ring itself. */
	if (tx_ring_p->serial) {
		nxge_serialize_destroy(tx_ring_p->serial);
		tx_ring_p->serial = NULL;
	}

	MUTEX_DESTROY(&tx_ring_p->lock);
	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"<== nxge_unmap_txdma_channel_buf_ring"));
}
27073859Sml29623 
27083859Sml29623 static nxge_status_t
27096495Sspeer nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
27103859Sml29623 {
27113859Sml29623 	p_tx_rings_t 		tx_rings;
27123859Sml29623 	p_tx_ring_t 		*tx_desc_rings;
27133859Sml29623 	p_tx_mbox_areas_t 	tx_mbox_areas_p;
27143859Sml29623 	p_tx_mbox_t		*tx_mbox_p;
27153859Sml29623 	nxge_status_t		status = NXGE_OK;
27163859Sml29623 
27173859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
27183859Sml29623 
27193859Sml29623 	tx_rings = nxgep->tx_rings;
27203859Sml29623 	if (tx_rings == NULL) {
27213859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
27223859Sml29623 			"<== nxge_txdma_hw_start: NULL ring pointer"));
27233859Sml29623 		return (NXGE_ERROR);
27243859Sml29623 	}
27253859Sml29623 	tx_desc_rings = tx_rings->rings;
27263859Sml29623 	if (tx_desc_rings == NULL) {
27273859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
27283859Sml29623 			"<== nxge_txdma_hw_start: NULL ring pointers"));
27293859Sml29623 		return (NXGE_ERROR);
27303859Sml29623 	}
27313859Sml29623 
27326495Sspeer 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
27336495Sspeer 	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
27343859Sml29623 
27353859Sml29623 	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
27363859Sml29623 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
27373859Sml29623 
27386495Sspeer 	status = nxge_txdma_start_channel(nxgep, channel,
27396495Sspeer 	    (p_tx_ring_t)tx_desc_rings[channel],
27406495Sspeer 	    (p_tx_mbox_t)tx_mbox_p[channel]);
27416495Sspeer 	if (status != NXGE_OK) {
27426495Sspeer 		goto nxge_txdma_hw_start_fail1;
27433859Sml29623 	}
27443859Sml29623 
27453859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
27463859Sml29623 		"tx_rings $%p rings $%p",
27473859Sml29623 		nxgep->tx_rings, nxgep->tx_rings->rings));
27483859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
27493859Sml29623 		"tx_rings $%p tx_desc_rings $%p",
27503859Sml29623 		nxgep->tx_rings, tx_desc_rings));
27513859Sml29623 
27523859Sml29623 	goto nxge_txdma_hw_start_exit;
27533859Sml29623 
27543859Sml29623 nxge_txdma_hw_start_fail1:
27553859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27563859Sml29623 		"==> nxge_txdma_hw_start: disable "
27576495Sspeer 		"(status 0x%x channel %d)", status, channel));
27583859Sml29623 
27593859Sml29623 nxge_txdma_hw_start_exit:
27603859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27613859Sml29623 		"==> nxge_txdma_hw_start: (status 0x%x)", status));
27623859Sml29623 
27633859Sml29623 	return (status);
27643859Sml29623 }
27653859Sml29623 
27666495Sspeer /*
27676495Sspeer  * nxge_txdma_start_channel
27686495Sspeer  *
27696495Sspeer  *	Start a TDC.
27706495Sspeer  *
27716495Sspeer  * Arguments:
27726495Sspeer  * 	nxgep
27736495Sspeer  * 	channel		The channel to start.
27746495Sspeer  * 	tx_ring_p	channel's transmit descriptor ring.
27756495Sspeer  * 	tx_mbox_p	channel's mailbox.
27766495Sspeer  *
27776495Sspeer  * Notes:
27786495Sspeer  *
27796495Sspeer  * NPI/NXGE function calls:
27806495Sspeer  *	nxge_reset_txdma_channel()
27816495Sspeer  *	nxge_init_txdma_channel_event_mask()
27826495Sspeer  *	nxge_enable_txdma_channel()
27836495Sspeer  *
27846495Sspeer  * Registers accessed:
27856495Sspeer  *	none directly (see functions above).
27866495Sspeer  *
27876495Sspeer  * Context:
27886495Sspeer  *	Any domain
27896495Sspeer  */
static nxge_status_t
nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)

{
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_txdma_start_channel (channel %d)", channel));
	/*
	 * TXDMA/TXC must be in stopped state.
	 */
	(void) nxge_txdma_stop_inj_err(nxgep, channel);

	/*
	 * Reset TXDMA channel
	 */
	/* Build a control/status value with only the reset bit set. */
	tx_ring_p->tx_cs.value = 0;
	tx_ring_p->tx_cs.bits.ldw.rst = 1;
	status = nxge_reset_txdma_channel(nxgep, channel,
			tx_ring_p->tx_cs.value);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_txdma_start_channel (channel %d)"
			" reset channel failed 0x%x", channel, status));
		goto nxge_txdma_start_channel_exit;
	}

	/*
	 * Initialize the TXDMA channel specific FZC control
	 * configurations. These FZC registers are pertaining
	 * to each TX channel (i.e. logical pages).
	 */
	/*
	 * NOTE(review): skipped for LDOM guests -- presumably the FZC
	 * registers are accessible only from the service domain.
	 */
	if (!isLDOMguest(nxgep)) {
		status = nxge_init_fzc_txdma_channel(nxgep, channel,
		    tx_ring_p, tx_mbox_p);
		if (status != NXGE_OK) {
			goto nxge_txdma_start_channel_exit;
		}
	}

	/*
	 * Initialize the event masks.
	 */
	/* A zero mask value: all events unmasked. */
	tx_ring_p->tx_evmask.value = 0;
	status = nxge_init_txdma_channel_event_mask(nxgep,
	    channel, &tx_ring_p->tx_evmask);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

	/*
	 * Load TXDMA descriptors, buffers, mailbox,
	 * initialise the DMA channels and
	 * enable each DMA channel.
	 */
	status = nxge_enable_txdma_channel(nxgep, channel,
			tx_ring_p, tx_mbox_p);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

nxge_txdma_start_channel_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));

	return (status);
}
28573859Sml29623 
28586495Sspeer /*
28596495Sspeer  * nxge_txdma_stop_channel
28606495Sspeer  *
28616495Sspeer  *	Stop a TDC.
28626495Sspeer  *
28636495Sspeer  * Arguments:
28646495Sspeer  * 	nxgep
28656495Sspeer  * 	channel		The channel to stop.
28666495Sspeer  * 	tx_ring_p	channel's transmit descriptor ring.
28676495Sspeer  * 	tx_mbox_p	channel's mailbox.
28686495Sspeer  *
28696495Sspeer  * Notes:
28706495Sspeer  *
28716495Sspeer  * NPI/NXGE function calls:
28726495Sspeer  *	nxge_txdma_stop_inj_err()
28736495Sspeer  *	nxge_reset_txdma_channel()
28746495Sspeer  *	nxge_init_txdma_channel_event_mask()
28756495Sspeer  *	nxge_init_txdma_channel_cntl_stat()
28766495Sspeer  *	nxge_disable_txdma_channel()
28776495Sspeer  *
28786495Sspeer  * Registers accessed:
28796495Sspeer  *	none directly (see functions above).
28806495Sspeer  *
28816495Sspeer  * Context:
28826495Sspeer  *	Any domain
28836495Sspeer  */
28843859Sml29623 /*ARGSUSED*/
28853859Sml29623 static nxge_status_t
28866495Sspeer nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
28873859Sml29623 {
28886495Sspeer 	p_tx_ring_t tx_ring_p;
28896495Sspeer 	int status = NXGE_OK;
28903859Sml29623 
28913859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
28923859Sml29623 		"==> nxge_txdma_stop_channel: channel %d", channel));
28933859Sml29623 
28943859Sml29623 	/*
28953859Sml29623 	 * Stop (disable) TXDMA and TXC (if stop bit is set
28963859Sml29623 	 * and STOP_N_GO bit not set, the TXDMA reset state will
28973859Sml29623 	 * not be set if reset TXDMA.
28983859Sml29623 	 */
28993859Sml29623 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
29003859Sml29623 
29016495Sspeer 	tx_ring_p = nxgep->tx_rings->rings[channel];
29026495Sspeer 
29033859Sml29623 	/*
29043859Sml29623 	 * Reset TXDMA channel
29053859Sml29623 	 */
29063859Sml29623 	tx_ring_p->tx_cs.value = 0;
29073859Sml29623 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
29083859Sml29623 	status = nxge_reset_txdma_channel(nxgep, channel,
29093859Sml29623 			tx_ring_p->tx_cs.value);
29103859Sml29623 	if (status != NXGE_OK) {
29113859Sml29623 		goto nxge_txdma_stop_channel_exit;
29123859Sml29623 	}
29133859Sml29623 
29143859Sml29623 #ifdef HARDWARE_REQUIRED
29153859Sml29623 	/* Set up the interrupt event masks. */
29163859Sml29623 	tx_ring_p->tx_evmask.value = 0;
29173859Sml29623 	status = nxge_init_txdma_channel_event_mask(nxgep,
29183859Sml29623 			channel, &tx_ring_p->tx_evmask);
29193859Sml29623 	if (status != NXGE_OK) {
29203859Sml29623 		goto nxge_txdma_stop_channel_exit;
29213859Sml29623 	}
29223859Sml29623 
29233859Sml29623 	/* Initialize the DMA control and status register */
29243859Sml29623 	tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
29253859Sml29623 	status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
29263859Sml29623 			tx_ring_p->tx_cs.value);
29273859Sml29623 	if (status != NXGE_OK) {
29283859Sml29623 		goto nxge_txdma_stop_channel_exit;
29293859Sml29623 	}
29303859Sml29623 
29316495Sspeer 	tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
29326495Sspeer 
29333859Sml29623 	/* Disable channel */
29343859Sml29623 	status = nxge_disable_txdma_channel(nxgep, channel,
29356495Sspeer 	    tx_ring_p, tx_mbox_p);
29363859Sml29623 	if (status != NXGE_OK) {
29373859Sml29623 		goto nxge_txdma_start_channel_exit;
29383859Sml29623 	}
29393859Sml29623 
29403859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
29413859Sml29623 		"==> nxge_txdma_stop_channel: event done"));
29423859Sml29623 
29433859Sml29623 #endif
29443859Sml29623 
29453859Sml29623 nxge_txdma_stop_channel_exit:
29463859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
29473859Sml29623 	return (status);
29483859Sml29623 }
29493859Sml29623 
29506495Sspeer /*
29516495Sspeer  * nxge_txdma_get_ring
29526495Sspeer  *
29536495Sspeer  *	Get the ring for a TDC.
29546495Sspeer  *
29556495Sspeer  * Arguments:
29566495Sspeer  * 	nxgep
29576495Sspeer  * 	channel
29586495Sspeer  *
29596495Sspeer  * Notes:
29606495Sspeer  *
29616495Sspeer  * NPI/NXGE function calls:
29626495Sspeer  *
29636495Sspeer  * Registers accessed:
29646495Sspeer  *
29656495Sspeer  * Context:
29666495Sspeer  *	Any domain
29676495Sspeer  */
29683859Sml29623 static p_tx_ring_t
29693859Sml29623 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
29703859Sml29623 {
29716495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
29726495Sspeer 	int tdc;
29733859Sml29623 
29743859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
29753859Sml29623 
29766495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
29773859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
29786495Sspeer 		    "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
29796495Sspeer 		goto return_null;
29803859Sml29623 	}
29813859Sml29623 
29826495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
29836495Sspeer 		if ((1 << tdc) & set->owned.map) {
29846495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
29856495Sspeer 			if (ring) {
29866495Sspeer 				if (channel == ring->tdc) {
29876495Sspeer 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
29886495Sspeer 					    "<== nxge_txdma_get_ring: "
29896495Sspeer 					    "tdc %d ring $%p", tdc, ring));
29906495Sspeer 					return (ring);
29916495Sspeer 				}
29926495Sspeer 			}
29933859Sml29623 		}
29943859Sml29623 	}
29953859Sml29623 
29966495Sspeer return_null:
29976495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
29986495Sspeer 		"ring not found"));
29996495Sspeer 
30003859Sml29623 	return (NULL);
30013859Sml29623 }
30023859Sml29623 
30036495Sspeer /*
30046495Sspeer  * nxge_txdma_get_mbox
30056495Sspeer  *
30066495Sspeer  *	Get the mailbox for a TDC.
30076495Sspeer  *
30086495Sspeer  * Arguments:
30096495Sspeer  * 	nxgep
30106495Sspeer  * 	channel
30116495Sspeer  *
30126495Sspeer  * Notes:
30136495Sspeer  *
30146495Sspeer  * NPI/NXGE function calls:
30156495Sspeer  *
30166495Sspeer  * Registers accessed:
30176495Sspeer  *
30186495Sspeer  * Context:
30196495Sspeer  *	Any domain
30206495Sspeer  */
30213859Sml29623 static p_tx_mbox_t
30223859Sml29623 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
30233859Sml29623 {
30246495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
30256495Sspeer 	int tdc;
30263859Sml29623 
30273859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
30283859Sml29623 
30296495Sspeer 	if (nxgep->tx_mbox_areas_p == 0 ||
30306495Sspeer 	    nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
30316495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
30326495Sspeer 		    "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
30336495Sspeer 		goto return_null;
30343859Sml29623 	}
30353859Sml29623 
30366495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
30376495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
30386495Sspeer 		    "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
30396495Sspeer 		goto return_null;
30403859Sml29623 	}
30413859Sml29623 
30426495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
30436495Sspeer 		if ((1 << tdc) & set->owned.map) {
30446495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
30456495Sspeer 			if (ring) {
30466495Sspeer 				if (channel == ring->tdc) {
30476495Sspeer 					tx_mbox_t *mailbox = nxgep->
30486495Sspeer 					    tx_mbox_areas_p->
30496495Sspeer 					    txmbox_areas_p[tdc];
30506495Sspeer 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
30516495Sspeer 					    "<== nxge_txdma_get_mbox: tdc %d "
30526495Sspeer 					    "ring $%p", tdc, mailbox));
30536495Sspeer 					return (mailbox);
30546495Sspeer 				}
30556495Sspeer 			}
30563859Sml29623 		}
30573859Sml29623 	}
30583859Sml29623 
30596495Sspeer return_null:
30606495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
30616495Sspeer 		"mailbox not found"));
30626495Sspeer 
30633859Sml29623 	return (NULL);
30643859Sml29623 }
30653859Sml29623 
30666495Sspeer /*
30676495Sspeer  * nxge_tx_err_evnts
30686495Sspeer  *
30696495Sspeer  *	Recover a TDC.
30706495Sspeer  *
30716495Sspeer  * Arguments:
30726495Sspeer  * 	nxgep
30736495Sspeer  * 	index	The index to the TDC ring.
30746495Sspeer  * 	ldvp	Used to get the channel number ONLY.
30756495Sspeer  * 	cs	A copy of the bits from TX_CS.
30766495Sspeer  *
30776495Sspeer  * Notes:
30786495Sspeer  *	Calling tree:
30796495Sspeer  *	 nxge_tx_intr()
30806495Sspeer  *
30816495Sspeer  * NPI/NXGE function calls:
30826495Sspeer  *	npi_txdma_ring_error_get()
30836495Sspeer  *	npi_txdma_inj_par_error_get()
30846495Sspeer  *	nxge_txdma_fatal_err_recover()
30856495Sspeer  *
30866495Sspeer  * Registers accessed:
30876495Sspeer  *	TX_RNG_ERR_LOGH	DMC+0x40048 Transmit Ring Error Log High
30886495Sspeer  *	TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
30896495Sspeer  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
30906495Sspeer  *
30916495Sspeer  * Context:
30926495Sspeer  *	Any domain	XXX Remove code which accesses TDMC_INJ_PAR_ERR.
30936495Sspeer  */
/*ARGSUSED*/
static nxge_status_t
nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
{
	npi_handle_t		handle;
	npi_status_t		rs;
	uint8_t			channel;
	p_tx_ring_t 		*tx_rings;
	p_tx_ring_t 		tx_ring_p;
	p_nxge_tx_ring_stats_t	tdc_stats;
	boolean_t		txchan_fatal = B_FALSE;	/* any fatal TX_CS bit seen */
	nxge_status_t		status = NXGE_OK;
	tdmc_inj_par_err_t	par_err;
	uint32_t		value;

	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	channel = ldvp->channel;

	tx_rings = nxgep->tx_rings->rings;
	tx_ring_p = tx_rings[index];
	tdc_stats = tx_ring_p->tdc_stats;
	/*
	 * If any error that latches details into the ring error log
	 * registers is pending, capture TX_RNG_ERR_LOGH/LOGL into the
	 * per-TDC stats before handling the individual bits below.
	 */
	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
		(cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
		(cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
		if ((rs = npi_txdma_ring_error_get(handle, channel,
					&tdc_stats->errlog)) != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
	}

	/*
	 * For each error bit in the TX_CS snapshot: bump the matching
	 * statistic, post an FMA ereport, log, and mark the channel
	 * fatal so it is recovered at the bottom of this function.
	 */
	if (cs.bits.ldw.mbox_err) {
		tdc_stats->mbox_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
					NXGE_FM_EREPORT_TDMC_MBOX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_tx_err_evnts(channel %d): "
			"fatal error: mailbox", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.pkt_size_err) {
		tdc_stats->pkt_size_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
					NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_tx_err_evnts(channel %d): "
			"fatal error: pkt_size_err", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.tx_ring_oflow) {
		tdc_stats->tx_ring_oflow++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
					NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_tx_err_evnts(channel %d): "
			"fatal error: tx_ring_oflow", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.pref_buf_par_err) {
		tdc_stats->pre_buf_par_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
					NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_tx_err_evnts(channel %d): "
			"fatal error: pre_buf_par_err", channel));
		/* Clear error injection source for parity error */
		/* (read-modify-write of TDMC_INJ_PAR_ERR for this channel) */
		(void) npi_txdma_inj_par_error_get(handle, &value);
		par_err.value = value;
		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.nack_pref) {
		tdc_stats->nack_pref++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
					NXGE_FM_EREPORT_TDMC_NACK_PREF);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_tx_err_evnts(channel %d): "
			"fatal error: nack_pref", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.nack_pkt_rd) {
		tdc_stats->nack_pkt_rd++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
					NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_tx_err_evnts(channel %d): "
			"fatal error: nack_pkt_rd", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.conf_part_err) {
		tdc_stats->conf_part_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
					NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_tx_err_evnts(channel %d): "
			"fatal error: config_partition_err", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.pkt_prt_err) {
		tdc_stats->pkt_part_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
					NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_tx_err_evnts(channel %d): "
			"fatal error: pkt_prt_err", channel));
		txchan_fatal = B_TRUE;
	}

	/* Clear error injection source in case this is an injected error */
	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);

	/*
	 * If any fatal condition was found, recover the channel now
	 * and restore the FMA service state on success.
	 */
	if (txchan_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			" nxge_tx_err_evnts: "
			" fatal error on channel %d cs 0x%llx\n",
			channel, cs.value));
		status = nxge_txdma_fatal_err_recover(nxgep, channel,
								tx_ring_p);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));

	return (status);
}
32213859Sml29623 
32223859Sml29623 static nxge_status_t
32236495Sspeer nxge_txdma_fatal_err_recover(
32246495Sspeer 	p_nxge_t nxgep,
32256495Sspeer 	uint16_t channel,
32266495Sspeer 	p_tx_ring_t tx_ring_p)
32273859Sml29623 {
32283859Sml29623 	npi_handle_t	handle;
32293859Sml29623 	npi_status_t	rs = NPI_SUCCESS;
32303859Sml29623 	p_tx_mbox_t	tx_mbox_p;
32313859Sml29623 	nxge_status_t	status = NXGE_OK;
32323859Sml29623 
32333859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
32343859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32353859Sml29623 			"Recovering from TxDMAChannel#%d error...", channel));
32363859Sml29623 
32373859Sml29623 	/*
32383859Sml29623 	 * Stop the dma channel waits for the stop done.
32393859Sml29623 	 * If the stop done bit is not set, then create
32403859Sml29623 	 * an error.
32413859Sml29623 	 */
32423859Sml29623 
32433859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
32443859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
32453859Sml29623 	MUTEX_ENTER(&tx_ring_p->lock);
32463859Sml29623 	rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
32473859Sml29623 	if (rs != NPI_SUCCESS) {
32483859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32493859Sml29623 			"==> nxge_txdma_fatal_err_recover (channel %d): "
32503859Sml29623 			"stop failed ", channel));
32513859Sml29623 		goto fail;
32523859Sml29623 	}
32533859Sml29623 
32543859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
32553859Sml29623 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
32563859Sml29623 
32573859Sml29623 	/*
32583859Sml29623 	 * Reset TXDMA channel
32593859Sml29623 	 */
32603859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
32613859Sml29623 	if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
32623859Sml29623 						NPI_SUCCESS) {
32633859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32643859Sml29623 			"==> nxge_txdma_fatal_err_recover (channel %d)"
32653859Sml29623 			" reset channel failed 0x%x", channel, rs));
32663859Sml29623 		goto fail;
32673859Sml29623 	}
32683859Sml29623 
32693859Sml29623 	/*
32703859Sml29623 	 * Reset the tail (kick) register to 0.
32713859Sml29623 	 * (Hardware will not reset it. Tx overflow fatal
32723859Sml29623 	 * error if tail is not set to 0 after reset!
32733859Sml29623 	 */
32743859Sml29623 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
32753859Sml29623 
32763859Sml29623 	/* Restart TXDMA channel */
32773859Sml29623 
32786495Sspeer 	if (!isLDOMguest(nxgep)) {
32796495Sspeer 		tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
32806495Sspeer 
32816495Sspeer 		// XXX This is a problem in HIO!
32826495Sspeer 		/*
32836495Sspeer 		 * Initialize the TXDMA channel specific FZC control
32846495Sspeer 		 * configurations. These FZC registers are pertaining
32856495Sspeer 		 * to each TX channel (i.e. logical pages).
32866495Sspeer 		 */
32876495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
32886495Sspeer 		status = nxge_init_fzc_txdma_channel(nxgep, channel,
32896495Sspeer 		    tx_ring_p, tx_mbox_p);
32906495Sspeer 		if (status != NXGE_OK)
32916495Sspeer 			goto fail;
32926495Sspeer 	}
32933859Sml29623 
32943859Sml29623 	/*
32953859Sml29623 	 * Initialize the event masks.
32963859Sml29623 	 */
32973859Sml29623 	tx_ring_p->tx_evmask.value = 0;
32983859Sml29623 	status = nxge_init_txdma_channel_event_mask(nxgep, channel,
32993859Sml29623 							&tx_ring_p->tx_evmask);
33003859Sml29623 	if (status != NXGE_OK)
33013859Sml29623 		goto fail;
33023859Sml29623 
33033859Sml29623 	tx_ring_p->wr_index_wrap = B_FALSE;
33043859Sml29623 	tx_ring_p->wr_index = 0;
33053859Sml29623 	tx_ring_p->rd_index = 0;
33063859Sml29623 
33073859Sml29623 	/*
33083859Sml29623 	 * Load TXDMA descriptors, buffers, mailbox,
33093859Sml29623 	 * initialise the DMA channels and
33103859Sml29623 	 * enable each DMA channel.
33113859Sml29623 	 */
33123859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
33133859Sml29623 	status = nxge_enable_txdma_channel(nxgep, channel,
33143859Sml29623 						tx_ring_p, tx_mbox_p);
33153859Sml29623 	MUTEX_EXIT(&tx_ring_p->lock);
33163859Sml29623 	if (status != NXGE_OK)
33173859Sml29623 		goto fail;
33183859Sml29623 
33193859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33203859Sml29623 			"Recovery Successful, TxDMAChannel#%d Restored",
33213859Sml29623 			channel));
33223859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
33233859Sml29623 
33243859Sml29623 	return (NXGE_OK);
33253859Sml29623 
33263859Sml29623 fail:
33273859Sml29623 	MUTEX_EXIT(&tx_ring_p->lock);
33283859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
33293859Sml29623 		"nxge_txdma_fatal_err_recover (channel %d): "
33303859Sml29623 		"failed to recover this txdma channel", channel));
33313859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
33323859Sml29623 
33333859Sml29623 	return (status);
33343859Sml29623 }
33353859Sml29623 
33366495Sspeer /*
33376495Sspeer  * nxge_tx_port_fatal_err_recover
33386495Sspeer  *
33396495Sspeer  *	Attempt to recover from a fatal port error.
33406495Sspeer  *
33416495Sspeer  * Arguments:
33426495Sspeer  * 	nxgep
33436495Sspeer  *
33446495Sspeer  * Notes:
33456495Sspeer  *	How would a guest do this?
33466495Sspeer  *
33476495Sspeer  * NPI/NXGE function calls:
33486495Sspeer  *
33496495Sspeer  * Registers accessed:
33506495Sspeer  *
33516495Sspeer  * Context:
33526495Sspeer  *	Service domain
33536495Sspeer  */
33543859Sml29623 nxge_status_t
33553859Sml29623 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
33563859Sml29623 {
33576495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set;
33586495Sspeer 	nxge_channel_t tdc;
33596495Sspeer 
33606495Sspeer 	tx_ring_t	*ring;
33616495Sspeer 	tx_mbox_t	*mailbox;
33626495Sspeer 
33633859Sml29623 	npi_handle_t	handle;
33646495Sspeer 	nxge_status_t	status;
33656495Sspeer 	npi_status_t	rs;
33663859Sml29623 
33673859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
33683859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33696495Sspeer 	    "Recovering from TxPort error..."));
33706495Sspeer 
33716495Sspeer 	if (isLDOMguest(nxgep)) {
33726495Sspeer 		return (NXGE_OK);
33736495Sspeer 	}
33746495Sspeer 
33756495Sspeer 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
33766495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
33776495Sspeer 		    "<== nxge_tx_port_fatal_err_recover: not initialized"));
33786495Sspeer 		return (NXGE_ERROR);
33796495Sspeer 	}
33806495Sspeer 
33816495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
33826495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
33836495Sspeer 		    "<== nxge_tx_port_fatal_err_recover: "
33846495Sspeer 		    "NULL ring pointer(s)"));
33856495Sspeer 		return (NXGE_ERROR);
33866495Sspeer 	}
33876495Sspeer 
33886495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
33896495Sspeer 		if ((1 << tdc) & set->owned.map) {
33906495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
33916495Sspeer 			if (ring)
33926495Sspeer 				MUTEX_ENTER(&ring->lock);
33936495Sspeer 		}
33946495Sspeer 	}
33953859Sml29623 
33963859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
33976495Sspeer 
33986495Sspeer 	/*
33996495Sspeer 	 * Stop all the TDCs owned by us.
34006495Sspeer 	 * (The shared TDCs will have been stopped by their owners.)
34016495Sspeer 	 */
34026495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
34036495Sspeer 		if ((1 << tdc) & set->owned.map) {
34046495Sspeer 			ring = nxgep->tx_rings->rings[tdc];
34056495Sspeer 			if (ring) {
34066495Sspeer 				rs = npi_txdma_channel_control
34076495Sspeer 				    (handle, TXDMA_STOP, tdc);
34086495Sspeer 				if (rs != NPI_SUCCESS) {
34096495Sspeer 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
34106495Sspeer 					    "nxge_tx_port_fatal_err_recover "
34116495Sspeer 					    "(channel %d): stop failed ", tdc));
34126495Sspeer 					goto fail;
34136495Sspeer 				}
34146495Sspeer 			}
34153859Sml29623 		}
34163859Sml29623 	}
34173859Sml29623 
34186495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
34196495Sspeer 
34206495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
34216495Sspeer 		if ((1 << tdc) & set->owned.map) {
34226495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
34236495Sspeer 			if (ring)
34246495Sspeer 				(void) nxge_txdma_reclaim(nxgep, ring, 0);
34253859Sml29623 		}
34263859Sml29623 	}
34273859Sml29623 
34283859Sml29623 	/*
34296495Sspeer 	 * Reset all the TDCs.
34303859Sml29623 	 */
34316495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
34326495Sspeer 
34336495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
34346495Sspeer 		if ((1 << tdc) & set->owned.map) {
34356495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
34366495Sspeer 			if (ring) {
34376495Sspeer 				if ((rs = npi_txdma_channel_control
34386495Sspeer 					(handle, TXDMA_RESET, tdc))
34396495Sspeer 				    != NPI_SUCCESS) {
34406495Sspeer 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
34416495Sspeer 					    "nxge_tx_port_fatal_err_recover "
34426495Sspeer 					    "(channel %d) reset channel "
34436495Sspeer 					    "failed 0x%x", tdc, rs));
34446495Sspeer 					goto fail;
34456495Sspeer 				}
34466495Sspeer 			}
34476495Sspeer 			/*
34486495Sspeer 			 * Reset the tail (kick) register to 0.
34496495Sspeer 			 * (Hardware will not reset it. Tx overflow fatal
34506495Sspeer 			 * error if tail is not set to 0 after reset!
34516495Sspeer 			 */
34526495Sspeer 			TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
34533859Sml29623 		}
34546495Sspeer 	}
34556495Sspeer 
34566495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
34576495Sspeer 
34586495Sspeer 	/* Restart all the TDCs */
34596495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
34606495Sspeer 		if ((1 << tdc) & set->owned.map) {
34616495Sspeer 			ring = nxgep->tx_rings->rings[tdc];
34626495Sspeer 			if (ring) {
34636495Sspeer 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
34646495Sspeer 				status = nxge_init_fzc_txdma_channel(nxgep, tdc,
34656495Sspeer 				    ring, mailbox);
34666495Sspeer 				ring->tx_evmask.value = 0;
34676495Sspeer 				/*
34686495Sspeer 				 * Initialize the event masks.
34696495Sspeer 				 */
34706495Sspeer 				status = nxge_init_txdma_channel_event_mask
34716495Sspeer 				    (nxgep, tdc, &ring->tx_evmask);
34726495Sspeer 
34736495Sspeer 				ring->wr_index_wrap = B_FALSE;
34746495Sspeer 				ring->wr_index = 0;
34756495Sspeer 				ring->rd_index = 0;
34766495Sspeer 
34776495Sspeer 				if (status != NXGE_OK)
34786495Sspeer 					goto fail;
34796495Sspeer 				if (status != NXGE_OK)
34806495Sspeer 					goto fail;
34816495Sspeer 			}
34823859Sml29623 		}
34836495Sspeer 	}
34846495Sspeer 
34856495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
34866495Sspeer 
34876495Sspeer 	/* Re-enable all the TDCs */
34886495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
34896495Sspeer 		if ((1 << tdc) & set->owned.map) {
34906495Sspeer 			ring = nxgep->tx_rings->rings[tdc];
34916495Sspeer 			if (ring) {
34926495Sspeer 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
34936495Sspeer 				status = nxge_enable_txdma_channel(nxgep, tdc,
34946495Sspeer 				    ring, mailbox);
34956495Sspeer 				if (status != NXGE_OK)
34966495Sspeer 					goto fail;
34976495Sspeer 			}
34986495Sspeer 		}
34993859Sml29623 	}
35003859Sml29623 
35013859Sml29623 	/*
35026495Sspeer 	 * Unlock all the TDCs.
35033859Sml29623 	 */
35046495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
35056495Sspeer 		if ((1 << tdc) & set->owned.map) {
35066495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
35076495Sspeer 			if (ring)
35086495Sspeer 				MUTEX_EXIT(&ring->lock);
35093859Sml29623 		}
35103859Sml29623 	}
35113859Sml29623 
35126495Sspeer 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
35133859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
35143859Sml29623 
35153859Sml29623 	return (NXGE_OK);
35163859Sml29623 
35173859Sml29623 fail:
35186495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
35196495Sspeer 		if ((1 << tdc) & set->owned.map) {
35206495Sspeer 			ring = nxgep->tx_rings->rings[tdc];
35216495Sspeer 			if (ring)
35226495Sspeer 				MUTEX_EXIT(&ring->lock);
35233859Sml29623 		}
35243859Sml29623 	}
35253859Sml29623 
35266495Sspeer 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
35276495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
35283859Sml29623 
35293859Sml29623 	return (status);
35303859Sml29623 }
35313859Sml29623 
35326495Sspeer /*
35336495Sspeer  * nxge_txdma_inject_err
35346495Sspeer  *
35356495Sspeer  *	Inject an error into a TDC.
35366495Sspeer  *
35376495Sspeer  * Arguments:
35386495Sspeer  * 	nxgep
35396495Sspeer  * 	err_id	The error to inject.
35406495Sspeer  * 	chan	The channel to inject into.
35416495Sspeer  *
35426495Sspeer  * Notes:
35436495Sspeer  *	This is called from nxge_main.c:nxge_err_inject()
35446495Sspeer  *	Has this ioctl ever been used?
35456495Sspeer  *
35466495Sspeer  * NPI/NXGE function calls:
35476495Sspeer  *	npi_txdma_inj_par_error_get()
35486495Sspeer  *	npi_txdma_inj_par_error_set()
35496495Sspeer  *
35506495Sspeer  * Registers accessed:
35516495Sspeer  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
 *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
35546495Sspeer  *
35556495Sspeer  * Context:
35566495Sspeer  *	Service domain
35576495Sspeer  */
35583859Sml29623 void
35593859Sml29623 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
35603859Sml29623 {
35613859Sml29623 	tdmc_intr_dbg_t		tdi;
35623859Sml29623 	tdmc_inj_par_err_t	par_err;
35633859Sml29623 	uint32_t		value;
35643859Sml29623 	npi_handle_t		handle;
35653859Sml29623 
35663859Sml29623 	switch (err_id) {
35673859Sml29623 
35683859Sml29623 	case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
35693859Sml29623 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
35703859Sml29623 		/* Clear error injection source for parity error */
35713859Sml29623 		(void) npi_txdma_inj_par_error_get(handle, &value);
35723859Sml29623 		par_err.value = value;
35733859Sml29623 		par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
35743859Sml29623 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
35753859Sml29623 
35763859Sml29623 		par_err.bits.ldw.inject_parity_error = (1 << chan);
35773859Sml29623 		(void) npi_txdma_inj_par_error_get(handle, &value);
35783859Sml29623 		par_err.value = value;
35793859Sml29623 		par_err.bits.ldw.inject_parity_error |= (1 << chan);
35803859Sml29623 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
35813859Sml29623 				(unsigned long long)par_err.value);
35823859Sml29623 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
35833859Sml29623 		break;
35843859Sml29623 
35853859Sml29623 	case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
35863859Sml29623 	case NXGE_FM_EREPORT_TDMC_NACK_PREF:
35873859Sml29623 	case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
35883859Sml29623 	case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
35893859Sml29623 	case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
35903859Sml29623 	case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
35913859Sml29623 	case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
35923859Sml29623 		TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
35933859Sml29623 			chan, &tdi.value);
35943859Sml29623 		if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
35953859Sml29623 			tdi.bits.ldw.pref_buf_par_err = 1;
35963859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
35973859Sml29623 			tdi.bits.ldw.mbox_err = 1;
35983859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
35993859Sml29623 			tdi.bits.ldw.nack_pref = 1;
36003859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
36013859Sml29623 			tdi.bits.ldw.nack_pkt_rd = 1;
36023859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
36033859Sml29623 			tdi.bits.ldw.pkt_size_err = 1;
36043859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
36053859Sml29623 			tdi.bits.ldw.tx_ring_oflow = 1;
36063859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
36073859Sml29623 			tdi.bits.ldw.conf_part_err = 1;
36083859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
36093859Sml29623 			tdi.bits.ldw.pkt_part_err = 1;
36105125Sjoycey #if defined(__i386)
36115125Sjoycey 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
36125125Sjoycey 				tdi.value);
36135125Sjoycey #else
36143859Sml29623 		cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
36153859Sml29623 				tdi.value);
36165125Sjoycey #endif
36173859Sml29623 		TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
36183859Sml29623 			chan, tdi.value);
36193859Sml29623 
36203859Sml29623 		break;
36213859Sml29623 	}
36223859Sml29623 }
3623