xref: /onnv-gate/usr/src/uts/common/io/nxge/nxge_txdma.c (revision 5512:fb091cfb942c)
13859Sml29623 /*
23859Sml29623  * CDDL HEADER START
33859Sml29623  *
43859Sml29623  * The contents of this file are subject to the terms of the
53859Sml29623  * Common Development and Distribution License (the "License").
63859Sml29623  * You may not use this file except in compliance with the License.
73859Sml29623  *
83859Sml29623  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
93859Sml29623  * or http://www.opensolaris.org/os/licensing.
103859Sml29623  * See the License for the specific language governing permissions
113859Sml29623  * and limitations under the License.
123859Sml29623  *
133859Sml29623  * When distributing Covered Code, include this CDDL HEADER in each
143859Sml29623  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
153859Sml29623  * If applicable, add the following below this CDDL HEADER, with the
163859Sml29623  * fields enclosed by brackets "[]" replaced with your own identifying
173859Sml29623  * information: Portions Copyright [yyyy] [name of copyright owner]
183859Sml29623  *
193859Sml29623  * CDDL HEADER END
203859Sml29623  */
213859Sml29623 /*
223859Sml29623  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
233859Sml29623  * Use is subject to license terms.
243859Sml29623  */
253859Sml29623 
263859Sml29623 #pragma ident	"%Z%%M%	%I%	%E% SMI"
273859Sml29623 
283859Sml29623 #include <sys/nxge/nxge_impl.h>
293859Sml29623 #include <sys/nxge/nxge_txdma.h>
303859Sml29623 #include <sys/llc1.h>
313859Sml29623 
/*
 * Software tunables for the TX DMA path (patchable via /etc/system).
 * NOTE(review): descriptions below are inferred from names/usage in this
 * file — confirm against the rest of the driver.
 */
uint32_t 	nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t	nxge_tx_minfree = 32;	/* presumably min free TX descriptors */
uint32_t	nxge_tx_intr_thres = 0;	/* presumably TX interrupt threshold */
uint32_t	nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t	nxge_tx_tiny_pack = 1;	/* presumably pack tiny frames */
uint32_t	nxge_tx_use_bcopy = 1;	/* prefer bcopy over DMA binding */
383859Sml29623 
393859Sml29623 extern uint32_t 	nxge_tx_ring_size;
403859Sml29623 extern uint32_t 	nxge_bcopy_thresh;
413859Sml29623 extern uint32_t 	nxge_dvma_thresh;
423859Sml29623 extern uint32_t 	nxge_dma_stream_thresh;
433859Sml29623 extern dma_method_t 	nxge_force_dma;
443859Sml29623 
453859Sml29623 /* Device register access attributes for PIO.  */
463859Sml29623 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
473859Sml29623 /* Device descriptor access attributes for DMA.  */
483859Sml29623 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
493859Sml29623 /* Device buffer access attributes for DMA.  */
503859Sml29623 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
513859Sml29623 extern ddi_dma_attr_t nxge_desc_dma_attr;
523859Sml29623 extern ddi_dma_attr_t nxge_tx_dma_attr;
533859Sml29623 
543952Sml29623 extern int nxge_serial_tx(mblk_t *mp, void *arg);
553952Sml29623 
563859Sml29623 static nxge_status_t nxge_map_txdma(p_nxge_t);
573859Sml29623 static void nxge_unmap_txdma(p_nxge_t);
583859Sml29623 
593859Sml29623 static nxge_status_t nxge_txdma_hw_start(p_nxge_t);
603859Sml29623 static void nxge_txdma_hw_stop(p_nxge_t);
613859Sml29623 
623859Sml29623 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
633859Sml29623 	p_nxge_dma_common_t *, p_tx_ring_t *,
643859Sml29623 	uint32_t, p_nxge_dma_common_t *,
653859Sml29623 	p_tx_mbox_t *);
663859Sml29623 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t,
673859Sml29623 	p_tx_ring_t, p_tx_mbox_t);
683859Sml29623 
693859Sml29623 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
703859Sml29623 	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
713859Sml29623 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
723859Sml29623 
733859Sml29623 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
743859Sml29623 	p_nxge_dma_common_t *, p_tx_ring_t,
753859Sml29623 	p_tx_mbox_t *);
763859Sml29623 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
773859Sml29623 	p_tx_ring_t, p_tx_mbox_t);
783859Sml29623 
793859Sml29623 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
803859Sml29623     p_tx_ring_t, p_tx_mbox_t);
813859Sml29623 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t,
823859Sml29623 	p_tx_ring_t, p_tx_mbox_t);
833859Sml29623 
843859Sml29623 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
853859Sml29623 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
863859Sml29623 	p_nxge_ldv_t, tx_cs_t);
873859Sml29623 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
883859Sml29623 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
893859Sml29623 	uint16_t, p_tx_ring_t);
903859Sml29623 
913859Sml29623 nxge_status_t
923859Sml29623 nxge_init_txdma_channels(p_nxge_t nxgep)
933859Sml29623 {
943859Sml29623 	nxge_status_t		status = NXGE_OK;
953859Sml29623 
963859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_init_txdma_channels"));
973859Sml29623 
983859Sml29623 	status = nxge_map_txdma(nxgep);
993859Sml29623 	if (status != NXGE_OK) {
1003859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1013859Sml29623 			"<== nxge_init_txdma_channels: status 0x%x", status));
1023859Sml29623 		return (status);
1033859Sml29623 	}
1043859Sml29623 
1053859Sml29623 	status = nxge_txdma_hw_start(nxgep);
1063859Sml29623 	if (status != NXGE_OK) {
1073859Sml29623 		nxge_unmap_txdma(nxgep);
1083859Sml29623 		return (status);
1093859Sml29623 	}
1103859Sml29623 
1113859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1123859Sml29623 		"<== nxge_init_txdma_channels: status 0x%x", status));
1133859Sml29623 
1143859Sml29623 	return (NXGE_OK);
1153859Sml29623 }
1163859Sml29623 
1173859Sml29623 void
1183859Sml29623 nxge_uninit_txdma_channels(p_nxge_t nxgep)
1193859Sml29623 {
1203859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channels"));
1213859Sml29623 
1223859Sml29623 	nxge_txdma_hw_stop(nxgep);
1233859Sml29623 	nxge_unmap_txdma(nxgep);
1243859Sml29623 
1253859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1263859Sml29623 		"<== nxge_uinit_txdma_channels"));
1273859Sml29623 }
1283859Sml29623 
/*
 * nxge_setup_dma_common
 *
 *	Carve a sub-area of <entries> blocks of <size> bytes each off the
 *	front of the DMA area described by *src_p, and describe that
 *	sub-area in *dest_p.
 *
 *	NOTE(review): *src_p is advanced in place (kaddrp, alength and the
 *	DMA cookie all move past the carved region), so successive calls
 *	hand out consecutive sub-areas of the same parent area — confirm
 *	callers rely on this.
 */
void
nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
	uint32_t entries, uint32_t size)
{
	size_t		tsize;
	/* Inherit the parent area's handles/attributes, then resize. */
	*dest_p = *src_p;
	tsize = size * entries;
	dest_p->alength = tsize;
	dest_p->nblocks = entries;
	dest_p->block_size = size;
	/* NOTE(review): offset ends up past this sub-area — confirm intent. */
	dest_p->offset += tsize;

	/*
	 * Advance the source past the region just handed out.
	 * dest_p->kaddrp still equals src_p->kaddrp here (copied above).
	 */
	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
	src_p->alength -= tsize;
	src_p->dma_cookie.dmac_laddress += tsize;
	src_p->dma_cookie.dmac_size -= tsize;
}
1463859Sml29623 
1473859Sml29623 nxge_status_t
1483859Sml29623 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
1493859Sml29623 {
1503859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
1513859Sml29623 	nxge_status_t		status = NXGE_OK;
1523859Sml29623 	npi_handle_t		handle;
1533859Sml29623 
1543859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
1553859Sml29623 
1563859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1573859Sml29623 	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
1583859Sml29623 		rs = npi_txdma_channel_reset(handle, channel);
1593859Sml29623 	} else {
1603859Sml29623 		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
1613859Sml29623 				channel);
1623859Sml29623 	}
1633859Sml29623 
1643859Sml29623 	if (rs != NPI_SUCCESS) {
1653859Sml29623 		status = NXGE_ERROR | rs;
1663859Sml29623 	}
1673859Sml29623 
1683859Sml29623 	/*
1693859Sml29623 	 * Reset the tail (kick) register to 0.
1703859Sml29623 	 * (Hardware will not reset it. Tx overflow fatal
1713859Sml29623 	 * error if tail is not set to 0 after reset!
1723859Sml29623 	 */
1733859Sml29623 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
1743859Sml29623 
1753859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
1763859Sml29623 	return (status);
1773859Sml29623 }
1783859Sml29623 
1793859Sml29623 nxge_status_t
1803859Sml29623 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
1813859Sml29623 		p_tx_dma_ent_msk_t mask_p)
1823859Sml29623 {
1833859Sml29623 	npi_handle_t		handle;
1843859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
1853859Sml29623 	nxge_status_t		status = NXGE_OK;
1863859Sml29623 
1873859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1883859Sml29623 		"<== nxge_init_txdma_channel_event_mask"));
1893859Sml29623 
1903859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1913859Sml29623 	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
1923859Sml29623 	if (rs != NPI_SUCCESS) {
1933859Sml29623 		status = NXGE_ERROR | rs;
1943859Sml29623 	}
1953859Sml29623 
1963859Sml29623 	return (status);
1973859Sml29623 }
1983859Sml29623 
1993859Sml29623 nxge_status_t
2003859Sml29623 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
2013859Sml29623 	uint64_t reg_data)
2023859Sml29623 {
2033859Sml29623 	npi_handle_t		handle;
2043859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
2053859Sml29623 	nxge_status_t		status = NXGE_OK;
2063859Sml29623 
2073859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2083859Sml29623 		"<== nxge_init_txdma_channel_cntl_stat"));
2093859Sml29623 
2103859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2113859Sml29623 	rs = npi_txdma_control_status(handle, OP_SET, channel,
2123859Sml29623 			(p_tx_cs_t)&reg_data);
2133859Sml29623 
2143859Sml29623 	if (rs != NPI_SUCCESS) {
2153859Sml29623 		status = NXGE_ERROR | rs;
2163859Sml29623 	}
2173859Sml29623 
2183859Sml29623 	return (status);
2193859Sml29623 }
2203859Sml29623 
2213859Sml29623 nxge_status_t
2223859Sml29623 nxge_enable_txdma_channel(p_nxge_t nxgep,
2233859Sml29623 	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
2243859Sml29623 {
2253859Sml29623 	npi_handle_t		handle;
2263859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
2273859Sml29623 	nxge_status_t		status = NXGE_OK;
2283859Sml29623 
2293859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
2303859Sml29623 
2313859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2323859Sml29623 	/*
2333859Sml29623 	 * Use configuration data composed at init time.
2343859Sml29623 	 * Write to hardware the transmit ring configurations.
2353859Sml29623 	 */
2363859Sml29623 	rs = npi_txdma_ring_config(handle, OP_SET, channel,
2373859Sml29623 			(uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
2383859Sml29623 
2393859Sml29623 	if (rs != NPI_SUCCESS) {
2403859Sml29623 		return (NXGE_ERROR | rs);
2413859Sml29623 	}
2423859Sml29623 
2433859Sml29623 	/* Write to hardware the mailbox */
2443859Sml29623 	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
2453859Sml29623 		(uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
2463859Sml29623 
2473859Sml29623 	if (rs != NPI_SUCCESS) {
2483859Sml29623 		return (NXGE_ERROR | rs);
2493859Sml29623 	}
2503859Sml29623 
2513859Sml29623 	/* Start the DMA engine. */
2523859Sml29623 	rs = npi_txdma_channel_init_enable(handle, channel);
2533859Sml29623 
2543859Sml29623 	if (rs != NPI_SUCCESS) {
2553859Sml29623 		return (NXGE_ERROR | rs);
2563859Sml29623 	}
2573859Sml29623 
2583859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
2593859Sml29623 
2603859Sml29623 	return (status);
2613859Sml29623 }
2623859Sml29623 
/*
 * nxge_fill_tx_hdr
 *
 *	Populate the Neptune internal transmit packet header for the
 *	frame in <mp>.
 *
 *	When fill_len is set, only the total-transfer-length field is
 *	ORed in and the function returns.  Otherwise the header is built
 *	from the frame contents: pad count, LLC/VLAN indication, L3 start
 *	offset, IP header length, and the L4 checksum type for TCP/UDP.
 *
 * Arguments:
 *	mp	 frame data (does NOT include the Neptune TX header)
 *	fill_len record only the total transfer length and return
 *	l4_cksum request hardware L4 checksum (TCP/UDP only)
 *	pkt_len	 total transfer length — assumed to include the TX
 *		 header; TODO confirm against callers
 *	npads	 number of pad bytes for this frame
 *	pkthdrp	 header to fill; caller must zero it first (fields are
 *		 only ORed in below)
 */
void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
		boolean_t l4_cksum, int pkt_len, uint8_t npads,
		p_tx_pkt_hdr_all_t pkthdrp)
{
	p_tx_pkt_header_t	hdrp;
	p_mblk_t 		nmp;
	uint64_t		tmp;
	size_t 			mblk_len;
	size_t 			iph_len;
	size_t 			hdrs_size;
	/* Scratch copy of the frame's leading headers (L2 + up to 64+4). */
	uint8_t			hdrs_buf[sizeof (struct ether_header) +
					64 + sizeof (uint32_t)];
	uint8_t			*cursor;
	uint8_t 		*ip_buf;
	uint16_t		eth_type;
	uint8_t			ipproto;
	boolean_t		is_vlan = B_FALSE;
	size_t			eth_hdr_size;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * Caller should zero out the headers first.
	 */
	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

	if (fill_len) {
		/* Length-only mode: record total transfer length and bail. */
		NXGE_DEBUG_MSG((NULL, TX_CTL,
			"==> nxge_fill_tx_hdr: pkt_len %d "
			"npads %d", pkt_len, npads));
		tmp = (uint64_t)pkt_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
		goto fill_tx_header_done;
	}

	tmp = (uint64_t)npads;
	hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);

	/*
	 * mp is the original data packet (does not include the
	 * Neptune transmit header).
	 */
	nmp = mp;
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
		"mp $%p b_rptr $%p len %d",
		mp, nmp->b_rptr, MBLKL(nmp)));
	/* copy ether_header from mblk to hdrs_buf */
	cursor = &hdrs_buf[0];
	tmp = sizeof (struct ether_vlan_header);
	/* The L2 header may be split across mblks; gather it piecewise. */
	while ((nmp != NULL) && (tmp > 0)) {
		size_t buflen;
		mblk_len = MBLKL(nmp);
		buflen = min((size_t)tmp, mblk_len);
		bcopy(nmp->b_rptr, cursor, buflen);
		cursor += buflen;
		tmp -= buflen;
		nmp = nmp->b_cont;
	}

	/* Rewind to the head mblk for the L3 parsing below. */
	nmp = mp;
	mblk_len = MBLKL(nmp);
	ip_buf = NULL;
	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
		"ether type 0x%x", eth_type, hdrp->value));

	if (eth_type < ETHERMTU) {
		/* 802.3 length field, not an ethertype: LLC frame. */
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
			"value 0x%llx", hdrp->value));
		if (*(hdrs_buf + sizeof (struct ether_header))
				== LLC_SNAP_SAP) {
			/* SNAP: real ethertype is 6 bytes into the LLC hdr. */
			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
					sizeof (struct ether_header) + 6)));
			NXGE_DEBUG_MSG((NULL, TX_CTL,
				"==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
				eth_type));
		} else {
			/* Non-SNAP LLC: nothing more to parse. */
			goto fill_tx_header_done;
		}
	} else if (eth_type == VLAN_ETHERTYPE) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

		/* Inner ethertype follows the 802.1Q tag. */
		eth_type = ntohs(((struct ether_vlan_header *)
			hdrs_buf)->ether_type);
		is_vlan = B_TRUE;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
			"value 0x%llx", hdrp->value));
	}

	if (!is_vlan) {
		eth_hdr_size = sizeof (struct ether_header);
	} else {
		eth_hdr_size = sizeof (struct ether_vlan_header);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		/*
		 * Fast path: use the IP header in place if enough of it
		 * sits in the first mblk; otherwise fall back to the
		 * gathered copy below.
		 */
		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
			ip_buf = nmp->b_rptr + eth_hdr_size;
			mblk_len -= eth_hdr_size;
			/* IHL field (low nibble of the first IP byte). */
			iph_len = ((*ip_buf) & 0x0f);
			/*
			 * NOTE(review): iph_len is in 32-bit words here
			 * while mblk_len is in bytes — confirm this
			 * sufficiency check is intentional.
			 */
			if (mblk_len > (iph_len + sizeof (uint32_t))) {
				ip_buf = nmp->b_rptr;
				ip_buf += eth_hdr_size;
			} else {
				ip_buf = NULL;
			}

		}
		if (ip_buf == NULL) {
			/* Gather leading headers from the mblk chain. */
			hdrs_size = 0;
			((p_ether_header_t)hdrs_buf)->ether_type = 0;
			while ((nmp) && (hdrs_size <
					sizeof (hdrs_buf))) {
				mblk_len = (size_t)nmp->b_wptr -
					(size_t)nmp->b_rptr;
				if (mblk_len >=
					(sizeof (hdrs_buf) - hdrs_size))
					mblk_len = sizeof (hdrs_buf) -
						hdrs_size;
				bcopy(nmp->b_rptr,
					&hdrs_buf[hdrs_size], mblk_len);
				hdrs_size += mblk_len;
				nmp = nmp->b_cont;
			}
			ip_buf = hdrs_buf;
			ip_buf += eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
		}

		/* IPv4 protocol field is byte 9 of the IP header. */
		ipproto = ip_buf[9];

		tmp = (uint64_t)iph_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
		/* L3 start is expressed in 16-bit units (hence >> 1). */
		tmp = (uint64_t)(eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
			" iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
			"tmp 0x%x",
			iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
			ipproto, tmp));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
			"value 0x%llx", hdrp->value));

		break;

	case ETHERTYPE_IPV6:
		/* Always gather: IPv6 parsing reads from the copy. */
		hdrs_size = 0;
		((p_ether_header_t)hdrs_buf)->ether_type = 0;
		while ((nmp) && (hdrs_size <
				sizeof (hdrs_buf))) {
			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
			if (mblk_len >=
				(sizeof (hdrs_buf) - hdrs_size))
				mblk_len = sizeof (hdrs_buf) -
					hdrs_size;
			bcopy(nmp->b_rptr,
				&hdrs_buf[hdrs_size], mblk_len);
			hdrs_size += mblk_len;
			nmp = nmp->b_cont;
		}
		ip_buf = hdrs_buf;
		ip_buf += eth_hdr_size;

		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

		tmp = (eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		/* byte 6 is the next header protocol */
		ipproto = ip_buf[6];

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
			" iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
			iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
			ipproto));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
			"value 0x%llx", hdrp->value));

		break;

	default:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
		goto fill_tx_header_done;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		NXGE_DEBUG_MSG((NULL, TX_CTL,
			"==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
		if (l4_cksum) {
			/* Packet type 1: TCP checksum offload. */
			tmp = 1ull;
			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
			NXGE_DEBUG_MSG((NULL, TX_CTL,
				"==> nxge_tx_pkt_hdr_init: TCP CKSUM"
				"value 0x%llx", hdrp->value));
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
			"value 0x%llx", hdrp->value));
		break;

	case IPPROTO_UDP:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
		if (l4_cksum) {
			/* Packet type 2: UDP checksum offload. */
			tmp = 0x2ull;
			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
		}
		NXGE_DEBUG_MSG((NULL, TX_CTL,
			"==> nxge_tx_pkt_hdr_init: UDP"
			"value 0x%llx", hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	NXGE_DEBUG_MSG((NULL, TX_CTL,
		"==> nxge_fill_tx_hdr: pkt_len %d  "
		"npads %d value 0x%llx", pkt_len, npads, hdrp->value));

	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}
4933859Sml29623 
4943859Sml29623 /*ARGSUSED*/
4953859Sml29623 p_mblk_t
4963859Sml29623 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
4973859Sml29623 {
4983859Sml29623 	p_mblk_t 		newmp = NULL;
4993859Sml29623 
5003859Sml29623 	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
5013859Sml29623 		NXGE_DEBUG_MSG((NULL, TX_CTL,
5023859Sml29623 			"<== nxge_tx_pkt_header_reserve: allocb failed"));
5033859Sml29623 		return (NULL);
5043859Sml29623 	}
5053859Sml29623 
5063859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL,
5073859Sml29623 		"==> nxge_tx_pkt_header_reserve: get new mp"));
5083859Sml29623 	DB_TYPE(newmp) = M_DATA;
5093859Sml29623 	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
5103859Sml29623 	linkb(newmp, mp);
5113859Sml29623 	newmp->b_rptr -= TX_PKT_HEADER_SIZE;
5123859Sml29623 
5133859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
5143859Sml29623 		"b_rptr $%p b_wptr $%p",
5153859Sml29623 		newmp->b_rptr, newmp->b_wptr));
5163859Sml29623 
5173859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL,
5183859Sml29623 		"<== nxge_tx_pkt_header_reserve: use new mp"));
5193859Sml29623 
5203859Sml29623 	return (newmp);
5213859Sml29623 }
5223859Sml29623 
5233859Sml29623 int
5243859Sml29623 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
5253859Sml29623 {
5263859Sml29623 	uint_t 			nmblks;
5273859Sml29623 	ssize_t			len;
5283859Sml29623 	uint_t 			pkt_len;
5293859Sml29623 	p_mblk_t 		nmp, bmp, tmp;
5303859Sml29623 	uint8_t 		*b_wptr;
5313859Sml29623 
5323859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL,
5333859Sml29623 		"==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
5343859Sml29623 		"len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
5353859Sml29623 
5363859Sml29623 	nmp = mp;
5373859Sml29623 	bmp = mp;
5383859Sml29623 	nmblks = 0;
5393859Sml29623 	pkt_len = 0;
5403859Sml29623 	*tot_xfer_len_p = 0;
5413859Sml29623 
5423859Sml29623 	while (nmp) {
5433859Sml29623 		len = MBLKL(nmp);
5443859Sml29623 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
5453859Sml29623 			"len %d pkt_len %d nmblks %d tot_xfer_len %d",
5463859Sml29623 			len, pkt_len, nmblks,
5473859Sml29623 			*tot_xfer_len_p));
5483859Sml29623 
5493859Sml29623 		if (len <= 0) {
5503859Sml29623 			bmp = nmp;
5513859Sml29623 			nmp = nmp->b_cont;
5523859Sml29623 			NXGE_DEBUG_MSG((NULL, TX_CTL,
5533859Sml29623 				"==> nxge_tx_pkt_nmblocks: "
5543859Sml29623 				"len (0) pkt_len %d nmblks %d",
5553859Sml29623 				pkt_len, nmblks));
5563859Sml29623 			continue;
5573859Sml29623 		}
5583859Sml29623 
5593859Sml29623 		*tot_xfer_len_p += len;
5603859Sml29623 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
5613859Sml29623 			"len %d pkt_len %d nmblks %d tot_xfer_len %d",
5623859Sml29623 			len, pkt_len, nmblks,
5633859Sml29623 			*tot_xfer_len_p));
5643859Sml29623 
5653859Sml29623 		if (len < nxge_bcopy_thresh) {
5663859Sml29623 			NXGE_DEBUG_MSG((NULL, TX_CTL,
5673859Sml29623 				"==> nxge_tx_pkt_nmblocks: "
5683859Sml29623 				"len %d (< thresh) pkt_len %d nmblks %d",
5693859Sml29623 				len, pkt_len, nmblks));
5703859Sml29623 			if (pkt_len == 0)
5713859Sml29623 				nmblks++;
5723859Sml29623 			pkt_len += len;
5733859Sml29623 			if (pkt_len >= nxge_bcopy_thresh) {
5743859Sml29623 				pkt_len = 0;
5753859Sml29623 				len = 0;
5763859Sml29623 				nmp = bmp;
5773859Sml29623 			}
5783859Sml29623 		} else {
5793859Sml29623 			NXGE_DEBUG_MSG((NULL, TX_CTL,
5803859Sml29623 				"==> nxge_tx_pkt_nmblocks: "
5813859Sml29623 				"len %d (> thresh) pkt_len %d nmblks %d",
5823859Sml29623 				len, pkt_len, nmblks));
5833859Sml29623 			pkt_len = 0;
5843859Sml29623 			nmblks++;
5853859Sml29623 			/*
5863859Sml29623 			 * Hardware limits the transfer length to 4K.
5873859Sml29623 			 * If len is more than 4K, we need to break
5883859Sml29623 			 * it up to at most 2 more blocks.
5893859Sml29623 			 */
5903859Sml29623 			if (len > TX_MAX_TRANSFER_LENGTH) {
5913859Sml29623 				uint32_t	nsegs;
5923859Sml29623 
5933859Sml29623 				NXGE_DEBUG_MSG((NULL, TX_CTL,
5943859Sml29623 					"==> nxge_tx_pkt_nmblocks: "
5953859Sml29623 					"len %d pkt_len %d nmblks %d nsegs %d",
5963859Sml29623 					len, pkt_len, nmblks, nsegs));
5973859Sml29623 				nsegs = 1;
5983859Sml29623 				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
5993859Sml29623 					++nsegs;
6003859Sml29623 				}
6013859Sml29623 				do {
6023859Sml29623 					b_wptr = nmp->b_rptr +
6033859Sml29623 						TX_MAX_TRANSFER_LENGTH;
6043859Sml29623 					nmp->b_wptr = b_wptr;
6053859Sml29623 					if ((tmp = dupb(nmp)) == NULL) {
6063859Sml29623 						return (0);
6073859Sml29623 					}
6083859Sml29623 					tmp->b_rptr = b_wptr;
6093859Sml29623 					tmp->b_wptr = nmp->b_wptr;
6103859Sml29623 					tmp->b_cont = nmp->b_cont;
6113859Sml29623 					nmp->b_cont = tmp;
6123859Sml29623 					nmblks++;
6133859Sml29623 					if (--nsegs) {
6143859Sml29623 						nmp = tmp;
6153859Sml29623 					}
6163859Sml29623 				} while (nsegs);
6173859Sml29623 				nmp = tmp;
6183859Sml29623 			}
6193859Sml29623 		}
6203859Sml29623 
6213859Sml29623 		/*
6223859Sml29623 		 * Hardware limits the transmit gather pointers to 15.
6233859Sml29623 		 */
6243859Sml29623 		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
6253859Sml29623 				TX_MAX_GATHER_POINTERS) {
6263859Sml29623 			NXGE_DEBUG_MSG((NULL, TX_CTL,
6273859Sml29623 				"==> nxge_tx_pkt_nmblocks: pull msg - "
6283859Sml29623 				"len %d pkt_len %d nmblks %d",
6293859Sml29623 				len, pkt_len, nmblks));
6303859Sml29623 			/* Pull all message blocks from b_cont */
6313859Sml29623 			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
6323859Sml29623 				return (0);
6333859Sml29623 			}
6343859Sml29623 			freemsg(nmp->b_cont);
6353859Sml29623 			nmp->b_cont = tmp;
6363859Sml29623 			pkt_len = 0;
6373859Sml29623 		}
6383859Sml29623 		bmp = nmp;
6393859Sml29623 		nmp = nmp->b_cont;
6403859Sml29623 	}
6413859Sml29623 
6423859Sml29623 	NXGE_DEBUG_MSG((NULL, TX_CTL,
6433859Sml29623 		"<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
6443859Sml29623 		"nmblks %d len %d tot_xfer_len %d",
6453859Sml29623 		mp->b_rptr, mp->b_wptr, nmblks,
6463859Sml29623 		MBLKL(mp), *tot_xfer_len_p));
6473859Sml29623 
6483859Sml29623 	return (nmblks);
6493859Sml29623 }
6503859Sml29623 
6513859Sml29623 boolean_t
6523859Sml29623 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
6533859Sml29623 {
6543859Sml29623 	boolean_t 		status = B_TRUE;
6553859Sml29623 	p_nxge_dma_common_t	tx_desc_dma_p;
6563859Sml29623 	nxge_dma_common_t	desc_area;
6573859Sml29623 	p_tx_desc_t 		tx_desc_ring_vp;
6583859Sml29623 	p_tx_desc_t 		tx_desc_p;
6593859Sml29623 	p_tx_desc_t 		tx_desc_pp;
6603859Sml29623 	tx_desc_t 		r_tx_desc;
6613859Sml29623 	p_tx_msg_t 		tx_msg_ring;
6623859Sml29623 	p_tx_msg_t 		tx_msg_p;
6633859Sml29623 	npi_handle_t		handle;
6643859Sml29623 	tx_ring_hdl_t		tx_head;
6653859Sml29623 	uint32_t 		pkt_len;
6663859Sml29623 	uint_t			tx_rd_index;
6673859Sml29623 	uint16_t		head_index, tail_index;
6683859Sml29623 	uint8_t			tdc;
6693859Sml29623 	boolean_t		head_wrap, tail_wrap;
6703859Sml29623 	p_nxge_tx_ring_stats_t tdc_stats;
6713859Sml29623 	int			rc;
6723859Sml29623 
6733859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
6743859Sml29623 
6753859Sml29623 	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
6763859Sml29623 			(nmblks != 0));
6773859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
6783859Sml29623 		"==> nxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
6793859Sml29623 			tx_ring_p->descs_pending, nxge_reclaim_pending,
6803859Sml29623 			nmblks));
6813859Sml29623 	if (!status) {
6823859Sml29623 		tx_desc_dma_p = &tx_ring_p->tdc_desc;
6833859Sml29623 		desc_area = tx_ring_p->tdc_desc;
6843859Sml29623 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
6853859Sml29623 		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
6863859Sml29623 		tx_desc_ring_vp =
6873859Sml29623 			(p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
6883859Sml29623 		tx_rd_index = tx_ring_p->rd_index;
6893859Sml29623 		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
6903859Sml29623 		tx_msg_ring = tx_ring_p->tx_msg_ring;
6913859Sml29623 		tx_msg_p = &tx_msg_ring[tx_rd_index];
6923859Sml29623 		tdc = tx_ring_p->tdc;
6933859Sml29623 		tdc_stats = tx_ring_p->tdc_stats;
6943859Sml29623 		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
6953859Sml29623 			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
6963859Sml29623 		}
6973859Sml29623 
6983859Sml29623 		tail_index = tx_ring_p->wr_index;
6993859Sml29623 		tail_wrap = tx_ring_p->wr_index_wrap;
7003859Sml29623 
7013859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
7023859Sml29623 			"==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
7033859Sml29623 			"tail_index %d tail_wrap %d "
7043859Sml29623 			"tx_desc_p $%p ($%p) ",
7053859Sml29623 			tdc, tx_rd_index, tail_index, tail_wrap,
7063859Sml29623 			tx_desc_p, (*(uint64_t *)tx_desc_p)));
7073859Sml29623 		/*
7083859Sml29623 		 * Read the hardware maintained transmit head
7093859Sml29623 		 * and wrap around bit.
7103859Sml29623 		 */
7113859Sml29623 		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
7123859Sml29623 		head_index =  tx_head.bits.ldw.head;
7133859Sml29623 		head_wrap = tx_head.bits.ldw.wrap;
7143859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
7153859Sml29623 			"==> nxge_txdma_reclaim: "
7163859Sml29623 			"tx_rd_index %d tail %d tail_wrap %d "
7173859Sml29623 			"head %d wrap %d",
7183859Sml29623 			tx_rd_index, tail_index, tail_wrap,
7193859Sml29623 			head_index, head_wrap));
7203859Sml29623 
7213859Sml29623 		if (head_index == tail_index) {
7223859Sml29623 			if (TXDMA_RING_EMPTY(head_index, head_wrap,
7233859Sml29623 					tail_index, tail_wrap) &&
7243859Sml29623 					(head_index == tx_rd_index)) {
7253859Sml29623 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
7263859Sml29623 					"==> nxge_txdma_reclaim: EMPTY"));
7273859Sml29623 				return (B_TRUE);
7283859Sml29623 			}
7293859Sml29623 
7303859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
7313859Sml29623 				"==> nxge_txdma_reclaim: Checking "
7323859Sml29623 					"if ring full"));
7333859Sml29623 			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
7343859Sml29623 					tail_wrap)) {
7353859Sml29623 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
7363859Sml29623 					"==> nxge_txdma_reclaim: full"));
7373859Sml29623 				return (B_FALSE);
7383859Sml29623 			}
7393859Sml29623 		}
7403859Sml29623 
7413859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
7423859Sml29623 			"==> nxge_txdma_reclaim: tx_rd_index and head_index"));
7433859Sml29623 
7443859Sml29623 		tx_desc_pp = &r_tx_desc;
7453859Sml29623 		while ((tx_rd_index != head_index) &&
7463859Sml29623 			(tx_ring_p->descs_pending != 0)) {
7473859Sml29623 
7483859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
7493859Sml29623 				"==> nxge_txdma_reclaim: Checking if pending"));
7503859Sml29623 
7513859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
7523859Sml29623 				"==> nxge_txdma_reclaim: "
7533859Sml29623 				"descs_pending %d ",
7543859Sml29623 				tx_ring_p->descs_pending));
7553859Sml29623 
7563859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
7573859Sml29623 				"==> nxge_txdma_reclaim: "
7583859Sml29623 				"(tx_rd_index %d head_index %d "
7593859Sml29623 				"(tx_desc_p $%p)",
7603859Sml29623 				tx_rd_index, head_index,
7613859Sml29623 				tx_desc_p));
7623859Sml29623 
7633859Sml29623 			tx_desc_pp->value = tx_desc_p->value;
7643859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
7653859Sml29623 				"==> nxge_txdma_reclaim: "
7663859Sml29623 				"(tx_rd_index %d head_index %d "
7673859Sml29623 				"tx_desc_p $%p (desc value 0x%llx) ",
7683859Sml29623 				tx_rd_index, head_index,
7693859Sml29623 				tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
7703859Sml29623 
7713859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
7723859Sml29623 				"==> nxge_txdma_reclaim: dump desc:"));
7733859Sml29623 
7743859Sml29623 			pkt_len = tx_desc_pp->bits.hdw.tr_len;
7753859Sml29623 			tdc_stats->obytes += pkt_len;
7763859Sml29623 			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
7773859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
7783859Sml29623 				"==> nxge_txdma_reclaim: pkt_len %d "
7793859Sml29623 				"tdc channel %d opackets %d",
7803859Sml29623 				pkt_len,
7813859Sml29623 				tdc,
7823859Sml29623 				tdc_stats->opackets));
7833859Sml29623 
7843859Sml29623 			if (tx_msg_p->flags.dma_type == USE_DVMA) {
7853859Sml29623 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
7863859Sml29623 					"tx_desc_p = $%p "
7873859Sml29623 					"tx_desc_pp = $%p "
7883859Sml29623 					"index = %d",
7893859Sml29623 					tx_desc_p,
7903859Sml29623 					tx_desc_pp,
7913859Sml29623 					tx_ring_p->rd_index));
7923859Sml29623 				(void) dvma_unload(tx_msg_p->dvma_handle,
7933859Sml29623 					0, -1);
7943859Sml29623 				tx_msg_p->dvma_handle = NULL;
7953859Sml29623 				if (tx_ring_p->dvma_wr_index ==
7963859Sml29623 					tx_ring_p->dvma_wrap_mask) {
7973859Sml29623 					tx_ring_p->dvma_wr_index = 0;
7983859Sml29623 				} else {
7993859Sml29623 					tx_ring_p->dvma_wr_index++;
8003859Sml29623 				}
8013859Sml29623 				tx_ring_p->dvma_pending--;
8023859Sml29623 			} else if (tx_msg_p->flags.dma_type ==
8033859Sml29623 					USE_DMA) {
8043859Sml29623 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
8053859Sml29623 					"==> nxge_txdma_reclaim: "
8063859Sml29623 					"USE DMA"));
8073859Sml29623 				if (rc = ddi_dma_unbind_handle
8083859Sml29623 					(tx_msg_p->dma_handle)) {
8093859Sml29623 					cmn_err(CE_WARN, "!nxge_reclaim: "
8103859Sml29623 						"ddi_dma_unbind_handle "
8113859Sml29623 						"failed. status %d", rc);
8123859Sml29623 				}
8133859Sml29623 			}
8143859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
8153859Sml29623 				"==> nxge_txdma_reclaim: count packets"));
8163859Sml29623 			/*
8173859Sml29623 			 * count a chained packet only once.
8183859Sml29623 			 */
8193859Sml29623 			if (tx_msg_p->tx_message != NULL) {
8203859Sml29623 				freemsg(tx_msg_p->tx_message);
8213859Sml29623 				tx_msg_p->tx_message = NULL;
8223859Sml29623 			}
8233859Sml29623 
8243859Sml29623 			tx_msg_p->flags.dma_type = USE_NONE;
8253859Sml29623 			tx_rd_index = tx_ring_p->rd_index;
8263859Sml29623 			tx_rd_index = (tx_rd_index + 1) &
8273859Sml29623 					tx_ring_p->tx_wrap_mask;
8283859Sml29623 			tx_ring_p->rd_index = tx_rd_index;
8293859Sml29623 			tx_ring_p->descs_pending--;
8303859Sml29623 			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
8313859Sml29623 			tx_msg_p = &tx_msg_ring[tx_rd_index];
8323859Sml29623 		}
8333859Sml29623 
8343859Sml29623 		status = (nmblks <= (tx_ring_p->tx_ring_size -
8353859Sml29623 				tx_ring_p->descs_pending -
8363859Sml29623 				TX_FULL_MARK));
8373859Sml29623 		if (status) {
8383859Sml29623 			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
8393859Sml29623 		}
8403859Sml29623 	} else {
8413859Sml29623 		status = (nmblks <=
8423859Sml29623 			(tx_ring_p->tx_ring_size -
8433859Sml29623 				tx_ring_p->descs_pending -
8443859Sml29623 				TX_FULL_MARK));
8453859Sml29623 	}
8463859Sml29623 
8473859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
8483859Sml29623 		"<== nxge_txdma_reclaim status = 0x%08x", status));
8493859Sml29623 
8503859Sml29623 	return (status);
8513859Sml29623 }
8523859Sml29623 
8533859Sml29623 uint_t
8543859Sml29623 nxge_tx_intr(void *arg1, void *arg2)
8553859Sml29623 {
8563859Sml29623 	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
8573859Sml29623 	p_nxge_t		nxgep = (p_nxge_t)arg2;
8583859Sml29623 	p_nxge_ldg_t		ldgp;
8593859Sml29623 	uint8_t			channel;
8603859Sml29623 	uint32_t		vindex;
8613859Sml29623 	npi_handle_t		handle;
8623859Sml29623 	tx_cs_t			cs;
8633859Sml29623 	p_tx_ring_t 		*tx_rings;
8643859Sml29623 	p_tx_ring_t 		tx_ring_p;
8653859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
8663859Sml29623 	uint_t 			serviced = DDI_INTR_UNCLAIMED;
8673859Sml29623 	nxge_status_t 		status = NXGE_OK;
8683859Sml29623 
8693859Sml29623 	if (ldvp == NULL) {
8703859Sml29623 		NXGE_DEBUG_MSG((NULL, INT_CTL,
8713859Sml29623 			"<== nxge_tx_intr: nxgep $%p ldvp $%p",
8723859Sml29623 			nxgep, ldvp));
8733859Sml29623 		return (DDI_INTR_UNCLAIMED);
8743859Sml29623 	}
8753859Sml29623 
8763859Sml29623 	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
8773859Sml29623 		nxgep = ldvp->nxgep;
8783859Sml29623 	}
8793859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
8803859Sml29623 		"==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
8813859Sml29623 		nxgep, ldvp));
8823859Sml29623 	/*
8833859Sml29623 	 * This interrupt handler is for a specific
8843859Sml29623 	 * transmit dma channel.
8853859Sml29623 	 */
8863859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
8873859Sml29623 	/* Get the control and status for this channel. */
8883859Sml29623 	channel = ldvp->channel;
8893859Sml29623 	ldgp = ldvp->ldgp;
8903859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
8913859Sml29623 		"==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
8923859Sml29623 		"channel %d",
8933859Sml29623 		nxgep, ldvp, channel));
8943859Sml29623 
8953859Sml29623 	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
8963859Sml29623 	vindex = ldvp->vdma_index;
8973859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
8983859Sml29623 		"==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
8993859Sml29623 		channel, vindex, rs));
9003859Sml29623 	if (!rs && cs.bits.ldw.mk) {
9013859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
9023859Sml29623 			"==> nxge_tx_intr:channel %d ring index %d "
9033859Sml29623 			"status 0x%08x (mk bit set)",
9043859Sml29623 			channel, vindex, rs));
9053859Sml29623 		tx_rings = nxgep->tx_rings->rings;
9063859Sml29623 		tx_ring_p = tx_rings[vindex];
9073859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
9083859Sml29623 			"==> nxge_tx_intr:channel %d ring index %d "
9093859Sml29623 			"status 0x%08x (mk bit set, calling reclaim)",
9103859Sml29623 			channel, vindex, rs));
9113859Sml29623 
9123859Sml29623 		MUTEX_ENTER(&tx_ring_p->lock);
9133859Sml29623 		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
9143859Sml29623 		MUTEX_EXIT(&tx_ring_p->lock);
9153859Sml29623 		mac_tx_update(nxgep->mach);
9163859Sml29623 	}
9173859Sml29623 
9183859Sml29623 	/*
9193859Sml29623 	 * Process other transmit control and status.
9203859Sml29623 	 * Check the ldv state.
9213859Sml29623 	 */
9223859Sml29623 	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
9233859Sml29623 	/*
9243859Sml29623 	 * Rearm this logical group if this is a single device
9253859Sml29623 	 * group.
9263859Sml29623 	 */
9273859Sml29623 	if (ldgp->nldvs == 1) {
9283859Sml29623 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
9293859Sml29623 			"==> nxge_tx_intr: rearm"));
9303859Sml29623 		if (status == NXGE_OK) {
9313859Sml29623 			(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
9323859Sml29623 				B_TRUE, ldgp->ldg_timer);
9333859Sml29623 		}
9343859Sml29623 	}
9353859Sml29623 
9363859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
9373859Sml29623 	serviced = DDI_INTR_CLAIMED;
9383859Sml29623 	return (serviced);
9393859Sml29623 }
9403859Sml29623 
9413859Sml29623 void
9423859Sml29623 nxge_txdma_stop(p_nxge_t nxgep)
9433859Sml29623 {
9443859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
9453859Sml29623 
9463859Sml29623 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
9473859Sml29623 
9483859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
9493859Sml29623 }
9503859Sml29623 
9513859Sml29623 void
9523859Sml29623 nxge_txdma_stop_start(p_nxge_t nxgep)
9533859Sml29623 {
9543859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
9553859Sml29623 
9563859Sml29623 	(void) nxge_txdma_stop(nxgep);
9573859Sml29623 
9583859Sml29623 	(void) nxge_fixup_txdma_rings(nxgep);
9593859Sml29623 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
9603859Sml29623 	(void) nxge_tx_mac_enable(nxgep);
9613859Sml29623 	(void) nxge_txdma_hw_kick(nxgep);
9623859Sml29623 
9633859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
9643859Sml29623 }
9653859Sml29623 
9663859Sml29623 nxge_status_t
9673859Sml29623 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
9683859Sml29623 {
9693859Sml29623 	int			i, ndmas;
9703859Sml29623 	uint16_t		channel;
9713859Sml29623 	p_tx_rings_t 		tx_rings;
9723859Sml29623 	p_tx_ring_t 		*tx_desc_rings;
9733859Sml29623 	npi_handle_t		handle;
9743859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
9753859Sml29623 	nxge_status_t		status = NXGE_OK;
9763859Sml29623 
9773859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
9783859Sml29623 		"==> nxge_txdma_hw_mode: enable mode %d", enable));
9793859Sml29623 
9803859Sml29623 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
9813859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
9823859Sml29623 			"<== nxge_txdma_mode: not initialized"));
9833859Sml29623 		return (NXGE_ERROR);
9843859Sml29623 	}
9853859Sml29623 
9863859Sml29623 	tx_rings = nxgep->tx_rings;
9873859Sml29623 	if (tx_rings == NULL) {
9883859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
9893859Sml29623 			"<== nxge_txdma_hw_mode: NULL global ring pointer"));
9903859Sml29623 		return (NXGE_ERROR);
9913859Sml29623 	}
9923859Sml29623 
9933859Sml29623 	tx_desc_rings = tx_rings->rings;
9943859Sml29623 	if (tx_desc_rings == NULL) {
9953859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
9963859Sml29623 			"<== nxge_txdma_hw_mode: NULL rings pointer"));
9973859Sml29623 		return (NXGE_ERROR);
9983859Sml29623 	}
9993859Sml29623 
10003859Sml29623 	ndmas = tx_rings->ndmas;
10013859Sml29623 	if (!ndmas) {
10023859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
10033859Sml29623 			"<== nxge_txdma_hw_mode: no dma channel allocated"));
10043859Sml29623 		return (NXGE_ERROR);
10053859Sml29623 	}
10063859Sml29623 
10073859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_mode: "
10083859Sml29623 		"tx_rings $%p tx_desc_rings $%p ndmas %d",
10093859Sml29623 		tx_rings, tx_desc_rings, ndmas));
10103859Sml29623 
10113859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
10123859Sml29623 	for (i = 0; i < ndmas; i++) {
10133859Sml29623 		if (tx_desc_rings[i] == NULL) {
10143859Sml29623 			continue;
10153859Sml29623 		}
10163859Sml29623 		channel = tx_desc_rings[i]->tdc;
10173859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
10183859Sml29623 			"==> nxge_txdma_hw_mode: channel %d", channel));
10193859Sml29623 		if (enable) {
10203859Sml29623 			rs = npi_txdma_channel_enable(handle, channel);
10213859Sml29623 			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
10223859Sml29623 				"==> nxge_txdma_hw_mode: channel %d (enable) "
10233859Sml29623 				"rs 0x%x", channel, rs));
10243859Sml29623 		} else {
10253859Sml29623 			/*
10263859Sml29623 			 * Stop the dma channel and waits for the stop done.
10273859Sml29623 			 * If the stop done bit is not set, then force
10283859Sml29623 			 * an error so TXC will stop.
10293859Sml29623 			 * All channels bound to this port need to be stopped
10303859Sml29623 			 * and reset after injecting an interrupt error.
10313859Sml29623 			 */
10323859Sml29623 			rs = npi_txdma_channel_disable(handle, channel);
10333859Sml29623 			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
10343859Sml29623 				"==> nxge_txdma_hw_mode: channel %d (disable) "
10353859Sml29623 				"rs 0x%x", channel, rs));
10363859Sml29623 			{
10373859Sml29623 				tdmc_intr_dbg_t		intr_dbg;
10383859Sml29623 
10393859Sml29623 				if (rs != NPI_SUCCESS) {
10403859Sml29623 					/* Inject any error */
10413859Sml29623 					intr_dbg.value = 0;
10423859Sml29623 					intr_dbg.bits.ldw.nack_pref = 1;
10433859Sml29623 					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
10443859Sml29623 						"==> nxge_txdma_hw_mode: "
10453859Sml29623 						"channel %d (stop failed 0x%x) "
10463859Sml29623 						"(inject err)", rs, channel));
10473859Sml29623 					(void) npi_txdma_inj_int_error_set(
10483859Sml29623 						handle, channel, &intr_dbg);
10493859Sml29623 					rs = npi_txdma_channel_disable(handle,
10503859Sml29623 						channel);
10513859Sml29623 					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
10523859Sml29623 						"==> nxge_txdma_hw_mode: "
10533859Sml29623 						"channel %d (stop again 0x%x) "
10543859Sml29623 						"(after inject err)",
10553859Sml29623 						rs, channel));
10563859Sml29623 				}
10573859Sml29623 			}
10583859Sml29623 		}
10593859Sml29623 	}
10603859Sml29623 
10613859Sml29623 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
10623859Sml29623 
10633859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
10643859Sml29623 		"<== nxge_txdma_hw_mode: status 0x%x", status));
10653859Sml29623 
10663859Sml29623 	return (status);
10673859Sml29623 }
10683859Sml29623 
10693859Sml29623 void
10703859Sml29623 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
10713859Sml29623 {
10723859Sml29623 	npi_handle_t		handle;
10733859Sml29623 
10743859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
10753859Sml29623 		"==> nxge_txdma_enable_channel: channel %d", channel));
10763859Sml29623 
10773859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
10783859Sml29623 	/* enable the transmit dma channels */
10793859Sml29623 	(void) npi_txdma_channel_enable(handle, channel);
10803859Sml29623 
10813859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
10823859Sml29623 }
10833859Sml29623 
10843859Sml29623 void
10853859Sml29623 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
10863859Sml29623 {
10873859Sml29623 	npi_handle_t		handle;
10883859Sml29623 
10893859Sml29623 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
10903859Sml29623 		"==> nxge_txdma_disable_channel: channel %d", channel));
10913859Sml29623 
10923859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
10933859Sml29623 	/* stop the transmit dma channels */
10943859Sml29623 	(void) npi_txdma_channel_disable(handle, channel);
10953859Sml29623 
10963859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
10973859Sml29623 }
10983859Sml29623 
10993859Sml29623 int
11003859Sml29623 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
11013859Sml29623 {
11023859Sml29623 	npi_handle_t		handle;
11033859Sml29623 	tdmc_intr_dbg_t		intr_dbg;
11043859Sml29623 	int			status;
11053859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
11063859Sml29623 
11073859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
11083859Sml29623 	/*
11093859Sml29623 	 * Stop the dma channel waits for the stop done.
11103859Sml29623 	 * If the stop done bit is not set, then create
11113859Sml29623 	 * an error.
11123859Sml29623 	 */
11133859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
11143859Sml29623 	rs = npi_txdma_channel_disable(handle, channel);
11153859Sml29623 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
11163859Sml29623 	if (status == NXGE_OK) {
11173859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
11183859Sml29623 			"<== nxge_txdma_stop_inj_err (channel %d): "
11193859Sml29623 			"stopped OK", channel));
11203859Sml29623 		return (status);
11213859Sml29623 	}
11223859Sml29623 
11233859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
11243859Sml29623 		"==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
11253859Sml29623 		"injecting error", channel, rs));
11263859Sml29623 	/* Inject any error */
11273859Sml29623 	intr_dbg.value = 0;
11283859Sml29623 	intr_dbg.bits.ldw.nack_pref = 1;
11293859Sml29623 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
11303859Sml29623 
11313859Sml29623 	/* Stop done bit will be set as a result of error injection */
11323859Sml29623 	rs = npi_txdma_channel_disable(handle, channel);
11333859Sml29623 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
11343859Sml29623 	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
11353859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
11363859Sml29623 			"<== nxge_txdma_stop_inj_err (channel %d): "
11373859Sml29623 			"stopped OK ", channel));
11383859Sml29623 		return (status);
11393859Sml29623 	}
11403859Sml29623 
11413859Sml29623 #if	defined(NXGE_DEBUG)
11423859Sml29623 	nxge_txdma_regs_dump_channels(nxgep);
11433859Sml29623 #endif
11443859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
11453859Sml29623 		"==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
11463859Sml29623 		" (injected error but still not stopped)", channel, rs));
11473859Sml29623 
11483859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
11493859Sml29623 	return (status);
11503859Sml29623 }
11513859Sml29623 
11523859Sml29623 void
11533859Sml29623 nxge_hw_start_tx(p_nxge_t nxgep)
11543859Sml29623 {
11553859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_tx"));
11563859Sml29623 
11573859Sml29623 	(void) nxge_txdma_hw_start(nxgep);
11583859Sml29623 	(void) nxge_tx_mac_enable(nxgep);
11593859Sml29623 
11603859Sml29623 	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_tx"));
11613859Sml29623 }
11623859Sml29623 
11633859Sml29623 /*ARGSUSED*/
11643859Sml29623 void
11653859Sml29623 nxge_fixup_txdma_rings(p_nxge_t nxgep)
11663859Sml29623 {
11673859Sml29623 	int			index, ndmas;
11683859Sml29623 	uint16_t		channel;
11693859Sml29623 	p_tx_rings_t 		tx_rings;
11703859Sml29623 
11713859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
11723859Sml29623 
11733859Sml29623 	/*
11743859Sml29623 	 * For each transmit channel, reclaim each descriptor and
11753859Sml29623 	 * free buffers.
11763859Sml29623 	 */
11773859Sml29623 	tx_rings = nxgep->tx_rings;
11783859Sml29623 	if (tx_rings == NULL) {
11793859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
11803859Sml29623 			"<== nxge_fixup_txdma_rings: NULL ring pointer"));
11813859Sml29623 		return;
11823859Sml29623 	}
11833859Sml29623 
11843859Sml29623 	ndmas = tx_rings->ndmas;
11853859Sml29623 	if (!ndmas) {
11863859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
11873859Sml29623 			"<== nxge_fixup_txdma_rings: no channel allocated"));
11883859Sml29623 		return;
11893859Sml29623 	}
11903859Sml29623 
11913859Sml29623 	if (tx_rings->rings == NULL) {
11923859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
11933859Sml29623 			"<== nxge_fixup_txdma_rings: NULL rings pointer"));
11943859Sml29623 		return;
11953859Sml29623 	}
11963859Sml29623 
11973859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_fixup_txdma_rings: "
11983859Sml29623 		"tx_rings $%p tx_desc_rings $%p ndmas %d",
11993859Sml29623 		tx_rings, tx_rings->rings, ndmas));
12003859Sml29623 
12013859Sml29623 	for (index = 0; index < ndmas; index++) {
12023859Sml29623 		channel = tx_rings->rings[index]->tdc;
12033859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
12043859Sml29623 			"==> nxge_fixup_txdma_rings: channel %d", channel));
12053859Sml29623 
12063859Sml29623 		nxge_txdma_fixup_channel(nxgep, tx_rings->rings[index],
12073859Sml29623 			channel);
12083859Sml29623 	}
12093859Sml29623 
12103859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
12113859Sml29623 }
12123859Sml29623 
12133859Sml29623 /*ARGSUSED*/
12143859Sml29623 void
12153859Sml29623 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
12163859Sml29623 {
12173859Sml29623 	p_tx_ring_t	ring_p;
12183859Sml29623 
12193859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
12203859Sml29623 	ring_p = nxge_txdma_get_ring(nxgep, channel);
12213859Sml29623 	if (ring_p == NULL) {
12223859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
12233859Sml29623 		return;
12243859Sml29623 	}
12253859Sml29623 
12263859Sml29623 	if (ring_p->tdc != channel) {
12273859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
12283859Sml29623 			"<== nxge_txdma_fix_channel: channel not matched "
12293859Sml29623 			"ring tdc %d passed channel",
12303859Sml29623 			ring_p->tdc, channel));
12313859Sml29623 		return;
12323859Sml29623 	}
12333859Sml29623 
12343859Sml29623 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
12353859Sml29623 
12363859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
12373859Sml29623 }
12383859Sml29623 
12393859Sml29623 /*ARGSUSED*/
12403859Sml29623 void
12413859Sml29623 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
12423859Sml29623 {
12433859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
12443859Sml29623 
12453859Sml29623 	if (ring_p == NULL) {
12463859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
12473859Sml29623 			"<== nxge_txdma_fixup_channel: NULL ring pointer"));
12483859Sml29623 		return;
12493859Sml29623 	}
12503859Sml29623 
12513859Sml29623 	if (ring_p->tdc != channel) {
12523859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
12533859Sml29623 			"<== nxge_txdma_fixup_channel: channel not matched "
12543859Sml29623 			"ring tdc %d passed channel",
12553859Sml29623 			ring_p->tdc, channel));
12563859Sml29623 		return;
12573859Sml29623 	}
12583859Sml29623 
12593859Sml29623 	MUTEX_ENTER(&ring_p->lock);
12603859Sml29623 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
12613859Sml29623 	ring_p->rd_index = 0;
12623859Sml29623 	ring_p->wr_index = 0;
12633859Sml29623 	ring_p->ring_head.value = 0;
12643859Sml29623 	ring_p->ring_kick_tail.value = 0;
12653859Sml29623 	ring_p->descs_pending = 0;
12663859Sml29623 	MUTEX_EXIT(&ring_p->lock);
12673859Sml29623 
12683859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
12693859Sml29623 }
12703859Sml29623 
12713859Sml29623 /*ARGSUSED*/
12723859Sml29623 void
12733859Sml29623 nxge_txdma_hw_kick(p_nxge_t nxgep)
12743859Sml29623 {
12753859Sml29623 	int			index, ndmas;
12763859Sml29623 	uint16_t		channel;
12773859Sml29623 	p_tx_rings_t 		tx_rings;
12783859Sml29623 
12793859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
12803859Sml29623 
12813859Sml29623 	tx_rings = nxgep->tx_rings;
12823859Sml29623 	if (tx_rings == NULL) {
12833859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
12843859Sml29623 			"<== nxge_txdma_hw_kick: NULL ring pointer"));
12853859Sml29623 		return;
12863859Sml29623 	}
12873859Sml29623 
12883859Sml29623 	ndmas = tx_rings->ndmas;
12893859Sml29623 	if (!ndmas) {
12903859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
12913859Sml29623 			"<== nxge_txdma_hw_kick: no channel allocated"));
12923859Sml29623 		return;
12933859Sml29623 	}
12943859Sml29623 
12953859Sml29623 	if (tx_rings->rings == NULL) {
12963859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
12973859Sml29623 			"<== nxge_txdma_hw_kick: NULL rings pointer"));
12983859Sml29623 		return;
12993859Sml29623 	}
13003859Sml29623 
13013859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_kick: "
13023859Sml29623 		"tx_rings $%p tx_desc_rings $%p ndmas %d",
13033859Sml29623 		tx_rings, tx_rings->rings, ndmas));
13043859Sml29623 
13053859Sml29623 	for (index = 0; index < ndmas; index++) {
13063859Sml29623 		channel = tx_rings->rings[index]->tdc;
13073859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13083859Sml29623 			"==> nxge_txdma_hw_kick: channel %d", channel));
13093859Sml29623 		nxge_txdma_hw_kick_channel(nxgep, tx_rings->rings[index],
13103859Sml29623 			channel);
13113859Sml29623 	}
13123859Sml29623 
13133859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
13143859Sml29623 }
13153859Sml29623 
13163859Sml29623 /*ARGSUSED*/
13173859Sml29623 void
13183859Sml29623 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
13193859Sml29623 {
13203859Sml29623 	p_tx_ring_t	ring_p;
13213859Sml29623 
13223859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
13233859Sml29623 
13243859Sml29623 	ring_p = nxge_txdma_get_ring(nxgep, channel);
13253859Sml29623 	if (ring_p == NULL) {
13263859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
13273859Sml29623 			    " nxge_txdma_kick_channel"));
13283859Sml29623 		return;
13293859Sml29623 	}
13303859Sml29623 
13313859Sml29623 	if (ring_p->tdc != channel) {
13323859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
13333859Sml29623 			"<== nxge_txdma_kick_channel: channel not matched "
13343859Sml29623 			"ring tdc %d passed channel",
13353859Sml29623 			ring_p->tdc, channel));
13363859Sml29623 		return;
13373859Sml29623 	}
13383859Sml29623 
13393859Sml29623 	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
13403859Sml29623 
13413859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
13423859Sml29623 }
13433859Sml29623 
13443859Sml29623 /*ARGSUSED*/
13453859Sml29623 void
13463859Sml29623 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
13473859Sml29623 {
13483859Sml29623 
13493859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
13503859Sml29623 
13513859Sml29623 	if (ring_p == NULL) {
13523859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
13533859Sml29623 			"<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
13543859Sml29623 		return;
13553859Sml29623 	}
13563859Sml29623 
13573859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
13583859Sml29623 }
13593859Sml29623 
13603859Sml29623 /*ARGSUSED*/
13613859Sml29623 void
13623859Sml29623 nxge_check_tx_hang(p_nxge_t nxgep)
13633859Sml29623 {
13643859Sml29623 
13653859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
13663859Sml29623 
13673859Sml29623 	/*
13683859Sml29623 	 * Needs inputs from hardware for regs:
13693859Sml29623 	 *	head index had not moved since last timeout.
13703859Sml29623 	 *	packets not transmitted or stuffed registers.
13713859Sml29623 	 */
13723859Sml29623 	if (nxge_txdma_hung(nxgep)) {
13733859Sml29623 		nxge_fixup_hung_txdma_rings(nxgep);
13743859Sml29623 	}
13753859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
13763859Sml29623 }
13773859Sml29623 
13783859Sml29623 int
13793859Sml29623 nxge_txdma_hung(p_nxge_t nxgep)
13803859Sml29623 {
13813859Sml29623 	int			index, ndmas;
13823859Sml29623 	uint16_t		channel;
13833859Sml29623 	p_tx_rings_t 		tx_rings;
13843859Sml29623 	p_tx_ring_t 		tx_ring_p;
13853859Sml29623 
13863859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
13873859Sml29623 	tx_rings = nxgep->tx_rings;
13883859Sml29623 	if (tx_rings == NULL) {
13893859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
13903859Sml29623 			"<== nxge_txdma_hung: NULL ring pointer"));
13913859Sml29623 		return (B_FALSE);
13923859Sml29623 	}
13933859Sml29623 
13943859Sml29623 	ndmas = tx_rings->ndmas;
13953859Sml29623 	if (!ndmas) {
13963859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
13973859Sml29623 			"<== nxge_txdma_hung: no channel "
13983859Sml29623 			"allocated"));
13993859Sml29623 		return (B_FALSE);
14003859Sml29623 	}
14013859Sml29623 
14023859Sml29623 	if (tx_rings->rings == NULL) {
14033859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
14043859Sml29623 			"<== nxge_txdma_hung: NULL rings pointer"));
14053859Sml29623 		return (B_FALSE);
14063859Sml29623 	}
14073859Sml29623 
14083859Sml29623 	for (index = 0; index < ndmas; index++) {
14093859Sml29623 		channel = tx_rings->rings[index]->tdc;
14103859Sml29623 		tx_ring_p = tx_rings->rings[index];
14113859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
14123859Sml29623 			"==> nxge_txdma_hung: channel %d", channel));
14133859Sml29623 		if (nxge_txdma_channel_hung(nxgep, tx_ring_p, channel)) {
14143859Sml29623 			return (B_TRUE);
14153859Sml29623 		}
14163859Sml29623 	}
14173859Sml29623 
14183859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
14193859Sml29623 
14203859Sml29623 	return (B_FALSE);
14213859Sml29623 }
14223859Sml29623 
/*
 * nxge_txdma_channel_hung:  Decide whether one transmit channel looks
 * hung.
 *
 *	nxgep:	   device instance.
 *	tx_ring_p: software ring for the channel.
 *	channel:   TDC number (used for register access and tracing).
 *
 * After reclaiming under the ring lock, snapshots the software tail
 * (wr_index/wrap) and read index, then reads the hardware-maintained
 * head/wrap register.  "Hung" here means the ring is still full after
 * reclaim; an empty ring (head == tail == rd_index, wraps equal) is
 * definitely not hung.  A ring that is neither empty nor full is
 * reported not hung.
 *
 * Returns B_TRUE if the channel appears hung, B_FALSE otherwise.
 */
int
nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
{
	uint16_t		head_index, tail_index;
	boolean_t		head_wrap, tail_wrap;
	npi_handle_t		handle;
	tx_ring_hdl_t		tx_head;
	uint_t			tx_rd_index;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_txdma_channel_hung: channel %d", channel));
	/*
	 * Reclaim completed descriptors first, then snapshot the
	 * software indices while still holding the ring lock so the
	 * three values are consistent with each other.
	 */
	MUTEX_ENTER(&tx_ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	tail_index = tx_ring_p->wr_index;
	tail_wrap = tx_ring_p->wr_index_wrap;
	tx_rd_index = tx_ring_p->rd_index;
	MUTEX_EXIT(&tx_ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
		"tail_index %d tail_wrap %d ",
		channel, tx_rd_index, tail_index, tail_wrap));
	/*
	 * Read the hardware maintained transmit head
	 * and wrap around bit.
	 */
	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
	head_index =  tx_head.bits.ldw.head;
	head_wrap = tx_head.bits.ldw.wrap;
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_txdma_channel_hung: "
		"tx_rd_index %d tail %d tail_wrap %d "
		"head %d wrap %d",
		tx_rd_index, tail_index, tail_wrap,
		head_index, head_wrap));

	/* Fully drained ring: cannot be hung. */
	if (TXDMA_RING_EMPTY(head_index, head_wrap,
			tail_index, tail_wrap) &&
			(head_index == tx_rd_index)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
			"==> nxge_txdma_channel_hung: EMPTY"));
		return (B_FALSE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
		"==> nxge_txdma_channel_hung: Checking if ring full"));
	/* Still full after a reclaim pass: treat the channel as hung. */
	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
			tail_wrap)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
			"==> nxge_txdma_channel_hung: full"));
		return (B_TRUE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));

	return (B_FALSE);
}
14843859Sml29623 
14853859Sml29623 /*ARGSUSED*/
14863859Sml29623 void
14873859Sml29623 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
14883859Sml29623 {
14893859Sml29623 	int			index, ndmas;
14903859Sml29623 	uint16_t		channel;
14913859Sml29623 	p_tx_rings_t 		tx_rings;
14923859Sml29623 
14933859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
14943859Sml29623 	tx_rings = nxgep->tx_rings;
14953859Sml29623 	if (tx_rings == NULL) {
14963859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
14973859Sml29623 			"<== nxge_fixup_hung_txdma_rings: NULL ring pointer"));
14983859Sml29623 		return;
14993859Sml29623 	}
15003859Sml29623 
15013859Sml29623 	ndmas = tx_rings->ndmas;
15023859Sml29623 	if (!ndmas) {
15033859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15043859Sml29623 			"<== nxge_fixup_hung_txdma_rings: no channel "
15053859Sml29623 			"allocated"));
15063859Sml29623 		return;
15073859Sml29623 	}
15083859Sml29623 
15093859Sml29623 	if (tx_rings->rings == NULL) {
15103859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15113859Sml29623 			"<== nxge_fixup_hung_txdma_rings: NULL rings pointer"));
15123859Sml29623 		return;
15133859Sml29623 	}
15143859Sml29623 
15153859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings: "
15163859Sml29623 		"tx_rings $%p tx_desc_rings $%p ndmas %d",
15173859Sml29623 		tx_rings, tx_rings->rings, ndmas));
15183859Sml29623 
15193859Sml29623 	for (index = 0; index < ndmas; index++) {
15203859Sml29623 		channel = tx_rings->rings[index]->tdc;
15213859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15223859Sml29623 			"==> nxge_fixup_hung_txdma_rings: channel %d",
15233859Sml29623 			channel));
15243859Sml29623 
15253859Sml29623 		nxge_txdma_fixup_hung_channel(nxgep, tx_rings->rings[index],
15263859Sml29623 			channel);
15273859Sml29623 	}
15283859Sml29623 
15293859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
15303859Sml29623 }
15313859Sml29623 
15323859Sml29623 /*ARGSUSED*/
15333859Sml29623 void
15343859Sml29623 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
15353859Sml29623 {
15363859Sml29623 	p_tx_ring_t	ring_p;
15373859Sml29623 
15383859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
15393859Sml29623 	ring_p = nxge_txdma_get_ring(nxgep, channel);
15403859Sml29623 	if (ring_p == NULL) {
15413859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15423859Sml29623 			"<== nxge_txdma_fix_hung_channel"));
15433859Sml29623 		return;
15443859Sml29623 	}
15453859Sml29623 
15463859Sml29623 	if (ring_p->tdc != channel) {
15473859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15483859Sml29623 			"<== nxge_txdma_fix_hung_channel: channel not matched "
15493859Sml29623 			"ring tdc %d passed channel",
15503859Sml29623 			ring_p->tdc, channel));
15513859Sml29623 		return;
15523859Sml29623 	}
15533859Sml29623 
15543859Sml29623 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
15553859Sml29623 
15563859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
15573859Sml29623 }
15583859Sml29623 
15593859Sml29623 /*ARGSUSED*/
15603859Sml29623 void
15613859Sml29623 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
15623859Sml29623 	uint16_t channel)
15633859Sml29623 {
15643859Sml29623 	npi_handle_t		handle;
15653859Sml29623 	tdmc_intr_dbg_t		intr_dbg;
15663859Sml29623 	int			status = NXGE_OK;
15673859Sml29623 
15683859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
15693859Sml29623 
15703859Sml29623 	if (ring_p == NULL) {
15713859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15723859Sml29623 			"<== nxge_txdma_fixup_channel: NULL ring pointer"));
15733859Sml29623 		return;
15743859Sml29623 	}
15753859Sml29623 
15763859Sml29623 	if (ring_p->tdc != channel) {
15773859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15783859Sml29623 			"<== nxge_txdma_fixup_hung_channel: channel "
15793859Sml29623 			"not matched "
15803859Sml29623 			"ring tdc %d passed channel",
15813859Sml29623 			ring_p->tdc, channel));
15823859Sml29623 		return;
15833859Sml29623 	}
15843859Sml29623 
15853859Sml29623 	/* Reclaim descriptors */
15863859Sml29623 	MUTEX_ENTER(&ring_p->lock);
15873859Sml29623 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
15883859Sml29623 	MUTEX_EXIT(&ring_p->lock);
15893859Sml29623 
15903859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
15913859Sml29623 	/*
15923859Sml29623 	 * Stop the dma channel waits for the stop done.
15933859Sml29623 	 * If the stop done bit is not set, then force
15943859Sml29623 	 * an error.
15953859Sml29623 	 */
15963859Sml29623 	status = npi_txdma_channel_disable(handle, channel);
15973859Sml29623 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
15983859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
15993859Sml29623 			"<== nxge_txdma_fixup_hung_channel: stopped OK "
16003859Sml29623 			"ring tdc %d passed channel %d",
16013859Sml29623 			ring_p->tdc, channel));
16023859Sml29623 		return;
16033859Sml29623 	}
16043859Sml29623 
16053859Sml29623 	/* Inject any error */
16063859Sml29623 	intr_dbg.value = 0;
16073859Sml29623 	intr_dbg.bits.ldw.nack_pref = 1;
16083859Sml29623 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
16093859Sml29623 
16103859Sml29623 	/* Stop done bit will be set as a result of error injection */
16113859Sml29623 	status = npi_txdma_channel_disable(handle, channel);
16123859Sml29623 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
16133859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
16143859Sml29623 			"<== nxge_txdma_fixup_hung_channel: stopped again"
16153859Sml29623 			"ring tdc %d passed channel",
16163859Sml29623 			ring_p->tdc, channel));
16173859Sml29623 		return;
16183859Sml29623 	}
16193859Sml29623 
16203859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
16213859Sml29623 		"<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
16223859Sml29623 		"ring tdc %d passed channel",
16233859Sml29623 		ring_p->tdc, channel));
16243859Sml29623 
16253859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
16263859Sml29623 }
16273859Sml29623 
16283859Sml29623 /*ARGSUSED*/
16293859Sml29623 void
16303859Sml29623 nxge_reclaim_rings(p_nxge_t nxgep)
16313859Sml29623 {
16323859Sml29623 	int			index, ndmas;
16333859Sml29623 	uint16_t		channel;
16343859Sml29623 	p_tx_rings_t 		tx_rings;
16353859Sml29623 	p_tx_ring_t 		tx_ring_p;
16363859Sml29623 
16373859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_ring"));
16383859Sml29623 	tx_rings = nxgep->tx_rings;
16393859Sml29623 	if (tx_rings == NULL) {
16403859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
16413859Sml29623 			"<== nxge_reclain_rimgs: NULL ring pointer"));
16423859Sml29623 		return;
16433859Sml29623 	}
16443859Sml29623 
16453859Sml29623 	ndmas = tx_rings->ndmas;
16463859Sml29623 	if (!ndmas) {
16473859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
16483859Sml29623 			"<== nxge_reclain_rimgs: no channel "
16493859Sml29623 			"allocated"));
16503859Sml29623 		return;
16513859Sml29623 	}
16523859Sml29623 
16533859Sml29623 	if (tx_rings->rings == NULL) {
16543859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
16553859Sml29623 			"<== nxge_reclain_rimgs: NULL rings pointer"));
16563859Sml29623 		return;
16573859Sml29623 	}
16583859Sml29623 
16593859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclain_rimgs: "
16603859Sml29623 		"tx_rings $%p tx_desc_rings $%p ndmas %d",
16613859Sml29623 		tx_rings, tx_rings->rings, ndmas));
16623859Sml29623 
16633859Sml29623 	for (index = 0; index < ndmas; index++) {
16643859Sml29623 		channel = tx_rings->rings[index]->tdc;
16653859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
16663859Sml29623 			"==> reclain_rimgs: channel %d",
16673859Sml29623 			channel));
16683859Sml29623 		tx_ring_p = tx_rings->rings[index];
16693859Sml29623 		MUTEX_ENTER(&tx_ring_p->lock);
16703859Sml29623 		(void) nxge_txdma_reclaim(nxgep, tx_ring_p, channel);
16713859Sml29623 		MUTEX_EXIT(&tx_ring_p->lock);
16723859Sml29623 	}
16733859Sml29623 
16743859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
16753859Sml29623 }
16763859Sml29623 
16773859Sml29623 void
16783859Sml29623 nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
16793859Sml29623 {
16803859Sml29623 	int			index, ndmas;
16813859Sml29623 	uint16_t		channel;
16823859Sml29623 	p_tx_rings_t 		tx_rings;
16833859Sml29623 	npi_handle_t		handle;
16843859Sml29623 
16853859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_txdma_regs_dump_channels"));
16863859Sml29623 
16873859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
16883859Sml29623 	(void) npi_txdma_dump_fzc_regs(handle);
16893859Sml29623 
16903859Sml29623 	tx_rings = nxgep->tx_rings;
16913859Sml29623 	if (tx_rings == NULL) {
16923859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
16933859Sml29623 			"<== nxge_txdma_regs_dump_channels: NULL ring"));
16943859Sml29623 		return;
16953859Sml29623 	}
16963859Sml29623 
16973859Sml29623 	ndmas = tx_rings->ndmas;
16983859Sml29623 	if (!ndmas) {
16993859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
17003859Sml29623 			"<== nxge_txdma_regs_dump_channels: "
17013859Sml29623 			"no channel allocated"));
17023859Sml29623 		return;
17033859Sml29623 	}
17043859Sml29623 
17053859Sml29623 	if (tx_rings->rings == NULL) {
17063859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
17073859Sml29623 			"<== nxge_txdma_regs_dump_channels: NULL rings"));
17083859Sml29623 		return;
17093859Sml29623 	}
17103859Sml29623 
17113859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_regs_dump_channels: "
17123859Sml29623 		"tx_rings $%p tx_desc_rings $%p ndmas %d",
17133859Sml29623 		tx_rings, tx_rings->rings, ndmas));
17143859Sml29623 
17153859Sml29623 	for (index = 0; index < ndmas; index++) {
17163859Sml29623 		channel = tx_rings->rings[index]->tdc;
17173859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
17183859Sml29623 			"==> nxge_txdma_regs_dump_channels: channel %d",
17193859Sml29623 			channel));
17203859Sml29623 		(void) npi_txdma_dump_tdc_regs(handle, channel);
17213859Sml29623 	}
17223859Sml29623 
17233859Sml29623 	/* Dump TXC registers */
17243859Sml29623 	(void) npi_txc_dump_fzc_regs(handle);
17253859Sml29623 	(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
17263859Sml29623 
17273859Sml29623 	for (index = 0; index < ndmas; index++) {
17283859Sml29623 		channel = tx_rings->rings[index]->tdc;
17293859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
17303859Sml29623 			"==> nxge_txdma_regs_dump_channels: channel %d",
17313859Sml29623 			channel));
17323859Sml29623 		(void) npi_txc_dump_tdc_fzc_regs(handle, channel);
17333859Sml29623 	}
17343859Sml29623 
17353859Sml29623 	for (index = 0; index < ndmas; index++) {
17363859Sml29623 		channel = tx_rings->rings[index]->tdc;
17373859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
17383859Sml29623 			"==> nxge_txdma_regs_dump_channels: channel %d",
17393859Sml29623 			channel));
17403859Sml29623 		nxge_txdma_regs_dump(nxgep, channel);
17413859Sml29623 	}
17423859Sml29623 
17433859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump"));
17443859Sml29623 
17453859Sml29623 }
17463859Sml29623 
17473859Sml29623 void
17483859Sml29623 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
17493859Sml29623 {
17503859Sml29623 	npi_handle_t		handle;
17513859Sml29623 	tx_ring_hdl_t 		hdl;
17523859Sml29623 	tx_ring_kick_t 		kick;
17533859Sml29623 	tx_cs_t 		cs;
17543859Sml29623 	txc_control_t		control;
17553859Sml29623 	uint32_t		bitmap = 0;
17563859Sml29623 	uint32_t		burst = 0;
17573859Sml29623 	uint32_t		bytes = 0;
17583859Sml29623 	dma_log_page_t		cfg;
17593859Sml29623 
17603859Sml29623 	printf("\n\tfunc # %d tdc %d ",
17613859Sml29623 		nxgep->function_num, channel);
17623859Sml29623 	cfg.page_num = 0;
17633859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
17643859Sml29623 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
17653859Sml29623 	printf("\n\tlog page func %d valid page 0 %d",
17663859Sml29623 		cfg.func_num, cfg.valid);
17673859Sml29623 	cfg.page_num = 1;
17683859Sml29623 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
17693859Sml29623 	printf("\n\tlog page func %d valid page 1 %d",
17703859Sml29623 		cfg.func_num, cfg.valid);
17713859Sml29623 
17723859Sml29623 	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
17733859Sml29623 	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
17743859Sml29623 	printf("\n\thead value is 0x%0llx",
17753859Sml29623 		(long long)hdl.value);
17763859Sml29623 	printf("\n\thead index %d", hdl.bits.ldw.head);
17773859Sml29623 	printf("\n\tkick value is 0x%0llx",
17783859Sml29623 		(long long)kick.value);
17793859Sml29623 	printf("\n\ttail index %d\n", kick.bits.ldw.tail);
17803859Sml29623 
17813859Sml29623 	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
17823859Sml29623 	printf("\n\tControl statue is 0x%0llx", (long long)cs.value);
17833859Sml29623 	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
17843859Sml29623 
17853859Sml29623 	(void) npi_txc_control(handle, OP_GET, &control);
17863859Sml29623 	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
17873859Sml29623 	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
17883859Sml29623 	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
17893859Sml29623 
17903859Sml29623 	printf("\n\tTXC port control 0x%0llx",
17913859Sml29623 		(long long)control.value);
17923859Sml29623 	printf("\n\tTXC port bitmap 0x%x", bitmap);
17933859Sml29623 	printf("\n\tTXC max burst %d", burst);
17943859Sml29623 	printf("\n\tTXC bytes xmt %d\n", bytes);
17953859Sml29623 
17963859Sml29623 	{
17973859Sml29623 		ipp_status_t status;
17983859Sml29623 
17993859Sml29623 		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
18005125Sjoycey #if defined(__i386)
18015125Sjoycey 		printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value);
18025125Sjoycey #else
18033859Sml29623 		printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value);
18045125Sjoycey #endif
18053859Sml29623 	}
18063859Sml29623 }
18073859Sml29623 
18083859Sml29623 /*
18093859Sml29623  * Static functions start here.
18103859Sml29623  */
/*
 * nxge_map_txdma
 *
 * Map all TX DMA channels: for each channel in the pre-allocated buffer
 * pool, build the descriptor ring, buffer blocks and mailbox, then hang
 * the resulting ring/mailbox arrays off the nxge soft state.  On partial
 * failure, previously-mapped channels are unmapped in reverse order.
 *
 * Returns NXGE_OK on success, NXGE_ERROR if the pools were never
 * allocated, are empty, or a channel fails to map.
 */
static nxge_status_t
nxge_map_txdma(p_nxge_t nxgep)
{
	int			i, ndmas;
	uint16_t		channel;
	p_tx_rings_t 		tx_rings;
	p_tx_ring_t 		*tx_desc_rings;
	p_tx_mbox_areas_t 	tx_mbox_areas_p;
	p_tx_mbox_t		*tx_mbox_p;
	p_nxge_dma_pool_t	dma_buf_poolp;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_nxge_dma_common_t	*dma_buf_p;
	p_nxge_dma_common_t	*dma_cntl_p;
	nxge_status_t		status = NXGE_OK;
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	p_nxge_dma_common_t	t_dma_buf_p;
	p_nxge_dma_common_t	t_dma_cntl_p;
#endif

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));

	/* Both the data-buffer and control (descriptor) pools must exist. */
	dma_buf_poolp = nxgep->tx_buf_pool_p;
	dma_cntl_poolp = nxgep->tx_cntl_pool_p;

	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
			"==> nxge_map_txdma: buf not allocated"));
		return (NXGE_ERROR);
	}

	ndmas = dma_buf_poolp->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
			"<== nxge_map_txdma: no dma allocated"));
		return (NXGE_ERROR);
	}

	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;

	/* Allocate the per-port ring container and the per-channel arrays. */
	tx_rings = (p_tx_rings_t)
			KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
	tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC(
			sizeof (p_tx_ring_t) * ndmas, KM_SLEEP);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
		"tx_rings $%p tx_desc_rings $%p",
		tx_rings, tx_desc_rings));

	tx_mbox_areas_p = (p_tx_mbox_areas_t)
			KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
	tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC(
			sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP);

	/*
	 * Map descriptors from the buffer pools for each dma channel.
	 */
	for (i = 0; i < ndmas; i++) {
		/*
		 * Set up and prepare buffer blocks, descriptors
		 * and mailbox.
		 */
		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
		status = nxge_map_txdma_channel(nxgep, channel,
				(p_nxge_dma_common_t *)&dma_buf_p[i],
				(p_tx_ring_t *)&tx_desc_rings[i],
				dma_buf_poolp->num_chunks[i],
				(p_nxge_dma_common_t *)&dma_cntl_p[i],
				(p_tx_mbox_t *)&tx_mbox_p[i]);
		if (status != NXGE_OK) {
			goto nxge_map_txdma_fail1;
		}
		tx_desc_rings[i]->index = (uint16_t)i;
		tx_desc_rings[i]->tdc_stats = &nxgep->statsp->tdc_stats[i];

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
		/*
		 * On N2/NIU with a single DMA block, record the original
		 * (pre-IOMMU) buffer and control addresses/sizes so they
		 * can later be registered with the hypervisor.
		 */
		if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) {
			tx_desc_rings[i]->hv_set = B_FALSE;
			t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i];
			t_dma_cntl_p = (p_nxge_dma_common_t)dma_cntl_p[i];

			tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp =
				(uint64_t)t_dma_buf_p->orig_ioaddr_pp;
			tx_desc_rings[i]->hv_tx_buf_ioaddr_size =
				(uint64_t)t_dma_buf_p->orig_alength;

			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				"==> nxge_map_txdma_channel: "
				"hv data buf base io $%p "
				"size 0x%llx (%d) "
				"buf base io $%p "
				"orig vatopa base io $%p "
				"orig_len 0x%llx (%d)",
				tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp,
				tx_desc_rings[i]->hv_tx_buf_ioaddr_size,
				tx_desc_rings[i]->hv_tx_buf_ioaddr_size,
				t_dma_buf_p->ioaddr_pp,
				t_dma_buf_p->orig_vatopa,
				t_dma_buf_p->orig_alength,
				t_dma_buf_p->orig_alength));

			tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp =
				(uint64_t)t_dma_cntl_p->orig_ioaddr_pp;
			tx_desc_rings[i]->hv_tx_cntl_ioaddr_size =
				(uint64_t)t_dma_cntl_p->orig_alength;

			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				"==> nxge_map_txdma_channel: "
				"hv cntl base io $%p "
				"orig ioaddr_pp ($%p) "
				"orig vatopa ($%p) "
				"size 0x%llx (%d 0x%x)",
				tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp,
				t_dma_cntl_p->orig_ioaddr_pp,
				t_dma_cntl_p->orig_vatopa,
				tx_desc_rings[i]->hv_tx_cntl_ioaddr_size,
				t_dma_cntl_p->orig_alength,
				t_dma_cntl_p->orig_alength));
		}
#endif
	}

	/* Publish the fully-mapped rings and mailboxes in the soft state. */
	tx_rings->ndmas = ndmas;
	tx_rings->rings = tx_desc_rings;
	nxgep->tx_rings = tx_rings;
	tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p;
	nxgep->tx_mbox_areas_p = tx_mbox_areas_p;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
		"tx_rings $%p rings $%p",
		nxgep->tx_rings, nxgep->tx_rings->rings));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
		"tx_rings $%p tx_desc_rings $%p",
		nxgep->tx_rings, tx_desc_rings));

	goto nxge_map_txdma_exit;

nxge_map_txdma_fail1:
	/*
	 * Channel i failed to map: unwind channels [0, i) in reverse
	 * order, then free the container allocations made above.
	 * NOTE(review): this format string has three conversions but
	 * four arguments (nxgep appears extra) — verify against
	 * NXGE_DEBUG_MSG's expansion.
	 */
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma: uninit tx desc "
		"(status 0x%x channel %d i %d)",
		nxgep, status, channel, i));
	i--;
	for (; i >= 0; i--) {
		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
		nxge_unmap_txdma_channel(nxgep, channel,
			tx_desc_rings[i],
			tx_mbox_p[i]);
	}

	KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
	KMEM_FREE(tx_rings, sizeof (tx_rings_t));
	KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
	KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));

nxge_map_txdma_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma: "
		"(status 0x%x channel %d)",
		status, channel));

	return (status);
}
19743859Sml29623 
19753859Sml29623 static void
19763859Sml29623 nxge_unmap_txdma(p_nxge_t nxgep)
19773859Sml29623 {
19783859Sml29623 	int			i, ndmas;
19793859Sml29623 	uint8_t			channel;
19803859Sml29623 	p_tx_rings_t 		tx_rings;
19813859Sml29623 	p_tx_ring_t 		*tx_desc_rings;
19823859Sml29623 	p_tx_mbox_areas_t 	tx_mbox_areas_p;
19833859Sml29623 	p_tx_mbox_t		*tx_mbox_p;
19843859Sml29623 	p_nxge_dma_pool_t	dma_buf_poolp;
19853859Sml29623 
19863859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma"));
19873859Sml29623 
19883859Sml29623 	dma_buf_poolp = nxgep->tx_buf_pool_p;
19893859Sml29623 	if (!dma_buf_poolp->buf_allocated) {
19903859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
19913859Sml29623 			"==> nxge_unmap_txdma: buf not allocated"));
19923859Sml29623 		return;
19933859Sml29623 	}
19943859Sml29623 
19953859Sml29623 	ndmas = dma_buf_poolp->ndmas;
19963859Sml29623 	if (!ndmas) {
19973859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
19983859Sml29623 			"<== nxge_unmap_txdma: no dma allocated"));
19993859Sml29623 		return;
20003859Sml29623 	}
20013859Sml29623 
20023859Sml29623 	tx_rings = nxgep->tx_rings;
20033859Sml29623 	tx_desc_rings = tx_rings->rings;
20043859Sml29623 	if (tx_rings == NULL) {
20053859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
20063859Sml29623 			"<== nxge_unmap_txdma: NULL ring pointer"));
20073859Sml29623 		return;
20083859Sml29623 	}
20093859Sml29623 
20103859Sml29623 	tx_desc_rings = tx_rings->rings;
20113859Sml29623 	if (tx_desc_rings == NULL) {
20123859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
20133859Sml29623 			"<== nxge_unmap_txdma: NULL ring pointers"));
20143859Sml29623 		return;
20153859Sml29623 	}
20163859Sml29623 
20173859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma: "
20183859Sml29623 		"tx_rings $%p tx_desc_rings $%p ndmas %d",
20193859Sml29623 		tx_rings, tx_desc_rings, ndmas));
20203859Sml29623 
20213859Sml29623 	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
20223859Sml29623 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
20233859Sml29623 
20243859Sml29623 	for (i = 0; i < ndmas; i++) {
20253859Sml29623 		channel = tx_desc_rings[i]->tdc;
20263859Sml29623 		(void) nxge_unmap_txdma_channel(nxgep, channel,
20273859Sml29623 				(p_tx_ring_t)tx_desc_rings[i],
20283859Sml29623 				(p_tx_mbox_t)tx_mbox_p[i]);
20293859Sml29623 	}
20303859Sml29623 
20313859Sml29623 	KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
20323859Sml29623 	KMEM_FREE(tx_rings, sizeof (tx_rings_t));
20333859Sml29623 	KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
20343859Sml29623 	KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
20353859Sml29623 
20363859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
20373859Sml29623 		"<== nxge_unmap_txdma"));
20383859Sml29623 }
20393859Sml29623 
/*
 * nxge_map_txdma_channel
 *
 * Map one TX DMA channel: first the transmit buffer blocks (the
 * per-buffer message ring), then the descriptor ring configuration and
 * mailbox.  Returns NXGE_OK, or the error from the buffer-ring mapping.
 */
static nxge_status_t
nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
	p_nxge_dma_common_t *dma_buf_p,
	p_tx_ring_t *tx_desc_p,
	uint32_t num_chunks,
	p_nxge_dma_common_t *dma_cntl_p,
	p_tx_mbox_t *tx_mbox_p)
{
	int	status = NXGE_OK;

	/*
	 * Set up and prepare buffer blocks, descriptors
	 * and mailbox.
	 */
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel (channel %d)", channel));
	/*
	 * Transmit buffer blocks
	 */
	status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
			dma_buf_p, tx_desc_p, num_chunks);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_map_txdma_channel (channel %d): "
			"map buffer failed 0x%x", channel, status));
		goto nxge_map_txdma_channel_exit;
	}

	/*
	 * Transmit block ring, and mailbox.
	 * (nxge_map_txdma_channel_cfg_ring returns void, so no failure
	 * path reaches the fail1 label below.)
	 */
	nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
					tx_mbox_p);

	goto nxge_map_txdma_channel_exit;

	/*
	 * NOTE(review): this label is currently unreachable — nothing
	 * jumps to it and the code above always branches to the exit
	 * label first.  It appears to be retained for future error
	 * handling of the cfg-ring step; verify before removing.
	 */
nxge_map_txdma_channel_fail1:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel: unmap buf"
		"(status 0x%x channel %d)",
		status, channel));
	nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);

nxge_map_txdma_channel_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"<== nxge_map_txdma_channel: "
		"(status 0x%x channel %d)",
		status, channel));

	return (status);
}
20913859Sml29623 
/*ARGSUSED*/
/*
 * Unmap one TX DMA channel: tear down the descriptor-ring
 * configuration and mailbox first, then release the buffer blocks.
 * This is the inverse of nxge_map_txdma_channel().
 */
static void
nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel,
	p_tx_ring_t tx_ring_p,
	p_tx_mbox_t tx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_unmap_txdma_channel (channel %d)", channel));
	/*
	 * unmap tx block ring, and mailbox.
	 */
	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep,
			tx_ring_p, tx_mbox_p);

	/* unmap buffer blocks */
	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, tx_ring_p);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
}
21113859Sml29623 
/*ARGSUSED*/
/*
 * nxge_map_txdma_channel_cfg_ring
 *
 * Build the software images of one channel's hardware state: carve the
 * descriptor ring and mailbox out of the control DMA area, compute the
 * values for the ring-configuration, kick, control/status, event-mask
 * and mailbox-address registers, and set defaults for the logical-page
 * and max-burst registers.  The allocated mailbox is returned through
 * *tx_mbox_p.  Note: register values are only staged here; they are
 * written to hardware elsewhere.
 */
static void
nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
	p_nxge_dma_common_t *dma_cntl_p,
	p_tx_ring_t tx_ring_p,
	p_tx_mbox_t *tx_mbox_p)
{
	p_tx_mbox_t 		mboxp;
	p_nxge_dma_common_t 	cntl_dmap;
	p_nxge_dma_common_t 	dmap;
	p_tx_rng_cfig_t		tx_ring_cfig_p;
	p_tx_ring_kick_t	tx_ring_kick_p;
	p_tx_cs_t		tx_cs_p;
	p_tx_dma_ent_msk_t	tx_evmask_p;
	p_txdma_mbh_t		mboxh_p;
	p_txdma_mbl_t		mboxl_p;
	uint64_t		tx_desc_len;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	/* Carve the descriptor ring out of the control DMA area. */
	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
			sizeof (tx_desc_t));
	/*
	 * Zero out transmit ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);
	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
	tx_cs_p = &(tx_ring_p->tx_cs);
	tx_evmask_p = &(tx_ring_p->tx_evmask);
	tx_ring_cfig_p->value = 0;
	tx_ring_kick_p->value = 0;
	tx_cs_p->value = 0;
	tx_evmask_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
		dma_channel,
		dmap->dma_cookie.dmac_laddress));

	/*
	 * Ring-configuration register: masked descriptor base address
	 * combined with the ring length (expressed in units of 8
	 * descriptors, hence the >> 3).
	 */
	tx_ring_cfig_p->value = 0;
	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
	tx_ring_cfig_p->value =
		(dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
		(tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
		dma_channel,
		tx_ring_cfig_p->value));

	/* Stage a channel reset in the control/status image. */
	tx_cs_p->bits.ldw.rst = 1;

	/* Map in mailbox */
	mboxp = (p_tx_mbox_t)
		KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
	mboxh_p->value = mboxl_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
		dmap->dma_cookie.dmac_laddress));

	/* Split the mailbox DMA address into its high/low register halves. */
	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
				TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);

	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
				TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
		dmap->dma_cookie.dmac_laddress));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
		"mbox $%p",
		mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));

	/* Clear the logical-page register images, then enable pages 0/1. */
	tx_ring_p->page_valid.value = 0;
	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
	tx_ring_p->page_hdl.value = 0;

	tx_ring_p->page_valid.bits.ldw.page0 = 1;
	tx_ring_p->page_valid.bits.ldw.page1 = 1;

	/* Default DMA max-burst setting for this channel. */
	tx_ring_p->max_burst.value = 0;
	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;

	/* Hand the newly-allocated mailbox back to the caller. */
	*tx_mbox_p = mboxp;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				"<== nxge_map_txdma_channel_cfg_ring"));
}
22123859Sml29623 
/*ARGSUSED*/
/*
 * Release the mailbox structure allocated by
 * nxge_map_txdma_channel_cfg_ring().  The register images live inside
 * tx_ring_p itself, so only the mailbox needs to be freed here.
 */
static void
nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
		tx_ring_p->tdc));

	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"<== nxge_unmap_txdma_channel_cfg_ring"));
}
22273859Sml29623 
22283859Sml29623 static nxge_status_t
22293859Sml29623 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
22303859Sml29623 	p_nxge_dma_common_t *dma_buf_p,
22313859Sml29623 	p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
22323859Sml29623 {
22333859Sml29623 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
22343859Sml29623 	p_nxge_dma_common_t 	dmap;
22353859Sml29623 	nxge_os_dma_handle_t	tx_buf_dma_handle;
22363859Sml29623 	p_tx_ring_t 		tx_ring_p;
22373859Sml29623 	p_tx_msg_t 		tx_msg_ring;
22383859Sml29623 	nxge_status_t		status = NXGE_OK;
22393859Sml29623 	int			ddi_status = DDI_SUCCESS;
22403859Sml29623 	int			i, j, index;
22413859Sml29623 	uint32_t		size, bsize;
22423859Sml29623 	uint32_t 		nblocks, nmsgs;
22433859Sml29623 
22443859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
22453859Sml29623 		"==> nxge_map_txdma_channel_buf_ring"));
22463859Sml29623 
22473859Sml29623 	dma_bufp = tmp_bufp = *dma_buf_p;
22483859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
22493859Sml29623 		" nxge_map_txdma_channel_buf_ring: channel %d to map %d "
22503859Sml29623 		"chunks bufp $%p",
22513859Sml29623 		channel, num_chunks, dma_bufp));
22523859Sml29623 
22533859Sml29623 	nmsgs = 0;
22543859Sml29623 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
22553859Sml29623 		nmsgs += tmp_bufp->nblocks;
22563859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
22573859Sml29623 			"==> nxge_map_txdma_channel_buf_ring: channel %d "
22583859Sml29623 			"bufp $%p nblocks %d nmsgs %d",
22593859Sml29623 			channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
22603859Sml29623 	}
22613859Sml29623 	if (!nmsgs) {
22623859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
22633859Sml29623 			"<== nxge_map_txdma_channel_buf_ring: channel %d "
22643859Sml29623 			"no msg blocks",
22653859Sml29623 			channel));
22663859Sml29623 		status = NXGE_ERROR;
22673859Sml29623 		goto nxge_map_txdma_channel_buf_ring_exit;
22683859Sml29623 	}
22693859Sml29623 
22703859Sml29623 	tx_ring_p = (p_tx_ring_t)
22713859Sml29623 		KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
22723859Sml29623 	MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
22733859Sml29623 		(void *)nxgep->interrupt_cookie);
22743952Sml29623 
22753952Sml29623 	tx_ring_p->nxgep = nxgep;
22763952Sml29623 	tx_ring_p->serial = nxge_serialize_create(nmsgs,
22773952Sml29623 				nxge_serial_tx, tx_ring_p);
22783859Sml29623 	/*
22793859Sml29623 	 * Allocate transmit message rings and handles for packets
22803859Sml29623 	 * not to be copied to premapped buffers.
22813859Sml29623 	 */
22823859Sml29623 	size = nmsgs * sizeof (tx_msg_t);
22833859Sml29623 	tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
22843859Sml29623 	for (i = 0; i < nmsgs; i++) {
22853859Sml29623 		ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
22863859Sml29623 				DDI_DMA_DONTWAIT, 0,
22873859Sml29623 				&tx_msg_ring[i].dma_handle);
22883859Sml29623 		if (ddi_status != DDI_SUCCESS) {
22893859Sml29623 			status |= NXGE_DDI_FAILED;
22903859Sml29623 			break;
22913859Sml29623 		}
22923859Sml29623 	}
22933859Sml29623 	if (i < nmsgs) {
22944185Sspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
22954185Sspeer 		    "Allocate handles failed."));
22963859Sml29623 		goto nxge_map_txdma_channel_buf_ring_fail1;
22973859Sml29623 	}
22983859Sml29623 
22993859Sml29623 	tx_ring_p->tdc = channel;
23003859Sml29623 	tx_ring_p->tx_msg_ring = tx_msg_ring;
23013859Sml29623 	tx_ring_p->tx_ring_size = nmsgs;
23023859Sml29623 	tx_ring_p->num_chunks = num_chunks;
23033859Sml29623 	if (!nxge_tx_intr_thres) {
23043859Sml29623 		nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
23053859Sml29623 	}
23063859Sml29623 	tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
23073859Sml29623 	tx_ring_p->rd_index = 0;
23083859Sml29623 	tx_ring_p->wr_index = 0;
23093859Sml29623 	tx_ring_p->ring_head.value = 0;
23103859Sml29623 	tx_ring_p->ring_kick_tail.value = 0;
23113859Sml29623 	tx_ring_p->descs_pending = 0;
23123859Sml29623 
23133859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23143859Sml29623 		"==> nxge_map_txdma_channel_buf_ring: channel %d "
23153859Sml29623 		"actual tx desc max %d nmsgs %d "
23163859Sml29623 		"(config nxge_tx_ring_size %d)",
23173859Sml29623 		channel, tx_ring_p->tx_ring_size, nmsgs,
23183859Sml29623 		nxge_tx_ring_size));
23193859Sml29623 
23203859Sml29623 	/*
23213859Sml29623 	 * Map in buffers from the buffer pool.
23223859Sml29623 	 */
23233859Sml29623 	index = 0;
23243859Sml29623 	bsize = dma_bufp->block_size;
23253859Sml29623 
23263859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
23273859Sml29623 		"dma_bufp $%p tx_rng_p $%p "
23283859Sml29623 		"tx_msg_rng_p $%p bsize %d",
23293859Sml29623 		dma_bufp, tx_ring_p, tx_msg_ring, bsize));
23303859Sml29623 
23313859Sml29623 	tx_buf_dma_handle = dma_bufp->dma_handle;
23323859Sml29623 	for (i = 0; i < num_chunks; i++, dma_bufp++) {
23333859Sml29623 		bsize = dma_bufp->block_size;
23343859Sml29623 		nblocks = dma_bufp->nblocks;
23353859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23363859Sml29623 			"==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
23373859Sml29623 			"size %d dma_bufp $%p",
23383859Sml29623 			i, sizeof (nxge_dma_common_t), dma_bufp));
23393859Sml29623 
23403859Sml29623 		for (j = 0; j < nblocks; j++) {
23413859Sml29623 			tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
23423859Sml29623 			dmap = &tx_msg_ring[index++].buf_dma;
23433859Sml29623 #ifdef TX_MEM_DEBUG
23443859Sml29623 			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23453859Sml29623 				"==> nxge_map_txdma_channel_buf_ring: j %d"
23463859Sml29623 				"dmap $%p", i, dmap));
23473859Sml29623 #endif
23483859Sml29623 			nxge_setup_dma_common(dmap, dma_bufp, 1,
23493859Sml29623 				bsize);
23503859Sml29623 		}
23513859Sml29623 	}
23523859Sml29623 
23533859Sml29623 	if (i < num_chunks) {
23544185Sspeer 		status = NXGE_ERROR;
23553859Sml29623 		goto nxge_map_txdma_channel_buf_ring_fail1;
23563859Sml29623 	}
23573859Sml29623 
23583859Sml29623 	*tx_desc_p = tx_ring_p;
23593859Sml29623 
23603859Sml29623 	goto nxge_map_txdma_channel_buf_ring_exit;
23613859Sml29623 
23623859Sml29623 nxge_map_txdma_channel_buf_ring_fail1:
23633952Sml29623 	if (tx_ring_p->serial) {
23643952Sml29623 		nxge_serialize_destroy(tx_ring_p->serial);
23653952Sml29623 		tx_ring_p->serial = NULL;
23663952Sml29623 	}
23673952Sml29623 
23683859Sml29623 	index--;
23693859Sml29623 	for (; index >= 0; index--) {
23704185Sspeer 		if (tx_msg_ring[index].dma_handle != NULL) {
23714185Sspeer 			ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
23723859Sml29623 		}
23733859Sml29623 	}
23743859Sml29623 	MUTEX_DESTROY(&tx_ring_p->lock);
23754185Sspeer 	KMEM_FREE(tx_msg_ring, size);
23763859Sml29623 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
23773859Sml29623 
23784185Sspeer 	status = NXGE_ERROR;
23794185Sspeer 
23803859Sml29623 nxge_map_txdma_channel_buf_ring_exit:
23813859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23823859Sml29623 		"<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
23833859Sml29623 
23843859Sml29623 	return (status);
23853859Sml29623 }
23863859Sml29623 
23873859Sml29623 /*ARGSUSED*/
23883859Sml29623 static void
23893859Sml29623 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
23903859Sml29623 {
23913859Sml29623 	p_tx_msg_t 		tx_msg_ring;
23923859Sml29623 	p_tx_msg_t 		tx_msg_p;
23933859Sml29623 	int			i;
23943859Sml29623 
23953859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23963859Sml29623 		"==> nxge_unmap_txdma_channel_buf_ring"));
23973859Sml29623 	if (tx_ring_p == NULL) {
23983859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
23993859Sml29623 			"<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
24003859Sml29623 		return;
24013859Sml29623 	}
24023859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24033859Sml29623 		"==> nxge_unmap_txdma_channel_buf_ring: channel %d",
24043859Sml29623 		tx_ring_p->tdc));
24053859Sml29623 
24063859Sml29623 	tx_msg_ring = tx_ring_p->tx_msg_ring;
24073859Sml29623 	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
24083859Sml29623 		tx_msg_p = &tx_msg_ring[i];
24093859Sml29623 		if (tx_msg_p->flags.dma_type == USE_DVMA) {
24103859Sml29623 			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24113859Sml29623 				"entry = %d",
24123859Sml29623 				i));
24133859Sml29623 			(void) dvma_unload(tx_msg_p->dvma_handle,
24143859Sml29623 				0, -1);
24153859Sml29623 			tx_msg_p->dvma_handle = NULL;
24163859Sml29623 			if (tx_ring_p->dvma_wr_index ==
24173859Sml29623 				tx_ring_p->dvma_wrap_mask) {
24183859Sml29623 				tx_ring_p->dvma_wr_index = 0;
24193859Sml29623 			} else {
24203859Sml29623 				tx_ring_p->dvma_wr_index++;
24213859Sml29623 			}
24223859Sml29623 			tx_ring_p->dvma_pending--;
24233859Sml29623 		} else if (tx_msg_p->flags.dma_type ==
24243859Sml29623 				USE_DMA) {
24253859Sml29623 			if (ddi_dma_unbind_handle
24263859Sml29623 				(tx_msg_p->dma_handle)) {
24273859Sml29623 				cmn_err(CE_WARN, "!nxge_unmap_tx_bug_ring: "
24283859Sml29623 					"ddi_dma_unbind_handle "
24293859Sml29623 					"failed.");
24303859Sml29623 			}
24313859Sml29623 		}
24323859Sml29623 
24333859Sml29623 		if (tx_msg_p->tx_message != NULL) {
24343859Sml29623 			freemsg(tx_msg_p->tx_message);
24353859Sml29623 			tx_msg_p->tx_message = NULL;
24363859Sml29623 		}
24373859Sml29623 	}
24383859Sml29623 
24393859Sml29623 	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
24403859Sml29623 		if (tx_msg_ring[i].dma_handle != NULL) {
24413859Sml29623 			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
24423859Sml29623 		}
24433859Sml29623 	}
24443859Sml29623 
24453952Sml29623 	if (tx_ring_p->serial) {
24463952Sml29623 		nxge_serialize_destroy(tx_ring_p->serial);
24473952Sml29623 		tx_ring_p->serial = NULL;
24483952Sml29623 	}
24493952Sml29623 
24503859Sml29623 	MUTEX_DESTROY(&tx_ring_p->lock);
24513859Sml29623 	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
24523859Sml29623 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
24533859Sml29623 
24543859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24553859Sml29623 		"<== nxge_unmap_txdma_channel_buf_ring"));
24563859Sml29623 }
24573859Sml29623 
24583859Sml29623 static nxge_status_t
24593859Sml29623 nxge_txdma_hw_start(p_nxge_t nxgep)
24603859Sml29623 {
24613859Sml29623 	int			i, ndmas;
24623859Sml29623 	uint16_t		channel;
24633859Sml29623 	p_tx_rings_t 		tx_rings;
24643859Sml29623 	p_tx_ring_t 		*tx_desc_rings;
24653859Sml29623 	p_tx_mbox_areas_t 	tx_mbox_areas_p;
24663859Sml29623 	p_tx_mbox_t		*tx_mbox_p;
24673859Sml29623 	nxge_status_t		status = NXGE_OK;
24683859Sml29623 
24693859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
24703859Sml29623 
24713859Sml29623 	tx_rings = nxgep->tx_rings;
24723859Sml29623 	if (tx_rings == NULL) {
24733859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
24743859Sml29623 			"<== nxge_txdma_hw_start: NULL ring pointer"));
24753859Sml29623 		return (NXGE_ERROR);
24763859Sml29623 	}
24773859Sml29623 	tx_desc_rings = tx_rings->rings;
24783859Sml29623 	if (tx_desc_rings == NULL) {
24793859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
24803859Sml29623 			"<== nxge_txdma_hw_start: NULL ring pointers"));
24813859Sml29623 		return (NXGE_ERROR);
24823859Sml29623 	}
24833859Sml29623 
24843859Sml29623 	ndmas = tx_rings->ndmas;
24853859Sml29623 	if (!ndmas) {
24863859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
24873859Sml29623 			"<== nxge_txdma_hw_start: no dma channel allocated"));
24883859Sml29623 		return (NXGE_ERROR);
24893859Sml29623 	}
24903859Sml29623 
24913859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
24923859Sml29623 		"tx_rings $%p tx_desc_rings $%p ndmas %d",
24933859Sml29623 		tx_rings, tx_desc_rings, ndmas));
24943859Sml29623 
24953859Sml29623 	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
24963859Sml29623 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
24973859Sml29623 
24983859Sml29623 	for (i = 0; i < ndmas; i++) {
24993859Sml29623 		channel = tx_desc_rings[i]->tdc,
25003859Sml29623 		status = nxge_txdma_start_channel(nxgep, channel,
25013859Sml29623 				(p_tx_ring_t)tx_desc_rings[i],
25023859Sml29623 				(p_tx_mbox_t)tx_mbox_p[i]);
25033859Sml29623 		if (status != NXGE_OK) {
25043859Sml29623 			goto nxge_txdma_hw_start_fail1;
25053859Sml29623 		}
25063859Sml29623 	}
25073859Sml29623 
25083859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
25093859Sml29623 		"tx_rings $%p rings $%p",
25103859Sml29623 		nxgep->tx_rings, nxgep->tx_rings->rings));
25113859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
25123859Sml29623 		"tx_rings $%p tx_desc_rings $%p",
25133859Sml29623 		nxgep->tx_rings, tx_desc_rings));
25143859Sml29623 
25153859Sml29623 	goto nxge_txdma_hw_start_exit;
25163859Sml29623 
25173859Sml29623 nxge_txdma_hw_start_fail1:
25183859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25193859Sml29623 		"==> nxge_txdma_hw_start: disable "
25203859Sml29623 		"(status 0x%x channel %d i %d)", status, channel, i));
25213859Sml29623 	for (; i >= 0; i--) {
25223859Sml29623 		channel = tx_desc_rings[i]->tdc,
25233859Sml29623 		(void) nxge_txdma_stop_channel(nxgep, channel,
25243859Sml29623 			(p_tx_ring_t)tx_desc_rings[i],
25253859Sml29623 			(p_tx_mbox_t)tx_mbox_p[i]);
25263859Sml29623 	}
25273859Sml29623 
25283859Sml29623 nxge_txdma_hw_start_exit:
25293859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25303859Sml29623 		"==> nxge_txdma_hw_start: (status 0x%x)", status));
25313859Sml29623 
25323859Sml29623 	return (status);
25333859Sml29623 }
25343859Sml29623 
25353859Sml29623 static void
25363859Sml29623 nxge_txdma_hw_stop(p_nxge_t nxgep)
25373859Sml29623 {
25383859Sml29623 	int			i, ndmas;
25393859Sml29623 	uint16_t		channel;
25403859Sml29623 	p_tx_rings_t 		tx_rings;
25413859Sml29623 	p_tx_ring_t 		*tx_desc_rings;
25423859Sml29623 	p_tx_mbox_areas_t 	tx_mbox_areas_p;
25433859Sml29623 	p_tx_mbox_t		*tx_mbox_p;
25443859Sml29623 
25453859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop"));
25463859Sml29623 
25473859Sml29623 	tx_rings = nxgep->tx_rings;
25483859Sml29623 	if (tx_rings == NULL) {
25493859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
25503859Sml29623 			"<== nxge_txdma_hw_stop: NULL ring pointer"));
25513859Sml29623 		return;
25523859Sml29623 	}
25533859Sml29623 	tx_desc_rings = tx_rings->rings;
25543859Sml29623 	if (tx_desc_rings == NULL) {
25553859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
25563859Sml29623 			"<== nxge_txdma_hw_stop: NULL ring pointers"));
25573859Sml29623 		return;
25583859Sml29623 	}
25593859Sml29623 
25603859Sml29623 	ndmas = tx_rings->ndmas;
25613859Sml29623 	if (!ndmas) {
25623859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
25633859Sml29623 			"<== nxge_txdma_hw_stop: no dma channel allocated"));
25643859Sml29623 		return;
25653859Sml29623 	}
25663859Sml29623 
25673859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: "
25683859Sml29623 		"tx_rings $%p tx_desc_rings $%p",
25693859Sml29623 		tx_rings, tx_desc_rings));
25703859Sml29623 
25713859Sml29623 	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
25723859Sml29623 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
25733859Sml29623 
25743859Sml29623 	for (i = 0; i < ndmas; i++) {
25753859Sml29623 		channel = tx_desc_rings[i]->tdc;
25763859Sml29623 		(void) nxge_txdma_stop_channel(nxgep, channel,
25773859Sml29623 				(p_tx_ring_t)tx_desc_rings[i],
25783859Sml29623 				(p_tx_mbox_t)tx_mbox_p[i]);
25793859Sml29623 	}
25803859Sml29623 
25813859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: "
25823859Sml29623 		"tx_rings $%p tx_desc_rings $%p",
25833859Sml29623 		tx_rings, tx_desc_rings));
25843859Sml29623 
25853859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_hw_stop"));
25863859Sml29623 }
25873859Sml29623 
25883859Sml29623 static nxge_status_t
25893859Sml29623 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
25903859Sml29623     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
25913859Sml29623 
25923859Sml29623 {
25933859Sml29623 	nxge_status_t		status = NXGE_OK;
25943859Sml29623 
25953859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25963859Sml29623 		"==> nxge_txdma_start_channel (channel %d)", channel));
25973859Sml29623 	/*
25983859Sml29623 	 * TXDMA/TXC must be in stopped state.
25993859Sml29623 	 */
26003859Sml29623 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
26013859Sml29623 
26023859Sml29623 	/*
26033859Sml29623 	 * Reset TXDMA channel
26043859Sml29623 	 */
26053859Sml29623 	tx_ring_p->tx_cs.value = 0;
26063859Sml29623 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
26073859Sml29623 	status = nxge_reset_txdma_channel(nxgep, channel,
26083859Sml29623 			tx_ring_p->tx_cs.value);
26093859Sml29623 	if (status != NXGE_OK) {
26103859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
26113859Sml29623 			"==> nxge_txdma_start_channel (channel %d)"
26123859Sml29623 			" reset channel failed 0x%x", channel, status));
26133859Sml29623 		goto nxge_txdma_start_channel_exit;
26143859Sml29623 	}
26153859Sml29623 
26163859Sml29623 	/*
26173859Sml29623 	 * Initialize the TXDMA channel specific FZC control
26183859Sml29623 	 * configurations. These FZC registers are pertaining
26193859Sml29623 	 * to each TX channel (i.e. logical pages).
26203859Sml29623 	 */
26213859Sml29623 	status = nxge_init_fzc_txdma_channel(nxgep, channel,
26223859Sml29623 			tx_ring_p, tx_mbox_p);
26233859Sml29623 	if (status != NXGE_OK) {
26243859Sml29623 		goto nxge_txdma_start_channel_exit;
26253859Sml29623 	}
26263859Sml29623 
26273859Sml29623 	/*
26283859Sml29623 	 * Initialize the event masks.
26293859Sml29623 	 */
26303859Sml29623 	tx_ring_p->tx_evmask.value = 0;
26313859Sml29623 	status = nxge_init_txdma_channel_event_mask(nxgep,
26323859Sml29623 			channel, &tx_ring_p->tx_evmask);
26333859Sml29623 	if (status != NXGE_OK) {
26343859Sml29623 		goto nxge_txdma_start_channel_exit;
26353859Sml29623 	}
26363859Sml29623 
26373859Sml29623 	/*
26383859Sml29623 	 * Load TXDMA descriptors, buffers, mailbox,
26393859Sml29623 	 * initialise the DMA channels and
26403859Sml29623 	 * enable each DMA channel.
26413859Sml29623 	 */
26423859Sml29623 	status = nxge_enable_txdma_channel(nxgep, channel,
26433859Sml29623 			tx_ring_p, tx_mbox_p);
26443859Sml29623 	if (status != NXGE_OK) {
26453859Sml29623 		goto nxge_txdma_start_channel_exit;
26463859Sml29623 	}
26473859Sml29623 
26483859Sml29623 nxge_txdma_start_channel_exit:
26493859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
26503859Sml29623 
26513859Sml29623 	return (status);
26523859Sml29623 }
26533859Sml29623 
26543859Sml29623 /*ARGSUSED*/
26553859Sml29623 static nxge_status_t
26563859Sml29623 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel,
26573859Sml29623 	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
26583859Sml29623 {
26593859Sml29623 	int		status = NXGE_OK;
26603859Sml29623 
26613859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26623859Sml29623 		"==> nxge_txdma_stop_channel: channel %d", channel));
26633859Sml29623 
26643859Sml29623 	/*
26653859Sml29623 	 * Stop (disable) TXDMA and TXC (if stop bit is set
26663859Sml29623 	 * and STOP_N_GO bit not set, the TXDMA reset state will
26673859Sml29623 	 * not be set if reset TXDMA.
26683859Sml29623 	 */
26693859Sml29623 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
26703859Sml29623 
26713859Sml29623 	/*
26723859Sml29623 	 * Reset TXDMA channel
26733859Sml29623 	 */
26743859Sml29623 	tx_ring_p->tx_cs.value = 0;
26753859Sml29623 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
26763859Sml29623 	status = nxge_reset_txdma_channel(nxgep, channel,
26773859Sml29623 			tx_ring_p->tx_cs.value);
26783859Sml29623 	if (status != NXGE_OK) {
26793859Sml29623 		goto nxge_txdma_stop_channel_exit;
26803859Sml29623 	}
26813859Sml29623 
26823859Sml29623 #ifdef HARDWARE_REQUIRED
26833859Sml29623 	/* Set up the interrupt event masks. */
26843859Sml29623 	tx_ring_p->tx_evmask.value = 0;
26853859Sml29623 	status = nxge_init_txdma_channel_event_mask(nxgep,
26863859Sml29623 			channel, &tx_ring_p->tx_evmask);
26873859Sml29623 	if (status != NXGE_OK) {
26883859Sml29623 		goto nxge_txdma_stop_channel_exit;
26893859Sml29623 	}
26903859Sml29623 
26913859Sml29623 	/* Initialize the DMA control and status register */
26923859Sml29623 	tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
26933859Sml29623 	status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
26943859Sml29623 			tx_ring_p->tx_cs.value);
26953859Sml29623 	if (status != NXGE_OK) {
26963859Sml29623 		goto nxge_txdma_stop_channel_exit;
26973859Sml29623 	}
26983859Sml29623 
26993859Sml29623 	/* Disable channel */
27003859Sml29623 	status = nxge_disable_txdma_channel(nxgep, channel,
27013859Sml29623 			tx_ring_p, tx_mbox_p);
27023859Sml29623 	if (status != NXGE_OK) {
27033859Sml29623 		goto nxge_txdma_start_channel_exit;
27043859Sml29623 	}
27053859Sml29623 
27063859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27073859Sml29623 		"==> nxge_txdma_stop_channel: event done"));
27083859Sml29623 
27093859Sml29623 #endif
27103859Sml29623 
27113859Sml29623 nxge_txdma_stop_channel_exit:
27123859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
27133859Sml29623 	return (status);
27143859Sml29623 }
27153859Sml29623 
27163859Sml29623 static p_tx_ring_t
27173859Sml29623 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
27183859Sml29623 {
27193859Sml29623 	int			index, ndmas;
27203859Sml29623 	uint16_t		tdc;
27213859Sml29623 	p_tx_rings_t 		tx_rings;
27223859Sml29623 
27233859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
27243859Sml29623 
27253859Sml29623 	tx_rings = nxgep->tx_rings;
27263859Sml29623 	if (tx_rings == NULL) {
27273859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
27283859Sml29623 			"<== nxge_txdma_get_ring: NULL ring pointer"));
27293859Sml29623 		return (NULL);
27303859Sml29623 	}
27313859Sml29623 
27323859Sml29623 	ndmas = tx_rings->ndmas;
27333859Sml29623 	if (!ndmas) {
27343859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
27353859Sml29623 			"<== nxge_txdma_get_ring: no channel allocated"));
27363859Sml29623 		return (NULL);
27373859Sml29623 	}
27383859Sml29623 
27393859Sml29623 	if (tx_rings->rings == NULL) {
27403859Sml29623 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
27413859Sml29623 			"<== nxge_txdma_get_ring: NULL rings pointer"));
27423859Sml29623 		return (NULL);
27433859Sml29623 	}
27443859Sml29623 
27453859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_ring: "
27463859Sml29623 		"tx_rings $%p tx_desc_rings $%p ndmas %d",
27473859Sml29623 		tx_rings, tx_rings, ndmas));
27483859Sml29623 
27493859Sml29623 	for (index = 0; index < ndmas; index++) {
27503859Sml29623 		tdc = tx_rings->rings[index]->tdc;
27513859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27523859Sml29623 			"==> nxge_fixup_txdma_rings: channel %d", tdc));
27533859Sml29623 		if (channel == tdc) {
27543859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
27553859Sml29623 				"<== nxge_txdma_get_ring: tdc %d "
27563859Sml29623 				"ring $%p",
27573859Sml29623 				tdc, tx_rings->rings[index]));
27583859Sml29623 			return (p_tx_ring_t)(tx_rings->rings[index]);
27593859Sml29623 		}
27603859Sml29623 	}
27613859Sml29623 
27623859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring"));
27633859Sml29623 	return (NULL);
27643859Sml29623 }
27653859Sml29623 
27663859Sml29623 static p_tx_mbox_t
27673859Sml29623 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
27683859Sml29623 {
27693859Sml29623 	int			index, tdc, ndmas;
27703859Sml29623 	p_tx_rings_t 		tx_rings;
27713859Sml29623 	p_tx_mbox_areas_t 	tx_mbox_areas_p;
27723859Sml29623 	p_tx_mbox_t		*tx_mbox_p;
27733859Sml29623 
27743859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
27753859Sml29623 
27763859Sml29623 	tx_rings = nxgep->tx_rings;
27773859Sml29623 	if (tx_rings == NULL) {
27783859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27793859Sml29623 			"<== nxge_txdma_get_mbox: NULL ring pointer"));
27803859Sml29623 		return (NULL);
27813859Sml29623 	}
27823859Sml29623 
27833859Sml29623 	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
27843859Sml29623 	if (tx_mbox_areas_p == NULL) {
27853859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27863859Sml29623 			"<== nxge_txdma_get_mbox: NULL mbox pointer"));
27873859Sml29623 		return (NULL);
27883859Sml29623 	}
27893859Sml29623 
27903859Sml29623 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
27913859Sml29623 
27923859Sml29623 	ndmas = tx_rings->ndmas;
27933859Sml29623 	if (!ndmas) {
27943859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27953859Sml29623 			"<== nxge_txdma_get_mbox: no channel allocated"));
27963859Sml29623 		return (NULL);
27973859Sml29623 	}
27983859Sml29623 
27993859Sml29623 	if (tx_rings->rings == NULL) {
28003859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
28013859Sml29623 			"<== nxge_txdma_get_mbox: NULL rings pointer"));
28023859Sml29623 		return (NULL);
28033859Sml29623 	}
28043859Sml29623 
28053859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_mbox: "
28063859Sml29623 		"tx_rings $%p tx_desc_rings $%p ndmas %d",
28073859Sml29623 		tx_rings, tx_rings, ndmas));
28083859Sml29623 
28093859Sml29623 	for (index = 0; index < ndmas; index++) {
28103859Sml29623 		tdc = tx_rings->rings[index]->tdc;
28113859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
28123859Sml29623 			"==> nxge_txdma_get_mbox: channel %d", tdc));
28133859Sml29623 		if (channel == tdc) {
28143859Sml29623 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
28153859Sml29623 				"<== nxge_txdma_get_mbox: tdc %d "
28163859Sml29623 				"ring $%p",
28173859Sml29623 				tdc, tx_rings->rings[index]));
28183859Sml29623 			return (p_tx_mbox_t)(tx_mbox_p[index]);
28193859Sml29623 		}
28203859Sml29623 	}
28213859Sml29623 
28223859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox"));
28233859Sml29623 	return (NULL);
28243859Sml29623 }
28253859Sml29623 
28263859Sml29623 /*ARGSUSED*/
28273859Sml29623 static nxge_status_t
28283859Sml29623 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
28293859Sml29623 {
28303859Sml29623 	npi_handle_t		handle;
28313859Sml29623 	npi_status_t		rs;
28323859Sml29623 	uint8_t			channel;
28333859Sml29623 	p_tx_ring_t 		*tx_rings;
28343859Sml29623 	p_tx_ring_t 		tx_ring_p;
28353859Sml29623 	p_nxge_tx_ring_stats_t	tdc_stats;
28363859Sml29623 	boolean_t		txchan_fatal = B_FALSE;
28373859Sml29623 	nxge_status_t		status = NXGE_OK;
28383859Sml29623 	tdmc_inj_par_err_t	par_err;
28393859Sml29623 	uint32_t		value;
28403859Sml29623 
28413859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_tx_err_evnts"));
28423859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
28433859Sml29623 	channel = ldvp->channel;
28443859Sml29623 
28453859Sml29623 	tx_rings = nxgep->tx_rings->rings;
28463859Sml29623 	tx_ring_p = tx_rings[index];
28473859Sml29623 	tdc_stats = tx_ring_p->tdc_stats;
28483859Sml29623 	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
28493859Sml29623 		(cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
28503859Sml29623 		(cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
28513859Sml29623 		if ((rs = npi_txdma_ring_error_get(handle, channel,
28523859Sml29623 					&tdc_stats->errlog)) != NPI_SUCCESS)
28533859Sml29623 			return (NXGE_ERROR | rs);
28543859Sml29623 	}
28553859Sml29623 
28563859Sml29623 	if (cs.bits.ldw.mbox_err) {
28573859Sml29623 		tdc_stats->mbox_err++;
28583859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
28593859Sml29623 					NXGE_FM_EREPORT_TDMC_MBOX_ERR);
28603859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
28613859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
28623859Sml29623 			"fatal error: mailbox", channel));
28633859Sml29623 		txchan_fatal = B_TRUE;
28643859Sml29623 	}
28653859Sml29623 	if (cs.bits.ldw.pkt_size_err) {
28663859Sml29623 		tdc_stats->pkt_size_err++;
28673859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
28683859Sml29623 					NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
28693859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
28703859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
28713859Sml29623 			"fatal error: pkt_size_err", channel));
28723859Sml29623 		txchan_fatal = B_TRUE;
28733859Sml29623 	}
28743859Sml29623 	if (cs.bits.ldw.tx_ring_oflow) {
28753859Sml29623 		tdc_stats->tx_ring_oflow++;
28763859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
28773859Sml29623 					NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
28783859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
28793859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
28803859Sml29623 			"fatal error: tx_ring_oflow", channel));
28813859Sml29623 		txchan_fatal = B_TRUE;
28823859Sml29623 	}
28833859Sml29623 	if (cs.bits.ldw.pref_buf_par_err) {
28843859Sml29623 		tdc_stats->pre_buf_par_err++;
28853859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
28863859Sml29623 					NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
28873859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
28883859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
28893859Sml29623 			"fatal error: pre_buf_par_err", channel));
28903859Sml29623 		/* Clear error injection source for parity error */
28913859Sml29623 		(void) npi_txdma_inj_par_error_get(handle, &value);
28923859Sml29623 		par_err.value = value;
28933859Sml29623 		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
28943859Sml29623 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
28953859Sml29623 		txchan_fatal = B_TRUE;
28963859Sml29623 	}
28973859Sml29623 	if (cs.bits.ldw.nack_pref) {
28983859Sml29623 		tdc_stats->nack_pref++;
28993859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
29003859Sml29623 					NXGE_FM_EREPORT_TDMC_NACK_PREF);
29013859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29023859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
29033859Sml29623 			"fatal error: nack_pref", channel));
29043859Sml29623 		txchan_fatal = B_TRUE;
29053859Sml29623 	}
29063859Sml29623 	if (cs.bits.ldw.nack_pkt_rd) {
29073859Sml29623 		tdc_stats->nack_pkt_rd++;
29083859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
29093859Sml29623 					NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
29103859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29113859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
29123859Sml29623 			"fatal error: nack_pkt_rd", channel));
29133859Sml29623 		txchan_fatal = B_TRUE;
29143859Sml29623 	}
29153859Sml29623 	if (cs.bits.ldw.conf_part_err) {
29163859Sml29623 		tdc_stats->conf_part_err++;
29173859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
29183859Sml29623 					NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
29193859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29203859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
29213859Sml29623 			"fatal error: config_partition_err", channel));
29223859Sml29623 		txchan_fatal = B_TRUE;
29233859Sml29623 	}
29243859Sml29623 	if (cs.bits.ldw.pkt_prt_err) {
29253859Sml29623 		tdc_stats->pkt_part_err++;
29263859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
29273859Sml29623 					NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
29283859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29293859Sml29623 			"==> nxge_tx_err_evnts(channel %d): "
29303859Sml29623 			"fatal error: pkt_prt_err", channel));
29313859Sml29623 		txchan_fatal = B_TRUE;
29323859Sml29623 	}
29333859Sml29623 
29343859Sml29623 	/* Clear error injection source in case this is an injected error */
29353859Sml29623 	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
29363859Sml29623 
29373859Sml29623 	if (txchan_fatal) {
29383859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29393859Sml29623 			" nxge_tx_err_evnts: "
29403859Sml29623 			" fatal error on channel %d cs 0x%llx\n",
29413859Sml29623 			channel, cs.value));
29423859Sml29623 		status = nxge_txdma_fatal_err_recover(nxgep, channel,
29433859Sml29623 								tx_ring_p);
29443859Sml29623 		if (status == NXGE_OK) {
29453859Sml29623 			FM_SERVICE_RESTORED(nxgep);
29463859Sml29623 		}
29473859Sml29623 	}
29483859Sml29623 
29493859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_tx_err_evnts"));
29503859Sml29623 
29513859Sml29623 	return (status);
29523859Sml29623 }
29533859Sml29623 
29543859Sml29623 static nxge_status_t
29553859Sml29623 nxge_txdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel,
29563859Sml29623 						p_tx_ring_t tx_ring_p)
29573859Sml29623 {
29583859Sml29623 	npi_handle_t	handle;
29593859Sml29623 	npi_status_t	rs = NPI_SUCCESS;
29603859Sml29623 	p_tx_mbox_t	tx_mbox_p;
29613859Sml29623 	nxge_status_t	status = NXGE_OK;
29623859Sml29623 
29633859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
29643859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29653859Sml29623 			"Recovering from TxDMAChannel#%d error...", channel));
29663859Sml29623 
29673859Sml29623 	/*
29683859Sml29623 	 * Stop the dma channel waits for the stop done.
29693859Sml29623 	 * If the stop done bit is not set, then create
29703859Sml29623 	 * an error.
29713859Sml29623 	 */
29723859Sml29623 
29733859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
29743859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
29753859Sml29623 	MUTEX_ENTER(&tx_ring_p->lock);
29763859Sml29623 	rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
29773859Sml29623 	if (rs != NPI_SUCCESS) {
29783859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29793859Sml29623 			"==> nxge_txdma_fatal_err_recover (channel %d): "
29803859Sml29623 			"stop failed ", channel));
29813859Sml29623 		goto fail;
29823859Sml29623 	}
29833859Sml29623 
29843859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
29853859Sml29623 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
29863859Sml29623 
29873859Sml29623 	/*
29883859Sml29623 	 * Reset TXDMA channel
29893859Sml29623 	 */
29903859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
29913859Sml29623 	if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
29923859Sml29623 						NPI_SUCCESS) {
29933859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29943859Sml29623 			"==> nxge_txdma_fatal_err_recover (channel %d)"
29953859Sml29623 			" reset channel failed 0x%x", channel, rs));
29963859Sml29623 		goto fail;
29973859Sml29623 	}
29983859Sml29623 
29993859Sml29623 	/*
30003859Sml29623 	 * Reset the tail (kick) register to 0.
30013859Sml29623 	 * (Hardware will not reset it. Tx overflow fatal
30023859Sml29623 	 * error if tail is not set to 0 after reset!
30033859Sml29623 	 */
30043859Sml29623 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
30053859Sml29623 
30063859Sml29623 	/* Restart TXDMA channel */
30073859Sml29623 
30083859Sml29623 	/*
30093859Sml29623 	 * Initialize the TXDMA channel specific FZC control
30103859Sml29623 	 * configurations. These FZC registers are pertaining
30113859Sml29623 	 * to each TX channel (i.e. logical pages).
30123859Sml29623 	 */
30133859Sml29623 	tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
30143859Sml29623 
30153859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
30163859Sml29623 	status = nxge_init_fzc_txdma_channel(nxgep, channel,
30173859Sml29623 						tx_ring_p, tx_mbox_p);
30183859Sml29623 	if (status != NXGE_OK)
30193859Sml29623 		goto fail;
30203859Sml29623 
30213859Sml29623 	/*
30223859Sml29623 	 * Initialize the event masks.
30233859Sml29623 	 */
30243859Sml29623 	tx_ring_p->tx_evmask.value = 0;
30253859Sml29623 	status = nxge_init_txdma_channel_event_mask(nxgep, channel,
30263859Sml29623 							&tx_ring_p->tx_evmask);
30273859Sml29623 	if (status != NXGE_OK)
30283859Sml29623 		goto fail;
30293859Sml29623 
30303859Sml29623 	tx_ring_p->wr_index_wrap = B_FALSE;
30313859Sml29623 	tx_ring_p->wr_index = 0;
30323859Sml29623 	tx_ring_p->rd_index = 0;
30333859Sml29623 
30343859Sml29623 	/*
30353859Sml29623 	 * Load TXDMA descriptors, buffers, mailbox,
30363859Sml29623 	 * initialise the DMA channels and
30373859Sml29623 	 * enable each DMA channel.
30383859Sml29623 	 */
30393859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
30403859Sml29623 	status = nxge_enable_txdma_channel(nxgep, channel,
30413859Sml29623 						tx_ring_p, tx_mbox_p);
30423859Sml29623 	MUTEX_EXIT(&tx_ring_p->lock);
30433859Sml29623 	if (status != NXGE_OK)
30443859Sml29623 		goto fail;
30453859Sml29623 
30463859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30473859Sml29623 			"Recovery Successful, TxDMAChannel#%d Restored",
30483859Sml29623 			channel));
30493859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
30503859Sml29623 
30513859Sml29623 	return (NXGE_OK);
30523859Sml29623 
30533859Sml29623 fail:
30543859Sml29623 	MUTEX_EXIT(&tx_ring_p->lock);
30553859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
30563859Sml29623 		"nxge_txdma_fatal_err_recover (channel %d): "
30573859Sml29623 		"failed to recover this txdma channel", channel));
30583859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
30593859Sml29623 
30603859Sml29623 	return (status);
30613859Sml29623 }
30623859Sml29623 
30633859Sml29623 nxge_status_t
30643859Sml29623 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
30653859Sml29623 {
30663859Sml29623 	npi_handle_t	handle;
30673859Sml29623 	npi_status_t	rs = NPI_SUCCESS;
30683859Sml29623 	nxge_status_t	status = NXGE_OK;
30693859Sml29623 	p_tx_ring_t 	*tx_desc_rings;
30703859Sml29623 	p_tx_rings_t	tx_rings;
30713859Sml29623 	p_tx_ring_t	tx_ring_p;
30723859Sml29623 	p_tx_mbox_t	tx_mbox_p;
30733859Sml29623 	int		i, ndmas;
30743859Sml29623 	uint16_t	channel;
30753859Sml29623 
30763859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
30773859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30783859Sml29623 			"Recovering from TxPort error..."));
30793859Sml29623 
30803859Sml29623 	/*
30813859Sml29623 	 * Stop the dma channel waits for the stop done.
30823859Sml29623 	 * If the stop done bit is not set, then create
30833859Sml29623 	 * an error.
30843859Sml29623 	 */
30853859Sml29623 
30863859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
30873859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort stop all DMA channels..."));
30883859Sml29623 
30893859Sml29623 	tx_rings = nxgep->tx_rings;
30903859Sml29623 	tx_desc_rings = tx_rings->rings;
30913859Sml29623 	ndmas = tx_rings->ndmas;
30923859Sml29623 
30933859Sml29623 	for (i = 0; i < ndmas; i++) {
30943859Sml29623 		if (tx_desc_rings[i] == NULL) {
30953859Sml29623 			continue;
30963859Sml29623 		}
30973859Sml29623 		tx_ring_p = tx_rings->rings[i];
30983859Sml29623 		MUTEX_ENTER(&tx_ring_p->lock);
30993859Sml29623 	}
31003859Sml29623 
31013859Sml29623 	for (i = 0; i < ndmas; i++) {
31023859Sml29623 		if (tx_desc_rings[i] == NULL) {
31033859Sml29623 			continue;
31043859Sml29623 		}
31053859Sml29623 		channel = tx_desc_rings[i]->tdc;
31063859Sml29623 		tx_ring_p = tx_rings->rings[i];
31073859Sml29623 		rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
31083859Sml29623 		if (rs != NPI_SUCCESS) {
31093859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31103859Sml29623 			"==> nxge_txdma_fatal_err_recover (channel %d): "
31113859Sml29623 			"stop failed ", channel));
31123859Sml29623 			goto fail;
31133859Sml29623 		}
31143859Sml29623 	}
31153859Sml29623 
31163859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reclaim all DMA channels..."));
31173859Sml29623 
31183859Sml29623 	for (i = 0; i < ndmas; i++) {
31193859Sml29623 		if (tx_desc_rings[i] == NULL) {
31203859Sml29623 			continue;
31213859Sml29623 		}
31223859Sml29623 		tx_ring_p = tx_rings->rings[i];
31233859Sml29623 		(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
31243859Sml29623 	}
31253859Sml29623 
31263859Sml29623 	/*
31273859Sml29623 	 * Reset TXDMA channel
31283859Sml29623 	 */
31293859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reset all DMA channels..."));
31303859Sml29623 
31313859Sml29623 	for (i = 0; i < ndmas; i++) {
31323859Sml29623 		if (tx_desc_rings[i] == NULL) {
31333859Sml29623 			continue;
31343859Sml29623 		}
31353859Sml29623 		channel = tx_desc_rings[i]->tdc;
31363859Sml29623 		tx_ring_p = tx_rings->rings[i];
31373859Sml29623 		if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET,
31383859Sml29623 				channel)) != NPI_SUCCESS) {
31393859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31403859Sml29623 				"==> nxge_txdma_fatal_err_recover (channel %d)"
31413859Sml29623 				" reset channel failed 0x%x", channel, rs));
31423859Sml29623 			goto fail;
31433859Sml29623 		}
31443859Sml29623 
31453859Sml29623 		/*
31463859Sml29623 		 * Reset the tail (kick) register to 0.
31473859Sml29623 		 * (Hardware will not reset it. Tx overflow fatal
31483859Sml29623 		 * error if tail is not set to 0 after reset!
31493859Sml29623 		 */
31503859Sml29623 
31513859Sml29623 		TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
31523859Sml29623 
31533859Sml29623 	}
31543859Sml29623 
31553859Sml29623 	/*
31563859Sml29623 	 * Initialize the TXDMA channel specific FZC control
31573859Sml29623 	 * configurations. These FZC registers are pertaining
31583859Sml29623 	 * to each TX channel (i.e. logical pages).
31593859Sml29623 	 */
31603859Sml29623 
31613859Sml29623 	/* Restart TXDMA channels */
31623859Sml29623 
31633859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-start all DMA channels..."));
31643859Sml29623 
31653859Sml29623 	for (i = 0; i < ndmas; i++) {
31663859Sml29623 		if (tx_desc_rings[i] == NULL) {
31673859Sml29623 			continue;
31683859Sml29623 		}
31693859Sml29623 		channel = tx_desc_rings[i]->tdc;
31703859Sml29623 		tx_ring_p = tx_rings->rings[i];
31713859Sml29623 		tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
31723859Sml29623 		status = nxge_init_fzc_txdma_channel(nxgep, channel,
31733859Sml29623 						tx_ring_p, tx_mbox_p);
31743859Sml29623 		tx_ring_p->tx_evmask.value = 0;
31753859Sml29623 		/*
31763859Sml29623 		 * Initialize the event masks.
31773859Sml29623 		 */
31783859Sml29623 		status = nxge_init_txdma_channel_event_mask(nxgep, channel,
31793859Sml29623 							&tx_ring_p->tx_evmask);
31803859Sml29623 
31813859Sml29623 		tx_ring_p->wr_index_wrap = B_FALSE;
31823859Sml29623 		tx_ring_p->wr_index = 0;
31833859Sml29623 		tx_ring_p->rd_index = 0;
31843859Sml29623 
31853859Sml29623 		if (status != NXGE_OK)
31863859Sml29623 			goto fail;
31873859Sml29623 		if (status != NXGE_OK)
31883859Sml29623 			goto fail;
31893859Sml29623 	}
31903859Sml29623 
31913859Sml29623 	/*
31923859Sml29623 	 * Load TXDMA descriptors, buffers, mailbox,
31933859Sml29623 	 * initialise the DMA channels and
31943859Sml29623 	 * enable each DMA channel.
31953859Sml29623 	 */
31963859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-enable all DMA channels..."));
31973859Sml29623 
31983859Sml29623 	for (i = 0; i < ndmas; i++) {
31993859Sml29623 		if (tx_desc_rings[i] == NULL) {
32003859Sml29623 			continue;
32013859Sml29623 		}
32023859Sml29623 		channel = tx_desc_rings[i]->tdc;
32033859Sml29623 		tx_ring_p = tx_rings->rings[i];
32043859Sml29623 		tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
32053859Sml29623 		status = nxge_enable_txdma_channel(nxgep, channel,
32063859Sml29623 						tx_ring_p, tx_mbox_p);
32073859Sml29623 		if (status != NXGE_OK)
32083859Sml29623 			goto fail;
32093859Sml29623 	}
32103859Sml29623 
32113859Sml29623 	for (i = 0; i < ndmas; i++) {
32123859Sml29623 		if (tx_desc_rings[i] == NULL) {
32133859Sml29623 			continue;
32143859Sml29623 		}
32153859Sml29623 		tx_ring_p = tx_rings->rings[i];
32163859Sml29623 		MUTEX_EXIT(&tx_ring_p->lock);
32173859Sml29623 	}
32183859Sml29623 
32193859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32203859Sml29623 			"Recovery Successful, TxPort Restored"));
32213859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
32223859Sml29623 
32233859Sml29623 	return (NXGE_OK);
32243859Sml29623 
32253859Sml29623 fail:
32263859Sml29623 	for (i = 0; i < ndmas; i++) {
32273859Sml29623 		if (tx_desc_rings[i] == NULL) {
32283859Sml29623 			continue;
32293859Sml29623 		}
32303859Sml29623 		tx_ring_p = tx_rings->rings[i];
32313859Sml29623 		MUTEX_EXIT(&tx_ring_p->lock);
32323859Sml29623 	}
32333859Sml29623 
32343859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
32353859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
32363859Sml29623 		"nxge_txdma_fatal_err_recover (channel %d): "
32373859Sml29623 		"failed to recover this txdma channel"));
32383859Sml29623 
32393859Sml29623 	return (status);
32403859Sml29623 }
32413859Sml29623 
32423859Sml29623 void
32433859Sml29623 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
32443859Sml29623 {
32453859Sml29623 	tdmc_intr_dbg_t		tdi;
32463859Sml29623 	tdmc_inj_par_err_t	par_err;
32473859Sml29623 	uint32_t		value;
32483859Sml29623 	npi_handle_t		handle;
32493859Sml29623 
32503859Sml29623 	switch (err_id) {
32513859Sml29623 
32523859Sml29623 	case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
32533859Sml29623 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
32543859Sml29623 		/* Clear error injection source for parity error */
32553859Sml29623 		(void) npi_txdma_inj_par_error_get(handle, &value);
32563859Sml29623 		par_err.value = value;
32573859Sml29623 		par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
32583859Sml29623 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
32593859Sml29623 
32603859Sml29623 		par_err.bits.ldw.inject_parity_error = (1 << chan);
32613859Sml29623 		(void) npi_txdma_inj_par_error_get(handle, &value);
32623859Sml29623 		par_err.value = value;
32633859Sml29623 		par_err.bits.ldw.inject_parity_error |= (1 << chan);
32643859Sml29623 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
32653859Sml29623 				(unsigned long long)par_err.value);
32663859Sml29623 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
32673859Sml29623 		break;
32683859Sml29623 
32693859Sml29623 	case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
32703859Sml29623 	case NXGE_FM_EREPORT_TDMC_NACK_PREF:
32713859Sml29623 	case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
32723859Sml29623 	case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
32733859Sml29623 	case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
32743859Sml29623 	case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
32753859Sml29623 	case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
32763859Sml29623 		TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
32773859Sml29623 			chan, &tdi.value);
32783859Sml29623 		if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
32793859Sml29623 			tdi.bits.ldw.pref_buf_par_err = 1;
32803859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
32813859Sml29623 			tdi.bits.ldw.mbox_err = 1;
32823859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
32833859Sml29623 			tdi.bits.ldw.nack_pref = 1;
32843859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
32853859Sml29623 			tdi.bits.ldw.nack_pkt_rd = 1;
32863859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
32873859Sml29623 			tdi.bits.ldw.pkt_size_err = 1;
32883859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
32893859Sml29623 			tdi.bits.ldw.tx_ring_oflow = 1;
32903859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
32913859Sml29623 			tdi.bits.ldw.conf_part_err = 1;
32923859Sml29623 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
32933859Sml29623 			tdi.bits.ldw.pkt_part_err = 1;
32945125Sjoycey #if defined(__i386)
32955125Sjoycey 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
32965125Sjoycey 				tdi.value);
32975125Sjoycey #else
32983859Sml29623 		cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
32993859Sml29623 				tdi.value);
33005125Sjoycey #endif
33013859Sml29623 		TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
33023859Sml29623 			chan, tdi.value);
33033859Sml29623 
33043859Sml29623 		break;
33053859Sml29623 	}
33063859Sml29623 }
3307